Mirror of https://github.com/moghtech/komodo.git (synced 2025-12-05 19:17:36 -06:00)
1.18.4 (#604)
* update easy deps
* update otel deps
* implement template in types + update resource meta
* ts types
* dev-2
* dev-3 default template query is include
* Toggle resource is template in resource header
* dev-4 support CopyServer
* gen ts
* style template selector in New Resource menu
* fix new menu show 0
* add template market in omni search bar
* fix some dynamic import behavior
* template badge on dashboard
* dev-5
* standardize interpolation methods with nice api
* core use new interpolation methods
* refactor git usage
* dev-6 refactor interpolation / git methods
* fix pull stack passed replacers
* new types
* remove redundant interpolation for build secret args
* clean up periphery docker client
* dev-7 include ports in container summary, see if they actually come through
* show container ports in container table
* refresh processes without tasks (more efficient)
* dev-8 keep container stats cache, include with ContainerListItem
* gen types
* display more container ports
* dev-9 fix repo clone when repo doesn't exist initially
* Add ports display to more spots
* fix function name
* add Periphery full container stats api, may be used later
* server container stats list
* dev-10
* 1.18.4 release
* Use reset instead of invalidate to fix GetUser spam on token expiry (#618)

---------

Co-authored-by: Jacky Fong <hello@huzky.dev>
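The recurring change across the Rust hunks in this commit replaces the free-standing interpolate_variables_secrets_into_* helpers with a single Interpolator type from the new lib/interpolate crate. A minimal sketch of the new call-site pattern, reconstructed only from the hunks below (the crate's internals are not part of this diff, so the exact signatures are inferred):

// Sketch only: names are taken from the call sites in this diff; the
// definitions live in the new lib/interpolate crate, which is not shown here.
let VariablesAndSecrets { variables, secrets } =
  get_variables_and_secrets().await?;

// One interpolator carries the variable/secret maps and accumulates
// replacers and logs across every field it touches.
let mut interpolator = Interpolator::new(Some(&variables), &secrets);

interpolator
  .interpolate_string(&mut url_interpolated)?
  .push_logs(&mut update.logs);

// The collected secret replacers are later used to scrub secret values
// out of error output before logging.
let replacers = interpolator
  .secret_replacers
  .into_iter()
  .collect::<Vec<_>>();
let sanitized = svi::replace_in_string(&format!("{e:?}"), &replacers);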
223
Cargo.lock
generated
@@ -165,9 +165,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
 
 [[package]]
 name = "aws-config"
-version = "1.6.3"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "02a18fd934af6ae7ca52410d4548b98eb895aab0f1ea417d168d85db1434a141"
+checksum = "455e9fb7743c6f6267eb2830ccc08686fbb3d13c9a689369562fd4d4ef9ea462"
 dependencies = [
  "aws-credential-types",
  "aws-runtime",
@@ -230,9 +230,9 @@ dependencies = [
 
 [[package]]
 name = "aws-runtime"
-version = "1.5.7"
+version = "1.5.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c4063282c69991e57faab9e5cb21ae557e59f5b0fb285c196335243df8dc25c"
+checksum = "4f6c68419d8ba16d9a7463671593c54f81ba58cab466e9b759418da606dcc2e2"
 dependencies = [
  "aws-credential-types",
  "aws-sigv4",
@@ -254,9 +254,9 @@ dependencies = [
 
 [[package]]
 name = "aws-sdk-ec2"
-version = "1.134.0"
+version = "1.139.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9a84e95f739e79d157409fa00e41008dabd181022193dabfabc68ddccbd6055"
+checksum = "3b92e3d00f89108bc36102fcb4f23fb0b59ee12a6dc62156c27ce40e42337127"
 dependencies = [
  "aws-credential-types",
  "aws-runtime",
@@ -277,9 +277,9 @@ dependencies = [
 
 [[package]]
 name = "aws-sdk-sso"
-version = "1.70.0"
+version = "1.73.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83447efb7179d8e2ad2afb15ceb9c113debbc2ecdf109150e338e2e28b86190b"
+checksum = "b2ac1674cba7872061a29baaf02209fefe499ff034dfd91bd4cc59e4d7741489"
 dependencies = [
  "aws-credential-types",
  "aws-runtime",
@@ -299,9 +299,9 @@ dependencies = [
 
 [[package]]
 name = "aws-sdk-ssooidc"
-version = "1.71.0"
+version = "1.74.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5f9bfbbda5e2b9fe330de098f14558ee8b38346408efe9f2e9cee82dc1636a4"
+checksum = "3a6a22f077f5fd3e3c0270d4e1a110346cddf6769e9433eb9e6daceb4ca3b149"
 dependencies = [
  "aws-credential-types",
  "aws-runtime",
@@ -321,9 +321,9 @@ dependencies = [
 
 [[package]]
 name = "aws-sdk-sts"
-version = "1.71.0"
+version = "1.74.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e17b984a66491ec08b4f4097af8911251db79296b3e4a763060b45805746264f"
+checksum = "19d440e1d368759bd10df0dbdddbfff6473d7cd73e9d9ef2363dc9995ac2d711"
 dependencies = [
  "aws-credential-types",
  "aws-runtime",
@@ -344,9 +344,9 @@ dependencies = [
 
 [[package]]
 name = "aws-sigv4"
-version = "1.3.1"
+version = "1.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3503af839bd8751d0bdc5a46b9cac93a003a353e635b0c12cf2376b5b53e41ea"
+checksum = "ddfb9021f581b71870a17eac25b52335b82211cdc092e02b6876b2bcefa61666"
 dependencies = [
  "aws-credential-types",
  "aws-smithy-http",
@@ -419,15 +419,15 @@ dependencies = [
  "rustls-native-certs 0.8.1",
  "rustls-pki-types",
  "tokio",
- "tower 0.5.2",
+ "tower",
  "tracing",
 ]
 
 [[package]]
 name = "aws-smithy-json"
-version = "0.61.3"
+version = "0.61.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92144e45819cae7dc62af23eac5a038a58aa544432d2102609654376a900bd07"
+checksum = "a16e040799d29c17412943bdbf488fd75db04112d0c0d4b9290bacf5ae0014b9"
 dependencies = [
  "aws-smithy-types",
 ]
@@ -477,9 +477,9 @@ dependencies = [
 
 [[package]]
 name = "aws-smithy-runtime-api"
-version = "1.8.0"
+version = "1.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1e5d9e3a80a18afa109391fb5ad09c3daf887b516c6fd805a157c6ea7994a57"
+checksum = "bd8531b6d8882fd8f48f82a9754e682e29dd44cff27154af51fa3eb730f59efb"
 dependencies = [
  "aws-smithy-async",
  "aws-smithy-types",
@@ -494,9 +494,9 @@ dependencies = [
 
 [[package]]
 name = "aws-smithy-types"
-version = "1.3.1"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40076bd09fadbc12d5e026ae080d0930defa606856186e31d83ccc6a255eeaf3"
+checksum = "d498595448e43de7f4296b7b7a18a8a02c61ec9349128c80a368f7c3b4ab11a8"
 dependencies = [
  "base64-simd",
  "bytes",
@@ -520,9 +520,9 @@ dependencies = [
 
 [[package]]
 name = "aws-smithy-xml"
-version = "0.60.9"
+version = "0.60.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc"
+checksum = "3db87b96cb1b16c024980f133968d52882ca0daaee3a086c6decc500f6c99728"
 dependencies = [
  "xmlparser",
 ]
@@ -573,7 +573,7 @@ dependencies = [
  "sync_wrapper",
  "tokio",
  "tokio-tungstenite 0.26.2",
- "tower 0.5.2",
+ "tower",
  "tower-layer",
  "tower-service",
  "tracing",
@@ -617,7 +617,7 @@ dependencies = [
  "pin-project-lite",
  "rustversion",
  "serde",
- "tower 0.5.2",
+ "tower",
  "tower-layer",
  "tower-service",
 ]
@@ -791,9 +791,9 @@ dependencies = [
 
 [[package]]
 name = "bollard"
-version = "0.19.0"
+version = "0.19.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af706e9dc793491dd382c99c22fde6e9934433d4cc0d6a4b34eb2cdc57a5c917"
+checksum = "899ca34eb6924d6ec2a77c6f7f5c7339e60fd68235eaf91edd5a15f12958bb06"
 dependencies = [
  "base64 0.22.1",
  "bollard-stubs",
@@ -824,9 +824,9 @@ dependencies = [
 
 [[package]]
 name = "bollard-stubs"
-version = "1.48.2-rc.28.0.4"
+version = "1.48.3-rc.28.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79cdf0fccd5341b38ae0be74b74410bdd5eceeea8876dc149a13edfe57e3b259"
+checksum = "64ea257e555d16a2c01e5593f40b73865cdf12efbceda33c6d14a2d8d1490368"
 dependencies = [
  "serde",
  "serde_json",
@@ -890,7 +890,7 @@ dependencies = [
 
 [[package]]
 name = "cache"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "tokio",
@@ -993,9 +993,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.5.38"
+version = "4.5.40"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000"
+checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -1003,9 +1003,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.38"
+version = "4.5.40"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120"
+checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e"
 dependencies = [
  "anstream",
  "anstyle",
@@ -1015,9 +1015,9 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "4.5.32"
+version = "4.5.40"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7"
+checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce"
 dependencies = [
  "heck",
  "proc-macro2",
@@ -1057,10 +1057,8 @@ dependencies = [
 
 [[package]]
 name = "command"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "formatting",
  "komodo_client",
  "run_command",
  "svi",
@@ -1539,9 +1537,19 @@ dependencies = [
  "syn 2.0.100",
 ]
 
+[[package]]
+name = "environment"
+version = "1.18.4"
+dependencies = [
+ "anyhow",
+ "formatting",
+ "komodo_client",
+ "tokio",
+]
+
 [[package]]
 name = "environment_file"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "thiserror 2.0.12",
 ]
@@ -1621,7 +1629,7 @@ dependencies = [
 
 [[package]]
 name = "formatting"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "serror",
 ]
@@ -1783,7 +1791,7 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
 
 [[package]]
 name = "git"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "cache",
@@ -1791,7 +1799,6 @@ dependencies = [
  "formatting",
  "komodo_client",
  "run_command",
- "svi",
  "tokio",
  "tracing",
 ]
@@ -2432,6 +2439,15 @@ dependencies = [
  "web-sys",
 ]
 
+[[package]]
+name = "interpolate"
+version = "1.18.4"
+dependencies = [
+ "anyhow",
+ "komodo_client",
+ "svi",
+]
+
 [[package]]
 name = "ipconfig"
 version = "0.3.2"
@@ -2536,7 +2552,7 @@ dependencies = [
 
 [[package]]
 name = "komodo_cli"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "clap",
@@ -2552,7 +2568,7 @@ dependencies = [
 
 [[package]]
 name = "komodo_client"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "async_timing_util",
@@ -2584,7 +2600,7 @@ dependencies = [
 
 [[package]]
 name = "komodo_core"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "arc-swap",
@@ -2614,6 +2630,7 @@ dependencies = [
  "hex",
  "hmac",
  "indexmap 2.9.0",
+ "interpolate",
  "jsonwebtoken",
  "komodo_client",
  "logger",
@@ -2653,9 +2670,10 @@ dependencies = [
 
 [[package]]
 name = "komodo_periphery"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
+ "arc-swap",
  "async_timing_util",
  "axum",
  "axum-server",
@@ -2666,11 +2684,13 @@ dependencies = [
  "command",
  "derive_variants",
  "dotenvy",
+ "environment",
  "environment_file",
  "envy",
  "formatting",
  "futures",
  "git",
+ "interpolate",
  "komodo_client",
  "logger",
  "merge_config_files",
@@ -2686,7 +2706,6 @@ dependencies = [
  "serde_json",
  "serde_yaml",
  "serror",
- "svi",
  "sysinfo",
  "tokio",
  "tokio-stream",
@@ -2697,7 +2716,7 @@ dependencies = [
 
 [[package]]
 name = "komodo_util"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "dotenvy",
@@ -2786,7 +2805,7 @@ dependencies = [
 
 [[package]]
 name = "logger"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "komodo_client",
@@ -3279,9 +3298,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
 
 [[package]]
 name = "opentelemetry"
-version = "0.29.1"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c"
+checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6"
 dependencies = [
  "futures-core",
  "futures-sink",
@@ -3293,25 +3312,23 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-http"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed"
+checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d"
 dependencies = [
  "async-trait",
  "bytes",
  "http 1.3.1",
  "opentelemetry",
  "reqwest",
  "tracing",
 ]
 
 [[package]]
 name = "opentelemetry-otlp"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656"
+checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b"
 dependencies = [
  "futures-core",
  "http 1.3.1",
  "opentelemetry",
  "opentelemetry-http",
@@ -3326,9 +3343,9 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-proto"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3"
+checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc"
 dependencies = [
  "opentelemetry",
  "opentelemetry_sdk",
@@ -3338,20 +3355,19 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-semantic-conventions"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84b29a9f89f1a954936d5aa92f19b2feec3c8f3971d3e96206640db7f9706ae3"
+checksum = "83d059a296a47436748557a353c5e6c5705b9470ef6c95cfc52c21a8814ddac2"
 
 [[package]]
 name = "opentelemetry_sdk"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b"
+checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b"
 dependencies = [
  "futures-channel",
  "futures-executor",
  "futures-util",
- "glob",
  "opentelemetry",
  "percent-encoding",
  "rand 0.9.1",
@@ -3359,7 +3375,6 @@ dependencies = [
  "thiserror 2.0.12",
  "tokio",
  "tokio-stream",
- "tracing",
 ]
 
 [[package]]
@@ -3541,7 +3556,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
 
 [[package]]
 name = "periphery_client"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "komodo_client",
@@ -3958,7 +3973,7 @@ dependencies = [
  "tokio",
  "tokio-rustls 0.26.2",
  "tokio-util",
- "tower 0.5.2",
+ "tower",
  "tower-http",
  "tower-service",
  "url",
@@ -4065,7 +4080,7 @@ dependencies = [
 
 [[package]]
 name = "response"
-version = "1.18.3"
+version = "1.18.4"
 dependencies = [
  "anyhow",
  "axum",
@@ -4514,9 +4529,9 @@ dependencies = [
 
 [[package]]
 name = "serde_spanned"
-version = "0.6.8"
+version = "0.6.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1"
+checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
 dependencies = [
  "serde",
 ]
@@ -4828,11 +4843,11 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
 
 [[package]]
 name = "svi"
-version = "1.0.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ecfc625e6292e18db65d1a122cb1ea014dd909622ab0154f068a3990413ff017"
+checksum = "e8d4d1d576ce8f4b01ac90fd0d9f4c95bf5cebbc9c08e89499b152bef770cd98"
 dependencies = [
- "thiserror 1.0.69",
+ "thiserror 2.0.12",
 ]
 
 [[package]]
@@ -4879,9 +4894,9 @@ dependencies = [
 
 [[package]]
 name = "sysinfo"
-version = "0.35.1"
+version = "0.35.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79251336d17c72d9762b8b54be4befe38d2db56fbbc0241396d70f173c39d47a"
+checksum = "3c3ffa3e4ff2b324a57f7aeb3c349656c7b127c3c189520251a648102a92496e"
 dependencies = [
  "libc",
  "memchr",
@@ -5144,9 +5159,9 @@ dependencies = [
 
 [[package]]
 name = "toml"
-version = "0.8.22"
+version = "0.8.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae"
+checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
 dependencies = [
  "serde",
  "serde_spanned",
@@ -5156,18 +5171,18 @@ dependencies = [
 
 [[package]]
 name = "toml_datetime"
-version = "0.6.9"
+version = "0.6.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3"
+checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
 dependencies = [
  "serde",
 ]
 
 [[package]]
 name = "toml_edit"
-version = "0.22.26"
+version = "0.22.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
+checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
 dependencies = [
  "indexmap 2.9.0",
  "serde",
@@ -5191,15 +5206,15 @@ dependencies = [
 
 [[package]]
 name = "toml_write"
-version = "0.1.1"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076"
+checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
 
 [[package]]
 name = "tonic"
-version = "0.12.3"
+version = "0.13.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52"
+checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9"
 dependencies = [
  "async-trait",
  "base64 0.22.1",
@@ -5214,31 +5229,10 @@ dependencies = [
  "pin-project",
  "prost",
- "rustls-native-certs 0.8.1",
- "rustls-pemfile 2.2.0",
  "tokio",
- "tokio-rustls 0.26.2",
  "tokio-stream",
- "tower 0.4.13",
+ "tower",
  "tower-layer",
  "tower-service",
  "tracing",
 ]
 
-[[package]]
-name = "tower"
-version = "0.4.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
-dependencies = [
- "futures-core",
- "futures-util",
- "indexmap 1.9.3",
- "pin-project",
- "pin-project-lite",
- "rand 0.8.5",
- "slab",
- "tokio",
- "tokio-util",
- "tower-layer",
- "tower-service",
- "tracing",
-]
@@ -5252,9 +5246,12 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
 dependencies = [
  "futures-core",
  "futures-util",
+ "indexmap 2.9.0",
  "pin-project-lite",
+ "slab",
  "sync_wrapper",
  "tokio",
+ "tokio-util",
  "tower-layer",
  "tower-service",
  "tracing",
@@ -5282,7 +5279,7 @@ dependencies = [
  "pin-project-lite",
  "tokio",
  "tokio-util",
- "tower 0.5.2",
+ "tower",
  "tower-layer",
  "tower-service",
  "tracing",
@@ -5346,9 +5343,9 @@ dependencies = [
 
 [[package]]
 name = "tracing-opentelemetry"
-version = "0.30.0"
+version = "0.31.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444"
+checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c"
 dependencies = [
  "js-sys",
  "once_cell",
@@ -6072,9 +6069,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
 
 [[package]]
 name = "winnow"
-version = "0.7.7"
+version = "0.7.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6cb8234a863ea0e8cd7284fcdd4f145233eb00fee02bbdd9861aec44e6477bc5"
+checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd"
 dependencies = [
  "memchr",
 ]
28
Cargo.toml
@@ -8,7 +8,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.18.3"
+version = "1.18.4"
 edition = "2024"
 authors = ["mbecker20 <becker.maxh@gmail.com>"]
 license = "GPL-3.0-or-later"
@@ -20,6 +20,8 @@ homepage = "https://komo.do"
 komodo_client = { path = "client/core/rs" }
 periphery_client = { path = "client/periphery/rs" }
 environment_file = { path = "lib/environment_file" }
+environment = { path = "lib/environment" }
+interpolate = { path = "lib/interpolate" }
 formatting = { path = "lib/formatting" }
 response = { path = "lib/response" }
 command = { path = "lib/command" }
@@ -41,7 +43,7 @@ mongo_indexed = "2.0.1"
 resolver_api = "3.0.0"
 toml_pretty = "1.1.2"
 mungos = "3.2.0"
-svi = "1.0.1"
+svi = "1.1.0"
 
 # ASYNC
 reqwest = { version = "0.12.20", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
@@ -67,23 +69,23 @@ strum = { version = "0.27.1", features = ["derive"] }
 serde_json = "1.0.140"
 serde_yaml = "0.9.34"
 serde_qs = "0.15.0"
-toml = "0.8.22"
+toml = "0.8.23"
 
 # ERROR
 anyhow = "1.0.98"
 thiserror = "2.0.12"
 
 # LOGGING
-opentelemetry-otlp = { version = "0.29.0", features = ["tls-roots", "reqwest-rustls"] }
-opentelemetry_sdk = { version = "0.29.0", features = ["rt-tokio"] }
+opentelemetry-otlp = { version = "0.30.0", features = ["tls-roots", "reqwest-rustls"] }
+opentelemetry_sdk = { version = "0.30.0", features = ["rt-tokio"] }
 tracing-subscriber = { version = "0.3.19", features = ["json"] }
-opentelemetry-semantic-conventions = "0.29.0"
-tracing-opentelemetry = "0.30.0"
-opentelemetry = "0.29.1"
+opentelemetry-semantic-conventions = "0.30.0"
+tracing-opentelemetry = "0.31.0"
+opentelemetry = "0.30.0"
 tracing = "0.1.41"
 
 # CONFIG
-clap = { version = "4.5.38", features = ["derive"] }
+clap = { version = "4.5.40", features = ["derive"] }
 dotenvy = "0.15.7"
 envy = "0.4.2"
 
@@ -103,12 +105,12 @@ hex = "0.4.3"
 
 # SYSTEM
 portable-pty = "0.9.0"
-bollard = "0.19.0"
-sysinfo = "0.35.1"
+bollard = "0.19.1"
+sysinfo = "0.35.2"
 
 # CLOUD
-aws-config = "1.6.3"
-aws-sdk-ec2 = "1.134.0"
+aws-config = "1.8.0"
+aws-sdk-ec2 = "1.139.0"
 aws-credential-types = "1.2.3"
 
 ## CRON
@@ -18,6 +18,7 @@ path = "src/main.rs"
 komodo_client = { workspace = true, features = ["mongo"] }
 periphery_client.workspace = true
 environment_file.workspace = true
+interpolate.workspace = true
 formatting.workspace = true
 response.workspace = true
 command.workspace = true
@@ -210,24 +210,22 @@ pub async fn send_alert(
     AlertData::None {} => Default::default(),
   };
   if !content.is_empty() {
-    let vars_and_secrets = get_variables_and_secrets().await?;
-    let mut global_replacers = HashSet::new();
-    let mut secret_replacers = HashSet::new();
+    let VariablesAndSecrets { variables, secrets } =
+      get_variables_and_secrets().await?;
     let mut url_interpolated = url.to_string();
 
-    // interpolate variables and secrets into the url
-    interpolate_variables_secrets_into_string(
-      &vars_and_secrets,
-      &mut url_interpolated,
-      &mut global_replacers,
-      &mut secret_replacers,
-    )?;
+    let mut interpolator =
+      Interpolator::new(Some(&variables), &secrets);
+
+    interpolator.interpolate_string(&mut url_interpolated)?;
 
     send_message(&url_interpolated, &content)
       .await
       .map_err(|e| {
-        let replacers =
-          secret_replacers.into_iter().collect::<Vec<_>>();
+        let replacers = interpolator
+          .secret_replacers
+          .into_iter()
+          .collect::<Vec<_>>();
         let sanitized_error =
           svi::replace_in_string(&format!("{e:?}"), &replacers);
         anyhow::Error::msg(format!(
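The replacers collected by the interpolator are pairs used to scrub secret values out of the debug-formatted error before it is logged, so secrets never reach alerting output. A hedged illustration of the sanitizing step from the map_err block above (the concrete pair is invented for the example, and the pair order, secret value first and placeholder second, is an assumption about svi's API):

// Hypothetical replacer pair, for illustration only.
let replacers: Vec<(String, String)> = vec![(
  String::from("hunter2-token"),
  String::from("[[WEBHOOK_SECRET]]"),
)];
// Same call shape as the hunk above: every occurrence of the secret value
// in the error string is replaced before logging.
let sanitized_error =
  svi::replace_in_string(&format!("{e:?}"), &replacers);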
@@ -2,6 +2,7 @@ use ::slack::types::Block;
 use anyhow::{Context, anyhow};
 use derive_variants::ExtractVariant;
 use futures::future::join_all;
+use interpolate::Interpolator;
 use komodo_client::entities::{
   ResourceTargetVariant,
   alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
@@ -11,13 +12,11 @@ use komodo_client::entities::{
   stack::StackState,
 };
 use mungos::{find::find_collect, mongodb::bson::doc};
-use std::collections::HashSet;
 use tracing::Instrument;
 
 use crate::helpers::query::get_variables_and_secrets;
 use crate::helpers::{
-  interpolate::interpolate_variables_secrets_into_string,
-  maintenance::is_in_maintenance,
+  maintenance::is_in_maintenance, query::VariablesAndSecrets,
 };
 use crate::{config::core_config, state::db_client};
@@ -167,18 +166,14 @@ async fn send_custom_alert(
   url: &str,
   alert: &Alert,
 ) -> anyhow::Result<()> {
-  let vars_and_secrets = get_variables_and_secrets().await?;
-  let mut global_replacers = HashSet::new();
-  let mut secret_replacers = HashSet::new();
+  let VariablesAndSecrets { variables, secrets } =
+    get_variables_and_secrets().await?;
   let mut url_interpolated = url.to_string();
 
-  // interpolate variables and secrets into the url
-  interpolate_variables_secrets_into_string(
-    &vars_and_secrets,
-    &mut url_interpolated,
-    &mut global_replacers,
-    &mut secret_replacers,
-  )?;
+  let mut interpolator =
+    Interpolator::new(Some(&variables), &secrets);
+
+  interpolator.interpolate_string(&mut url_interpolated)?;
 
   let res = reqwest::Client::new()
     .post(url_interpolated)
@@ -186,8 +181,10 @@ async fn send_custom_alert(
     .send()
     .await
     .map_err(|e| {
-      let replacers =
-        secret_replacers.into_iter().collect::<Vec<_>>();
+      let replacers = interpolator
+        .secret_replacers
+        .into_iter()
+        .collect::<Vec<_>>();
       let sanitized_error =
        svi::replace_in_string(&format!("{e:?}"), &replacers);
      anyhow::Error::msg(format!(
@@ -432,23 +432,21 @@ pub async fn send_alert(
     AlertData::None {} => Default::default(),
   };
   if !text.is_empty() {
-    let vars_and_secrets = get_variables_and_secrets().await?;
-    let mut global_replacers = HashSet::new();
-    let mut secret_replacers = HashSet::new();
+    let VariablesAndSecrets { variables, secrets } =
+      get_variables_and_secrets().await?;
     let mut url_interpolated = url.to_string();
 
-    // interpolate variables and secrets into the url
-    interpolate_variables_secrets_into_string(
-      &vars_and_secrets,
-      &mut url_interpolated,
-      &mut global_replacers,
-      &mut secret_replacers,
-    )?;
+    let mut interpolator =
+      Interpolator::new(Some(&variables), &secrets);
+
+    interpolator.interpolate_string(&mut url_interpolated)?;
 
     let slack = ::slack::Client::new(url_interpolated);
     slack.send_message(text, blocks).await.map_err(|e| {
-      let replacers =
-        secret_replacers.into_iter().collect::<Vec<_>>();
+      let replacers = interpolator
+        .secret_replacers
+        .into_iter()
+        .collect::<Vec<_>>();
       let sanitized_error =
         svi::replace_in_string(&format!("{e:?}"), &replacers);
       anyhow::Error::msg(format!(
@@ -7,6 +7,7 @@ use std::{
 
 use anyhow::Context;
 use command::run_komodo_command;
+use interpolate::Interpolator;
 use komodo_client::{
   api::{
     execute::{BatchExecutionResponse, BatchRunAction, RunAction},
@@ -31,11 +32,7 @@ use crate::{
   api::{execute::ExecuteRequest, user::UserArgs},
   config::core_config,
   helpers::{
-    interpolate::{
-      add_interp_update_log,
-      interpolate_variables_secrets_into_string,
-    },
-    query::get_variables_and_secrets,
+    query::{VariablesAndSecrets, get_variables_and_secrets},
     random_string,
     update::update_update,
   },
@@ -221,28 +218,22 @@ async fn interpolate(
   key: String,
   secret: String,
 ) -> serror::Result<HashSet<(String, String)>> {
-  let mut vars_and_secrets = get_variables_and_secrets().await?;
+  let VariablesAndSecrets {
+    variables,
+    mut secrets,
+  } = get_variables_and_secrets().await?;
 
-  vars_and_secrets
-    .secrets
-    .insert(String::from("ACTION_API_KEY"), key);
-  vars_and_secrets
-    .secrets
-    .insert(String::from("ACTION_API_SECRET"), secret);
+  secrets.insert(String::from("ACTION_API_KEY"), key);
+  secrets.insert(String::from("ACTION_API_SECRET"), secret);
 
-  let mut global_replacers = HashSet::new();
-  let mut secret_replacers = HashSet::new();
+  let mut interpolator =
+    Interpolator::new(Some(&variables), &secrets);
 
-  interpolate_variables_secrets_into_string(
-    &vars_and_secrets,
-    contents,
-    &mut global_replacers,
-    &mut secret_replacers,
-  )?;
+  interpolator
+    .interpolate_string(contents)?
+    .push_logs(&mut update.logs);
 
-  add_interp_update_log(update, &global_replacers, &secret_replacers);
-
-  Ok(secret_replacers)
+  Ok(interpolator.secret_replacers)
 }
 
 fn full_contents(contents: &str, key: &str, secret: &str) -> String {
@@ -1,8 +1,9 @@
-use std::{collections::HashSet, future::IntoFuture, time::Duration};
+use std::{future::IntoFuture, time::Duration};
 
 use anyhow::{Context, anyhow};
 use formatting::format_serror;
 use futures::future::join_all;
+use interpolate::Interpolator;
 use komodo_client::{
   api::execute::{
     BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
@@ -39,13 +40,10 @@ use crate::{
     build_git_token,
     builder::{cleanup_builder_instance, get_builder_periphery},
     channel::build_cancel_channel,
-    interpolate::{
-      add_interp_update_log,
-      interpolate_variables_secrets_into_extra_args,
-      interpolate_variables_secrets_into_string,
-      interpolate_variables_secrets_into_system_command,
-    },
-    query::{get_deployment_state, get_variables_and_secrets},
+    query::{
+      VariablesAndSecrets, get_deployment_state,
+      get_variables_and_secrets,
+    },
     registry_token,
     update::{init_execution_update, update_update},
   },
@@ -99,9 +97,13 @@ impl Resolve<ExecuteArgs> for RunBuild {
       None
     };
 
-    let mut vars_and_secrets = get_variables_and_secrets().await?;
+    let VariablesAndSecrets {
+      mut variables,
+      secrets,
+    } = get_variables_and_secrets().await?;
 
     // Add the $VERSION to variables. Use with [[$VERSION]]
-    vars_and_secrets.variables.insert(
+    variables.insert(
       String::from("$VERSION"),
       build.config.version.to_string(),
     );
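The comment in the hunk above documents the user-facing syntax: the build's version is injected as a $VERSION variable, referenced as [[$VERSION]] inside configuration strings. A small hypothetical illustration reusing the names from these hunks (the build-arg string is invented for the example):

// Sketch only. After
//   variables.insert(String::from("$VERSION"), build.config.version.to_string());
// a user-supplied value such as this hypothetical build arg:
let mut build_args = String::from("IMAGE_TAG=[[$VERSION]]");
interpolator.interpolate_string(&mut build_args)?;
// build_args now reads e.g. "IMAGE_TAG=1.18.4" -- [[...]] being the
// variable delimiter the comment refers to.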
@@ -207,51 +209,18 @@ impl Resolve<ExecuteArgs> for RunBuild {
 
     // INTERPOLATE VARIABLES
     let secret_replacers = if !build.config.skip_secret_interp {
-      let mut global_replacers = HashSet::new();
-      let mut secret_replacers = HashSet::new();
+      let mut interpolator =
+        Interpolator::new(Some(&variables), &secrets);
 
-      interpolate_variables_secrets_into_system_command(
-        &vars_and_secrets,
-        &mut build.config.pre_build,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
+      interpolator.interpolate_build(&mut build)?;
 
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut build.config.build_args,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
+      if let Some(repo) = repo.as_mut() {
+        interpolator.interpolate_repo(repo)?;
+      }
 
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut build.config.secret_args,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
+      interpolator.push_logs(&mut update.logs);
 
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut build.config.dockerfile,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_extra_args(
-        &vars_and_secrets,
-        &mut build.config.extra_args,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      add_interp_update_log(
-        &mut update,
-        &global_replacers,
-        &secret_replacers,
-      );
-
-      secret_replacers
+      interpolator.secret_replacers
     } else {
       Default::default()
     };
@@ -268,6 +237,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
       git_token,
+      environment: Default::default(),
+      env_file_path: Default::default(),
       on_clone: None,
       on_pull: None,
       skip_secret_interp: Default::default(),
       replacers: Default::default(),
     }) => res,
@@ -284,10 +255,10 @@ impl Resolve<ExecuteArgs> for RunBuild {
     let commit_message = match res {
       Ok(res) => {
         debug!("finished repo clone");
-        update.logs.extend(res.logs);
+        update.logs.extend(res.res.logs);
         update.commit_hash =
-          res.commit_hash.unwrap_or_default().to_string();
-        res.commit_message.unwrap_or_default()
+          res.res.commit_hash.unwrap_or_default().to_string();
+        res.res.commit_message.unwrap_or_default()
       }
       Err(e) => {
         warn!("Failed build at clone repo | {e:#}");
@@ -1,8 +1,9 @@
-use std::{collections::HashSet, sync::OnceLock};
+use std::sync::OnceLock;
 
 use anyhow::{Context, anyhow};
 use cache::TimeoutCache;
 use formatting::format_serror;
+use interpolate::Interpolator;
 use komodo_client::{
   api::execute::*,
   entities::{
@@ -23,13 +24,8 @@ use resolver_api::Resolve;
 
 use crate::{
   helpers::{
-    interpolate::{
-      add_interp_update_log,
-      interpolate_variables_secrets_into_extra_args,
-      interpolate_variables_secrets_into_string,
-    },
     periphery_client,
-    query::get_variables_and_secrets,
+    query::{VariablesAndSecrets, get_variables_and_secrets},
     registry_token,
     update::update_update,
   },
@@ -180,53 +176,17 @@ impl Resolve<ExecuteArgs> for Deploy {
     // interpolate variables / secrets, returning the sanitizing replacers to send to
     // periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
     let secret_replacers = if !deployment.config.skip_secret_interp {
-      let vars_and_secrets = get_variables_and_secrets().await?;
+      let VariablesAndSecrets { variables, secrets } =
+        get_variables_and_secrets().await?;
 
-      let mut global_replacers = HashSet::new();
-      let mut secret_replacers = HashSet::new();
+      let mut interpolator =
+        Interpolator::new(Some(&variables), &secrets);
 
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut deployment.config.environment,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
+      interpolator
+        .interpolate_deployment(&mut deployment)?
+        .push_logs(&mut update.logs);
 
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut deployment.config.ports,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut deployment.config.volumes,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_extra_args(
-        &vars_and_secrets,
-        &mut deployment.config.extra_args,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut deployment.config.command,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      add_interp_update_log(
-        &mut update,
-        &global_replacers,
-        &secret_replacers,
-      );
-
-      secret_replacers
+      interpolator.secret_replacers
     } else {
       Default::default()
     };
@@ -2,6 +2,7 @@ use std::{collections::HashSet, future::IntoFuture, time::Duration};
 
 use anyhow::{Context, anyhow};
 use formatting::format_serror;
+use interpolate::Interpolator;
 use komodo_client::{
   api::{execute::*, write::RefreshRepoCache},
   entities::{
@@ -31,14 +32,8 @@ use crate::{
   helpers::{
     builder::{cleanup_builder_instance, get_builder_periphery},
     channel::repo_cancel_channel,
-    git_token,
-    interpolate::{
-      add_interp_update_log,
-      interpolate_variables_secrets_into_string,
-      interpolate_variables_secrets_into_system_command,
-    },
-    periphery_client,
-    query::get_variables_and_secrets,
+    git_token, periphery_client,
+    query::{VariablesAndSecrets, get_variables_and_secrets},
     update::update_update,
   },
   permission::get_check_permissions,
@@ -123,12 +118,14 @@ impl Resolve<ExecuteArgs> for CloneRepo {
       git_token,
       environment: repo.config.env_vars()?,
       env_file_path: repo.config.env_file_path,
+      on_clone: repo.config.on_clone.into(),
+      on_pull: repo.config.on_pull.into(),
       skip_secret_interp: repo.config.skip_secret_interp,
       replacers: secret_replacers.into_iter().collect(),
     })
     .await
     {
-      Ok(res) => res.logs,
+      Ok(res) => res.res.logs,
       Err(e) => {
         vec![Log::error(
           "Clone Repo",
@@ -156,7 +153,7 @@
       );
     };
 
-    handle_server_update_return(update).await
+    handle_repo_update_return(update).await
   }
 }
@@ -236,14 +233,15 @@ impl Resolve<ExecuteArgs> for PullRepo {
       git_token,
       environment: repo.config.env_vars()?,
       env_file_path: repo.config.env_file_path,
+      on_pull: repo.config.on_pull.into(),
      skip_secret_interp: repo.config.skip_secret_interp,
      replacers: secret_replacers.into_iter().collect(),
     })
     .await
     {
       Ok(res) => {
-        update.commit_hash = res.commit_hash.unwrap_or_default();
-        res.logs
+        update.commit_hash = res.res.commit_hash.unwrap_or_default();
+        res.res.logs
       }
       Err(e) => {
         vec![Log::error(
@@ -273,12 +271,12 @@
       );
     };
 
-    handle_server_update_return(update).await
+    handle_repo_update_return(update).await
   }
 }
 
 #[instrument(skip_all, fields(update_id = update.id))]
-async fn handle_server_update_return(
+async fn handle_repo_update_return(
   update: Update,
 ) -> serror::Result<Update> {
   // Need to manually update the update before cache refresh,
@@ -457,6 +455,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
       git_token,
       environment: repo.config.env_vars()?,
       env_file_path: repo.config.env_file_path,
+      on_clone: repo.config.on_clone.into(),
+      on_pull: repo.config.on_pull.into(),
       skip_secret_interp: repo.config.skip_secret_interp,
       replacers: secret_replacers.into_iter().collect()
     }) => res,
@@ -473,9 +473,10 @@
     let commit_message = match res {
       Ok(res) => {
         debug!("finished repo clone");
-        update.logs.extend(res.logs);
-        update.commit_hash = res.commit_hash.unwrap_or_default();
-        res.commit_message.unwrap_or_default()
+        update.logs.extend(res.res.logs);
+        update.commit_hash = res.res.commit_hash.unwrap_or_default();
+
+        res.res.commit_message.unwrap_or_default()
       }
       Err(e) => {
         update.push_error_log(
@@ -712,39 +713,17 @@ async fn interpolate(
   update: &mut Update,
 ) -> anyhow::Result<HashSet<(String, String)>> {
   if !repo.config.skip_secret_interp {
-    let vars_and_secrets = get_variables_and_secrets().await?;
+    let VariablesAndSecrets { variables, secrets } =
+      get_variables_and_secrets().await?;
 
-    let mut global_replacers = HashSet::new();
-    let mut secret_replacers = HashSet::new();
+    let mut interpolator =
+      Interpolator::new(Some(&variables), &secrets);
 
-    interpolate_variables_secrets_into_string(
-      &vars_and_secrets,
-      &mut repo.config.environment,
-      &mut global_replacers,
-      &mut secret_replacers,
-    )?;
+    interpolator
+      .interpolate_repo(repo)?
+      .push_logs(&mut update.logs);
 
-    interpolate_variables_secrets_into_system_command(
-      &vars_and_secrets,
-      &mut repo.config.on_clone,
-      &mut global_replacers,
-      &mut secret_replacers,
-    )?;
-
-    interpolate_variables_secrets_into_system_command(
-      &vars_and_secrets,
-      &mut repo.config.on_pull,
-      &mut global_replacers,
-      &mut secret_replacers,
-    )?;
-
-    add_interp_update_log(
-      update,
-      &global_replacers,
-      &secret_replacers,
-    );
-
-    Ok(secret_replacers)
+    Ok(interpolator.secret_replacers)
   } else {
     Ok(Default::default())
   }
@@ -1,7 +1,6 @@
-use std::collections::HashSet;
-
 use anyhow::Context;
 use formatting::format_serror;
+use interpolate::Interpolator;
 use komodo_client::{
   api::{execute::*, write::RefreshStackCache},
   entities::{
@@ -19,14 +18,8 @@ use resolver_api::Resolve;
 use crate::{
   api::write::WriteArgs,
   helpers::{
-    interpolate::{
-      add_interp_update_log,
-      interpolate_variables_secrets_into_extra_args,
-      interpolate_variables_secrets_into_string,
-      interpolate_variables_secrets_into_system_command,
-    },
     periphery_client,
-    query::get_variables_and_secrets,
+    query::{VariablesAndSecrets, get_variables_and_secrets},
     stack_git_token,
     update::{add_update_without_send, update_update},
   },
@@ -123,60 +116,21 @@ impl Resolve<ExecuteArgs> for DeployStack {
     // interpolate variables / secrets, returning the sanitizing replacers to send to
     // periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
     let secret_replacers = if !stack.config.skip_secret_interp {
-      let vars_and_secrets = get_variables_and_secrets().await?;
+      let VariablesAndSecrets { variables, secrets } =
+        get_variables_and_secrets().await?;
 
-      let mut global_replacers = HashSet::new();
-      let mut secret_replacers = HashSet::new();
+      let mut interpolator =
+        Interpolator::new(Some(&variables), &secrets);
 
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut stack.config.file_contents,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
+      interpolator.interpolate_stack(&mut stack)?;
+      if let Some(repo) = repo.as_mut() {
+        if !repo.config.skip_secret_interp {
+          interpolator.interpolate_repo(repo)?;
+        }
+      }
+      interpolator.push_logs(&mut update.logs);
 
-      interpolate_variables_secrets_into_string(
-        &vars_and_secrets,
-        &mut stack.config.environment,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_extra_args(
-        &vars_and_secrets,
-        &mut stack.config.extra_args,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_extra_args(
-        &vars_and_secrets,
-        &mut stack.config.build_extra_args,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_system_command(
-        &vars_and_secrets,
-        &mut stack.config.pre_deploy,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      interpolate_variables_secrets_into_system_command(
-        &vars_and_secrets,
-        &mut stack.config.post_deploy,
-        &mut global_replacers,
-        &mut secret_replacers,
-      )?;
-
-      add_interp_update_log(
-        &mut update,
-        &global_replacers,
-        &secret_replacers,
-      );
-
-      secret_replacers
+      interpolator.secret_replacers
     } else {
       Default::default()
    };
@@ -446,33 +400,25 @@ pub async fn pull_stack_inner(
   )?;
 
   // interpolate variables / secrets
-  if !stack.config.skip_secret_interp {
-    let vars_and_secrets = get_variables_and_secrets().await?;
+  let secret_replacers = if !stack.config.skip_secret_interp {
+    let VariablesAndSecrets { variables, secrets } =
+      get_variables_and_secrets().await?;
 
-    let mut global_replacers = HashSet::new();
-    let mut secret_replacers = HashSet::new();
+    let mut interpolator =
+      Interpolator::new(Some(&variables), &secrets);
 
-    interpolate_variables_secrets_into_string(
-      &vars_and_secrets,
-      &mut stack.config.file_contents,
-      &mut global_replacers,
-      &mut secret_replacers,
-    )?;
-
-    interpolate_variables_secrets_into_string(
-      &vars_and_secrets,
-      &mut stack.config.environment,
-      &mut global_replacers,
-      &mut secret_replacers,
-    )?;
-
-    if let Some(update) = update {
-      add_interp_update_log(
-        update,
-        &global_replacers,
-        &secret_replacers,
-      );
-    }
-  }
+    interpolator.interpolate_stack(&mut stack)?;
+    if let Some(repo) = repo.as_mut() {
+      if !repo.config.skip_secret_interp {
+        interpolator.interpolate_repo(repo)?;
+      }
+    }
+    if let Some(update) = update {
+      interpolator.push_logs(&mut update.logs);
+    }
+    interpolator.secret_replacers
+  } else {
+    Default::default()
+  };
 
   let res = periphery_client(server)?
@@ -482,6 +428,7 @@ pub async fn pull_stack_inner(
       repo,
       git_token,
       registry_token,
+      replacers: secret_replacers.into_iter().collect(),
     })
     .await?;
@@ -2,8 +2,11 @@ use futures::future::join_all;
 use komodo_client::{
   api::read::*,
   entities::{
-    ResourceTarget, action::Action, permission::PermissionLevel,
-    procedure::Procedure, resource::ResourceQuery,
+    ResourceTarget,
+    action::Action,
+    permission::PermissionLevel,
+    procedure::Procedure,
+    resource::{ResourceQuery, TemplatesQueryBehavior},
     schedule::Schedule,
   },
 };
@@ -27,6 +30,7 @@ impl Resolve<ReadArgs> for ListSchedules {
     list_full_for_user::<Action>(
       ResourceQuery {
         names: Default::default(),
+        templates: TemplatesQueryBehavior::Include,
         tag_behavior: self.tag_behavior,
         tags: self.tags.clone(),
         specific: Default::default(),
@@ -38,6 +42,7 @@ impl Resolve<ReadArgs> for ListSchedules {
     list_full_for_user::<Procedure>(
       ResourceQuery {
         names: Default::default(),
+        templates: TemplatesQueryBehavior::Include,
         tag_behavior: self.tag_behavior,
         tags: self.tags.clone(),
         specific: Default::default(),
@@ -2,11 +2,11 @@ use std::{path::PathBuf, str::FromStr, time::Duration};
 
 use anyhow::{Context, anyhow};
 use formatting::format_serror;
-use git::GitRes;
 use komodo_client::{
   api::write::*,
   entities::{
-    CloneArgs, FileContents, NoData, Operation, all_logs_success,
+    FileContents, NoData, Operation, RepoExecutionArgs,
+    all_logs_success,
     build::{Build, BuildInfo, PartialBuildConfig},
     builder::{Builder, BuilderConfig},
     config::core::CoreConfig,
@@ -186,7 +186,9 @@ async fn write_dockerfile_contents_git(
 ) -> serror::Result<Update> {
   let WriteBuildFileContents { build: _, contents } = req;
 
-  let mut clone_args: CloneArgs = if !build.config.files_on_host
+  let mut clone_args: RepoExecutionArgs = if !build
+    .config
+    .files_on_host
     && !build.config.linked_repo.is_empty()
   {
     (&crate::resource::get::<Repo>(&build.config.linked_repo).await?)
@@ -252,15 +254,11 @@ async fn write_dockerfile_contents_git(
     clone_args,
     &core_config().repo_directory,
     access_token,
-    Default::default(),
-    Default::default(),
-    Default::default(),
-    Default::default(),
   )
   .await
   .context("Failed to pull latest changes before commit")
   {
-    Ok(res) => update.logs.extend(res.logs),
+    Ok((res, _)) => update.logs.extend(res.logs),
     Err(e) => {
       update.push_error_log("Pull Repo", format_serror(&e.into()));
       update.finalize();
@@ -477,7 +475,7 @@ async fn get_on_host_dockerfile(
 
 async fn get_git_remote(
   build: &Build,
-  mut clone_args: CloneArgs,
+  mut clone_args: RepoExecutionArgs,
 ) -> anyhow::Result<
   Option<(
     Option<String>,
@@ -494,9 +492,6 @@ async fn get_git_remote(
   let config = core_config();
   let repo_path = clone_args.unique_path(&config.repo_directory)?;
   clone_args.destination = Some(repo_path.display().to_string());
-  // Don't want to run these on core.
-  clone_args.on_clone = None;
-  clone_args.on_pull = None;
 
   let access_token = if let Some(username) = &clone_args.account {
     git_token(&clone_args.provider, username, |https| {
@@ -510,14 +505,10 @@ async fn get_git_remote(
     None
   };
 
-  let GitRes { hash, message, .. } = git::pull_or_clone(
+  let (res, _) = git::pull_or_clone(
     clone_args,
     &config.repo_directory,
     access_token,
-    &[],
-    "",
-    None,
-    &[],
   )
   .await
   .context("failed to clone build repo")?;
@@ -538,8 +529,8 @@ async fn get_git_remote(
     Some(relative_path.display().to_string()),
     contents,
     error,
-    hash,
-    message,
+    res.commit_hash,
+    res.commit_message,
   )))
 }
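Both call sites in this file show the shape change in git::pull_or_clone: the four trailing arguments are gone (the environment and on-clone parameters now travel inside RepoExecutionArgs or the periphery request), and the result is a tuple rather than a GitRes struct, with commit info on the first element. A sketch of the assumed new call shape, inferred only from these hunks (the type of the second tuple element is not visible in this diff):

// Inferred from the call sites above; not a definitive signature.
let (res, _) = git::pull_or_clone(
  clone_args,
  &config.repo_directory,
  access_token,
)
.await
.context("failed to clone build repo")?;

// Per the surrounding code, commit info now lives on the first element:
let latest_hash = res.commit_hash;       // feeds latest_hash below
let latest_message = res.commit_message; // feeds latest_message below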
@@ -1,114 +0,0 @@
-use anyhow::anyhow;
-use komodo_client::{
-  api::write::{UpdateDescription, UpdateDescriptionResponse},
-  entities::{
-    ResourceTarget, action::Action, alerter::Alerter, build::Build,
-    builder::Builder, deployment::Deployment, procedure::Procedure,
-    repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
-  },
-};
-use resolver_api::Resolve;
-
-use crate::resource;
-
-use super::WriteArgs;
-
-impl Resolve<WriteArgs> for UpdateDescription {
-  #[instrument(name = "UpdateDescription", skip(user))]
-  async fn resolve(
-    self,
-    WriteArgs { user }: &WriteArgs,
-  ) -> serror::Result<UpdateDescriptionResponse> {
-    match self.target {
-      ResourceTarget::System(_) => {
-        return Err(
-          anyhow!(
-            "cannot update description of System resource target"
-          )
-          .into(),
-        );
-      }
-      ResourceTarget::Server(id) => {
-        resource::update_description::<Server>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Deployment(id) => {
-        resource::update_description::<Deployment>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Build(id) => {
-        resource::update_description::<Build>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Repo(id) => {
-        resource::update_description::<Repo>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Builder(id) => {
-        resource::update_description::<Builder>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Alerter(id) => {
-        resource::update_description::<Alerter>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Procedure(id) => {
-        resource::update_description::<Procedure>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Action(id) => {
-        resource::update_description::<Action>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::ResourceSync(id) => {
-        resource::update_description::<ResourceSync>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-      ResourceTarget::Stack(id) => {
-        resource::update_description::<Stack>(
-          &id,
-          &self.description,
-          user,
-        )
-        .await?;
-      }
-    }
-    Ok(UpdateDescriptionResponse {})
-  }
-}
@@ -23,11 +23,11 @@ mod alerter;
 mod build;
 mod builder;
 mod deployment;
-mod description;
 mod permissions;
 mod procedure;
 mod provider;
 mod repo;
+mod resource;
 mod server;
 mod service_user;
 mod stack;
@@ -77,11 +77,12 @@ pub enum WriteRequest {
   UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
   UpdatePermissionOnTarget(UpdatePermissionOnTarget),
 
-  // ==== DESCRIPTION ====
-  UpdateDescription(UpdateDescription),
+  // ==== RESOURCE ====
+  UpdateResourceMeta(UpdateResourceMeta),
 
   // ==== SERVER ====
   CreateServer(CreateServer),
+  CopyServer(CopyServer),
   DeleteServer(DeleteServer),
   UpdateServer(UpdateServer),
   RenameServer(RenameServer),
@@ -175,7 +176,6 @@ pub enum WriteRequest {
   DeleteTag(DeleteTag),
   RenameTag(RenameTag),
   UpdateTagColor(UpdateTagColor),
-  UpdateTagsOnResource(UpdateTagsOnResource),
 
   // ==== VARIABLE ====
   CreateVariable(CreateVariable),
@@ -1,10 +1,9 @@
 use anyhow::{Context, anyhow};
 use formatting::format_serror;
-use git::GitRes;
 use komodo_client::{
   api::write::*,
   entities::{
-    CloneArgs, NoData, Operation,
+    NoData, Operation, RepoExecutionArgs,
     config::core::CoreConfig,
     komodo_timestamp,
     permission::PermissionLevel,
@@ -183,13 +182,10 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
     return Ok(NoData {});
   }
 
-  let mut clone_args: CloneArgs = (&repo).into();
+  let mut clone_args: RepoExecutionArgs = (&repo).into();
   let repo_path =
     clone_args.unique_path(&core_config().repo_directory)?;
   clone_args.destination = Some(repo_path.display().to_string());
-  // Don't want to run these on core.
-  clone_args.on_clone = None;
-  clone_args.on_pull = None;
 
   let access_token = if let Some(username) = &clone_args.account {
     git_token(&clone_args.provider, username, |https| {
@@ -203,14 +199,10 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
     None
   };
 
-  let GitRes { hash, message, .. } = git::pull_or_clone(
+  let (res, _) = git::pull_or_clone(
     clone_args,
     &core_config().repo_directory,
     access_token,
-    &[],
-    "",
-    None,
-    &[],
   )
   .await
   .with_context(|| {
@@ -222,8 +214,8 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
     last_built_at: repo.info.last_built_at,
     built_hash: repo.info.built_hash,
     built_message: repo.info.built_message,
-    latest_hash: hash,
-    latest_message: message,
+    latest_hash: res.commit_hash,
+    latest_message: res.commit_message,
   };
 
   let info = to_document(&info)
68 bin/core/src/api/write/resource.rs Normal file
@@ -0,0 +1,68 @@
+use anyhow::anyhow;
+use komodo_client::{
+  api::write::{UpdateResourceMeta, UpdateResourceMetaResponse},
+  entities::{
+    ResourceTarget, action::Action, alerter::Alerter, build::Build,
+    builder::Builder, deployment::Deployment, procedure::Procedure,
+    repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
+  },
+};
+use resolver_api::Resolve;
+
+use crate::resource::{self, ResourceMetaUpdate};
+
+use super::WriteArgs;
+
+impl Resolve<WriteArgs> for UpdateResourceMeta {
+  #[instrument(name = "UpdateResourceMeta", skip(args))]
+  async fn resolve(
+    self,
+    args: &WriteArgs,
+  ) -> serror::Result<UpdateResourceMetaResponse> {
+    let meta = ResourceMetaUpdate {
+      description: self.description,
+      template: self.template,
+      tags: self.tags,
+    };
+    match self.target {
+      ResourceTarget::System(_) => {
+        return Err(
+          anyhow!("cannot update meta of System resource target")
+            .into(),
+        );
+      }
+      ResourceTarget::Server(id) => {
+        resource::update_meta::<Server>(&id, meta, args).await?;
+      }
+      ResourceTarget::Deployment(id) => {
+        resource::update_meta::<Deployment>(&id, meta, args).await?;
+      }
+      ResourceTarget::Build(id) => {
+        resource::update_meta::<Build>(&id, meta, args).await?;
+      }
+      ResourceTarget::Repo(id) => {
+        resource::update_meta::<Repo>(&id, meta, args).await?;
+      }
+      ResourceTarget::Builder(id) => {
+        resource::update_meta::<Builder>(&id, meta, args).await?;
+      }
+      ResourceTarget::Alerter(id) => {
+        resource::update_meta::<Alerter>(&id, meta, args).await?;
+      }
+      ResourceTarget::Procedure(id) => {
+        resource::update_meta::<Procedure>(&id, meta, args).await?;
+      }
+      ResourceTarget::Action(id) => {
+        resource::update_meta::<Action>(&id, meta, args).await?;
+      }
+      ResourceTarget::ResourceSync(id) => {
+        resource::update_meta::<ResourceSync>(&id, meta, args)
+          .await?;
+      }
+      ResourceTarget::Stack(id) => {
+        resource::update_meta::<Stack>(&id, meta, args).await?;
+      }
+    }
+    Ok(UpdateResourceMetaResponse {})
+  }
+}
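The new `UpdateResourceMeta` write request replaces `UpdateDescription` and folds description, template flag, and tags into one call, with `None` fields left untouched. A hedged sketch of invoking it through the resolver, assuming the request fields shown above; the id string is a placeholder:

  // Sketch: all three fields are optional, so a call can flip just the
  // template flag without touching description or tags.
  let req = UpdateResourceMeta {
    target: ResourceTarget::Server("my-server-id".to_string()), // hypothetical
    description: None,
    template: Some(true),
    tags: None,
  };
  req.resolve(args).await?;
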
@@ -37,6 +37,25 @@ impl Resolve<WriteArgs> for CreateServer {
   }
 }
 
+impl Resolve<WriteArgs> for CopyServer {
+  #[instrument(name = "CopyServer", skip(user))]
+  async fn resolve(
+    self,
+    WriteArgs { user }: &WriteArgs,
+  ) -> serror::Result<Server> {
+    let Server { config, .. } = get_check_permissions::<Server>(
+      &self.id,
+      user,
+      PermissionLevel::Read.into(),
+    )
+    .await?;
+    Ok(
+      resource::create::<Server>(&self.name, config.into(), user)
+        .await?,
+    )
+  }
+}
+
 impl Resolve<WriteArgs> for DeleteServer {
   #[instrument(name = "DeleteServer", skip(args))]
   async fn resolve(self, args: &WriteArgs) -> serror::Result<Server> {
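`CopyServer` reads the source server's config (Read permission on the source suffices) and creates a new server from it. A hedged sketch of the call from the resolver side; the name and id are placeholders:

  // Sketch: copy an existing server's config into a new resource.
  // Creation itself goes through resource::create with its own checks.
  let copied: Server = CopyServer {
    name: "my-server-copy".to_string(), // hypothetical
    id: source_server_id.clone(),       // hypothetical
  }
  .resolve(args)
  .await?;
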
@@ -8,7 +8,7 @@ use formatting::format_serror;
 use komodo_client::{
   api::{read::ExportAllResourcesToToml, write::*},
   entities::{
-    self, CloneArgs, NoData, Operation, ResourceTarget,
+    self, NoData, Operation, RepoExecutionArgs, ResourceTarget,
     action::Action,
     alert::{Alert, AlertData, SeverityLevel},
     alerter::Alerter,
@@ -265,7 +265,7 @@ async fn write_sync_file_contents_git(
     contents,
   } = req;
 
-  let mut clone_args: CloneArgs = if let Some(repo) = &repo {
+  let mut clone_args: RepoExecutionArgs = if let Some(repo) = &repo {
     repo.into()
   } else {
     (&sync).into()
@@ -325,15 +325,11 @@ async fn write_sync_file_contents_git(
     clone_args,
     &core_config().repo_directory,
     access_token,
-    Default::default(),
-    Default::default(),
-    Default::default(),
-    Default::default(),
   )
   .await
   .context("Failed to pull latest changes before commit")
   {
-    Ok(res) => update.logs.extend(res.logs),
+    Ok((res, _)) => update.logs.extend(res.logs),
     Err(e) => {
       update.push_error_log("Pull Repo", format_serror(&e.into()));
       update.finalize();
@@ -512,7 +508,7 @@ impl Resolve<WriteArgs> for CommitSync {
       // Resource path checked above for repo mode.
       unreachable!()
     };
-    let args: CloneArgs = repo.into();
+    let args: RepoExecutionArgs = repo.into();
     if let Err(e) =
       commit_git_sync(args, &resource_path, &res.toml, &mut update)
         .await
@@ -530,7 +526,7 @@ impl Resolve<WriteArgs> for CommitSync {
       // Resource path checked above for repo mode.
       unreachable!()
     };
-    let args: CloneArgs = (&sync).into();
+    let args: RepoExecutionArgs = (&sync).into();
     if let Err(e) =
       commit_git_sync(args, &resource_path, &res.toml, &mut update)
         .await
@@ -582,7 +578,7 @@ impl Resolve<WriteArgs> for CommitSync {
 }
 
 async fn commit_git_sync(
-  mut args: CloneArgs,
+  mut args: RepoExecutionArgs,
   resource_path: &Path,
   toml: &str,
   update: &mut Update,
@@ -600,18 +596,13 @@ async fn commit_git_sync(
     None
   };
 
-  let pull = git::pull_or_clone(
+  let (pull_res, _) = git::pull_or_clone(
     args.clone(),
     &core_config().repo_directory,
     access_token,
-    Default::default(),
-    Default::default(),
-    Default::default(),
-    Default::default(),
   )
   .await?;
-  update.logs.extend(pull.logs);
-
+  update.logs.extend(pull_res.logs);
   if !all_logs_success(&update.logs) {
     return Ok(());
   }
@@ -2,18 +2,13 @@ use std::str::FromStr;
 
 use anyhow::{Context, anyhow};
 use komodo_client::{
-  api::write::{
-    CreateTag, DeleteTag, RenameTag, UpdateTagColor,
-    UpdateTagsOnResource, UpdateTagsOnResourceResponse,
-  },
+  api::write::{CreateTag, DeleteTag, RenameTag, UpdateTagColor},
   entities::{
-    ResourceTarget,
     action::Action,
     alerter::Alerter,
     build::Build,
     builder::Builder,
     deployment::Deployment,
-    permission::PermissionLevel,
     procedure::Procedure,
     repo::Repo,
     server::Server,
@@ -30,7 +25,6 @@ use resolver_api::Resolve;
 
 use crate::{
   helpers::query::{get_tag, get_tag_check_owner},
-  permission::get_check_permissions,
   resource,
   state::db_client,
 };
@@ -124,13 +118,15 @@ impl Resolve<WriteArgs> for DeleteTag {
 
   tokio::try_join!(
     resource::remove_tag_from_all::<Server>(&self.id),
-    resource::remove_tag_from_all::<Deployment>(&self.id),
+    resource::remove_tag_from_all::<Stack>(&self.id),
+    resource::remove_tag_from_all::<Deployment>(&self.id),
     resource::remove_tag_from_all::<Build>(&self.id),
     resource::remove_tag_from_all::<Repo>(&self.id),
-    resource::remove_tag_from_all::<Procedure>(&self.id),
     resource::remove_tag_from_all::<Action>(&self.id),
     resource::remove_tag_from_all::<ResourceSync>(&self.id),
+    resource::remove_tag_from_all::<Builder>(&self.id),
+    resource::remove_tag_from_all::<Alerter>(&self.id),
     resource::remove_tag_from_all::<Procedure>(&self.id),
   )?;
 
   delete_one_by_id(&db_client().tags, &self.id, None).await?;
@@ -138,112 +134,3 @@ impl Resolve<WriteArgs> for DeleteTag {
   Ok(tag)
   }
 }
-
-impl Resolve<WriteArgs> for UpdateTagsOnResource {
-  #[instrument(name = "UpdateTagsOnResource", skip(args))]
-  async fn resolve(
-    self,
-    args: &WriteArgs,
-  ) -> serror::Result<UpdateTagsOnResourceResponse> {
-    let WriteArgs { user } = args;
-    match self.target {
-      ResourceTarget::System(_) => {
-        return Err(anyhow!("Invalid target type: System").into());
-      }
-      ResourceTarget::Build(id) => {
-        get_check_permissions::<Build>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Build>(&id, self.tags, args).await?;
-      }
-      ResourceTarget::Builder(id) => {
-        get_check_permissions::<Builder>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Builder>(&id, self.tags, args).await?
-      }
-      ResourceTarget::Deployment(id) => {
-        get_check_permissions::<Deployment>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Deployment>(&id, self.tags, args)
-          .await?
-      }
-      ResourceTarget::Server(id) => {
-        get_check_permissions::<Server>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Server>(&id, self.tags, args).await?
-      }
-      ResourceTarget::Repo(id) => {
-        get_check_permissions::<Repo>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Repo>(&id, self.tags, args).await?
-      }
-      ResourceTarget::Alerter(id) => {
-        get_check_permissions::<Alerter>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Alerter>(&id, self.tags, args).await?
-      }
-      ResourceTarget::Procedure(id) => {
-        get_check_permissions::<Procedure>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Procedure>(&id, self.tags, args)
-          .await?
-      }
-      ResourceTarget::Action(id) => {
-        get_check_permissions::<Action>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Action>(&id, self.tags, args).await?
-      }
-      ResourceTarget::ResourceSync(id) => {
-        get_check_permissions::<ResourceSync>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<ResourceSync>(&id, self.tags, args)
-          .await?
-      }
-      ResourceTarget::Stack(id) => {
-        get_check_permissions::<Stack>(
-          &id,
-          user,
-          PermissionLevel::Write.into(),
-        )
-        .await?;
-        resource::update_tags::<Stack>(&id, self.tags, args).await?
-      }
-    };
-    Ok(UpdateTagsOnResourceResponse {})
-  }
-}
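The corrected `try_join!` list now covers every taggable resource type exactly once (the old list duplicated `Procedure` and omitted `Stack`, `Builder`, and `Alerter`). For context, `tokio::try_join!` drives all branches concurrently and short-circuits on the first `Err`, so the tag document is only deleted after removal succeeds everywhere; a minimal sketch:

  // Sketch: try_join! returns Err as soon as any branch fails, so the
  // subsequent delete of the tag document never runs on partial failure.
  async fn remove_tag_everywhere(id: &str) -> anyhow::Result<()> {
    tokio::try_join!(
      resource::remove_tag_from_all::<Server>(id),
      resource::remove_tag_from_all::<Stack>(id),
      resource::remove_tag_from_all::<Deployment>(id),
    )?;
    Ok(())
  }
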
@@ -262,7 +262,10 @@ impl Resolve<WriteArgs> for SetEveryoneUserGroup {
     Err(_) => doc! { "name": &self.user_group },
   };
   db.user_groups
-    .update_one(filter.clone(), doc! { "$set": { "everyone": self.everyone } })
+    .update_one(
+      filter.clone(),
+      doc! { "$set": { "everyone": self.everyone } },
+    )
     .await
     .context("failed to set everyone on user group")?;
   let res = db
@@ -13,7 +13,9 @@ use serde::Deserialize;
 use serror::AddStatusCode;
 
 use crate::{
-  config::core_config, helpers::random_string, state::{db_client, jwt_client}
+  config::core_config,
+  helpers::random_string,
+  state::{db_client, jwt_client},
 };
 
 use self::client::github_oauth_client;
@@ -81,7 +83,7 @@ async fn callback(
   if !no_users_exist && core_config.disable_user_registration {
     return Err(anyhow!("User registration is disabled"));
   }
 
   let mut username = github_user.login;
   // Modify username if it already exists
   if db_client
@@ -303,7 +303,7 @@ async fn callback(
     .as_object_id()
     .context("inserted_id is not ObjectId")?
     .to_string();
 
   jwt_client()
     .encode(user_id)
     .context("failed to generate jwt")?
@@ -1,164 +0,0 @@
-use std::collections::HashSet;
-
-use anyhow::Context;
-use komodo_client::entities::{SystemCommand, update::Update};
-
-use super::query::VariablesAndSecrets;
-
-pub fn interpolate_variables_secrets_into_extra_args(
-  VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
-  extra_args: &mut Vec<String>,
-  global_replacers: &mut HashSet<(String, String)>,
-  secret_replacers: &mut HashSet<(String, String)>,
-) -> anyhow::Result<()> {
-  for arg in extra_args {
-    if arg.is_empty() {
-      continue;
-    }
-
-    // first pass - global variables
-    let (res, more_replacers) = svi::interpolate_variables(
-      arg,
-      variables,
-      svi::Interpolator::DoubleBrackets,
-      false,
-    )
-    .with_context(|| {
-      format!(
-        "failed to interpolate global variables into extra arg '{arg}'",
-      )
-    })?;
-    global_replacers.extend(more_replacers);
-
-    // second pass - core secrets
-    let (res, more_replacers) = svi::interpolate_variables(
-      &res,
-      secrets,
-      svi::Interpolator::DoubleBrackets,
-      false,
-    )
-    .with_context(|| {
-      format!(
-        "failed to interpolate core secrets into extra arg '{arg}'",
-      )
-    })?;
-    secret_replacers.extend(more_replacers);
-
-    // set arg with the result
-    *arg = res;
-  }
-
-  Ok(())
-}
-
-pub fn interpolate_variables_secrets_into_string(
-  VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
-  target: &mut String,
-  global_replacers: &mut HashSet<(String, String)>,
-  secret_replacers: &mut HashSet<(String, String)>,
-) -> anyhow::Result<()> {
-  if target.is_empty() {
-    return Ok(());
-  }
-
-  // first pass - global variables
-  let (res, more_replacers) = svi::interpolate_variables(
-    target,
-    variables,
-    svi::Interpolator::DoubleBrackets,
-    false,
-  )
-  .context("Failed to interpolate core variables")?;
-  global_replacers.extend(more_replacers);
-
-  // second pass - core secrets
-  let (res, more_replacers) = svi::interpolate_variables(
-    &res,
-    secrets,
-    svi::Interpolator::DoubleBrackets,
-    false,
-  )
-  .context("Failed to interpolate core secrets")?;
-  secret_replacers.extend(more_replacers);
-
-  // set command with the result
-  *target = res;
-
-  Ok(())
-}
-
-pub fn interpolate_variables_secrets_into_system_command(
-  VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
-  command: &mut SystemCommand,
-  global_replacers: &mut HashSet<(String, String)>,
-  secret_replacers: &mut HashSet<(String, String)>,
-) -> anyhow::Result<()> {
-  if command.command.is_empty() {
-    return Ok(());
-  }
-
-  // first pass - global variables
-  let (res, more_replacers) = svi::interpolate_variables(
-    &command.command,
-    variables,
-    svi::Interpolator::DoubleBrackets,
-    false,
-  )
-  .with_context(|| {
-    format!(
-      "failed to interpolate global variables into command '{}'",
-      command.command
-    )
-  })?;
-  global_replacers.extend(more_replacers);
-
-  // second pass - core secrets
-  let (res, more_replacers) = svi::interpolate_variables(
-    &res,
-    secrets,
-    svi::Interpolator::DoubleBrackets,
-    false,
-  )
-  .with_context(|| {
-    format!(
-      "failed to interpolate core secrets into command '{}'",
-      command.command
-    )
-  })?;
-  secret_replacers.extend(more_replacers);
-
-  // set command with the result
-  command.command = res;
-
-  Ok(())
-}
-
-pub fn add_interp_update_log(
-  update: &mut Update,
-  global_replacers: &HashSet<(String, String)>,
-  secret_replacers: &HashSet<(String, String)>,
-) {
-  // Show which variables were interpolated
-  if !global_replacers.is_empty() {
-    update.push_simple_log(
-      "interpolate global variables",
-      global_replacers
-        .iter()
-        .map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
-        .collect::<Vec<_>>()
-        .join("\n"),
-    );
-  }
-
-  // Only show names of interpolated secrets
-  if !secret_replacers.is_empty() {
-    update.push_simple_log(
-      "interpolate core secrets",
-      secret_replacers
-        .iter()
-        .map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
-        .collect::<Vec<_>>()
-        .join("\n"),
-    );
-  }
-}
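These helpers were removed in favor of the standardized interpolation API (the `interpolate` crate's `Interpolator`, used later in this diff). The underlying pattern survives: two `svi` passes, global variables first, then core secrets, with the replacers from each pass kept so secret values can be scrubbed from logs. A minimal sketch of that two-pass pattern, assuming the `svi::interpolate_variables` signature used in the removed code:

  use std::collections::{HashMap, HashSet};

  // Sketch of the two-pass pattern the removed helpers implemented.
  fn interpolate_two_pass(
    target: &str,
    variables: &HashMap<String, String>,
    secrets: &HashMap<String, String>,
  ) -> anyhow::Result<(String, HashSet<(String, String)>)> {
    // First pass: global variables. Replacers are (value, variable) pairs.
    let (res, _global) = svi::interpolate_variables(
      target,
      variables,
      svi::Interpolator::DoubleBrackets,
      false,
    )?;
    // Second pass: core secrets. Keep these replacers to sanitize logs later.
    let (res, secret_replacers) = svi::interpolate_variables(
      &res,
      secrets,
      svi::Interpolator::DoubleBrackets,
      false,
    )?;
    Ok((res, secret_replacers))
  }
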
@@ -25,7 +25,6 @@ pub mod all_resources;
 pub mod builder;
 pub mod cache;
 pub mod channel;
-pub mod interpolate;
 pub mod maintenance;
 pub mod matcher;
 pub mod procedure;
@@ -59,6 +59,7 @@ impl super::KomodoResource for Action {
     ActionListItem {
       name: action.name,
       id: action.id,
+      template: action.template,
       tags: action.tags,
       resource_type: ResourceTargetVariant::Action,
       info: ActionListItemInfo {
@@ -40,6 +40,7 @@ impl super::KomodoResource for Alerter {
     AlerterListItem {
       name: alerter.name,
       id: alerter.id,
+      template: alerter.template,
       tags: alerter.tags,
       resource_type: ResourceTargetVariant::Alerter,
       info: AlerterListItemInfo {
@@ -97,6 +97,7 @@ impl super::KomodoResource for Build {
     BuildListItem {
       name: build.name,
       id: build.id,
+      template: build.template,
       tags: build.tags,
       resource_type: ResourceTargetVariant::Build,
       info: BuildListItemInfo {
@@ -64,6 +64,7 @@ impl super::KomodoResource for Builder {
     BuilderListItem {
       name: builder.name,
       id: builder.id,
+      template: builder.template,
       tags: builder.tags,
       resource_type: ResourceTargetVariant::Builder,
       info: BuilderListItemInfo {
@@ -128,6 +128,7 @@ impl super::KomodoResource for Deployment {
     DeploymentListItem {
       name: deployment.name,
       id: deployment.id,
+      template: deployment.template,
       tags: deployment.tags,
       resource_type: ResourceTargetVariant::Deployment,
       info: DeploymentListItemInfo {
@@ -491,12 +491,13 @@ pub async fn create<T: KomodoResource>(
   let resource = Resource::<T::Config, T::Info> {
     id: Default::default(),
     name,
-    updated_at: start_ts,
     description: Default::default(),
+    template: Default::default(),
     tags: Default::default(),
     config: config.into(),
     info: T::default_info().await?,
     base_permission: PermissionLevel::None.into(),
+    updated_at: start_ts,
   };
 
   let resource_id = T::coll()
@@ -665,52 +666,60 @@ fn resource_target<T: KomodoResource>(id: String) -> ResourceTarget {
   }
 }
 
-pub async fn update_description<T: KomodoResource>(
+pub struct ResourceMetaUpdate {
+  pub description: Option<String>,
+  pub template: Option<bool>,
+  pub tags: Option<Vec<String>>,
+}
+
+impl ResourceMetaUpdate {
+  pub fn is_none(&self) -> bool {
+    self.description.is_none()
+      && self.template.is_none()
+      && self.tags.is_none()
+  }
+}
+
+pub async fn update_meta<T: KomodoResource>(
   id_or_name: &str,
-  description: &str,
-  user: &User,
+  meta: ResourceMetaUpdate,
+  args: &WriteArgs,
 ) -> anyhow::Result<()> {
   get_check_permissions::<T>(
     id_or_name,
-    user,
+    &args.user,
     PermissionLevel::Write.into(),
   )
   .await?;
-  T::coll()
-    .update_one(
-      id_or_name_filter(id_or_name),
-      doc! { "$set": { "description": description } },
-    )
-    .await?;
-  Ok(())
-}
-
-pub async fn update_tags<T: KomodoResource>(
-  id_or_name: &str,
-  tags: Vec<String>,
-  args: &WriteArgs,
-) -> anyhow::Result<()> {
-  let futures = tags.iter().map(|tag| async {
-    match get_tag(tag).await {
-      Ok(tag) => Ok(tag.id),
-      Err(_) => CreateTag {
-        name: tag.to_string(),
-      }
-      .resolve(args)
-      .await
-      .map(|tag| tag.id),
-    }
-  });
-  let tags = join_all(futures)
-    .await
-    .into_iter()
-    .flatten()
-    .collect::<Vec<_>>();
+  let mut set = Document::new();
+  if let Some(description) = meta.description {
+    set.insert("description", description);
+  }
+  if let Some(template) = meta.template {
+    set.insert("template", template);
+  }
+  if let Some(tags) = meta.tags {
+    // First normalize to tag ids only
+    let futures = tags.iter().map(|tag| async {
+      match get_tag(tag).await {
+        Ok(tag) => Ok(tag.id),
+        Err(_) => CreateTag {
+          name: tag.to_string(),
+        }
+        .resolve(args)
+        .await
+        .map(|tag| tag.id),
+      }
+    });
+    let tags = join_all(futures)
+      .await
+      .into_iter()
+      .flatten()
+      .collect::<Vec<_>>();
+    set.insert("tags", tags);
+  }
   T::coll()
-    .update_one(
-      id_or_name_filter(id_or_name),
-      doc! { "$set": { "tags": tags } },
-    )
+    .update_one(id_or_name_filter(id_or_name), doc! { "$set": set })
     .await?;
   refresh_all_resources_cache().await;
   Ok(())
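`update_meta` builds a single `$set` document from whichever meta fields are present, so one write can touch any combination of description, template, and tags. A hedged sketch of a direct call, with placeholder id and tag names:

  // Sketch: update only the template flag and tags of a Server, leaving
  // the description untouched (None fields are omitted from the $set doc).
  crate::resource::update_meta::<Server>(
    "my-server", // hypothetical id or name
    ResourceMetaUpdate {
      description: None,
      template: Some(true),
      tags: Some(vec!["prod".to_string()]), // names are normalized to tag ids
    },
    args,
  )
  .await?;
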
@@ -72,6 +72,7 @@ impl super::KomodoResource for Procedure {
     ProcedureListItem {
       name: procedure.name,
       id: procedure.id,
+      template: procedure.template,
       tags: procedure.tags,
       resource_type: ResourceTargetVariant::Procedure,
       info: ProcedureListItemInfo {
@@ -66,6 +66,7 @@ impl super::KomodoResource for Repo {
     RepoListItem {
       name: repo.name,
       id: repo.id,
+      template: repo.template,
       tags: repo.tags,
       resource_type: ResourceTargetVariant::Repo,
       info: RepoListItemInfo {
@@ -65,6 +65,7 @@ impl super::KomodoResource for Server {
     ServerListItem {
       name: server.name,
       id: server.id,
+      template: server.template,
       tags: server.tags,
       resource_type: ResourceTargetVariant::Server,
       info: ServerListItemInfo {
@@ -165,8 +165,9 @@ impl super::KomodoResource for Stack {
   };
 
   StackListItem {
-    id: stack.id,
     name: stack.name,
+    id: stack.id,
+    template: stack.template,
     tags: stack.tags,
     resource_type: ResourceTargetVariant::Stack,
     info: StackListItemInfo {
@@ -83,8 +83,9 @@ impl super::KomodoResource for ResourceSync {
   };
 
   ResourceSyncListItem {
-    id: resource_sync.id,
     name: resource_sync.name,
+    id: resource_sync.id,
+    template: resource_sync.template,
     tags: resource_sync.tags,
     resource_type: ResourceTargetVariant::ResourceSync,
     info: ResourceSyncListItemInfo {
@@ -324,9 +324,8 @@ fn find_next_occurrence(
       .timestamp_millis()
   }
   ("", timezone) | (timezone, _) => {
-    let tz: chrono_tz::Tz = timezone
-      .parse()
-      .context("Failed to parse timezone")?;
+    let tz: chrono_tz::Tz =
+      timezone.parse().context("Failed to parse timezone")?;
     let tz_time = chrono::Local::now().with_timezone(&tz);
     cron
       .find_next_occurrence(&tz_time, false)
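The timezone branch parses an IANA name into a `chrono_tz::Tz`, converts the current local time into that zone, and asks the cron schedule for its next occurrence there. A hedged sketch of the same steps in isolation; `find_next_occurrence` is assumed to be the croner-style API used by the surrounding code:

  use anyhow::Context;

  // Sketch: next run of a schedule, evaluated in a specific timezone.
  fn next_run_millis(cron: &croner::Cron, timezone: &str) -> anyhow::Result<i64> {
    let tz: chrono_tz::Tz =
      timezone.parse().context("Failed to parse timezone")?;
    let tz_time = chrono::Local::now().with_timezone(&tz);
    let next = cron
      .find_next_occurrence(&tz_time, false)
      .context("Failed to find next occurrence")?;
    Ok(next.timestamp_millis())
  }
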
@@ -3,7 +3,8 @@ use std::{fs, path::PathBuf};
 use anyhow::Context;
 use formatting::format_serror;
 use komodo_client::entities::{
-  CloneArgs, FileContents, repo::Repo, stack::Stack, update::Log,
+  FileContents, RepoExecutionArgs, repo::Repo, stack::Stack,
+  update::Log,
 };
 
 use crate::{config::core_config, helpers::git_token};
@@ -23,7 +24,7 @@ pub async fn get_repo_compose_contents(
   // Collect any files which are missing in the repo.
   mut missing_files: Option<&mut Vec<String>>,
 ) -> anyhow::Result<RemoteComposeContents> {
-  let clone_args: CloneArgs =
+  let clone_args: RepoExecutionArgs =
     repo.map(Into::into).unwrap_or(stack.into());
   let (repo_path, _logs, hash, message) =
     ensure_remote_repo(clone_args)
@@ -70,7 +71,7 @@ pub async fn get_repo_compose_contents(
 
 /// Returns (destination, logs, hash, message)
 pub async fn ensure_remote_repo(
-  mut clone_args: CloneArgs,
+  mut clone_args: RepoExecutionArgs,
 ) -> anyhow::Result<(PathBuf, Vec<Log>, Option<String>, Option<String>)>
 {
   let config = core_config();
@@ -90,20 +91,11 @@ pub async fn ensure_remote_repo(
   let repo_path =
     clone_args.unique_path(&core_config().repo_directory)?;
   clone_args.destination = Some(repo_path.display().to_string());
-  // Don't want to run these on core.
-  clone_args.on_clone = None;
-  clone_args.on_pull = None;
 
-  git::pull_or_clone(
-    clone_args,
-    &config.repo_directory,
-    access_token,
-    &[],
-    "",
-    None,
-    &[],
-  )
-  .await
-  .context("Failed to clone stack repo")
-  .map(|res| (repo_path, res.logs, res.hash, res.message))
+  git::pull_or_clone(clone_args, &config.repo_directory, access_token)
+    .await
+    .context("Failed to clone stack repo")
+    .map(|(res, _)| {
+      (repo_path, res.logs, res.commit_hash, res.commit_message)
+    })
 }
@@ -2,18 +2,14 @@ use std::collections::HashMap;
 
 use anyhow::Context;
 use formatting::{Color, bold, colored, muted};
-use komodo_client::{
-  api::write::{UpdateDescription, UpdateTagsOnResource},
-  entities::{
-    ResourceTargetVariant, tag::Tag, toml::ResourceToml, update::Log,
-    user::sync_user,
-  },
+use komodo_client::entities::{
+  ResourceTargetVariant, tag::Tag, toml::ResourceToml, update::Log,
+  user::sync_user,
 };
 use mungos::find::find_collect;
 use partial_derive2::MaybeNone;
 use resolver_api::Resolve;
 
-use crate::api::write::WriteArgs;
+use crate::{api::write::WriteArgs, resource::ResourceMetaUpdate};
 
 use super::{ResourceSyncTrait, SyncDeltas, ToUpdateItem};
 
@@ -97,6 +93,7 @@ pub async fn get_updates_for_execution<
   // or a change to tags / description
   if diff.is_none()
     && resource.description == original.description
+    && resource.template == original.template
     && resource.tags == original_tags
   {
     continue;
@@ -109,6 +106,7 @@ pub async fn get_updates_for_execution<
   id: original.id.clone(),
   update_description: resource.description
     != original.description,
+  update_template: resource.template != original.template,
   update_tags: resource.tags != original_tags,
   resource,
 };
@@ -143,8 +141,6 @@ pub trait ExecuteResourceSync: ResourceSyncTrait {
 
   for resource in to_create {
     let name = resource.name.clone();
-    let tags = resource.tags.clone();
-    let description = resource.description.clone();
     let id = match crate::resource::create::<Self>(
       &resource.name,
       resource.config,
@@ -164,18 +160,14 @@ pub trait ExecuteResourceSync: ResourceSyncTrait {
       continue;
     }
   };
-  run_update_tags::<Self>(
+  run_update_meta::<Self>(
     id.clone(),
     &name,
-    tags,
-    &mut log,
-    &mut has_error,
-  )
-  .await;
-  run_update_description::<Self>(
-    id,
-    &name,
-    description,
+    ResourceMetaUpdate {
+      description: Some(resource.description),
+      template: Some(resource.template),
+      tags: Some(resource.tags),
+    },
     &mut log,
     &mut has_error,
   )
@@ -193,30 +185,24 @@ pub trait ExecuteResourceSync: ResourceSyncTrait {
     id,
     resource,
     update_description,
+    update_template,
     update_tags,
   } in to_update
   {
     // Update resource
     let name = resource.name.clone();
-    let tags = resource.tags.clone();
-    let description = resource.description.clone();
-
-    if update_description {
-      run_update_description::<Self>(
-        id.clone(),
-        &name,
-        description,
-        &mut log,
-        &mut has_error,
-      )
-      .await;
-    }
-
-    if update_tags {
-      run_update_tags::<Self>(
-        id.clone(),
-        &name,
-        tags,
+    let meta = ResourceMetaUpdate {
+      description: update_description
+        .then(|| resource.description.clone()),
+      template: update_template.then_some(resource.template),
+      tags: update_tags.then(|| resource.tags.clone()),
+    };
+
+    if !meta.is_none() {
+      run_update_meta::<Self>(
+        id.clone(),
+        &name,
+        meta,
         &mut log,
         &mut has_error,
       )
@@ -286,21 +272,20 @@ pub trait ExecuteResourceSync: ResourceSyncTrait {
   }
 }
 
-pub async fn run_update_tags<Resource: ResourceSyncTrait>(
+pub async fn run_update_meta<Resource: ResourceSyncTrait>(
   id: String,
   name: &str,
-  tags: Vec<String>,
+  meta: ResourceMetaUpdate,
   log: &mut String,
   has_error: &mut bool,
 ) {
-  // Update tags
-  if let Err(e) = (UpdateTagsOnResource {
-    target: Resource::resource_target(id),
-    tags,
-  })
-  .resolve(&WriteArgs {
-    user: sync_user().to_owned(),
-  })
+  if let Err(e) = crate::resource::update_meta::<Resource>(
+    &id,
+    meta,
+    &WriteArgs {
+      user: sync_user().to_owned(),
+    },
+  )
   .await
   {
     *has_error = true;
@@ -309,46 +294,11 @@ pub async fn run_update_meta<Resource: ResourceSyncTrait>(
     colored("ERROR", Color::Red),
     Resource::resource_type(),
     bold(name),
-    e.error
+    e
   ))
 } else {
   log.push_str(&format!(
-    "\n{}: {} {} '{}' tags",
+    "\n{}: {} {} '{}' meta",
     muted("INFO"),
     colored("updated", Color::Blue),
     Resource::resource_type(),
     bold(name)
   ));
   }
 }
 
-pub async fn run_update_description<Resource: ResourceSyncTrait>(
-  id: String,
-  name: &str,
-  description: String,
-  log: &mut String,
-  has_error: &mut bool,
-) {
-  if let Err(e) = (UpdateDescription {
-    target: Resource::resource_target(id.clone()),
-    description,
-  })
-  .resolve(&WriteArgs {
-    user: sync_user().to_owned(),
-  })
-  .await
-  {
-    *has_error = true;
-    log.push_str(&format!(
-      "\n{}: failed to update description on {} '{}' | {:#}",
-      colored("ERROR", Color::Red),
-      Resource::resource_type(),
-      bold(name),
-      e.error
-    ))
-  } else {
-    log.push_str(&format!(
-      "\n{}: {} {} '{}' description",
-      muted("INFO"),
-      colored("updated", Color::Blue),
-      Resource::resource_type(),
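The meta construction above leans on `bool::then` versus `bool::then_some`: `then_some(value)` eagerly moves an already-cheap `Copy` value like the `template` bool, while `then(|| ...)` defers the clone of the heavier `String`/`Vec` fields until the flag is actually true. A tiny self-contained illustration:

  // then_some: the value is computed/moved unconditionally (fine for Copy types).
  let template: Option<bool> = true.then_some(false);
  assert_eq!(template, Some(false));

  // then: the closure (and its clone) only runs when the flag is true.
  let flag = false;
  let tags: Option<Vec<String>> = flag.then(|| vec!["expensive".to_string()]);
  assert_eq!(tags, None);
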
@@ -40,6 +40,7 @@ pub struct ToUpdateItem<T: Default> {
   pub id: String,
   pub resource: ResourceToml<T>,
   pub update_description: bool,
+  pub update_template: bool,
   pub update_tags: bool,
 }
@@ -1,7 +1,6 @@
 use anyhow::Context;
-use git::GitRes;
 use komodo_client::entities::{
-  CloneArgs,
+  RepoExecutionArgs, RepoExecutionResponse,
   repo::Repo,
   sync::{ResourceSync, SyncFileContents},
   to_path_compatible_name,
@@ -66,7 +65,7 @@ async fn get_files_on_host(
 
 async fn get_repo(
   sync: &ResourceSync,
-  mut clone_args: CloneArgs,
+  mut clone_args: RepoExecutionArgs,
 ) -> anyhow::Result<RemoteResources> {
   let access_token = if let Some(account) = &clone_args.account {
     git_token(&clone_args.provider, account, |https| clone_args.https = https)
@@ -81,23 +80,19 @@ async fn get_repo(
   let repo_path =
     clone_args.unique_path(&core_config().repo_directory)?;
   clone_args.destination = Some(repo_path.display().to_string());
-  // Don't want to run these on core.
-  clone_args.on_clone = None;
-  clone_args.on_pull = None;
 
-  let GitRes {
-    mut logs,
-    hash,
-    message,
-    ..
-  } = git::pull_or_clone(
+  let (
+    RepoExecutionResponse {
+      mut logs,
+      commit_hash,
+      commit_message,
+      ..
+    },
+    _,
+  ) = git::pull_or_clone(
     clone_args,
     &core_config().repo_directory,
     access_token,
-    &[],
-    "",
-    None,
-    &[],
   )
   .await
   .with_context(|| {
@@ -123,8 +118,8 @@ async fn get_repo(
   files,
   file_errors,
   logs,
-  hash,
-  message,
+  hash: commit_hash,
+  message: commit_message,
 })
}
@@ -24,12 +24,9 @@ use partial_derive2::{MaybeNone, PartialDiff};
 
 use crate::{
   api::write::WriteArgs,
-  resource::KomodoResource,
+  resource::{KomodoResource, ResourceMetaUpdate},
   state::all_resources_cache,
-  sync::{
-    ToUpdateItem,
-    execute::{run_update_description, run_update_tags},
-  },
+  sync::{ToUpdateItem, execute::run_update_meta},
 };
 
 use super::{
@@ -741,28 +738,24 @@ impl ExecuteResourceSync for Procedure {
     id,
     resource,
     update_description,
+    update_template,
     update_tags,
   } in &to_update
   {
     // Update resource
     let name = resource.name.clone();
-    let tags = resource.tags.clone();
-    let description = resource.description.clone();
-    if *update_description {
-      run_update_description::<Procedure>(
+
+    let meta = ResourceMetaUpdate {
+      description: update_description
+        .then(|| resource.description.clone()),
+      template: update_template.then(|| resource.template),
+      tags: update_tags.then(|| resource.tags.clone()),
+    };
+
+    if !meta.is_none() {
+      run_update_meta::<Procedure>(
        id.clone(),
        &name,
-        description,
-        &mut log,
-        &mut has_error,
-      )
-      .await;
-    }
-    if *update_tags {
-      run_update_tags::<Procedure>(
-        id.clone(),
-        &name,
-        tags,
+        meta,
        &mut log,
        &mut has_error,
      )
@@ -804,8 +797,6 @@ impl ExecuteResourceSync for Procedure {
   let mut to_pull = Vec::new();
   for resource in &to_create {
     let name = resource.name.clone();
-    let tags = resource.tags.clone();
-    let description = resource.description.clone();
     let id = match crate::resource::create::<Procedure>(
       &name,
       resource.config.clone(),
@@ -827,18 +818,14 @@ impl ExecuteResourceSync for Procedure {
       continue;
     }
   };
-  run_update_tags::<Procedure>(
+  run_update_meta::<Procedure>(
     id.clone(),
     &name,
-    tags,
-    &mut log,
-    &mut has_error,
-  )
-  .await;
-  run_update_description::<Procedure>(
-    id,
-    &name,
-    description,
+    ResourceMetaUpdate {
+      description: Some(resource.description.clone()),
+      template: Some(resource.template),
+      tags: Some(resource.tags.clone()),
+    },
     &mut log,
     &mut has_error,
   )
@@ -139,12 +139,13 @@ pub fn convert_resource<R: KomodoResource>(
 ) -> ResourceToml<R::PartialConfig> {
   ResourceToml {
     name: resource.name,
-    description: resource.description,
+    template: resource.template,
     tags: resource
       .tags
       .iter()
      .filter_map(|t| all_tags.get(t).map(|t| t.name.clone()))
      .collect(),
+    description: resource.description,
     deploy,
     after,
     // The config still needs to be minimized.
@@ -65,8 +65,8 @@ pub async fn terminal(
   else {
     debug!("could not get stack status");
     let _ = client_socket
-      .send(Message::text(format!(
-        "ERROR: could not get stack status"
+      .send(Message::text(String::from(
+        "ERROR: could not get stack status",
       )))
       .await;
     let _ = client_socket.close().await;
@@ -18,6 +18,8 @@ path = "src/main.rs"
 komodo_client.workspace = true
 periphery_client.workspace = true
 environment_file.workspace = true
+environment.workspace = true
+interpolate.workspace = true
 formatting.workspace = true
 response.workspace = true
 command.workspace = true
@@ -31,7 +33,6 @@ async_timing_util.workspace = true
 derive_variants.workspace = true
 resolver_api.workspace = true
 run_command.workspace = true
-svi.workspace = true
 # external
 pin-project-lite.workspace = true
 tokio-stream.workspace = true
@@ -40,6 +41,7 @@ axum-server.workspace = true
 serde_json.workspace = true
 serde_yaml.workspace = true
 tokio-util.workspace = true
+arc-swap.workspace = true
 futures.workspace = true
 tracing.workspace = true
 bollard.workspace = true
@@ -1,23 +1,17 @@
-use std::{
-  fmt::Write,
-  path::{Path, PathBuf},
-};
+use std::path::PathBuf;
 
 use anyhow::{Context, anyhow};
 use command::{
-  run_komodo_command, run_komodo_command_multiline,
-  run_komodo_command_with_interpolation,
+  run_komodo_command, run_komodo_command_with_sanitization,
 };
 use formatting::format_serror;
-use komodo_client::{
-  entities::{
-    EnvironmentVar, Version,
-    build::{Build, BuildConfig},
-    environment_vars_from_str, get_image_name, optional_string,
-    to_path_compatible_name,
-    update::Log,
-  },
-  parsers::QUOTE_PATTERN,
+use interpolate::Interpolator;
+use komodo_client::entities::{
+  EnvironmentVar, all_logs_success,
+  build::{Build, BuildConfig},
+  environment_vars_from_str, get_image_name, optional_string,
+  to_path_compatible_name,
+  update::Log,
 };
 use periphery_client::api::build::{
   self, GetDockerfileContentsOnHost,
@@ -28,6 +22,9 @@ use resolver_api::Resolve;
 use tokio::fs;
 
 use crate::{
+  build::{
+    image_tags, parse_build_args, parse_secret_args, write_dockerfile,
+  },
   config::periphery_config,
   docker::docker_login,
   helpers::{parse_extra_args, parse_labels},
@@ -123,19 +120,30 @@ impl Resolve<super::Args> for build::Build {
     _: &super::Args,
   ) -> serror::Result<Vec<Log>> {
     let build::Build {
-      build,
+      mut build,
       repo: linked_repo,
       registry_token,
       additional_tags,
-      replacers: mut core_replacers,
+      mut replacers,
     } = self;
 
+    let mut logs = Vec::new();
+
+    // Periphery side interpolation
+    let mut interpolator =
+      Interpolator::new(None, &periphery_config().secrets);
+    interpolator
+      .interpolate_build(&mut build)?
+      .push_logs(&mut logs);
+
+    replacers.extend(interpolator.secret_replacers);
+
     let Build {
       name,
      config:
        BuildConfig {
          version,
          image_tag,
          skip_secret_interp,
          build_path,
          dockerfile_path,
          build_args,
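The periphery now runs its own interpolation pass up front through the new `interpolate` crate, then merges the resulting secret replacers into the set sent from core so later command logs can be sanitized. A hedged sketch of the assumed `Interpolator` flow, with names taken only from this diff:

  // Sketch of the periphery-side interpolation flow as used in this diff.
  // Interpolator::new(variables, secrets): core variables are None on periphery.
  let mut interpolator =
    Interpolator::new(None, &periphery_config().secrets);

  // Interpolate secrets into the build config in place, collecting logs.
  interpolator
    .interpolate_build(&mut build)?
    .push_logs(&mut logs);

  // Merge periphery replacers with those core already applied, so
  // run_komodo_command_with_sanitization can scrub every secret value.
  replacers.extend(interpolator.secret_replacers);
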
@@ -161,8 +169,6 @@ impl Resolve<super::Args> for build::Build {
       return Err(anyhow!("Build must be files on host mode, have a repo attached, or have dockerfile contents set to build").into());
     }
 
-    let mut logs = Vec::new();
-
     // Maybe docker login
     let should_push = match docker_login(
       &image_registry.domain,
@@ -191,7 +197,7 @@ impl Resolve<super::Args> for build::Build {
     } else {
       periphery_config()
         .build_dir()
-        .join(to_path_compatible_name(&name))
+        .join(to_path_compatible_name(name))
        .join(build_path)
     }
     .components()
@@ -206,74 +212,36 @@ impl Resolve<super::Args> for build::Build {
       && linked_repo.is_none()
       && !dockerfile.is_empty()
     {
-      let dockerfile = if *skip_secret_interp {
-        dockerfile.to_string()
-      } else {
-        let (dockerfile, replacers) = svi::interpolate_variables(
-          dockerfile,
-          &periphery_config().secrets,
-          svi::Interpolator::DoubleBrackets,
-          true,
-        ).context("Failed to interpolate variables into UI defined dockerfile")?;
-        core_replacers.extend(replacers);
-        dockerfile
-      };
-
-      let full_dockerfile_path = build_path
-        .join(&dockerfile_path)
-        .components()
-        .collect::<PathBuf>();
-
-      // Ensure parent directory exists
-      if let Some(parent) = full_dockerfile_path.parent() {
-        if !parent.exists() {
-          tokio::fs::create_dir_all(parent)
-            .await
-            .with_context(|| format!("Failed to initialize dockerfile parent directory {parent:?}"))?;
-        }
-      }
-
-      fs::write(&full_dockerfile_path, dockerfile).await.with_context(|| {
-        format!(
-          "Failed to write dockerfile contents to {full_dockerfile_path:?}"
-        )
-      })?;
-
-      logs.push(Log::simple(
-        "Write Dockerfile",
-        format!(
-          "Dockerfile contents written to {full_dockerfile_path:?}"
-        ),
-      ));
+      write_dockerfile(
+        &build_path,
+        &dockerfile_path,
+        dockerfile,
+        &mut logs,
+      )
+      .await;
+      if !all_logs_success(&logs) {
+        return Ok(logs);
+      }
     };
 
     // Pre Build
     if !pre_build.is_none() {
       let pre_build_path = build_path.join(&pre_build.path);
-      if let Some(log) = if !skip_secret_interp {
-        run_komodo_command_with_interpolation(
-          "Pre Build",
-          Some(pre_build_path.as_path()),
-          &pre_build.command,
-          true,
-          &periphery_config().secrets,
-          &core_replacers,
-        )
-        .await
-      } else {
-        run_komodo_command_multiline(
-          "Pre Build",
-          Some(pre_build_path.as_path()),
-          &pre_build.command,
-        )
-        .await
-      } {
+      if let Some(log) = run_komodo_command_with_sanitization(
+        "Pre Build",
+        pre_build_path.as_path(),
+        &pre_build.command,
+        true,
+        &replacers,
+      )
+      .await
+      {
        let success = log.success;
        logs.push(log);
        if !success {
          return Ok(logs);
        }
      };
    }
 
    // Get command parts
@@ -293,12 +261,8 @@ impl Resolve<super::Args> for build::Build {
 
   let secret_args = environment_vars_from_str(secret_args)
     .context("Invalid secret_args")?;
-  let command_secret_args = parse_secret_args(
-    &secret_args,
-    &build_path,
-    *skip_secret_interp,
-  )
-  .await?;
+  let command_secret_args =
+    parse_secret_args(&secret_args, &build_path).await?;
 
   let labels = parse_labels(
     &environment_vars_from_str(labels).context("Invalid labels")?,
@@ -314,125 +278,22 @@ impl Resolve<super::Args> for build::Build {
       "docker{buildx} build{build_args}{command_secret_args}{extra_args}{labels}{image_tags}{maybe_push} -f {dockerfile_path} .",
     );
 
-    if *skip_secret_interp {
-      let build_log = run_komodo_command(
-        "Docker Build",
-        build_path.as_ref(),
-        command,
-      )
-      .await;
-      logs.push(build_log);
-    } else if let Some(log) = run_komodo_command_with_interpolation(
+    if let Some(build_log) = run_komodo_command_with_sanitization(
       "Docker Build",
       build_path.as_ref(),
       command,
       false,
-      &periphery_config().secrets,
-      &core_replacers,
+      &replacers,
     )
     .await
    {
-      logs.push(log)
-    }
+      logs.push(build_log);
+    };
 
     Ok(logs)
   }
 }
 
-fn image_tags(
-  image_name: &str,
-  custom_tag: &str,
-  version: &Version,
-  additional: &[String],
-) -> String {
-  let Version { major, minor, .. } = version;
-  let custom_tag = if custom_tag.is_empty() {
-    String::new()
-  } else {
-    format!("-{custom_tag}")
-  };
-  let additional = additional
-    .iter()
-    .map(|tag| format!(" -t {image_name}:{tag}{custom_tag}"))
-    .collect::<Vec<_>>()
-    .join("");
-  format!(
-    " -t {image_name}:latest{custom_tag} -t {image_name}:{version}{custom_tag} -t {image_name}:{major}.{minor}{custom_tag} -t {image_name}:{major}{custom_tag}{additional}",
-  )
-}
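`image_tags` (now imported from the shared `build` module rather than defined here) expands one image name into the full `-t` list: latest, full version, major.minor, major, plus any additional tags. A worked example of the string it produces, assuming `Version`'s `Display` renders `major.minor.patch` and using hypothetical inputs:

  // Hypothetical inputs for illustration.
  let tags = image_tags(
    "ghcr.io/acme/app",                        // image_name
    "",                                        // custom_tag (empty, so no suffix)
    &Version { major: 1, minor: 2, patch: 3 },
    &["edge".to_string()],                     // additional tags
  );
  // Produces one string (leading space included):
  //  -t ghcr.io/acme/app:latest -t ghcr.io/acme/app:1.2.3
  //  -t ghcr.io/acme/app:1.2 -t ghcr.io/acme/app:1 -t ghcr.io/acme/app:edge
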
-fn parse_build_args(build_args: &[EnvironmentVar]) -> String {
-  build_args
-    .iter()
-    .map(|p| {
-      if p.value.starts_with(QUOTE_PATTERN)
-        && p.value.ends_with(QUOTE_PATTERN)
-      {
-        // If the value is already wrapped in quotes, don't wrap it again
-        format!(" --build-arg {}={}", p.variable, p.value)
-      } else {
-        format!(" --build-arg {}=\"{}\"", p.variable, p.value)
-      }
-    })
-    .collect::<Vec<_>>()
-    .join("")
-}
-
-/// <https://docs.docker.com/build/building/secrets/#using-build-secrets>
-async fn parse_secret_args(
-  secret_args: &[EnvironmentVar],
-  build_dir: &Path,
-  skip_secret_interp: bool,
-) -> anyhow::Result<String> {
-  let periphery_config = periphery_config();
-  let mut res = String::new();
-  for EnvironmentVar { variable, value } in secret_args {
-    // Check edge cases
-    if variable.is_empty() {
-      return Err(anyhow!("secret variable cannot be empty string"));
-    } else if variable.contains('=') {
-      return Err(anyhow!(
-        "invalid variable {variable}. variable cannot contain '='"
-      ));
-    }
-    // Interpolate in value
-    let value = if skip_secret_interp {
-      value.to_string()
-    } else {
-      svi::interpolate_variables(
-        value,
-        &periphery_config.secrets,
-        svi::Interpolator::DoubleBrackets,
-        true,
-      )
-      .context(
-        "Failed to interpolate periphery secrets into build secrets",
-      )?
-      .0
-    };
-    // Write the value to file to mount
-    let path = build_dir.join(variable);
-    tokio::fs::write(&path, value).await.with_context(|| {
-      format!(
-        "Failed to write build secret {variable} to {}",
-        path.display()
-      )
-    })?;
-    // Extend the command
-    write!(
-      &mut res,
-      " --secret id={variable},src={}",
-      path.display()
-    )
-    .with_context(|| {
-      format!(
-        "Failed to format build secret arguments for {variable}"
-      )
-    })?;
-  }
-  Ok(res)
-}
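`parse_secret_args` (also moved to the shared `build` module) writes each secret value to a file in the build directory and emits a `--secret id=...,src=...` flag per variable, following Docker's build-secrets mechanism linked in its doc comment. A hedged sketch of what one entry turns into; the variable name and paths are placeholders:

  // Hypothetical entry for illustration.
  let entry = EnvironmentVar {
    variable: "NPM_TOKEN".to_string(),
    value: "[[NPM_TOKEN]]".to_string(), // resolved from periphery secrets
  };
  // For build_dir "/builds/app", parse_secret_args writes the resolved value
  // to /builds/app/NPM_TOKEN and appends this to the docker build command:
  //   --secret id=NPM_TOKEN,src=/builds/app/NPM_TOKEN
  //
  // A Dockerfile then mounts it for a single RUN step without baking the
  // value into any image layer:
  //   RUN --mount=type=secret,id=NPM_TOKEN \
  //       NPM_TOKEN=$(cat /run/secrets/NPM_TOKEN) npm install
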
 //
 
 impl Resolve<super::Args> for PruneBuilders {
 
@@ -1,27 +1,34 @@
 use std::{fmt::Write, path::PathBuf};
 
 use anyhow::{Context, anyhow};
-use command::run_komodo_command;
+use command::{
+  run_komodo_command, run_komodo_command_with_sanitization,
+};
 use formatting::format_serror;
-use git::{GitRes, write_commit_file};
+use git::write_commit_file;
+use interpolate::Interpolator;
 use komodo_client::entities::{
-  FileContents, stack::ComposeProject, to_path_compatible_name,
+  FileContents, RepoExecutionResponse, all_logs_success,
+  stack::{
+    ComposeFile, ComposeProject, ComposeService,
+    ComposeServiceDeploy, StackServiceNames,
+  },
+  to_path_compatible_name,
   update::Log,
 };
-use periphery_client::api::{compose::*, git::RepoActionResponse};
+use periphery_client::api::compose::*;
 use resolver_api::Resolve;
 use serde::{Deserialize, Serialize};
 use tokio::fs;
 
 use crate::{
   compose::{
-    docker_compose,
-    up::compose_up,
-    write::{WriteStackRes, write_stack},
+    docker_compose, pull_or_clone_stack,
+    up::{maybe_login_registry, validate_files},
+    write::write_stack,
   },
   config::periphery_config,
   docker::docker_login,
-  helpers::{log_grep, pull_or_clone_stack},
+  helpers::{log_grep, parse_extra_args},
 };
 
 impl Resolve<super::Args> for ListComposeProjects {
@@ -241,7 +248,7 @@ impl Resolve<super::Args> for WriteCommitComposeContents {
   async fn resolve(
     self,
     _: &super::Args,
-  ) -> serror::Result<RepoActionResponse> {
+  ) -> serror::Result<RepoExecutionResponse> {
     let WriteCommitComposeContents {
       stack,
       repo,
@@ -267,39 +274,20 @@ impl Resolve<super::Args> for WriteCommitComposeContents {
       "Write Compose File".to_string()
     };
 
-    let GitRes {
-      logs,
-      path,
-      hash,
-      message,
-      ..
-    } = write_commit_file(
+    write_commit_file(
       &msg,
       &root,
       &file_path,
      &contents,
      &stack.config.branch,
    )
-    .await?;
-
-    Ok(RepoActionResponse {
-      logs,
-      path,
-      commit_hash: hash,
-      commit_message: message,
-      env_file_path: None,
-    })
+    .await
+    .map_err(Into::into)
   }
 }
 
 //
 
-impl WriteStackRes for &mut ComposePullResponse {
-  fn logs(&mut self) -> &mut Vec<Log> {
-    &mut self.logs
-  }
-}
-
 impl Resolve<super::Args> for ComposePull {
   #[instrument(
     name = "ComposePull",
@@ -314,27 +302,42 @@ impl Resolve<super::Args> for ComposePull {
     _: &super::Args,
   ) -> serror::Result<ComposePullResponse> {
     let ComposePull {
-      stack,
-      services,
+      mut stack,
       repo,
+      services,
       git_token,
       registry_token,
+      mut replacers,
     } = self;
 
     let mut res = ComposePullResponse::default();
 
-    let (run_directory, env_file_path, _replacers) =
-      match write_stack(&stack, repo.as_ref(), git_token, &mut res)
-        .await
-      {
-        Ok(res) => res,
-        Err(e) => {
-          res.logs.push(Log::error(
-            "Write Stack",
-            format_serror(&e.into()),
-          ));
-          return Ok(res);
-        }
-      };
+    let mut interpolator =
+      Interpolator::new(None, &periphery_config().secrets);
+    // Only interpolate Stack. Repo interpolation will be handled
+    // by the CloneRepo / PullOrCloneRepo call.
+    interpolator
+      .interpolate_stack(&mut stack)?
+      .push_logs(&mut res.logs);
+    replacers.extend(interpolator.secret_replacers);
+
+    let (run_directory, env_file_path) = match write_stack(
+      &stack,
+      repo.as_ref(),
+      git_token,
+      replacers.clone(),
+      &mut res,
+    )
+    .await
+    {
+      Ok(res) => res,
+      Err(e) => {
+        res
+          .logs
+          .push(Log::error("Write Stack", format_serror(&e.into())));
+        return Ok(res);
+      }
+    };
 
     // Canonicalize the path to ensure it exists, and is the cleanest path to the run directory.
     let run_directory = run_directory.canonicalize().context(
@@ -353,12 +356,18 @@ impl Resolve<super::Args> for ComposePull {
       })
       .collect::<Vec<_>>();
 
+    // Validate files
+    for (path, full_path) in &file_paths {
+      if !full_path.exists() {
+        return Err(anyhow!("Missing compose file at {path}").into());
+      }
+    }
+
+    maybe_login_registry(&stack, registry_token, &mut res.logs).await;
+    if !all_logs_success(&res.logs) {
+      return Ok(res);
+    }
+
     let docker_compose = docker_compose();
 
     let service_args = if services.is_empty() {
@@ -373,26 +382,6 @@ impl Resolve<super::Args> for ComposePull {
       stack.config.file_paths.join(" -f ")
     };
 
-    // Login to the registry to pull private images, if provider / account are set
-    if !stack.config.registry_provider.is_empty()
-      && !stack.config.registry_account.is_empty()
-    {
-      docker_login(
-        &stack.config.registry_provider,
-        &stack.config.registry_account,
-        registry_token.as_deref(),
-      )
-      .await
-      .with_context(|| {
-        format!(
-          "domain: {} | account: {}",
-          stack.config.registry_provider,
-          stack.config.registry_account
-        )
-      })
-      .context("failed to login to image registry")?;
-    }
-
     let env_file = env_file_path
       .map(|path| format!(" --env-file {path}"))
       .unwrap_or_default();
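ComposePull ends by assembling a single `docker compose` invocation from the project name, file args, and env-file flags built above. A hedged sketch of the final command shape for a stack with two compose files and an env file; the exact format string is not shown in this diff, and all values are placeholders:

  // Hypothetical values to show the command shape.
  let docker_compose = "docker compose";
  let project_name = "my-stack";
  let file_args = "compose.yaml -f compose.prod.yaml"; // joined with " -f "
  let env_file = " --env-file .env";
  let service_args = " api worker";

  let command = format!(
    "{docker_compose} -p {project_name} -f {file_args}{env_file} pull{service_args}",
  );
  // => "docker compose -p my-stack -f compose.yaml -f compose.prod.yaml \
  //     --env-file .env pull api worker"
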
@@ -439,30 +428,265 @@ impl Resolve<super::Args> for ComposeUp {
_: &super::Args,
) -> serror::Result<ComposeUpResponse> {
let ComposeUp {
stack,
services,
mut stack,
repo,
services,
git_token,
registry_token,
replacers,
mut replacers,
} = self;

let mut res = ComposeUpResponse::default();
if let Err(e) = compose_up(
stack,
services,
repo,

let mut interpolator =
Interpolator::new(None, &periphery_config().secrets);
// Only interpolate Stack. Repo interpolation will be handled
// by the CloneRepo / PullOrCloneRepo call.
interpolator
.interpolate_stack(&mut stack)?
.push_logs(&mut res.logs);
replacers.extend(interpolator.secret_replacers);

let (run_directory, env_file_path) = match write_stack(
&stack,
repo.as_ref(),
git_token,
registry_token,
replacers.clone(),
&mut res,
replacers,
)
.await
{
res.logs.push(Log::error(
"Compose Up - Failed",
format_serror(&e.into()),
));
Ok(res) => res,
Err(e) => {
res
.logs
.push(Log::error("Write Stack", format_serror(&e.into())));
return Ok(res);
}
};

// Canonicalize the path to ensure it exists, and is the cleanest path to the run directory.
let run_directory = run_directory.canonicalize().context(
"Failed to validate run directory on host after stack write (canonicalize error)",
)?;

validate_files(&stack, &run_directory, &mut res).await;
if !all_logs_success(&res.logs) {
return Ok(res);
}

maybe_login_registry(&stack, registry_token, &mut res.logs).await;
if !all_logs_success(&res.logs) {
return Ok(res);
}

// Pre deploy
if !stack.config.pre_deploy.is_none() {
let pre_deploy_path =
run_directory.join(&stack.config.pre_deploy.path);
if let Some(log) = run_komodo_command_with_sanitization(
"Pre Deploy",
pre_deploy_path.as_path(),
&stack.config.pre_deploy.command,
true,
&replacers,
)
.await
{
res.logs.push(log);
if !all_logs_success(&res.logs) {
return Ok(res);
}
};
}

let docker_compose = docker_compose();

let service_args = if services.is_empty() {
String::new()
} else {
format!(" {}", services.join(" "))
};

let file_args = if stack.config.file_paths.is_empty() {
String::from("compose.yaml")
} else {
stack.config.file_paths.join(" -f ")
};

// This will be the last project name, which is the one that needs to be destroyed.
// Might be different from the current project name, if user renames stack / changes to custom project name.
let last_project_name = stack.project_name(false);
let project_name = stack.project_name(true);

let env_file = env_file_path
.map(|path| format!(" --env-file {path}"))
.unwrap_or_default();

let additional_env_files = stack
.config
.additional_env_files
.iter()
.fold(String::new(), |mut output, file| {
let _ = write!(output, " --env-file {file}");
output
});

// Uses 'docker compose config' command to extract services (including image)
// after performing interpolation
{
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{additional_env_files}{env_file} config",
);
let Some(config_log) = run_komodo_command_with_sanitization(
"Compose Config",
run_directory.as_path(),
command,
false,
&replacers,
)
.await
else {
// Only reachable if the command is empty,
// which is not the case since it is provided above.
unreachable!()
};
if !config_log.success {
res.logs.push(config_log);
return Ok(res);
}
let compose =
serde_yaml::from_str::<ComposeFile>(&config_log.stdout)
.context("Failed to parse compose contents")?;
// Record sanitized compose config output
res.compose_config = Some(config_log.stdout);
for (
service_name,
ComposeService {
container_name,
deploy,
image,
},
) in compose.services
{
let image = image.unwrap_or_default();
match deploy {
Some(ComposeServiceDeploy {
replicas: Some(replicas),
}) if replicas > 1 => {
for i in 1..1 + replicas {
res.services.push(StackServiceNames {
container_name: format!(
"{project_name}-{service_name}-{i}"
),
service_name: format!("{service_name}-{i}"),
image: image.clone(),
});
}
}
_ => {
res.services.push(StackServiceNames {
container_name: container_name.unwrap_or_else(|| {
format!("{project_name}-{service_name}")
}),
service_name,
image,
});
}
}
}
}
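// For illustration (hypothetical values): a stack with project name "web",
// file_paths ["compose.yaml"], and a written env file produces roughly:
//   docker compose -p web -f compose.yaml --env-file .env config
// and a service "api" with deploy.replicas = 2 is recorded as services
// api-1 / api-2 with container names web-api-1 / web-api-2.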

if stack.config.run_build {
let build_extra_args =
parse_extra_args(&stack.config.build_extra_args);
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} build{build_extra_args}{service_args}",
);
let Some(log) = run_komodo_command_with_sanitization(
"Compose Build",
run_directory.as_path(),
command,
false,
&replacers,
)
.await
else {
unreachable!()
};
res.logs.push(log);
if !all_logs_success(&res.logs) {
return Ok(res);
}
}

// Pull images before deploying
if stack.config.auto_pull {
// Pull images before destroying to minimize downtime.
// If this fails, do not continue.
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} pull{service_args}",
);
let log = run_komodo_command(
"Compose Pull",
run_directory.as_ref(),
command,
)
.await;
res.logs.push(log);
if !all_logs_success(&res.logs) {
return Ok(res);
}
}

if stack.config.destroy_before_deploy
// Also check if project name changed, which also requires taking down.
|| last_project_name != project_name
{
// Take down the existing containers.
// This uses the previously deployed project name, to ensure the right stack is taken down.
crate::compose::down(&last_project_name, &services, &mut res)
.await
.context("failed to destroy existing containers")?;
}

// Run compose up
let extra_args = parse_extra_args(&stack.config.extra_args);
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} up -d{extra_args}{service_args}",
);

let Some(log) = run_komodo_command_with_sanitization(
"Compose Up",
run_directory.as_path(),
command,
false,
&replacers,
)
.await
else {
unreachable!()
};

res.deployed = log.success;
res.logs.push(log);

if res.deployed && !stack.config.post_deploy.is_none() {
let post_deploy_path =
run_directory.join(&stack.config.post_deploy.path);
if let Some(log) = run_komodo_command_with_sanitization(
"Post Deploy",
post_deploy_path.as_path(),
&stack.config.post_deploy.command,
true,
&replacers,
)
.await
{
res.logs.push(log);
};
}

Ok(res)
}
}

@@ -1,15 +1,20 @@
use anyhow::{Context, anyhow};
use anyhow::Context;
use command::run_komodo_command;
use futures::future::join_all;
use komodo_client::entities::{
docker::container::{Container, ContainerListItem, ContainerStats},
docker::{
container::{Container, ContainerListItem, ContainerStats},
stats::FullContainerStats,
},
update::Log,
};
use periphery_client::api::container::*;
use resolver_api::Resolve;

use crate::{
docker::{container_stats, docker_client, stop_container_command},
docker::{
docker_client, stats::get_container_stats, stop_container_command,
},
helpers::log_grep,
};

@@ -80,22 +85,37 @@ impl Resolve<super::Args> for GetContainerStats {
self,
_: &super::Args,
) -> serror::Result<ContainerStats> {
let error = anyhow!("no stats matching {}", self.name);
let mut stats = container_stats(Some(self.name)).await?;
let stats = stats.pop().ok_or(error)?;
let mut stats = get_container_stats(Some(self.name)).await?;
let stats =
stats.pop().context("No stats found for container")?;
Ok(stats)
}
}

//

impl Resolve<super::Args> for GetFullContainerStats {
#[instrument(name = "GetFullContainerStats", level = "debug")]
async fn resolve(
self,
_: &super::Args,
) -> serror::Result<FullContainerStats> {
docker_client()
.full_container_stats(&self.name)
.await
.map_err(Into::into)
}
}

//

impl Resolve<super::Args> for GetContainerStatsList {
#[instrument(name = "GetContainerStatsList", level = "debug")]
async fn resolve(
self,
_: &super::Args,
) -> serror::Result<Vec<ContainerStats>> {
Ok(container_stats(None).await?)
Ok(get_container_stats(None).await?)
}
}


@@ -1,8 +1,7 @@
use anyhow::Context;
use command::{
run_komodo_command, run_komodo_command_with_interpolation,
};
use command::run_komodo_command_with_sanitization;
use formatting::format_serror;
use interpolate::Interpolator;
use komodo_client::{
entities::{
EnvironmentVar,
@@ -36,12 +35,18 @@ impl Resolve<super::Args> for Deploy {
)]
async fn resolve(self, _: &super::Args) -> serror::Result<Log> {
let Deploy {
deployment,
mut deployment,
stop_signal,
stop_time,
registry_token,
replacers: core_replacers,
mut replacers,
} = self;

let mut interpolator =
Interpolator::new(None, &periphery_config().secrets);
interpolator.interpolate_deployment(&mut deployment)?;
replacers.extend(interpolator.secret_replacers);

let image = if let DeploymentImage::Image { image } =
&deployment.config.image
{
@@ -76,6 +81,7 @@ impl Resolve<super::Args> for Deploy {

let _ = pull_image(image).await;
debug!("image pulled");

let _ = (RemoveContainer {
name: deployment.name.clone(),
signal: stop_signal,
@@ -87,26 +93,22 @@ impl Resolve<super::Args> for Deploy {

let command = docker_run_command(&deployment, image)
.context("Unable to generate valid docker run command")?;
debug!("docker run command: {command}");

if deployment.config.skip_secret_interp {
Ok(run_komodo_command("Docker Run", None, command).await)
} else {
match run_komodo_command_with_interpolation(
"Docker Run",
None,
command,
false,
&periphery_config().secrets,
&core_replacers,
)
.await
{
Some(log) => Ok(log),
// The None case cannot be reached, as the command is always non-empty
None => unreachable!(),
}
}
let Some(log) = run_komodo_command_with_sanitization(
"Docker Run",
None,
command,
false,
&replacers,
)
.await
else {
// The None case only occurs for an empty command,
// which won't happen here since it is populated above.
unreachable!()
};

Ok(log)
}
}


@@ -1,16 +1,22 @@
use anyhow::Context;
use anyhow::{Context, anyhow};
use axum::http::StatusCode;
use formatting::format_serror;
use git::GitRes;
use komodo_client::entities::{CloneArgs, LatestCommit, update::Log};
use komodo_client::entities::{
DefaultRepoFolder, LatestCommit, update::Log,
};
use periphery_client::api::git::{
CloneRepo, DeleteRepo, GetLatestCommit, PullOrCloneRepo, PullRepo,
RenameRepo, RepoActionResponse,
CloneRepo, DeleteRepo, GetLatestCommit,
PeripheryRepoExecutionResponse, PullOrCloneRepo, PullRepo,
RenameRepo,
};
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use std::path::PathBuf;
use tokio::fs;

use crate::config::periphery_config;
use crate::{
config::periphery_config, git::handle_post_repo_execution,
};

impl Resolve<super::Args> for GetLatestCommit {
#[instrument(name = "GetLatestCommit", level = "debug")]
@@ -42,60 +48,33 @@ impl Resolve<super::Args> for CloneRepo {
async fn resolve(
self,
_: &super::Args,
) -> serror::Result<RepoActionResponse> {
) -> serror::Result<PeripheryRepoExecutionResponse> {
let CloneRepo {
args,
git_token,
environment,
env_file_path,
on_clone,
on_pull,
skip_secret_interp,
replacers,
} = self;
let CloneArgs {
provider, account, ..
} = &args;
let token = match (account, git_token) {
(None, _) => None,
(Some(_), Some(token)) => Some(token),
(Some(account), None) => Some(
crate::helpers::git_token(provider, account).map(ToString::to_string)
.with_context(
|| format!("Failed to get git token from periphery config | provider: {provider} | account: {account}")
)?,
),
};
let parent_dir = if args.is_build {
periphery_config().build_dir()
} else {
periphery_config().repo_dir()
};
git::clone(
args,
&parent_dir,
token,
&environment,

let token = crate::helpers::git_token(git_token, &args)?;
let root_repo_dir = default_folder(args.default_folder)?;

let res = git::clone(args, &root_repo_dir, token).await?;

handle_post_repo_execution(
res,
environment,
&env_file_path,
(!skip_secret_interp).then_some(&periphery_config().secrets),
&replacers,
on_clone,
on_pull,
skip_secret_interp,
replacers,
)
.await
.map(
|GitRes {
logs,
path,
hash,
message,
env_file_path,
}| {
RepoActionResponse {
logs,
path,
commit_hash: hash,
commit_message: message,
env_file_path,
}
},
)
.map_err(Into::into)
}
}
@@ -114,60 +93,32 @@ impl Resolve<super::Args> for PullRepo {
async fn resolve(
self,
_: &super::Args,
) -> serror::Result<RepoActionResponse> {
) -> serror::Result<PeripheryRepoExecutionResponse> {
let PullRepo {
args,
git_token,
environment,
env_file_path,
on_pull,
skip_secret_interp,
replacers,
} = self;
let CloneArgs {
provider, account, ..
} = &args;
let token = match (account, git_token) {
(None, _) => None,
(Some(_), Some(token)) => Some(token),
(Some(account), None) => Some(
crate::helpers::git_token(provider, account).map(ToString::to_string)
.with_context(
|| format!("Failed to get git token from periphery config | provider: {provider} | account: {account}")
)?,
),
};
let parent_dir = if args.is_build {
periphery_config().build_dir()
} else {
periphery_config().repo_dir()
};
git::pull(
args,
&parent_dir,
token,
&environment,

let token = crate::helpers::git_token(git_token, &args)?;
let parent_dir = default_folder(args.default_folder)?;

let res = git::pull(args, &parent_dir, token).await?;

handle_post_repo_execution(
res,
environment,
&env_file_path,
(!skip_secret_interp).then_some(&periphery_config().secrets),
&replacers,
None,
on_pull,
skip_secret_interp,
replacers,
)
.await
.map(
|GitRes {
logs,
path,
hash,
message,
env_file_path,
}| {
RepoActionResponse {
logs,
path,
commit_hash: hash,
commit_message: message,
env_file_path,
}
},
)
.map_err(Into::into)
}
}
@@ -186,60 +137,34 @@ impl Resolve<super::Args> for PullOrCloneRepo {
async fn resolve(
self,
_: &super::Args,
) -> serror::Result<RepoActionResponse> {
) -> serror::Result<PeripheryRepoExecutionResponse> {
let PullOrCloneRepo {
args,
git_token,
environment,
env_file_path,
on_clone,
on_pull,
skip_secret_interp,
replacers,
} = self;
let CloneArgs {
provider, account, ..
} = &args;
let token = match (account, git_token) {
(None, _) => None,
(Some(_), Some(token)) => Some(token),
(Some(account), None) => Some(
crate::helpers::git_token(provider, account).map(ToString::to_string)
.with_context(
|| format!("Failed to get git token from periphery config | provider: {provider} | account: {account}")
)?,
),
};
let parent_dir = if args.is_build {
periphery_config().build_dir()
} else {
periphery_config().repo_dir()
};
git::pull_or_clone(
args,
&parent_dir,
token,
&environment,

let token = crate::helpers::git_token(git_token, &args)?;
let parent_dir = default_folder(args.default_folder)?;

let (res, cloned) =
git::pull_or_clone(args, &parent_dir, token).await?;

handle_post_repo_execution(
res,
environment,
&env_file_path,
(!skip_secret_interp).then_some(&periphery_config().secrets),
&replacers,
cloned.then_some(on_clone).flatten(),
on_pull,
skip_secret_interp,
replacers,
)
.await
.map(
|GitRes {
logs,
path,
hash,
message,
env_file_path,
}| {
RepoActionResponse {
logs,
path,
commit_hash: hash,
commit_message: message,
env_file_path,
}
},
)
.map_err(Into::into)
}
}
@@ -292,3 +217,21 @@ impl Resolve<super::Args> for DeleteRepo {
Ok(log)
}
}

//

fn default_folder(
default_folder: DefaultRepoFolder,
) -> serror::Result<PathBuf> {
match default_folder {
DefaultRepoFolder::Stacks => Ok(periphery_config().stack_dir()),
DefaultRepoFolder::Builds => Ok(periphery_config().build_dir()),
DefaultRepoFolder::Repos => Ok(periphery_config().repo_dir()),
DefaultRepoFolder::NotApplicable => {
Err(
anyhow!("The clone args should not have a default_folder of NotApplicable when using this method.")
.status_code(StatusCode::BAD_REQUEST)
)
}
}
}
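// For illustration (hypothetical directories): DefaultRepoFolder::Stacks
// resolves under the configured stack_dir (e.g. /etc/komodo/stacks), Builds
// under build_dir, Repos under repo_dir, while NotApplicable is rejected
// with a 400 before any clone is attempted.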

@@ -92,6 +92,7 @@ pub enum PeripheryRequest {
GetContainerLogSearch(GetContainerLogSearch),
GetContainerStats(GetContainerStats),
GetContainerStatsList(GetContainerStatsList),
GetFullContainerStats(GetFullContainerStats),

// Container (Write)
Deploy(Deploy),

132
bin/periphery/src/build.rs
Normal file
@@ -0,0 +1,132 @@
use std::{
fmt::Write,
path::{Path, PathBuf},
};

use anyhow::{Context, anyhow};
use formatting::format_serror;
use komodo_client::{
entities::{EnvironmentVar, Version, update::Log},
parsers::QUOTE_PATTERN,
};

pub async fn write_dockerfile(
build_path: &Path,
dockerfile_path: &str,
dockerfile: &str,
logs: &mut Vec<Log>,
) {
if let Err(e) = async {
if dockerfile.is_empty() {
return Err(anyhow!("UI Defined dockerfile is empty"));
}

let full_dockerfile_path = build_path
.join(dockerfile_path)
.components()
.collect::<PathBuf>();

// Ensure parent directory exists
if let Some(parent) = full_dockerfile_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent)
.await
.with_context(|| format!("Failed to initialize dockerfile parent directory {parent:?}"))?;
}
}

tokio::fs::write(&full_dockerfile_path, dockerfile).await.with_context(|| {
format!(
"Failed to write dockerfile contents to {full_dockerfile_path:?}"
)
})?;

logs.push(Log::simple(
"Write Dockerfile",
format!(
"Dockerfile contents written to {full_dockerfile_path:?}"
),
));

anyhow::Ok(())
}.await {
logs.push(Log::error("Write Dockerfile", format_serror(&e.into())));
}
}

pub fn image_tags(
image_name: &str,
custom_tag: &str,
version: &Version,
additional: &[String],
) -> String {
let Version { major, minor, .. } = version;
let custom_tag = if custom_tag.is_empty() {
String::new()
} else {
format!("-{custom_tag}")
};
let additional = additional
.iter()
.map(|tag| format!(" -t {image_name}:{tag}{custom_tag}"))
.collect::<Vec<_>>()
.join("");
format!(
" -t {image_name}:latest{custom_tag} -t {image_name}:{version}{custom_tag} -t {image_name}:{major}.{minor}{custom_tag} -t {image_name}:{major}{custom_tag}{additional}",
)
}
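// For illustration (hypothetical values): image_tags("registry/app", "", &v1.2.3, &[])
// yields " -t registry/app:latest -t registry/app:1.2.3 -t registry/app:1.2 -t registry/app:1",
// and a non-empty custom_tag such as "beta" suffixes every tag, e.g. ":latest-beta".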

pub fn parse_build_args(build_args: &[EnvironmentVar]) -> String {
build_args
.iter()
.map(|p| {
if p.value.starts_with(QUOTE_PATTERN)
&& p.value.ends_with(QUOTE_PATTERN)
{
// If the value is already wrapped in quotes, don't wrap it again
format!(" --build-arg {}={}", p.variable, p.value)
} else {
format!(" --build-arg {}=\"{}\"", p.variable, p.value)
}
})
.collect::<Vec<_>>()
.join("")
}
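// For illustration (hypothetical values): an arg KEY=value becomes
// ` --build-arg KEY="value"`, while an already-quoted value is passed
// through unchanged.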

/// <https://docs.docker.com/build/building/secrets/#using-build-secrets>
pub async fn parse_secret_args(
secret_args: &[EnvironmentVar],
build_dir: &Path,
) -> anyhow::Result<String> {
let mut res = String::new();
for EnvironmentVar { variable, value } in secret_args {
// Check edge cases
if variable.is_empty() {
return Err(anyhow!("secret variable cannot be empty string"));
} else if variable.contains('=') {
return Err(anyhow!(
"invalid variable {variable}. variable cannot contain '='"
));
}
// Write the value to file to mount
let path = build_dir.join(variable);
tokio::fs::write(&path, value).await.with_context(|| {
format!(
"Failed to write build secret {variable} to {}",
path.display()
)
})?;
// Extend the command
write!(
&mut res,
" --secret id={variable},src={}",
path.display()
)
.with_context(|| {
format!(
"Failed to format build secret arguments for {variable}"
)
})?;
}
Ok(res)
}
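// For illustration (hypothetical values): a secret MY_TOKEN written to
// /build/MY_TOKEN produces ` --secret id=MY_TOKEN,src=/build/MY_TOKEN`,
// which a Dockerfile step can consume via
// `RUN --mount=type=secret,id=MY_TOKEN cat /run/secrets/MY_TOKEN`.
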
@@ -1,6 +1,15 @@
use std::path::PathBuf;

use anyhow::anyhow;
use command::run_komodo_command;
use periphery_client::api::compose::ComposeUpResponse;
use komodo_client::entities::{
RepoExecutionArgs, repo::Repo, stack::Stack,
to_path_compatible_name,
};
use periphery_client::api::{
compose::ComposeUpResponse, git::PullOrCloneRepo,
};
use resolver_api::Resolve;

use crate::config::periphery_config;

@@ -15,7 +24,7 @@ pub fn docker_compose() -> &'static str {
}
}

async fn compose_down(
pub async fn down(
project: &str,
services: &[String],
res: &mut ComposeUpResponse,
@@ -42,3 +51,64 @@ async fn compose_down(

Ok(())
}

/// Only for git repo based Stacks.
/// Returns the path to the root directory of the stack repo.
///
/// Stack and Repo environment, on_clone, and on_pull are all ignored.
pub async fn pull_or_clone_stack(
stack: &Stack,
repo: Option<&Repo>,
git_token: Option<String>,
) -> anyhow::Result<PathBuf> {
if stack.config.files_on_host {
return Err(anyhow!(
"Wrong method called for files on host stack"
));
}
if repo.is_none() && stack.config.repo.is_empty() {
return Err(anyhow!("Repo is not configured"));
}

let (root, mut args) = if let Some(repo) = repo {
let root = periphery_config()
.repo_dir()
.join(to_path_compatible_name(&repo.name))
.join(&repo.config.path)
.components()
.collect::<PathBuf>();
let args: RepoExecutionArgs = repo.into();
(root, args)
} else {
let root = periphery_config()
.stack_dir()
.join(to_path_compatible_name(&stack.name))
.join(&stack.config.clone_path)
.components()
.collect::<PathBuf>();
let args: RepoExecutionArgs = stack.into();
(root, args)
};
args.destination = Some(root.display().to_string());

let git_token = crate::helpers::git_token(git_token, &args)?;

PullOrCloneRepo {
args,
git_token,
// All the extra pull functions
// (env, on clone, on pull)
// are disabled with this method.
environment: Default::default(),
env_file_path: Default::default(),
on_clone: Default::default(),
on_pull: Default::default(),
skip_secret_interp: Default::default(),
replacers: Default::default(),
}
.resolve(&crate::api::Args)
.await
.map_err(|e| e.error)?;

Ok(root)
}
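// For illustration (hypothetical names): a linked Repo "my-repo" with path
// "sub" is pulled to <repo_dir>/my-repo/sub, while a stack-inline repo for
// stack "my-stack" with clone_path "src" lands in <stack_dir>/my-stack/src.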

@@ -1,63 +1,20 @@
use std::{fmt::Write, path::PathBuf};
use std::path::{Path, PathBuf};

use anyhow::{Context, anyhow};
use command::{
run_komodo_command, run_komodo_command_multiline,
run_komodo_command_with_interpolation,
};
use formatting::format_serror;
use komodo_client::entities::{
FileContents, all_logs_success,
repo::Repo,
stack::{
ComposeFile, ComposeService, ComposeServiceDeploy, Stack,
StackServiceNames,
},
update::Log,
FileContents, stack::Stack, update::Log,
};
use periphery_client::api::compose::ComposeUpResponse;
use tokio::fs;

use crate::{
compose::compose_down, config::periphery_config,
docker::docker_login, helpers::parse_extra_args,
};
use crate::docker::docker_login;

use super::{docker_compose, write::write_stack};

/// If this fn returns Err, the caller of `compose_up` has to write the result to the log before returning.
pub async fn compose_up(
stack: Stack,
services: Vec<String>,
repo: Option<Repo>,
git_token: Option<String>,
registry_token: Option<String>,
pub async fn validate_files(
stack: &Stack,
run_directory: &Path,
res: &mut ComposeUpResponse,
core_replacers: Vec<(String, String)>,
) -> anyhow::Result<()> {
// Write the stack to local disk. For repos, will first delete any existing folder to ensure fresh deploy.
// Will also set additional fields on the response.
// Use the env_file_path in the compose command.
let (run_directory, env_file_path, periphery_replacers) =
write_stack(&stack, repo.as_ref(), git_token, &mut *res)
.await
.context("Failed to write / clone compose file")?;

let replacers =
if let Some(periphery_replacers) = periphery_replacers {
core_replacers
.into_iter()
.chain(periphery_replacers)
.collect()
} else {
core_replacers
};

// Canonicalize the path to ensure it exists, and is the cleanest path to the run directory.
let run_directory = run_directory.canonicalize().context(
"Failed to validate run directory on host after stack write (canonicalize error)",
)?;

) {
let file_paths = stack
.file_paths()
.iter()
@@ -76,64 +33,56 @@ pub async fn compose_up(
}
}
if !res.missing_files.is_empty() {
return Err(anyhow!(
"A compose file doesn't exist after writing stack. Ensure the run_directory and file_paths are correct."
res.logs.push(Log::error(
"Validate Files",
format_serror(
&anyhow!(
"Ensure the run_directory and file_paths are correct."
)
.context("A compose file doesn't exist after writing stack.")
.into(),
),
));
return;
}

for (path, full_path) in &file_paths {
let file_contents = match fs::read_to_string(&full_path)
.await
.with_context(|| {
let file_contents =
match fs::read_to_string(&full_path).await.with_context(|| {
format!(
"failed to read compose file contents at {full_path:?}"
"Failed to read compose file contents at {full_path:?}"
)
}) {
Ok(res) => res,
Err(e) => {
let error = format_serror(&e.into());
res
.logs
.push(Log::error("read compose file", error.clone()));
// This should only happen for repo stacks, ie remote error
res.remote_errors.push(FileContents {
path: path.to_string(),
contents: error,
});
return Err(anyhow!(
"failed to read compose file at {full_path:?}, stopping run"
));
}
};
Ok(res) => res,
Err(e) => {
let error = format_serror(&e.into());
res
.logs
.push(Log::error("Read Compose File", error.clone()));
// This should only happen for repo stacks, ie remote error
res.remote_errors.push(FileContents {
path: path.to_string(),
contents: error,
});
return;
}
};
res.file_contents.push(FileContents {
path: path.to_string(),
contents: file_contents,
});
}
}

let docker_compose = docker_compose();

let service_args = if services.is_empty() {
String::new()
} else {
format!(" {}", services.join(" "))
};

let file_args = if stack.config.file_paths.is_empty() {
String::from("compose.yaml")
} else {
stack.config.file_paths.join(" -f ")
};
// This will be the last project name, which is the one that needs to be destroyed.
// Might be different from the current project name, if user renames stack / changes to custom project name.
let last_project_name = stack.project_name(false);
let project_name = stack.project_name(true);

// Login to the registry to pull private images, if provider / account are set
pub async fn maybe_login_registry(
stack: &Stack,
registry_token: Option<String>,
logs: &mut Vec<Log>,
) {
if !stack.config.registry_provider.is_empty()
&& !stack.config.registry_account.is_empty()
{
docker_login(
if let Err(e) = docker_login(
&stack.config.registry_provider,
&stack.config.registry_account,
registry_token.as_deref(),
@@ -141,245 +90,16 @@ pub async fn compose_up(
.await
.with_context(|| {
format!(
"domain: {} | account: {}",
"Domain: '{}' | Account: '{}'",
stack.config.registry_provider, stack.config.registry_account
)
})
.context("failed to login to image registry")?;
}

let env_file = env_file_path
.map(|path| format!(" --env-file {path}"))
.unwrap_or_default();

let additional_env_files = stack
.config
.additional_env_files
.iter()
.fold(String::new(), |mut output, file| {
let _ = write!(output, " --env-file {file}");
output
});

// Pre deploy command
let pre_deploy_path =
run_directory.join(&stack.config.pre_deploy.path);
if let Some(log) = if stack.config.skip_secret_interp {
run_komodo_command_multiline(
"Pre Deploy",
pre_deploy_path.as_ref(),
&stack.config.pre_deploy.command,
)
.await
} else {
run_komodo_command_with_interpolation(
"Pre Deploy",
pre_deploy_path.as_ref(),
&stack.config.pre_deploy.command,
true,
&periphery_config().secrets,
&replacers,
)
.await
} {
res.logs.push(log);
}
if !all_logs_success(&res.logs) {
return Err(anyhow!(
"Failed at running pre_deploy command, stopping the run."
));
}

// Uses 'docker compose config' command to extract services (including image)
// after performing interpolation
{
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{additional_env_files}{env_file} config",
);
let config_log = run_komodo_command(
"Compose Config",
run_directory.as_ref(),
command,
)
.await;
if !config_log.success {
res.logs.push(config_log);
return Err(anyhow!(
"Failed to validate compose files, stopping the run."
));
}
// Record sanitized compose config output
res.compose_config =
Some(svi::replace_in_string(&config_log.stdout, &replacers));
let compose =
serde_yaml::from_str::<ComposeFile>(&config_log.stdout)
.context("Failed to parse compose contents")?;
for (
service_name,
ComposeService {
container_name,
deploy,
image,
},
) in compose.services
.context("Failed to login to image registry")
{
let image = image.unwrap_or_default();
match deploy {
Some(ComposeServiceDeploy {
replicas: Some(replicas),
}) if replicas > 1 => {
for i in 1..1 + replicas {
res.services.push(StackServiceNames {
container_name: format!(
"{project_name}-{service_name}-{i}"
),
service_name: format!("{service_name}-{i}"),
image: image.clone(),
});
}
}
_ => {
res.services.push(StackServiceNames {
container_name: container_name.unwrap_or_else(|| {
format!("{project_name}-{service_name}")
}),
service_name,
image,
});
}
}
}
}

// Build images before deploying.
// If this fails, do not continue.
if stack.config.run_build {
let build_extra_args =
parse_extra_args(&stack.config.build_extra_args);
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} build{build_extra_args}{service_args}",
);
if stack.config.skip_secret_interp {
let log = run_komodo_command(
"Compose Build",
run_directory.as_ref(),
command,
)
.await;
res.logs.push(log);
} else if let Some(log) = run_komodo_command_with_interpolation(
"Compose Build",
run_directory.as_ref(),
command,
false,
&periphery_config().secrets,
&replacers,
)
.await
{
res.logs.push(log);
}

if !all_logs_success(&res.logs) {
return Err(anyhow!(
"Failed to build required images, stopping the run."
logs.push(Log::error(
"Login to Registry",
format_serror(&e.into()),
));
}
}

// Pull images before deploying
if stack.config.auto_pull {
// Pull images before destroying to minimize downtime.
// If this fails, do not continue.
let log = run_komodo_command(
"Compose Pull",
run_directory.as_ref(),
format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} pull{service_args}",
),
)
.await;

res.logs.push(log);

if !all_logs_success(&res.logs) {
return Err(anyhow!(
"Failed to pull required images, stopping the run."
));
}
}

if stack.config.destroy_before_deploy
// Also check if project name changed, which also requires taking down.
|| last_project_name != project_name
{
// Take down the existing containers.
// This uses the previously deployed project name, to ensure the right stack is taken down.
compose_down(&last_project_name, &services, res)
.await
.context("failed to destroy existing containers")?;
}

// Run compose up
let extra_args = parse_extra_args(&stack.config.extra_args);
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} up -d{extra_args}{service_args}",
);

let log = if stack.config.skip_secret_interp {
run_komodo_command("Compose Up", run_directory.as_ref(), command)
.await
} else {
match run_komodo_command_with_interpolation(
"Compose Up",
run_directory.as_ref(),
command,
false,
&periphery_config().secrets,
&replacers,
)
.await
{
Some(log) => log,
// The command is definitely non-empty, so the result will never be None.
None => unreachable!(),
}
};

res.deployed = log.success;

// push the compose up command logs to keep the correct order
res.logs.push(log);

if res.deployed {
let post_deploy_path =
run_directory.join(&stack.config.post_deploy.path);
if let Some(log) = if stack.config.skip_secret_interp {
run_komodo_command_multiline(
"Post Deploy",
post_deploy_path.as_ref(),
&stack.config.post_deploy.command,
)
.await
} else {
run_komodo_command_with_interpolation(
"Post Deploy",
post_deploy_path.as_ref(),
&stack.config.post_deploy.command,
true,
&periphery_config().secrets,
&replacers,
)
.await
} {
res.logs.push(log)
}
if !all_logs_success(&res.logs) {
return Err(anyhow!(
"Failed at running post_deploy command, stopping the run."
));
}
}

Ok(())
}

@@ -2,20 +2,18 @@ use std::path::PathBuf;

use anyhow::{Context, anyhow};
use formatting::format_serror;
use git::environment;
use komodo_client::entities::{
CloneArgs, EnvironmentVar, FileContents, all_logs_success,
environment_vars_from_str, repo::Repo, stack::Stack,
to_path_compatible_name, update::Log,
FileContents, RepoExecutionArgs, all_logs_success, repo::Repo,
stack::Stack, to_path_compatible_name, update::Log,
};
use periphery_client::api::{
compose::ComposeUpResponse,
git::{CloneRepo, PullOrCloneRepo, RepoActionResponse},
compose::{ComposePullResponse, ComposeUpResponse},
git::{CloneRepo, PullOrCloneRepo},
};
use resolver_api::Resolve;
use tokio::fs;

use crate::config::periphery_config;
use crate::{config::periphery_config, helpers};

pub trait WriteStackRes {
fn logs(&mut self) -> &mut Vec<Log>;
@@ -39,93 +37,47 @@ impl WriteStackRes for &mut ComposeUpResponse {
}
}

impl WriteStackRes for &mut ComposePullResponse {
fn logs(&mut self) -> &mut Vec<Log> {
&mut self.logs
}
}

/// Either writes the stack file_contents to a file, or clones the repo.
/// Performs variable replacement on env and writes the env file.
/// Assumes all interpolation is already complete.
/// Returns (run_directory, env_file_path, periphery_replacers)
pub async fn write_stack<'a>(
stack: &'a Stack,
repo: Option<&Repo>,
git_token: Option<String>,
mut res: impl WriteStackRes,
replacers: Vec<(String, String)>,
res: impl WriteStackRes,
) -> anyhow::Result<(
// run_directory
PathBuf,
// env_file_path
Option<&'a str>,
// periphery_replacers
Option<Vec<(String, String)>>,
)> {
let (env_interpolated, env_replacers) =
if stack.config.skip_secret_interp {
(stack.config.environment.clone(), None)
} else {
let (environment, replacers) = svi::interpolate_variables(
&stack.config.environment,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"Failed to interpolate Periphery secrets into Environment",
)?;
(environment, Some(replacers))
};
match &env_replacers {
Some(replacers) if !replacers.is_empty() => {
res.logs().push(Log::simple(
"Interpolate - Environment (Periphery)",
replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
))
}
_ => {}
}

let env_vars = environment_vars_from_str(&env_interpolated)
.context("Invalid environment variables")?;

if stack.config.files_on_host {
write_stack_files_on_host(stack, env_vars, env_replacers, res)
.await
write_stack_files_on_host(stack, res).await
} else if let Some(repo) = repo {
write_stack_linked_repo(
stack,
repo,
git_token,
env_vars,
env_replacers,
res,
)
.await
write_stack_linked_repo(stack, repo, git_token, replacers, res)
.await
} else if !stack.config.repo.is_empty() {
write_stack_inline_repo(
stack,
git_token,
env_vars,
env_replacers,
res,
)
.await
write_stack_inline_repo(stack, git_token, res).await
} else {
write_stack_ui_defined(stack, env_vars, env_replacers, res).await
write_stack_ui_defined(stack, res).await
}
}

async fn write_stack_files_on_host(
stack: &Stack,
env_vars: Vec<EnvironmentVar>,
env_replacers: Option<Vec<(String, String)>>,
mut res: impl WriteStackRes,
) -> anyhow::Result<(
// run_directory
PathBuf,
// env_file_path
Option<&str>,
// periphery_replacers
Option<Vec<(String, String)>>,
)> {
let run_directory = periphery_config()
.stack_dir()
@@ -133,38 +85,38 @@ async fn write_stack_files_on_host(
.join(&stack.config.run_directory)
.components()
.collect::<PathBuf>();
let env_file_path = environment::write_file_simple(
&env_vars,
let env_file_path = environment::write_env_file(
&stack.config.env_vars()?,
run_directory.as_path(),
&stack.config.env_file_path,
run_directory.as_ref(),
res.logs(),
)
.await?;
Ok((
run_directory,
// Env file paths are expected to be already relative to run directory,
// so need to pass original env_file_path here.
env_file_path
.is_some()
.then_some(&stack.config.env_file_path),
env_replacers,
))
.await;
if all_logs_success(res.logs()) {
Ok((
run_directory,
// Env file paths are expected to be already relative to run directory,
// so need to pass original env_file_path here.
env_file_path
.is_some()
.then_some(&stack.config.env_file_path),
))
} else {
Err(anyhow!("Failed to write env file, stopping run."))
}
}

async fn write_stack_linked_repo<'a>(
stack: &'a Stack,
repo: &Repo,
git_token: Option<String>,
env_vars: Vec<EnvironmentVar>,
env_replacers: Option<Vec<(String, String)>>,
res: impl WriteStackRes,
replacers: Vec<(String, String)>,
mut res: impl WriteStackRes,
) -> anyhow::Result<(
// run_directory
PathBuf,
// env_file_path
Option<&'a str>,
// periphery_replacers
Option<Vec<(String, String)>>,
)> {
let root = periphery_config()
.repo_dir()
@@ -173,35 +125,95 @@ async fn write_stack_linked_repo<'a>(
.components()
.collect::<PathBuf>();

let mut args: CloneArgs = repo.into();
let mut args: RepoExecutionArgs = repo.into();
// Set the clone destination to the one created for this run
args.destination = Some(root.display().to_string());

write_stack_repo(
stack,
args,
root,
git_token,
env_vars,
env_replacers,
res,
let git_token = stack_git_token(git_token, &args, &mut res)?;

let env_file_path = root
.join(&repo.config.env_file_path)
.components()
.collect::<PathBuf>()
.display()
.to_string();

let on_clone = (!repo.config.on_clone.is_none())
.then_some(repo.config.on_clone.clone());
let on_pull = (!repo.config.on_pull.is_none())
.then_some(repo.config.on_pull.clone());

let clone_res = if stack.config.reclone {
CloneRepo {
args,
git_token,
environment: repo.config.env_vars()?,
env_file_path,
on_clone,
on_pull,
skip_secret_interp: repo.config.skip_secret_interp,
replacers,
}
.resolve(&crate::api::Args)
.await
.map_err(|e| e.error)?
} else {
PullOrCloneRepo {
args,
git_token,
environment: repo.config.env_vars()?,
env_file_path,
on_clone,
on_pull,
skip_secret_interp: repo.config.skip_secret_interp,
replacers,
}
.resolve(&crate::api::Args)
.await
.map_err(|e| e.error)?
};

res.logs().extend(clone_res.res.logs);
res.set_commit_hash(clone_res.res.commit_hash);
res.set_commit_message(clone_res.res.commit_message);

if !all_logs_success(res.logs()) {
return Ok((root, None));
}

let run_directory = root
.join(&stack.config.run_directory)
.components()
.collect::<PathBuf>();

let env_file_path = environment::write_env_file(
&stack.config.env_vars()?,
run_directory.as_path(),
&stack.config.env_file_path,
res.logs(),
)
.await
.await;
if !all_logs_success(res.logs()) {
return Err(anyhow!("Failed to write env file, stopping run"));
}

Ok((
run_directory,
env_file_path
.is_some()
.then_some(&stack.config.env_file_path),
))
}

async fn write_stack_inline_repo(
stack: &Stack,
git_token: Option<String>,
env_vars: Vec<EnvironmentVar>,
env_replacers: Option<Vec<(String, String)>>,
res: impl WriteStackRes,
mut res: impl WriteStackRes,
) -> anyhow::Result<(
// run_directory
PathBuf,
// env_file_path
Option<&str>,
// periphery_replacers
Option<Vec<(String, String)>>,
)> {
let root = periphery_config()
.stack_dir()
@@ -210,160 +222,82 @@ async fn write_stack_inline_repo(
.components()
.collect::<PathBuf>();

let mut args: CloneArgs = stack.into();
let mut args: RepoExecutionArgs = stack.into();
// Set the clone destination to the one created for this run
args.destination = Some(root.display().to_string());

write_stack_repo(
stack,
args,
root,
git_token,
env_vars,
env_replacers,
res,
)
.await
}
let git_token = stack_git_token(git_token, &args, &mut res)?;

async fn write_stack_repo(
stack: &Stack,
args: CloneArgs,
root: PathBuf,
git_token: Option<String>,
env_vars: Vec<EnvironmentVar>,
env_replacers: Option<Vec<(String, String)>>,
mut res: impl WriteStackRes,
) -> anyhow::Result<(
// run_directory
PathBuf,
// env_file_path
Option<&str>,
// periphery_replacers
Option<Vec<(String, String)>>,
)> {
let git_token = match git_token {
Some(token) => Some(token),
None => {
if let Some(account) = &args.account {
match crate::helpers::git_token(
args.account.as_deref().unwrap_or("github.com"),
account,
) {
Ok(token) => Some(token.to_string()),
Err(e) => {
let error = format_serror(&e.into());
res
.logs()
.push(Log::error("no git token", error.clone()));
res.add_remote_error(FileContents {
path: Default::default(),
contents: error,
});
return Err(anyhow!(
"failed to find required git token, stopping run"
));
}
}
} else {
None
}
}
};

let env_file_path = root
.join(&stack.config.run_directory)
.join(if stack.config.env_file_path.is_empty() {
".env"
} else {
&stack.config.env_file_path
})
.components()
.collect::<PathBuf>()
.display()
.to_string();

let clone_or_pull_res = if stack.config.reclone {
let clone_res = if stack.config.reclone {
CloneRepo {
args,
git_token,
environment: env_vars,
env_file_path,
// Env has already been interpolated above
skip_secret_interp: true,
environment: Default::default(),
env_file_path: Default::default(),
on_clone: Default::default(),
on_pull: Default::default(),
skip_secret_interp: Default::default(),
replacers: Default::default(),
}
.resolve(&crate::api::Args)
.await
.map_err(|e| e.error)?
} else {
PullOrCloneRepo {
args,
git_token,
environment: env_vars,
env_file_path,
// Env has already been interpolated above
skip_secret_interp: true,
environment: Default::default(),
env_file_path: Default::default(),
on_clone: Default::default(),
on_pull: Default::default(),
skip_secret_interp: Default::default(),
replacers: Default::default(),
}
.resolve(&crate::api::Args)
.await
.map_err(|e| e.error)?
};

let RepoActionResponse {
logs,
commit_hash,
commit_message,
env_file_path,
path: _,
} = match clone_or_pull_res {
Ok(res) => res,
Err(e) => {
let error = format_serror(
&e.error.context("Failed to pull stack repo").into(),
);
res
.logs()
.push(Log::error("Pull Stack Repo", error.clone()));
res.add_remote_error(FileContents {
path: Default::default(),
contents: error,
});
return Err(anyhow!("Failed to pull stack repo, stopping run"));
}
};

res.logs().extend(logs);
res.set_commit_hash(commit_hash);
res.set_commit_message(commit_message);
res.logs().extend(clone_res.res.logs);
res.set_commit_hash(clone_res.res.commit_hash);
res.set_commit_message(clone_res.res.commit_message);

if !all_logs_success(res.logs()) {
return Err(anyhow!("Stopped after repo pull failure"));
return Ok((root, None));
}

let run_directory = root
.join(&stack.config.run_directory)
.components()
.collect::<PathBuf>();

let env_file_path = environment::write_env_file(
&stack.config.env_vars()?,
run_directory.as_path(),
&stack.config.env_file_path,
res.logs(),
)
.await;
if !all_logs_success(res.logs()) {
return Err(anyhow!("Failed to write env file, stopping run"));
}

Ok((
root
.join(&stack.config.run_directory)
.components()
.collect(),
run_directory,
env_file_path
.is_some()
.then_some(&stack.config.env_file_path),
env_replacers,
))
}

async fn write_stack_ui_defined(
stack: &Stack,
env_vars: Vec<EnvironmentVar>,
env_replacers: Option<Vec<(String, String)>>,
mut res: impl WriteStackRes,
) -> anyhow::Result<(
// run_directory
PathBuf,
// env_file_path
Option<&str>,
// periphery_replacers
Option<Vec<(String, String)>>,
)> {
if stack.config.file_contents.trim().is_empty() {
return Err(anyhow!(
@@ -383,13 +317,17 @@ async fn write_stack_ui_defined(
|
||||
"failed to create stack run directory at {run_directory:?}"
|
||||
)
|
||||
})?;
|
||||
let env_file_path = environment::write_file_simple(
|
||||
&env_vars,
|
||||
let env_file_path = environment::write_env_file(
|
||||
&stack.config.env_vars()?,
|
||||
run_directory.as_path(),
|
||||
&stack.config.env_file_path,
|
||||
run_directory.as_ref(),
|
||||
res.logs(),
|
||||
)
|
||||
.await?;
|
||||
.await;
|
||||
if !all_logs_success(res.logs()) {
|
||||
return Err(anyhow!("Failed to write env file, stopping run"));
|
||||
}
|
||||
|
||||
let file_path = run_directory
|
||||
.join(
|
||||
stack
|
||||
@@ -403,33 +341,7 @@ async fn write_stack_ui_defined(
|
||||
.components()
|
||||
.collect::<PathBuf>();
|
||||
|
||||
let (file_contents, file_replacers) = if !stack
|
||||
.config
|
||||
.skip_secret_interp
|
||||
{
|
||||
let (contents, replacers) = svi::interpolate_variables(
|
||||
&stack.config.file_contents,
|
||||
&periphery_config().secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
true,
|
||||
)
|
||||
.context("failed to interpolate secrets into file contents")?;
|
||||
if !replacers.is_empty() {
|
||||
res.logs().push(Log::simple(
|
||||
"Interpolate - Compose file (Periphery)",
|
||||
replacers
|
||||
.iter()
|
||||
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
));
|
||||
}
|
||||
(contents, Some(replacers))
|
||||
} else {
|
||||
(stack.config.file_contents.clone(), None)
|
||||
};
|
||||
|
||||
fs::write(&file_path, &file_contents)
|
||||
fs::write(&file_path, &stack.config.file_contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write compose file to {file_path:?}")
|
||||
@@ -440,13 +352,23 @@ async fn write_stack_ui_defined(
|
||||
env_file_path
|
||||
.is_some()
|
||||
.then_some(&stack.config.env_file_path),
|
||||
match (env_replacers, file_replacers) {
|
||||
(Some(env_replacers), Some(file_replacers)) => Some(
|
||||
env_replacers.into_iter().chain(file_replacers).collect(),
|
||||
),
|
||||
(Some(env_replacers), None) => Some(env_replacers),
|
||||
(None, Some(file_replacers)) => Some(file_replacers),
|
||||
(None, None) => None,
|
||||
},
|
||||
))
|
||||
}
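
A minimal sketch of the double-bracket secret interpolation used above, assuming the svi API exactly as it is called in this diff (the secrets map and compose contents here are hypothetical):

fn interpolate_example() -> anyhow::Result<()> {
  use std::collections::HashMap;
  let mut secrets = HashMap::new();
  secrets.insert("DB_PASSWORD".to_string(), "hunter2".to_string());
  let compose = "POSTGRES_PASSWORD=[[DB_PASSWORD]]";
  // Returns the interpolated contents plus replacer pairs, which are
  // later used to sanitize the secret values back out of command logs.
  let (contents, replacers) = svi::interpolate_variables(
    compose,
    &secrets,
    svi::Interpolator::DoubleBrackets,
    true,
  )?;
  assert_eq!(contents, "POSTGRES_PASSWORD=hunter2");
  assert_eq!(replacers.len(), 1);
  Ok(())
}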

fn stack_git_token<R: WriteStackRes>(
core_token: Option<String>,
args: &RepoExecutionArgs,
res: &mut R,
) -> anyhow::Result<Option<String>> {
helpers::git_token(core_token, args).map_err(|e| {
let error = format_serror(&e.into());
res
.logs()
.push(Log::error("Missing git token", error.clone()));
res.add_remote_error(FileContents {
path: Default::default(),
contents: error,
});
anyhow!("failed to find required git token, stopping run")
})
}

@@ -51,6 +51,9 @@ pub fn periphery_config() -> &'static PeripheryConfig {
stats_polling_rate: env
.periphery_stats_polling_rate
.unwrap_or(config.stats_polling_rate),
container_stats_polling_rate: env
.periphery_container_stats_polling_rate
.unwrap_or(config.container_stats_polling_rate),
legacy_compose_cli: env
.periphery_legacy_compose_cli
.unwrap_or(config.legacy_compose_cli),
File diff suppressed because it is too large
bin/periphery/src/docker/containers.rs (Normal file, 759 lines)
@@ -0,0 +1,759 @@
use std::collections::HashMap;

use anyhow::Context;
use bollard::query_parameters::{
InspectContainerOptions, ListContainersOptions,
};
use komodo_client::entities::docker::{
ContainerConfig, GraphDriverData, HealthConfig, PortBinding,
container::*,
};

use super::{DockerClient, stats::container_stats};

impl DockerClient {
pub async fn list_containers(
&self,
) -> anyhow::Result<Vec<ContainerListItem>> {
let containers = self
.docker
.list_containers(Some(ListContainersOptions {
all: true,
..Default::default()
}))
.await?;
let stats = container_stats().load();
let mut containers = containers
.into_iter()
.flat_map(|container| {
let name = container
.names
.context("no names on container")?
.pop()
.context("no names on container (empty vec)")?
.replace('/', "");
let stats = stats.get(&name).cloned();
anyhow::Ok(ContainerListItem {
server_id: None,
name,
stats,
id: container.id,
image: container.image,
image_id: container.image_id,
created: container.created,
size_rw: container.size_rw,
size_root_fs: container.size_root_fs,
state: convert_summary_container_state(
container.state.context("no container state")?,
),
status: container.status,
network_mode: container
.host_config
.and_then(|config| config.network_mode),
networks: container
.network_settings
.and_then(|settings| {
settings.networks.map(|networks| {
let mut keys =
networks.into_keys().collect::<Vec<_>>();
keys.sort();
keys
})
})
.unwrap_or_default(),
ports: container
.ports
.map(|ports| {
ports.into_iter().map(convert_port).collect()
})
.unwrap_or_default(),
volumes: container
.mounts
.map(|settings| {
settings
.into_iter()
.filter_map(|mount| mount.name)
.collect()
})
.unwrap_or_default(),
labels: container.labels.unwrap_or_default(),
})
})
.collect::<Vec<_>>();
let container_id_to_network = containers
.iter()
.filter_map(|c| Some((c.id.clone()?, c.network_mode.clone()?)))
.collect::<HashMap<_, _>>();
// Fix containers which use `container:container_id` network_mode,
// by replacing with the referenced network mode.
containers.iter_mut().for_each(|container| {
let Some(network_name) = &container.network_mode else {
return;
};
let Some(container_id) =
network_name.strip_prefix("container:")
else {
return;
};
container.network_mode =
container_id_to_network.get(container_id).cloned();
});
Ok(containers)
}
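
The container:<id> remap above can be shown in isolation; a self-contained sketch with hypothetical ids and names:

use std::collections::HashMap;

fn remap_example() {
  let mut container_id_to_network = HashMap::new();
  container_id_to_network.insert("abc123".to_string(), "bridge".to_string());
  let mut network_mode = Some("container:abc123".to_string());
  // Own the referenced id first so the map lookup can rewrite network_mode.
  let referenced = network_mode
    .as_deref()
    .and_then(|mode| mode.strip_prefix("container:"))
    .map(str::to_string);
  if let Some(id) = referenced {
    network_mode = container_id_to_network.get(&id).cloned();
  }
  assert_eq!(network_mode.as_deref(), Some("bridge"));
}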

pub async fn inspect_container(
&self,
container_name: &str,
) -> anyhow::Result<Container> {
let container = self
.docker
.inspect_container(
container_name,
InspectContainerOptions { size: true }.into(),
)
.await?;
Ok(Container {
id: container.id,
created: container.created,
path: container.path,
args: container.args.unwrap_or_default(),
state: container.state.map(|state| ContainerState {
status: state
.status
.map(convert_container_state_status)
.unwrap_or_default(),
running: state.running,
paused: state.paused,
restarting: state.restarting,
oom_killed: state.oom_killed,
dead: state.dead,
pid: state.pid,
exit_code: state.exit_code,
error: state.error,
started_at: state.started_at,
finished_at: state.finished_at,
health: state.health.map(|health| ContainerHealth {
status: health
.status
.map(convert_health_status)
.unwrap_or_default(),
failing_streak: health.failing_streak,
log: health
.log
.map(|log| {
log
.into_iter()
.map(convert_health_check_result)
.collect()
})
.unwrap_or_default(),
}),
}),
image: container.image,
resolv_conf_path: container.resolv_conf_path,
hostname_path: container.hostname_path,
hosts_path: container.hosts_path,
log_path: container.log_path,
name: container.name,
restart_count: container.restart_count,
driver: container.driver,
platform: container.platform,
mount_label: container.mount_label,
process_label: container.process_label,
app_armor_profile: container.app_armor_profile,
exec_ids: container.exec_ids.unwrap_or_default(),
host_config: container.host_config.map(|config| HostConfig {
cpu_shares: config.cpu_shares,
memory: config.memory,
cgroup_parent: config.cgroup_parent,
blkio_weight: config.blkio_weight,
blkio_weight_device: config
.blkio_weight_device
.unwrap_or_default()
.into_iter()
.map(|device| ResourcesBlkioWeightDevice {
path: device.path,
weight: device.weight,
})
.collect(),
blkio_device_read_bps: config
.blkio_device_read_bps
.unwrap_or_default()
.into_iter()
.map(|bp| ThrottleDevice {
path: bp.path,
rate: bp.rate,
})
.collect(),
blkio_device_write_bps: config
.blkio_device_write_bps
.unwrap_or_default()
.into_iter()
.map(|bp| ThrottleDevice {
path: bp.path,
rate: bp.rate,
})
.collect(),
blkio_device_read_iops: config
.blkio_device_read_iops
.unwrap_or_default()
.into_iter()
.map(|iops| ThrottleDevice {
path: iops.path,
rate: iops.rate,
})
.collect(),
blkio_device_write_iops: config
.blkio_device_write_iops
.unwrap_or_default()
.into_iter()
.map(|iops| ThrottleDevice {
path: iops.path,
rate: iops.rate,
})
.collect(),
cpu_period: config.cpu_period,
cpu_quota: config.cpu_quota,
cpu_realtime_period: config.cpu_realtime_period,
cpu_realtime_runtime: config.cpu_realtime_runtime,
cpuset_cpus: config.cpuset_cpus,
cpuset_mems: config.cpuset_mems,
devices: config
.devices
.unwrap_or_default()
.into_iter()
.map(|device| DeviceMapping {
path_on_host: device.path_on_host,
path_in_container: device.path_in_container,
cgroup_permissions: device.cgroup_permissions,
})
.collect(),
device_cgroup_rules: config
.device_cgroup_rules
.unwrap_or_default(),
device_requests: config
.device_requests
.unwrap_or_default()
.into_iter()
.map(|request| DeviceRequest {
driver: request.driver,
count: request.count,
device_ids: request.device_ids.unwrap_or_default(),
capabilities: request.capabilities.unwrap_or_default(),
options: request.options.unwrap_or_default(),
})
.collect(),
kernel_memory_tcp: config.kernel_memory_tcp,
memory_reservation: config.memory_reservation,
memory_swap: config.memory_swap,
memory_swappiness: config.memory_swappiness,
nano_cpus: config.nano_cpus,
oom_kill_disable: config.oom_kill_disable,
init: config.init,
pids_limit: config.pids_limit,
ulimits: config
.ulimits
.unwrap_or_default()
.into_iter()
.map(|ulimit| ResourcesUlimits {
name: ulimit.name,
soft: ulimit.soft,
hard: ulimit.hard,
})
.collect(),
cpu_count: config.cpu_count,
cpu_percent: config.cpu_percent,
io_maximum_iops: config.io_maximum_iops,
io_maximum_bandwidth: config.io_maximum_bandwidth,
binds: config.binds.unwrap_or_default(),
container_id_file: config.container_id_file,
log_config: config.log_config.map(|config| {
HostConfigLogConfig {
typ: config.typ,
config: config.config.unwrap_or_default(),
}
}),
network_mode: config.network_mode,
port_bindings: config
.port_bindings
.unwrap_or_default()
.into_iter()
.map(|(k, v)| {
(
k,
v.unwrap_or_default()
.into_iter()
.map(|v| PortBinding {
host_ip: v.host_ip,
host_port: v.host_port,
})
.collect(),
)
})
.collect(),
restart_policy: config.restart_policy.map(|policy| {
RestartPolicy {
name: policy
.name
.map(convert_restart_policy)
.unwrap_or_default(),
maximum_retry_count: policy.maximum_retry_count,
}
}),
auto_remove: config.auto_remove,
volume_driver: config.volume_driver,
volumes_from: config.volumes_from.unwrap_or_default(),
mounts: config
.mounts
.unwrap_or_default()
.into_iter()
.map(|mount| ContainerMount {
target: mount.target,
source: mount.source,
typ: mount
.typ
.map(convert_mount_type)
.unwrap_or_default(),
read_only: mount.read_only,
consistency: mount.consistency,
bind_options: mount.bind_options.map(|options| {
MountBindOptions {
propagation: options
.propagation
.map(convert_mount_propogation)
.unwrap_or_default(),
non_recursive: options.non_recursive,
create_mountpoint: options.create_mountpoint,
read_only_non_recursive: options
.read_only_non_recursive,
read_only_force_recursive: options
.read_only_force_recursive,
}
}),
volume_options: mount.volume_options.map(|options| {
MountVolumeOptions {
no_copy: options.no_copy,
labels: options.labels.unwrap_or_default(),
driver_config: options.driver_config.map(|config| {
MountVolumeOptionsDriverConfig {
name: config.name,
options: config.options.unwrap_or_default(),
}
}),
subpath: options.subpath,
}
}),
tmpfs_options: mount.tmpfs_options.map(|options| {
MountTmpfsOptions {
size_bytes: options.size_bytes,
mode: options.mode,
}
}),
})
.collect(),
console_size: config
.console_size
.map(|v| v.into_iter().map(|s| s as i32).collect())
.unwrap_or_default(),
annotations: config.annotations.unwrap_or_default(),
cap_add: config.cap_add.unwrap_or_default(),
cap_drop: config.cap_drop.unwrap_or_default(),
cgroupns_mode: config
.cgroupns_mode
.map(convert_cgroupns_mode),
dns: config.dns.unwrap_or_default(),
dns_options: config.dns_options.unwrap_or_default(),
dns_search: config.dns_search.unwrap_or_default(),
extra_hosts: config.extra_hosts.unwrap_or_default(),
group_add: config.group_add.unwrap_or_default(),
ipc_mode: config.ipc_mode,
cgroup: config.cgroup,
links: config.links.unwrap_or_default(),
oom_score_adj: config.oom_score_adj,
pid_mode: config.pid_mode,
privileged: config.privileged,
publish_all_ports: config.publish_all_ports,
readonly_rootfs: config.readonly_rootfs,
security_opt: config.security_opt.unwrap_or_default(),
storage_opt: config.storage_opt.unwrap_or_default(),
tmpfs: config.tmpfs.unwrap_or_default(),
uts_mode: config.uts_mode,
userns_mode: config.userns_mode,
shm_size: config.shm_size,
sysctls: config.sysctls.unwrap_or_default(),
runtime: config.runtime,
isolation: config
.isolation
.map(convert_isolation_mode)
.unwrap_or_default(),
masked_paths: config.masked_paths.unwrap_or_default(),
readonly_paths: config.readonly_paths.unwrap_or_default(),
}),
graph_driver: container.graph_driver.map(|driver| {
GraphDriverData {
name: driver.name,
data: driver.data,
}
}),
size_rw: container.size_rw,
size_root_fs: container.size_root_fs,
mounts: container
.mounts
.unwrap_or_default()
.into_iter()
.map(|mount| MountPoint {
typ: mount
.typ
.map(convert_mount_point_type)
.unwrap_or_default(),
name: mount.name,
source: mount.source,
destination: mount.destination,
driver: mount.driver,
mode: mount.mode,
rw: mount.rw,
propagation: mount.propagation,
})
.collect(),
config: container.config.map(|config| ContainerConfig {
hostname: config.hostname,
domainname: config.domainname,
user: config.user,
attach_stdin: config.attach_stdin,
attach_stdout: config.attach_stdout,
attach_stderr: config.attach_stderr,
exposed_ports: config
.exposed_ports
.unwrap_or_default()
.into_keys()
.map(|k| (k, Default::default()))
.collect(),
tty: config.tty,
open_stdin: config.open_stdin,
stdin_once: config.stdin_once,
env: config.env.unwrap_or_default(),
cmd: config.cmd.unwrap_or_default(),
healthcheck: config.healthcheck.map(|health| HealthConfig {
test: health.test.unwrap_or_default(),
interval: health.interval,
timeout: health.timeout,
retries: health.retries,
start_period: health.start_period,
start_interval: health.start_interval,
}),
args_escaped: config.args_escaped,
image: config.image,
volumes: config
.volumes
.unwrap_or_default()
.into_keys()
.map(|k| (k, Default::default()))
.collect(),
working_dir: config.working_dir,
entrypoint: config.entrypoint.unwrap_or_default(),
network_disabled: config.network_disabled,
mac_address: config.mac_address,
on_build: config.on_build.unwrap_or_default(),
labels: config.labels.unwrap_or_default(),
stop_signal: config.stop_signal,
stop_timeout: config.stop_timeout,
shell: config.shell.unwrap_or_default(),
}),
network_settings: container.network_settings.map(|settings| {
NetworkSettings {
bridge: settings.bridge,
sandbox_id: settings.sandbox_id,
ports: settings
.ports
.unwrap_or_default()
.into_iter()
.map(|(k, v)| {
(
k,
v.unwrap_or_default()
.into_iter()
.map(|v| PortBinding {
host_ip: v.host_ip,
host_port: v.host_port,
})
.collect(),
)
})
.collect(),
sandbox_key: settings.sandbox_key,
networks: settings
.networks
.unwrap_or_default()
.into_iter()
.map(|(k, v)| {
(
k,
EndpointSettings {
ipam_config: v.ipam_config.map(|ipam| {
EndpointIpamConfig {
ipv4_address: ipam.ipv4_address,
ipv6_address: ipam.ipv6_address,
link_local_ips: ipam
.link_local_ips
.unwrap_or_default(),
}
}),
links: v.links.unwrap_or_default(),
mac_address: v.mac_address,
aliases: v.aliases.unwrap_or_default(),
network_id: v.network_id,
endpoint_id: v.endpoint_id,
gateway: v.gateway,
ip_address: v.ip_address,
ip_prefix_len: v.ip_prefix_len,
ipv6_gateway: v.ipv6_gateway,
global_ipv6_address: v.global_ipv6_address,
global_ipv6_prefix_len: v.global_ipv6_prefix_len,
driver_opts: v.driver_opts.unwrap_or_default(),
dns_names: v.dns_names.unwrap_or_default(),
},
)
})
.collect(),
}
}),
})
}
}

fn convert_summary_container_state(
state: bollard::secret::ContainerSummaryStateEnum,
) -> ContainerStateStatusEnum {
match state {
bollard::secret::ContainerSummaryStateEnum::EMPTY => {
ContainerStateStatusEnum::Empty
}
bollard::secret::ContainerSummaryStateEnum::CREATED => {
ContainerStateStatusEnum::Created
}
bollard::secret::ContainerSummaryStateEnum::RUNNING => {
ContainerStateStatusEnum::Running
}
bollard::secret::ContainerSummaryStateEnum::PAUSED => {
ContainerStateStatusEnum::Paused
}
bollard::secret::ContainerSummaryStateEnum::RESTARTING => {
ContainerStateStatusEnum::Restarting
}
bollard::secret::ContainerSummaryStateEnum::EXITED => {
ContainerStateStatusEnum::Exited
}
bollard::secret::ContainerSummaryStateEnum::REMOVING => {
ContainerStateStatusEnum::Removing
}
bollard::secret::ContainerSummaryStateEnum::DEAD => {
ContainerStateStatusEnum::Dead
}
}
}

fn convert_container_state_status(
state: bollard::secret::ContainerStateStatusEnum,
) -> ContainerStateStatusEnum {
match state {
bollard::secret::ContainerStateStatusEnum::EMPTY => {
ContainerStateStatusEnum::Empty
}
bollard::secret::ContainerStateStatusEnum::CREATED => {
ContainerStateStatusEnum::Created
}
bollard::secret::ContainerStateStatusEnum::RUNNING => {
ContainerStateStatusEnum::Running
}
bollard::secret::ContainerStateStatusEnum::PAUSED => {
ContainerStateStatusEnum::Paused
}
bollard::secret::ContainerStateStatusEnum::RESTARTING => {
ContainerStateStatusEnum::Restarting
}
bollard::secret::ContainerStateStatusEnum::EXITED => {
ContainerStateStatusEnum::Exited
}
bollard::secret::ContainerStateStatusEnum::REMOVING => {
ContainerStateStatusEnum::Removing
}
bollard::secret::ContainerStateStatusEnum::DEAD => {
ContainerStateStatusEnum::Dead
}
}
}

fn convert_port_type(
typ: bollard::secret::PortTypeEnum,
) -> PortTypeEnum {
match typ {
bollard::secret::PortTypeEnum::EMPTY => PortTypeEnum::EMPTY,
bollard::secret::PortTypeEnum::TCP => PortTypeEnum::TCP,
bollard::secret::PortTypeEnum::UDP => PortTypeEnum::UDP,
bollard::secret::PortTypeEnum::SCTP => PortTypeEnum::SCTP,
}
}
fn convert_port(port: bollard::secret::Port) -> Port {
Port {
ip: port.ip,
private_port: port.private_port,
public_port: port.public_port,
typ: port.typ.map(convert_port_type).unwrap_or_default(),
}
}

fn convert_health_status(
status: bollard::secret::HealthStatusEnum,
) -> HealthStatusEnum {
match status {
bollard::secret::HealthStatusEnum::EMPTY => {
HealthStatusEnum::Empty
}
bollard::secret::HealthStatusEnum::NONE => HealthStatusEnum::None,
bollard::secret::HealthStatusEnum::STARTING => {
HealthStatusEnum::Starting
}
bollard::secret::HealthStatusEnum::HEALTHY => {
HealthStatusEnum::Healthy
}
bollard::secret::HealthStatusEnum::UNHEALTHY => {
HealthStatusEnum::Unhealthy
}
}
}

fn convert_health_check_result(
check: bollard::secret::HealthcheckResult,
) -> HealthcheckResult {
HealthcheckResult {
start: check.start,
end: check.end,
exit_code: check.exit_code,
output: check.output,
}
}

fn convert_restart_policy(
policy: bollard::secret::RestartPolicyNameEnum,
) -> RestartPolicyNameEnum {
match policy {
bollard::secret::RestartPolicyNameEnum::EMPTY => {
RestartPolicyNameEnum::Empty
}
bollard::secret::RestartPolicyNameEnum::NO => {
RestartPolicyNameEnum::No
}
bollard::secret::RestartPolicyNameEnum::ALWAYS => {
RestartPolicyNameEnum::Always
}
bollard::secret::RestartPolicyNameEnum::UNLESS_STOPPED => {
RestartPolicyNameEnum::UnlessStopped
}
bollard::secret::RestartPolicyNameEnum::ON_FAILURE => {
RestartPolicyNameEnum::OnFailure
}
}
}

fn convert_mount_type(
typ: bollard::secret::MountTypeEnum,
) -> MountTypeEnum {
match typ {
bollard::secret::MountTypeEnum::EMPTY => MountTypeEnum::Empty,
bollard::secret::MountTypeEnum::BIND => MountTypeEnum::Bind,
bollard::secret::MountTypeEnum::VOLUME => MountTypeEnum::Volume,
bollard::secret::MountTypeEnum::IMAGE => MountTypeEnum::Image,
bollard::secret::MountTypeEnum::TMPFS => MountTypeEnum::Tmpfs,
bollard::secret::MountTypeEnum::NPIPE => MountTypeEnum::Npipe,
bollard::secret::MountTypeEnum::CLUSTER => MountTypeEnum::Cluster,
}
}

fn convert_mount_point_type(
typ: bollard::secret::MountPointTypeEnum,
) -> MountTypeEnum {
match typ {
bollard::secret::MountPointTypeEnum::EMPTY => {
MountTypeEnum::Empty
}
bollard::secret::MountPointTypeEnum::BIND => MountTypeEnum::Bind,
bollard::secret::MountPointTypeEnum::VOLUME => {
MountTypeEnum::Volume
}
bollard::secret::MountPointTypeEnum::IMAGE => {
MountTypeEnum::Image
}
bollard::secret::MountPointTypeEnum::TMPFS => {
MountTypeEnum::Tmpfs
}
bollard::secret::MountPointTypeEnum::NPIPE => {
MountTypeEnum::Npipe
}
bollard::secret::MountPointTypeEnum::CLUSTER => {
MountTypeEnum::Cluster
}
}
}

fn convert_mount_propogation(
propogation: bollard::secret::MountBindOptionsPropagationEnum,
) -> MountBindOptionsPropagationEnum {
match propogation {
bollard::secret::MountBindOptionsPropagationEnum::EMPTY => {
MountBindOptionsPropagationEnum::Empty
}
bollard::secret::MountBindOptionsPropagationEnum::PRIVATE => {
MountBindOptionsPropagationEnum::Private
}
bollard::secret::MountBindOptionsPropagationEnum::RPRIVATE => {
MountBindOptionsPropagationEnum::Rprivate
}
bollard::secret::MountBindOptionsPropagationEnum::SHARED => {
MountBindOptionsPropagationEnum::Shared
}
bollard::secret::MountBindOptionsPropagationEnum::RSHARED => {
MountBindOptionsPropagationEnum::Rshared
}
bollard::secret::MountBindOptionsPropagationEnum::SLAVE => {
MountBindOptionsPropagationEnum::Slave
}
bollard::secret::MountBindOptionsPropagationEnum::RSLAVE => {
MountBindOptionsPropagationEnum::Rslave
}
}
}

fn convert_cgroupns_mode(
mode: bollard::secret::HostConfigCgroupnsModeEnum,
) -> HostConfigCgroupnsModeEnum {
match mode {
bollard::secret::HostConfigCgroupnsModeEnum::EMPTY => {
HostConfigCgroupnsModeEnum::Empty
}
bollard::secret::HostConfigCgroupnsModeEnum::PRIVATE => {
HostConfigCgroupnsModeEnum::Private
}
bollard::secret::HostConfigCgroupnsModeEnum::HOST => {
HostConfigCgroupnsModeEnum::Host
}
}
}

fn convert_isolation_mode(
isolation: bollard::secret::HostConfigIsolationEnum,
) -> HostConfigIsolationEnum {
match isolation {
bollard::secret::HostConfigIsolationEnum::EMPTY => {
HostConfigIsolationEnum::Empty
}
bollard::secret::HostConfigIsolationEnum::DEFAULT => {
HostConfigIsolationEnum::Default
}
bollard::secret::HostConfigIsolationEnum::PROCESS => {
HostConfigIsolationEnum::Process
}
bollard::secret::HostConfigIsolationEnum::HYPERV => {
HostConfigIsolationEnum::Hyperv
}
}
}
bin/periphery/src/docker/images.rs (Normal file, 143 lines)
@@ -0,0 +1,143 @@
use bollard::query_parameters::ListImagesOptions;
use komodo_client::entities::docker::{
ContainerConfig, GraphDriverData, HealthConfig,
container::ContainerListItem, image::*,
};

use super::DockerClient;

impl DockerClient {
pub async fn list_images(
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<ImageListItem>> {
let images = self
.docker
.list_images(Option::<ListImagesOptions>::None)
.await?
.into_iter()
.map(|image| {
let in_use = containers.iter().any(|container| {
container
.image_id
.as_ref()
.map(|id| id == &image.id)
.unwrap_or_default()
});
ImageListItem {
name: image
.repo_tags
.into_iter()
.next()
.unwrap_or_else(|| image.id.clone()),
id: image.id,
parent_id: image.parent_id,
created: image.created,
size: image.size,
in_use,
}
})
.collect();
Ok(images)
}

pub async fn inspect_image(
&self,
image_name: &str,
) -> anyhow::Result<Image> {
let image = self.docker.inspect_image(image_name).await?;
Ok(Image {
id: image.id,
repo_tags: image.repo_tags.unwrap_or_default(),
repo_digests: image.repo_digests.unwrap_or_default(),
parent: image.parent,
comment: image.comment,
created: image.created,
docker_version: image.docker_version,
author: image.author,
architecture: image.architecture,
variant: image.variant,
os: image.os,
os_version: image.os_version,
size: image.size,
graph_driver: image.graph_driver.map(|driver| {
GraphDriverData {
name: driver.name,
data: driver.data,
}
}),
root_fs: image.root_fs.map(|fs| ImageInspectRootFs {
typ: fs.typ,
layers: fs.layers.unwrap_or_default(),
}),
metadata: image.metadata.map(|metadata| ImageInspectMetadata {
last_tag_time: metadata.last_tag_time,
}),
config: image.config.map(|config| ContainerConfig {
hostname: config.hostname,
domainname: config.domainname,
user: config.user,
attach_stdin: config.attach_stdin,
attach_stdout: config.attach_stdout,
attach_stderr: config.attach_stderr,
exposed_ports: config
.exposed_ports
.unwrap_or_default()
.into_keys()
.map(|k| (k, Default::default()))
.collect(),
tty: config.tty,
open_stdin: config.open_stdin,
stdin_once: config.stdin_once,
env: config.env.unwrap_or_default(),
cmd: config.cmd.unwrap_or_default(),
healthcheck: config.healthcheck.map(|health| HealthConfig {
test: health.test.unwrap_or_default(),
interval: health.interval,
timeout: health.timeout,
retries: health.retries,
start_period: health.start_period,
start_interval: health.start_interval,
}),
args_escaped: config.args_escaped,
image: config.image,
volumes: config
.volumes
.unwrap_or_default()
.into_keys()
.map(|k| (k, Default::default()))
.collect(),
working_dir: config.working_dir,
entrypoint: config.entrypoint.unwrap_or_default(),
network_disabled: config.network_disabled,
mac_address: config.mac_address,
on_build: config.on_build.unwrap_or_default(),
labels: config.labels.unwrap_or_default(),
stop_signal: config.stop_signal,
stop_timeout: config.stop_timeout,
shell: config.shell.unwrap_or_default(),
}),
})
}

pub async fn image_history(
&self,
image_name: &str,
) -> anyhow::Result<Vec<ImageHistoryResponseItem>> {
let res = self
.docker
.image_history(image_name)
.await?
.into_iter()
.map(|image| ImageHistoryResponseItem {
id: image.id,
created: image.created,
created_by: image.created_by,
tags: image.tags,
size: image.size,
comment: image.comment,
})
.collect();
Ok(res)
}
}
bin/periphery/src/docker/mod.rs (Normal file, 89 lines)
@@ -0,0 +1,89 @@
use std::sync::OnceLock;

use anyhow::anyhow;
use bollard::Docker;
use command::run_komodo_command;
use komodo_client::entities::{TerminationSignal, update::Log};
use run_command::async_run_command;

pub mod stats;

mod containers;
mod images;
mod networks;
mod volumes;

pub fn docker_client() -> &'static DockerClient {
static DOCKER_CLIENT: OnceLock<DockerClient> = OnceLock::new();
DOCKER_CLIENT.get_or_init(Default::default)
}

pub struct DockerClient {
docker: Docker,
}

impl Default for DockerClient {
fn default() -> DockerClient {
DockerClient {
docker: Docker::connect_with_defaults()
.expect("failed to connect to docker daemon"),
}
}
}

/// Returns whether build result should be pushed after build
#[instrument(skip(registry_token))]
pub async fn docker_login(
domain: &str,
account: &str,
// For local token override from core.
registry_token: Option<&str>,
) -> anyhow::Result<bool> {
if domain.is_empty() || account.is_empty() {
return Ok(false);
}
let registry_token = match registry_token {
Some(token) => token,
None => crate::helpers::registry_token(domain, account)?,
};
let log = async_run_command(&format!(
"echo {registry_token} | docker login {domain} --username '{account}' --password-stdin",
))
.await;
if log.success() {
Ok(true)
} else {
let mut e = anyhow!("End of trace");
for line in
log.stderr.split('\n').filter(|line| !line.is_empty()).rev()
{
e = e.context(line.to_string());
}
for line in
log.stdout.split('\n').filter(|line| !line.is_empty()).rev()
{
e = e.context(line.to_string());
}
Err(e.context(format!("Registry {domain} login error")))
}
}
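
The reversed loop above turns multi-line CLI output into an anyhow context chain that prints back in the original order; a standalone sketch (stderr contents are hypothetical):

use anyhow::anyhow;

fn context_chain_example() {
  let stderr = "unauthorized: incorrect username or password\nLogin did not succeed";
  let mut e = anyhow!("End of trace");
  for line in stderr.split('\n').filter(|line| !line.is_empty()).rev() {
    e = e.context(line.to_string());
  }
  // Alternate-form printing walks the chain outermost-first:
  // unauthorized: incorrect username or password: Login did not succeed: End of trace
  println!("{e:#}");
}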

#[instrument]
pub async fn pull_image(image: &str) -> Log {
let command = format!("docker pull {image}");
run_komodo_command("Docker Pull", None, command).await
}

pub fn stop_container_command(
container_name: &str,
signal: Option<TerminationSignal>,
time: Option<i32>,
) -> String {
let signal = signal
.map(|signal| format!(" --signal {signal}"))
.unwrap_or_default();
let time = time
.map(|time| format!(" --time {time}"))
.unwrap_or_default();
format!("docker stop{signal}{time} {container_name}")
}
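
For example (hypothetical container name; passing None for the signal omits the --signal flag entirely):

let cmd = stop_container_command("komodo-core", None, Some(10));
assert_eq!(cmd, "docker stop --time 10 komodo-core");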
bin/periphery/src/docker/networks.rs (Normal file, 120 lines)
@@ -0,0 +1,120 @@
use bollard::query_parameters::{
InspectNetworkOptions, ListNetworksOptions,
};
use komodo_client::entities::docker::{
container::ContainerListItem, network::*,
};

use super::DockerClient;

impl DockerClient {
pub async fn list_networks(
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<NetworkListItem>> {
let networks = self
.docker
.list_networks(Option::<ListNetworksOptions>::None)
.await?
.into_iter()
.map(|network| {
let (ipam_driver, ipam_subnet, ipam_gateway) =
if let Some(ipam) = network.ipam {
let (subnet, gateway) = if let Some(config) = ipam
.config
.and_then(|configs| configs.into_iter().next())
{
(config.subnet, config.gateway)
} else {
(None, None)
};
(ipam.driver, subnet, gateway)
} else {
(None, None, None)
};
let in_use = match &network.name {
Some(name) => containers.iter().any(|container| {
container.networks.iter().any(|_name| name == _name)
}),
None => false,
};
NetworkListItem {
name: network.name,
id: network.id,
created: network.created,
scope: network.scope,
driver: network.driver,
enable_ipv6: network.enable_ipv6,
ipam_driver,
ipam_subnet,
ipam_gateway,
internal: network.internal,
attachable: network.attachable,
ingress: network.ingress,
in_use,
}
})
.collect();
Ok(networks)
}

pub async fn inspect_network(
&self,
network_name: &str,
) -> anyhow::Result<Network> {
let network = self
.docker
.inspect_network(
network_name,
InspectNetworkOptions {
verbose: true,
..Default::default()
}
.into(),
)
.await?;
Ok(Network {
name: network.name,
id: network.id,
created: network.created,
scope: network.scope,
driver: network.driver,
enable_ipv6: network.enable_ipv6,
ipam: network.ipam.map(|ipam| Ipam {
driver: ipam.driver,
config: ipam
.config
.unwrap_or_default()
.into_iter()
.map(|config| IpamConfig {
subnet: config.subnet,
ip_range: config.ip_range,
gateway: config.gateway,
auxiliary_addresses: config
.auxiliary_addresses
.unwrap_or_default(),
})
.collect(),
options: ipam.options.unwrap_or_default(),
}),
internal: network.internal,
attachable: network.attachable,
ingress: network.ingress,
containers: network
.containers
.unwrap_or_default()
.into_iter()
.map(|(container_id, container)| NetworkContainer {
container_id,
name: container.name,
endpoint_id: container.endpoint_id,
mac_address: container.mac_address,
ipv4_address: container.ipv4_address,
ipv6_address: container.ipv6_address,
})
.collect(),
options: network.options.unwrap_or_default(),
labels: network.labels.unwrap_or_default(),
})
}
}
bin/periphery/src/docker/stats.rs (Normal file, 248 lines)
@@ -0,0 +1,248 @@
use std::{
collections::HashMap,
sync::{Arc, OnceLock},
};

use anyhow::{Context, anyhow};
use arc_swap::ArcSwap;
use async_timing_util::wait_until_timelength;
use bollard::{models, query_parameters::StatsOptionsBuilder};
use futures::StreamExt;
use komodo_client::entities::docker::{
container::ContainerStats,
stats::{
ContainerBlkioStatEntry, ContainerBlkioStats, ContainerCpuStats,
ContainerCpuUsage, ContainerMemoryStats, ContainerNetworkStats,
ContainerPidsStats, ContainerStorageStats,
ContainerThrottlingData, FullContainerStats,
},
};
use run_command::async_run_command;

use crate::{config::periphery_config, docker::DockerClient};

pub type ContainerStatsMap = HashMap<String, ContainerStats>;

pub fn container_stats() -> &'static ArcSwap<ContainerStatsMap> {
static CONTAINER_STATS: OnceLock<ArcSwap<ContainerStatsMap>> =
OnceLock::new();
CONTAINER_STATS.get_or_init(Default::default)
}

pub fn spawn_polling_thread() {
tokio::spawn(async move {
let polling_rate = periphery_config()
.container_stats_polling_rate
.to_string()
.parse()
.expect("invalid stats polling rate");
update_container_stats().await;
loop {
let _ts = wait_until_timelength(polling_rate, 200).await;
update_container_stats().await;
}
});
}

async fn update_container_stats() {
match get_container_stats(None).await {
Ok(stats) => {
container_stats().store(Arc::new(
stats.into_iter().map(|s| (s.name.clone(), s)).collect(),
));
}
Err(e) => {
error!("Failed to refresh container stats cache | {e:#}");
}
}
}

pub async fn get_container_stats(
container_name: Option<String>,
) -> anyhow::Result<Vec<ContainerStats>> {
let format = "--format \"{{ json . }}\"";
let container_name = match container_name {
Some(name) => format!(" {name}"),
None => "".to_string(),
};
let command =
format!("docker stats{container_name} --no-stream {format}");
let output = async_run_command(&command).await;
if output.success() {
output
.stdout
.split('\n')
.filter(|e| !e.is_empty())
.map(|e| {
let parsed = serde_json::from_str(e)
.context(format!("failed at parsing entry {e}"))?;
Ok(parsed)
})
.collect()
} else {
Err(anyhow!("{}", output.stderr.replace('\n', " | ")))
}
}
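
A usage sketch (hypothetical container name; the name field is the same key the stats cache above is indexed by):

async fn stats_example() -> anyhow::Result<()> {
  // None fetches stats for all containers; Some(name) targets one.
  let stats = get_container_stats(Some("komodo-core".to_string())).await?;
  for s in &stats {
    println!("{}", s.name);
  }
  Ok(())
}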

impl DockerClient {
/// Calls for stats once, similar to --no-stream on the cli
pub async fn full_container_stats(
&self,
container_name: &str,
) -> anyhow::Result<FullContainerStats> {
let mut res = self.docker.stats(
container_name,
StatsOptionsBuilder::new().stream(false).build().into(),
);
let stats = res
.next()
.await
.with_context(|| format!("Unable to get container stats for {container_name} (got None)"))?
.with_context(|| format!("Unable to get container stats for {container_name}"))?;
Ok(FullContainerStats {
name: stats.name.unwrap_or(container_name.to_string()),
id: stats.id,
read: stats.read,
preread: stats.preread,
pids_stats: stats.pids_stats.map(convert_pids_stats),
blkio_stats: stats.blkio_stats.map(convert_blkio_stats),
num_procs: stats.num_procs,
storage_stats: stats.storage_stats.map(convert_storage_stats),
cpu_stats: stats.cpu_stats.map(convert_cpu_stats),
precpu_stats: stats.precpu_stats.map(convert_cpu_stats),
memory_stats: stats.memory_stats.map(convert_memory_stats),
networks: stats.networks.map(convert_network_stats),
})
}
}

fn convert_pids_stats(
pids_stats: models::ContainerPidsStats,
) -> ContainerPidsStats {
ContainerPidsStats {
current: pids_stats.current,
limit: pids_stats.limit,
}
}

fn convert_blkio_stats(
blkio_stats: models::ContainerBlkioStats,
) -> ContainerBlkioStats {
ContainerBlkioStats {
io_service_bytes_recursive: blkio_stats
.io_service_bytes_recursive
.map(convert_blkio_stat_entries),
io_serviced_recursive: blkio_stats
.io_serviced_recursive
.map(convert_blkio_stat_entries),
io_queue_recursive: blkio_stats
.io_queue_recursive
.map(convert_blkio_stat_entries),
io_service_time_recursive: blkio_stats
.io_service_time_recursive
.map(convert_blkio_stat_entries),
io_wait_time_recursive: blkio_stats
.io_wait_time_recursive
.map(convert_blkio_stat_entries),
io_merged_recursive: blkio_stats
.io_merged_recursive
.map(convert_blkio_stat_entries),
io_time_recursive: blkio_stats
.io_time_recursive
.map(convert_blkio_stat_entries),
sectors_recursive: blkio_stats
.sectors_recursive
.map(convert_blkio_stat_entries),
}
}

fn convert_blkio_stat_entries(
blkio_stat_entries: Vec<models::ContainerBlkioStatEntry>,
) -> Vec<ContainerBlkioStatEntry> {
blkio_stat_entries
.into_iter()
.map(|blkio_stat_entry| ContainerBlkioStatEntry {
major: blkio_stat_entry.major,
minor: blkio_stat_entry.minor,
op: blkio_stat_entry.op,
value: blkio_stat_entry.value,
})
.collect()
}

fn convert_storage_stats(
storage_stats: models::ContainerStorageStats,
) -> ContainerStorageStats {
ContainerStorageStats {
read_count_normalized: storage_stats.read_count_normalized,
read_size_bytes: storage_stats.read_size_bytes,
write_count_normalized: storage_stats.write_count_normalized,
write_size_bytes: storage_stats.write_size_bytes,
}
}

fn convert_cpu_stats(
cpu_stats: models::ContainerCpuStats,
) -> ContainerCpuStats {
ContainerCpuStats {
cpu_usage: cpu_stats.cpu_usage.map(convert_cpu_usage),
system_cpu_usage: cpu_stats.system_cpu_usage,
online_cpus: cpu_stats.online_cpus,
throttling_data: cpu_stats
.throttling_data
.map(convert_cpu_throttling_data),
}
}

fn convert_cpu_usage(
cpu_usage: models::ContainerCpuUsage,
) -> ContainerCpuUsage {
ContainerCpuUsage {
total_usage: cpu_usage.total_usage,
percpu_usage: cpu_usage.percpu_usage,
usage_in_kernelmode: cpu_usage.usage_in_kernelmode,
usage_in_usermode: cpu_usage.usage_in_usermode,
}
}

fn convert_cpu_throttling_data(
cpu_throttling_data: models::ContainerThrottlingData,
) -> ContainerThrottlingData {
ContainerThrottlingData {
periods: cpu_throttling_data.periods,
throttled_periods: cpu_throttling_data.throttled_periods,
throttled_time: cpu_throttling_data.throttled_time,
}
}

fn convert_memory_stats(
memory_stats: models::ContainerMemoryStats,
) -> ContainerMemoryStats {
ContainerMemoryStats {
usage: memory_stats.usage,
max_usage: memory_stats.max_usage,
stats: memory_stats.stats,
failcnt: memory_stats.failcnt,
limit: memory_stats.limit,
commitbytes: memory_stats.commitbytes,
commitpeakbytes: memory_stats.commitpeakbytes,
privateworkingset: memory_stats.privateworkingset,
}
}

fn convert_network_stats(
network_stats: models::ContainerNetworkStats,
) -> ContainerNetworkStats {
ContainerNetworkStats {
rx_bytes: network_stats.rx_bytes,
rx_packets: network_stats.rx_packets,
rx_errors: network_stats.rx_errors,
rx_dropped: network_stats.rx_dropped,
tx_bytes: network_stats.tx_bytes,
tx_packets: network_stats.tx_packets,
tx_errors: network_stats.tx_errors,
tx_dropped: network_stats.tx_dropped,
endpoint_id: network_stats.endpoint_id,
instance_id: network_stats.instance_id,
}
}
bin/periphery/src/docker/volumes.rs (Normal file, 155 lines)
@@ -0,0 +1,155 @@
use bollard::query_parameters::ListVolumesOptions;
use komodo_client::entities::docker::{
PortBinding, container::ContainerListItem, volume::*,
};

use crate::docker::DockerClient;

impl DockerClient {
pub async fn list_volumes(
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<VolumeListItem>> {
let volumes = self
.docker
.list_volumes(Option::<ListVolumesOptions>::None)
.await?
.volumes
.unwrap_or_default()
.into_iter()
.map(|volume| {
let scope = volume
.scope
.map(|scope| match scope {
bollard::secret::VolumeScopeEnum::EMPTY => {
VolumeScopeEnum::Empty
}
bollard::secret::VolumeScopeEnum::LOCAL => {
VolumeScopeEnum::Local
}
bollard::secret::VolumeScopeEnum::GLOBAL => {
VolumeScopeEnum::Global
}
})
.unwrap_or(VolumeScopeEnum::Empty);
let in_use = containers.iter().any(|container| {
container.volumes.iter().any(|name| &volume.name == name)
});
VolumeListItem {
name: volume.name,
driver: volume.driver,
mountpoint: volume.mountpoint,
created: volume.created_at,
size: volume.usage_data.map(|data| data.size),
scope,
in_use,
}
})
.collect();
Ok(volumes)
}

pub async fn inspect_volume(
&self,
volume_name: &str,
) -> anyhow::Result<Volume> {
let volume = self.docker.inspect_volume(volume_name).await?;
Ok(Volume {
name: volume.name,
driver: volume.driver,
mountpoint: volume.mountpoint,
created_at: volume.created_at,
status: volume.status.unwrap_or_default().into_keys().map(|k| (k, Default::default())).collect(),
labels: volume.labels,
scope: volume
.scope
.map(|scope| match scope {
bollard::secret::VolumeScopeEnum::EMPTY => {
VolumeScopeEnum::Empty
}
bollard::secret::VolumeScopeEnum::LOCAL => {
VolumeScopeEnum::Local
}
bollard::secret::VolumeScopeEnum::GLOBAL => {
VolumeScopeEnum::Global
}
})
.unwrap_or_default(),
cluster_volume: volume.cluster_volume.map(|volume| {
ClusterVolume {
id: volume.id,
version: volume.version.map(|version| ObjectVersion {
index: version.index,
}),
created_at: volume.created_at,
updated_at: volume.updated_at,
spec: volume.spec.map(|spec| ClusterVolumeSpec {
group: spec.group,
access_mode: spec.access_mode.map(|mode| {
ClusterVolumeSpecAccessMode {
scope: mode.scope.map(|scope| match scope {
bollard::secret::ClusterVolumeSpecAccessModeScopeEnum::EMPTY => ClusterVolumeSpecAccessModeScopeEnum::Empty,
bollard::secret::ClusterVolumeSpecAccessModeScopeEnum::SINGLE => ClusterVolumeSpecAccessModeScopeEnum::Single,
bollard::secret::ClusterVolumeSpecAccessModeScopeEnum::MULTI => ClusterVolumeSpecAccessModeScopeEnum::Multi,
}).unwrap_or_default(),
sharing: mode.sharing.map(|sharing| match sharing {
bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::EMPTY => ClusterVolumeSpecAccessModeSharingEnum::Empty,
bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::NONE => ClusterVolumeSpecAccessModeSharingEnum::None,
bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::READONLY => ClusterVolumeSpecAccessModeSharingEnum::Readonly,
bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::ONEWRITER => ClusterVolumeSpecAccessModeSharingEnum::Onewriter,
bollard::secret::ClusterVolumeSpecAccessModeSharingEnum::ALL => ClusterVolumeSpecAccessModeSharingEnum::All,
}).unwrap_or_default(),
secrets: mode.secrets.unwrap_or_default().into_iter().map(|secret| ClusterVolumeSpecAccessModeSecrets {
key: secret.key,
secret: secret.secret,
}).collect(),
accessibility_requirements: mode
.accessibility_requirements.map(|req| ClusterVolumeSpecAccessModeAccessibilityRequirements {
requisite: req.requisite.unwrap_or_default().into_iter().map(|map| map.into_iter().map(|(k, v)| (k, v.unwrap_or_default().into_iter().map(|p| PortBinding { host_ip: p.host_ip, host_port: p.host_port }).collect())).collect()).collect(),
preferred: req.preferred.unwrap_or_default().into_iter().map(|map| map.into_iter().map(|(k, v)| (k, v.unwrap_or_default().into_iter().map(|p| PortBinding { host_ip: p.host_ip, host_port: p.host_port }).collect())).collect()).collect(),
}),
capacity_range: mode.capacity_range.map(|range| ClusterVolumeSpecAccessModeCapacityRange {
required_bytes: range.required_bytes,
limit_bytes: range.limit_bytes,
}),
availability: mode.availability.map(|availability| match availability {
bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::EMPTY => ClusterVolumeSpecAccessModeAvailabilityEnum::Empty,
bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::ACTIVE => ClusterVolumeSpecAccessModeAvailabilityEnum::Active,
bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::PAUSE => ClusterVolumeSpecAccessModeAvailabilityEnum::Pause,
bollard::secret::ClusterVolumeSpecAccessModeAvailabilityEnum::DRAIN => ClusterVolumeSpecAccessModeAvailabilityEnum::Drain,
}).unwrap_or_default(),
}
}),
}),
info: volume.info.map(|info| ClusterVolumeInfo {
capacity_bytes: info.capacity_bytes,
volume_context: info.volume_context.unwrap_or_default(),
volume_id: info.volume_id,
accessible_topology: info.accessible_topology.unwrap_or_default().into_iter().map(|map| map.into_iter().map(|(k, v)| (k, v.unwrap_or_default().into_iter().map(|p| PortBinding { host_ip: p.host_ip, host_port: p.host_port }).collect())).collect()).collect(),
}),
publish_status: volume
.publish_status
.unwrap_or_default()
.into_iter()
.map(|status| ClusterVolumePublishStatus {
node_id: status.node_id,
state: status.state.map(|state| match state {
bollard::secret::ClusterVolumePublishStatusStateEnum::EMPTY => ClusterVolumePublishStatusStateEnum::Empty,
bollard::secret::ClusterVolumePublishStatusStateEnum::PENDING_PUBLISH => ClusterVolumePublishStatusStateEnum::PendingPublish,
bollard::secret::ClusterVolumePublishStatusStateEnum::PUBLISHED => ClusterVolumePublishStatusStateEnum::Published,
bollard::secret::ClusterVolumePublishStatusStateEnum::PENDING_NODE_UNPUBLISH => ClusterVolumePublishStatusStateEnum::PendingNodeUnpublish,
bollard::secret::ClusterVolumePublishStatusStateEnum::PENDING_CONTROLLER_UNPUBLISH => ClusterVolumePublishStatusStateEnum::PendingControllerUnpublish,
}).unwrap_or_default(),
publish_context: status.publish_context.unwrap_or_default(),
})
.collect(),
}
}),
options: volume.options,
usage_data: volume.usage_data.map(|data| VolumeUsageData {
size: data.size,
ref_count: data.ref_count,
}),
})
}
}
bin/periphery/src/git.rs (Normal file, 94 lines)
@@ -0,0 +1,94 @@
use std::path::PathBuf;

use command::run_komodo_command_with_sanitization;
use environment::write_env_file;
use interpolate::Interpolator;
use komodo_client::entities::{
EnvironmentVar, RepoExecutionResponse, SystemCommand,
all_logs_success,
};
use periphery_client::api::git::PeripheryRepoExecutionResponse;

use crate::config::periphery_config;

pub async fn handle_post_repo_execution(
mut res: RepoExecutionResponse,
mut environment: Vec<EnvironmentVar>,
env_file_path: &str,
mut on_clone: Option<SystemCommand>,
mut on_pull: Option<SystemCommand>,
skip_secret_interp: bool,
mut replacers: Vec<(String, String)>,
) -> anyhow::Result<PeripheryRepoExecutionResponse> {
if !skip_secret_interp {
let mut interpolotor =
Interpolator::new(None, &periphery_config().secrets);
interpolotor.interpolate_env_vars(&mut environment)?;
if let Some(on_clone) = on_clone.as_mut() {
interpolotor.interpolate_string(&mut on_clone.command)?;
}
if let Some(on_pull) = on_pull.as_mut() {
interpolotor.interpolate_string(&mut on_pull.command)?;
}
replacers.extend(interpolotor.secret_replacers);
}

let env_file_path = write_env_file(
&environment,
&res.path,
env_file_path,
&mut res.logs,
)
.await;

let mut res = PeripheryRepoExecutionResponse { res, env_file_path };

if let Some(on_clone) = on_clone {
if !on_clone.is_none() {
let path = res
.res
.path
.join(on_clone.path)
.components()
.collect::<PathBuf>();
if let Some(log) = run_komodo_command_with_sanitization(
"On Clone",
path.as_path(),
on_clone.command,
true,
&replacers,
)
.await
{
res.res.logs.push(log);
if !all_logs_success(&res.res.logs) {
return Ok(res);
}
}
}
}

if let Some(on_pull) = on_pull {
if !on_pull.is_none() {
let path = res
.res
.path
.join(on_pull.path)
.components()
.collect::<PathBuf>();
if let Some(log) = run_komodo_command_with_sanitization(
"On Pull",
path.as_path(),
on_pull.command,
true,
&replacers,
)
.await
{
res.res.logs.push(log);
}
}
}

Ok(res)
}
@@ -1,19 +1,12 @@
use std::path::PathBuf;

use anyhow::{Context, anyhow};
use anyhow::Context;
use komodo_client::{
entities::{
CloneArgs, EnvironmentVar, SearchCombinator, repo::Repo,
stack::Stack, to_path_compatible_name,
},
entities::{EnvironmentVar, RepoExecutionArgs, SearchCombinator},
parsers::QUOTE_PATTERN,
};
use periphery_client::api::git::PullOrCloneRepo;
use resolver_api::Resolve;

use crate::config::periphery_config;

pub fn git_token(
pub fn git_token_simple(
domain: &str,
account_username: &str,
) -> anyhow::Result<&'static str> {
@@ -24,7 +17,21 @@ pub fn git_token(
.and_then(|provider| {
provider.accounts.iter().find(|account| account.username == account_username).map(|account| account.token.as_str())
})
.with_context(|| format!("did not find token in config for git account {account_username} | domain {domain}"))
.with_context(|| format!("Did not find token in config for git account {account_username} | domain {domain}"))
}

pub fn git_token(
core_token: Option<String>,
args: &RepoExecutionArgs,
) -> anyhow::Result<Option<String>> {
if core_token.is_some() {
return Ok(core_token);
}
let Some(account) = &args.account else {
return Ok(None);
};
let token = git_token_simple(&args.provider, account)?;
Ok(Some(token.to_string()))
}
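
Token resolution order, sketched against the function above (values are hypothetical; args is whatever RepoExecutionArgs the caller already has):

fn token_example(args: &RepoExecutionArgs) -> anyhow::Result<()> {
  // A token passed down from Core always wins:
  assert_eq!(
    git_token(Some("core-token".to_string()), args)?,
    Some("core-token".to_string())
  );
  // Otherwise the token is looked up in the Periphery config by
  // args.provider and args.account; with no account set, the result is Ok(None).
  Ok(())
}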
|
||||
|
||||
pub fn registry_token(
@@ -85,74 +92,3 @@ pub fn log_grep(
      }
    }
}

/// Returns path to root directory of the stack repo.
pub async fn pull_or_clone_stack(
  stack: &Stack,
  repo: Option<&Repo>,
  git_token: Option<String>,
) -> anyhow::Result<PathBuf> {
  if stack.config.files_on_host {
    return Err(anyhow!(
      "Wrong method called for files on host stack"
    ));
  }
  if repo.is_none() && stack.config.repo.is_empty() {
    return Err(anyhow!("Repo is not configured"));
  }

  let (root, mut args) = if let Some(repo) = repo {
    let root = periphery_config()
      .repo_dir()
      .join(to_path_compatible_name(&repo.name))
      .join(&repo.config.path)
      .components()
      .collect::<PathBuf>();
    let args: CloneArgs = repo.into();
    (root, args)
  } else {
    let root = periphery_config()
      .stack_dir()
      .join(to_path_compatible_name(&stack.name))
      .join(&stack.config.clone_path)
      .components()
      .collect::<PathBuf>();
    let args: CloneArgs = stack.into();
    (root, args)
  };
  args.destination = Some(root.display().to_string());

  let git_token = match git_token {
    Some(token) => Some(token),
    None => {
      if let Some(account) = &args.account {
        match crate::helpers::git_token(&args.provider, account) {
          Ok(token) => Some(token.to_string()),
          Err(e) => {
            return Err(
              e.context("Failed to find required git token"),
            );
          }
        }
      } else {
        None
      }
    }
  };

  PullOrCloneRepo {
    args,
    git_token,
    environment: vec![],
    env_file_path: stack.config.env_file_path.clone(),
    skip_secret_interp: stack.config.skip_secret_interp,
    // repo replacer only needed for on_clone / on_pull,
    // which aren't available for stacks
    replacers: Default::default(),
  }
  .resolve(&crate::api::Args)
  .await
  .map_err(|e| e.error)?;

  Ok(root)
}

@@ -9,9 +9,11 @@ use axum_server::tls_rustls::RustlsConfig;
use config::periphery_config;

mod api;
mod build;
mod compose;
mod config;
mod docker;
mod git;
mod helpers;
mod ssl;
mod stats;
@@ -30,7 +32,8 @@ async fn app() -> anyhow::Result<()> {
    info!("{:?}", config.sanitized());
  }

  stats::spawn_system_stats_polling_thread();
  stats::spawn_polling_thread();
  docker::stats::spawn_polling_thread();

  let addr = format!(
    "{}:{}",

@@ -4,7 +4,7 @@ use async_timing_util::wait_until_timelength;
use komodo_client::entities::stats::{
  SingleDiskUsage, SystemInformation, SystemProcess, SystemStats,
};
use sysinfo::{ProcessesToUpdate, System};
use sysinfo::{ProcessRefreshKind, ProcessesToUpdate, System};
use tokio::sync::RwLock;

use crate::config::periphery_config;
@@ -17,7 +17,7 @@ pub fn stats_client() -> &'static RwLock<StatsClient> {

/// This should be called before starting the server in main.rs.
/// Keeps the cached stats up to date
pub fn spawn_system_stats_polling_thread() {
pub fn spawn_polling_thread() {
  tokio::spawn(async move {
    let polling_rate = periphery_config()
      .stats_polling_rate
@@ -74,7 +74,11 @@ impl StatsClient {
  fn refresh(&mut self) {
    self.system.refresh_cpu_all();
    self.system.refresh_memory();
    self.system.refresh_processes(ProcessesToUpdate::All, true);
    self.system.refresh_processes_specifics(
      ProcessesToUpdate::All,
      true,
      ProcessRefreshKind::everything().without_tasks(),
    );
    self.disks.refresh(true);
    self.networks.refresh(true);
  }

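This is the "refresh processes without tasks" item from the release notes: `refresh_processes_specifics` with `ProcessRefreshKind::everything().without_tasks()` still collects per-process CPU, memory, and disk usage, but skips enumerating each process's threads, which sysinfo otherwise walks on every refresh. A standalone sketch, assuming a recent sysinfo release with this API:

use sysinfo::{ProcessRefreshKind, ProcessesToUpdate, System};

fn main() {
  let mut system = System::new();
  // Refresh all processes and prune dead ones (`true`), but skip
  // per-thread (task) enumeration to keep the poll cheap.
  system.refresh_processes_specifics(
    ProcessesToUpdate::All,
    true,
    ProcessRefreshKind::everything().without_tasks(),
  );
  println!("tracked processes: {}", system.processes().len());
}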
@@ -5,7 +5,7 @@ use typeshare::typeshare;

use crate::{
  deserializers::string_list_deserializer,
  entities::{resource::TagBehavior, schedule::Schedule},
  entities::{resource::TagQueryBehavior, schedule::Schedule},
};

use super::KomodoReadRequest;
@@ -25,7 +25,7 @@ pub struct ListSchedules {
  pub tags: Vec<String>,
  /// 'All' or 'Any'
  #[serde(default)]
  pub tag_behavior: TagBehavior,
  pub tag_behavior: TagQueryBehavior,
}

#[typeshare]

@@ -1,27 +0,0 @@
use derive_empty_traits::EmptyTraits;
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
use typeshare::typeshare;

use crate::entities::{NoData, ResourceTarget};

use super::KomodoWriteRequest;

/// Update a resource's description.
/// Response: [NoData].
#[typeshare]
#[derive(
  Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(UpdateDescriptionResponse)]
#[error(serror::Error)]
pub struct UpdateDescription {
  /// The target resource to set description for.
  pub target: ResourceTarget,
  /// The new description.
  pub description: String,
}

#[typeshare]
pub type UpdateDescriptionResponse = NoData;
@@ -4,11 +4,11 @@ mod api_key;
mod build;
mod builder;
mod deployment;
mod description;
mod permissions;
mod procedure;
mod provider;
mod repo;
mod resource;
mod server;
mod stack;
mod sync;
@@ -23,11 +23,11 @@ pub use api_key::*;
pub use build::*;
pub use builder::*;
pub use deployment::*;
pub use description::*;
pub use permissions::*;
pub use procedure::*;
pub use provider::*;
pub use repo::*;
pub use resource::*;
pub use server::*;
pub use stack::*;
pub use sync::*;

37 client/core/rs/src/api/write/resource.rs Normal file
@@ -0,0 +1,37 @@
use derive_empty_traits::EmptyTraits;
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
use typeshare::typeshare;

use crate::entities::{NoData, ResourceTarget};

use super::KomodoWriteRequest;

/// Update a resource's common meta fields.
/// - description
/// - template
/// - tags
/// Response: [NoData].
#[typeshare]
#[derive(
  Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(UpdateResourceMetaResponse)]
#[error(serror::Error)]
pub struct UpdateResourceMeta {
  /// The target resource to update meta on.
  pub target: ResourceTarget,
  /// New description to set,
  /// or null for no update
  pub description: Option<String>,
  /// New template value (true or false),
  /// or null for no update
  pub template: Option<bool>,
  /// The exact tags to set,
  /// or null for no update
  pub tags: Option<Vec<String>>,
}

#[typeshare]
pub type UpdateResourceMetaResponse = NoData;
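`UpdateResourceMeta` replaces the per-field writes (`UpdateDescription` above, `UpdateTagsOnResource` below) with one partial update: any `Option` left as `None` is simply not touched. A hedged construction sketch, assuming `ResourceTarget`'s tuple-variant shape; the server id is hypothetical:

fn mark_as_template() -> UpdateResourceMeta {
  // Mark a server as a template and replace its tags, leaving the
  // description untouched (None = no update).
  UpdateResourceMeta {
    target: ResourceTarget::Server(String::from(
      "675f00000000000000000000",
    )),
    description: None,
    template: Some(true),
    tags: Some(vec![String::from("infra"), String::from("prod")]),
  }
}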
@@ -31,6 +31,24 @@ pub struct CreateServer {

//

/// Creates a new server with given `name` and the configuration
/// of the server at the given `id`. Response: [Server].
#[typeshare]
#[derive(
  Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(Server)]
#[error(serror::Error)]
pub struct CopyServer {
  /// The name of the new server.
  pub name: String,
  /// The id of the server to copy.
  pub id: String,
}

//

/// Deletes the server at the given id, and returns the deleted server.
/// Response: [Server]
#[typeshare]

@@ -3,10 +3,7 @@ use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
use typeshare::typeshare;

use crate::entities::{
  NoData, ResourceTarget,
  tag::{Tag, TagColor},
};
use crate::entities::tag::{Tag, TagColor};

use super::KomodoWriteRequest;

@@ -73,25 +70,3 @@ pub struct UpdateTagColor {
  /// The new color for the tag.
  pub color: TagColor,
}

//

/// Update the tags on a resource.
/// Response: [NoData]
#[typeshare]
#[derive(
  Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(UpdateTagsOnResourceResponse)]
#[error(serror::Error)]
pub struct UpdateTagsOnResource {
  pub target: ResourceTarget,
  /// Tag Ids
  pub tags: Vec<String>,
}

#[typeshare]
pub type UpdateTagsOnResourceResponse = NoData;

//

@@ -130,6 +130,8 @@ pub struct Env {
  pub periphery_disable_container_exec: Option<bool>,
  /// Override `stats_polling_rate`
  pub periphery_stats_polling_rate: Option<Timelength>,
  /// Override `container_stats_polling_rate`
  pub periphery_container_stats_polling_rate: Option<Timelength>,
  /// Override `legacy_compose_cli`
  pub periphery_legacy_compose_cli: Option<bool>,

@@ -222,10 +224,17 @@ pub struct PeripheryConfig {
  pub disable_container_exec: bool,

  /// The rate at which the system stats will be polled to update the cache.
  /// Options: https://docs.rs/komodo_client/latest/komodo_client/entities/enum.Timelength.html
  /// Default: `5-sec`
  #[serde(default = "default_stats_polling_rate")]
  pub stats_polling_rate: Timelength,

  /// The rate at which the container stats will be polled to update the cache.
  /// Options: https://docs.rs/komodo_client/latest/komodo_client/entities/enum.Timelength.html
  /// Default: `30-sec`
  #[serde(default = "default_container_stats_polling_rate")]
  pub container_stats_polling_rate: Timelength,

  /// Whether stack actions should use `docker-compose ...`
  /// instead of `docker compose ...`.
  /// Default: false
@@ -308,6 +317,10 @@ fn default_stats_polling_rate() -> Timelength {
  Timelength::FiveSeconds
}

fn default_container_stats_polling_rate() -> Timelength {
  Timelength::ThirtySeconds
}

fn default_ssl_enabled() -> bool {
  true
}
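The new `container_stats_polling_rate` mirrors `stats_polling_rate`: a `Timelength` string in the periphery config, with a matching `PERIPHERY_*` override declared in the `Env` struct above. A hedged sketch of the defaults, assuming the remaining `PeripheryConfig` fields also carry serde defaults (the ones shown here do):

fn defaults() -> anyhow::Result<()> {
  // An empty config file falls back to the default fns above.
  let config: PeripheryConfig = toml::from_str("")?;
  assert!(matches!(config.stats_polling_rate, Timelength::FiveSeconds));
  assert!(matches!(
    config.container_stats_polling_rate,
    Timelength::ThirtySeconds
  ));
  // Overrides: `container_stats_polling_rate = "10-sec"` in the
  // config file, or PERIPHERY_CONTAINER_STATS_POLLING_RATE=10-sec in
  // the environment (assuming the usual uppercase env mapping).
  Ok(())
}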
@@ -324,6 +337,8 @@ impl Default for PeripheryConfig {
      disable_terminals: Default::default(),
      disable_container_exec: Default::default(),
      stats_polling_rate: default_stats_polling_rate(),
      container_stats_polling_rate:
        default_container_stats_polling_rate(),
      legacy_compose_cli: Default::default(),
      logging: Default::default(),
      pretty_startup_config: Default::default(),
@@ -353,6 +368,7 @@ impl PeripheryConfig {
      disable_terminals: self.disable_terminals,
      disable_container_exec: self.disable_container_exec,
      stats_polling_rate: self.stats_polling_rate,
      container_stats_polling_rate: self.container_stats_polling_rate,
      legacy_compose_cli: self.legacy_compose_cli,
      logging: self.logging.clone(),
      pretty_startup_config: self.pretty_startup_config,

@@ -16,6 +16,7 @@ use crate::{
    option_string_list_deserializer, option_term_labels_deserializer,
    string_list_deserializer, term_labels_deserializer,
  },
  entities::{EnvironmentVar, environment_vars_from_str},
  parsers::parse_key_value_list,
};

@@ -208,6 +209,11 @@ impl DeploymentConfig {
  pub fn builder() -> DeploymentConfigBuilder {
    DeploymentConfigBuilder::default()
  }

  pub fn env_vars(&self) -> anyhow::Result<Vec<EnvironmentVar>> {
    environment_vars_from_str(&self.environment)
      .context("Invalid environment")
  }
}

fn default_send_alerts() -> bool {

@@ -8,6 +8,7 @@ use crate::entities::{I64, Usize};

use super::{ContainerConfig, GraphDriverData, PortBinding};

/// Container summary returned by container list apis.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
@@ -37,8 +38,12 @@ pub struct ContainerListItem {
  pub network_mode: Option<String>,
  /// The network names attached to container
  pub networks: Vec<String>,
  /// Port mappings for the container
  pub ports: Vec<Port>,
  /// The volume names attached to container
  pub volumes: Vec<String>,
  /// The container stats, if they can be retrieved.
  pub stats: Option<ContainerStats>,
  /// The labels attached to container.
  /// It's too big to send with container list,
  /// can get it using InspectContainer
@@ -1151,7 +1156,7 @@ pub struct EndpointIpamConfig {
}

#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ContainerStats {
  #[serde(alias = "Name")]
  pub name: String,

@@ -8,6 +8,7 @@ use super::I64;
pub mod container;
pub mod image;
pub mod network;
pub mod stats;
pub mod volume;

/// PortBinding represents a binding between a host IP address and a host port.

304 client/core/rs/src/entities/docker/stats.rs Normal file
@@ -0,0 +1,304 @@
use std::collections::HashMap;

use serde::{Deserialize, Serialize};
use typeshare::typeshare;

use crate::entities::U64;

/// Statistics sample for a container.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct FullContainerStats {
  /// Name of the container
  pub name: String,

  /// ID of the container
  pub id: Option<String>,

  /// Date and time at which this sample was collected.
  /// The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds.
  pub read: Option<String>,

  /// Date and time at which this first sample was collected.
  /// This field is not propagated if the \"one-shot\" option is set.
  /// If the \"one-shot\" option is set, this field may be omitted, empty,
  /// or set to a default date (`0001-01-01T00:00:00Z`).
  /// The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds.
  pub preread: Option<String>,

  /// PidsStats contains Linux-specific stats of a container's process-IDs (PIDs).
  /// This type is Linux-specific and omitted for Windows containers.
  pub pids_stats: Option<ContainerPidsStats>,

  /// BlkioStats stores all IO service stats for data read and write.
  /// This type is Linux-specific and holds many fields that are specific to cgroups v1.
  /// On a cgroup v2 host, all fields other than `io_service_bytes_recursive` are omitted or `null`.
  /// This type is only populated on Linux and omitted for Windows containers.
  pub blkio_stats: Option<ContainerBlkioStats>,

  /// The number of processors on the system.
  /// This field is Windows-specific and always zero for Linux containers.
  pub num_procs: Option<u32>,

  #[serde(rename = "storage_stats")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub storage_stats: Option<ContainerStorageStats>,

  #[serde(rename = "cpu_stats")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub cpu_stats: Option<ContainerCpuStats>,

  #[serde(rename = "precpu_stats")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub precpu_stats: Option<ContainerCpuStats>,

  #[serde(rename = "memory_stats")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub memory_stats: Option<ContainerMemoryStats>,

  /// Network statistics for the container per interface. This field is omitted if the container has no networking enabled.
  #[serde(rename = "networks")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub networks: Option<ContainerNetworkStats>,
}

/// PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). This type is Linux-specific and omitted for Windows containers.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerPidsStats {
  /// Current is the number of PIDs in the cgroup.
  pub current: Option<U64>,

  /// Limit is the hard limit on the number of pids in the cgroup. A \"Limit\" of 0 means that there is no limit.
  pub limit: Option<U64>,
}

/// BlkioStats stores all IO service stats for data read and write.
/// This type is Linux-specific and holds many fields that are specific to cgroups v1.
/// On a cgroup v2 host, all fields other than `io_service_bytes_recursive` are omitted or `null`.
/// This type is only populated on Linux and omitted for Windows containers.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerBlkioStats {
  #[serde(rename = "io_service_bytes_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub io_service_bytes_recursive:
    Option<Vec<ContainerBlkioStatEntry>>,

  /// This field is only available when using Linux containers with cgroups v1.
  /// It is omitted or `null` when using cgroups v2.
  #[serde(rename = "io_serviced_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub io_serviced_recursive: Option<Vec<ContainerBlkioStatEntry>>,

  /// This field is only available when using Linux containers with cgroups v1.
  /// It is omitted or `null` when using cgroups v2.
  #[serde(rename = "io_queue_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub io_queue_recursive: Option<Vec<ContainerBlkioStatEntry>>,

  /// This field is only available when using Linux containers with cgroups v1.
  /// It is omitted or `null` when using cgroups v2.
  #[serde(rename = "io_service_time_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub io_service_time_recursive: Option<Vec<ContainerBlkioStatEntry>>,

  /// This field is only available when using Linux containers with cgroups v1.
  /// It is omitted or `null` when using cgroups v2.
  #[serde(rename = "io_wait_time_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub io_wait_time_recursive: Option<Vec<ContainerBlkioStatEntry>>,

  /// This field is only available when using Linux containers with cgroups v1.
  /// It is omitted or `null` when using cgroups v2.
  #[serde(rename = "io_merged_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub io_merged_recursive: Option<Vec<ContainerBlkioStatEntry>>,

  /// This field is only available when using Linux containers with cgroups v1.
  /// It is omitted or `null` when using cgroups v2.
  #[serde(rename = "io_time_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub io_time_recursive: Option<Vec<ContainerBlkioStatEntry>>,

  /// This field is only available when using Linux containers with cgroups v1.
  /// It is omitted or `null` when using cgroups v2.
  #[serde(rename = "sectors_recursive")]
  #[serde(skip_serializing_if = "Option::is_none")]
  pub sectors_recursive: Option<Vec<ContainerBlkioStatEntry>>,
}

/// Blkio stats entry. This type is Linux-specific and omitted for Windows containers.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerBlkioStatEntry {
  pub major: Option<U64>,
  pub minor: Option<U64>,
  pub op: Option<String>,
  pub value: Option<U64>,
}

/// StorageStats is the disk I/O stats for read/write on Windows.
/// This type is Windows-specific and omitted for Linux containers.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerStorageStats {
  pub read_count_normalized: Option<U64>,
  pub read_size_bytes: Option<U64>,
  pub write_count_normalized: Option<U64>,
  pub write_size_bytes: Option<U64>,
}

/// CPU related info of the container
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerCpuStats {
  /// All CPU stats aggregated since container inception.
  pub cpu_usage: Option<ContainerCpuUsage>,

  /// System Usage.
  /// This field is Linux-specific and omitted for Windows containers.
  pub system_cpu_usage: Option<U64>,

  /// Number of online CPUs.
  /// This field is Linux-specific and omitted for Windows containers.
  pub online_cpus: Option<u32>,

  /// CPU throttling stats of the container.
  /// This type is Linux-specific and omitted for Windows containers.
  pub throttling_data: Option<ContainerThrottlingData>,
}

/// All CPU stats aggregated since container inception.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerCpuUsage {
  /// Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows).
  pub total_usage: Option<U64>,

  /// Total CPU time (in nanoseconds) consumed per core (Linux).
  /// This field is Linux-specific when using cgroups v1.
  /// It is omitted when using cgroups v2 and Windows containers.
  pub percpu_usage: Option<Vec<U64>>,

  /// Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux),
  /// or time spent (in 100's of nanoseconds) by all container processes in kernel mode (Windows).
  /// Not populated for Windows containers using Hyper-V isolation.
  pub usage_in_kernelmode: Option<U64>,

  /// Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux),
  /// or time spent (in 100's of nanoseconds) by all container processes in user mode (Windows).
  /// Not populated for Windows containers using Hyper-V isolation.
  pub usage_in_usermode: Option<U64>,
}

/// CPU throttling stats of the container.
/// This type is Linux-specific and omitted for Windows containers.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerThrottlingData {
  /// Number of periods with throttling active.
  pub periods: Option<U64>,

  /// Number of periods when the container hit its throttling limit.
  pub throttled_periods: Option<U64>,

  /// Aggregated time (in nanoseconds) the container was throttled for.
  pub throttled_time: Option<U64>,
}

/// Aggregates all memory stats since container inception on Linux.
/// Windows returns stats for commit and private working set only.
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerMemoryStats {
  /// Current `res_counter` usage for memory.
  /// This field is Linux-specific and omitted for Windows containers.
  pub usage: Option<U64>,

  /// Maximum usage ever recorded.
  /// This field is Linux-specific and only supported on cgroups v1.
  /// It is omitted when using cgroups v2 and for Windows containers.
  pub max_usage: Option<U64>,

  /// All the stats exported via memory.stat when using cgroups v2.
  /// This field is Linux-specific and omitted for Windows containers.
  pub stats: Option<HashMap<String, U64>>,

  /// Number of times memory usage hits limits. This field is Linux-specific and only supported on cgroups v1. It is omitted when using cgroups v2 and for Windows containers.
  pub failcnt: Option<U64>,

  /// This field is Linux-specific and omitted for Windows containers.
  pub limit: Option<U64>,

  /// Committed bytes.
  /// This field is Windows-specific and omitted for Linux containers.
  pub commitbytes: Option<U64>,

  /// Peak committed bytes.
  /// This field is Windows-specific and omitted for Linux containers.
  pub commitpeakbytes: Option<U64>,

  /// Private working set.
  /// This field is Windows-specific and omitted for Linux containers.
  pub privateworkingset: Option<U64>,
}

/// Aggregates the network stats of one container
#[typeshare]
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct ContainerNetworkStats {
  /// Bytes received. Windows and Linux.
  pub rx_bytes: Option<U64>,

  /// Packets received. Windows and Linux.
  pub rx_packets: Option<U64>,

  /// Received errors. Not used on Windows.
  /// This field is Linux-specific and always zero for Windows containers.
  pub rx_errors: Option<U64>,

  /// Incoming packets dropped. Windows and Linux.
  pub rx_dropped: Option<U64>,

  /// Bytes sent. Windows and Linux.
  pub tx_bytes: Option<U64>,

  /// Packets sent. Windows and Linux.
  pub tx_packets: Option<U64>,

  /// Sent errors. Not used on Windows.
  /// This field is Linux-specific and always zero for Windows containers.
  pub tx_errors: Option<U64>,

  /// Outgoing packets dropped. Windows and Linux.
  pub tx_dropped: Option<U64>,

  /// Endpoint ID. Not used on Linux.
  /// This field is Windows-specific and omitted for Linux containers.
  pub endpoint_id: Option<String>,

  /// Instance ID. Not used on Linux.
  /// This field is Windows-specific and omitted for Linux containers.
  pub instance_id: Option<String>,
}
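Because every field in `FullContainerStats` is optional, consumers have to treat a sample defensively. A hedged sketch of deriving a CPU percentage from one sample, using the usual `docker stats` delta formula (cpu delta over system delta, scaled by online CPUs; assumes `U64` is a plain `u64` alias):

fn cpu_percent(s: &FullContainerStats) -> Option<f64> {
  let cpu = s.cpu_stats.as_ref()?;
  let pre = s.precpu_stats.as_ref()?;
  // Deltas between the current and previous sample.
  let cpu_delta = cpu
    .cpu_usage
    .as_ref()?
    .total_usage?
    .checked_sub(pre.cpu_usage.as_ref()?.total_usage?)? as f64;
  let system_delta =
    cpu.system_cpu_usage?.checked_sub(pre.system_cpu_usage?)? as f64;
  if system_delta <= 0.0 {
    return None;
  }
  // Fall back to 1 CPU if the daemon omits online_cpus.
  let online = cpu.online_cpus.unwrap_or(1) as f64;
  Some(cpu_delta / system_delta * online * 100.0)
}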
@@ -18,7 +18,7 @@ use strum::{AsRefStr, Display, EnumString};
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::{
|
||||
deserializers::file_contents_deserializer,
|
||||
deserializers::file_contents_deserializer, entities::update::Log,
|
||||
parsers::parse_key_value_list,
|
||||
};
|
||||
|
||||
@@ -442,34 +442,49 @@ fn default_enabled() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize,
|
||||
)]
|
||||
pub enum DefaultRepoFolder {
|
||||
/// /${root_directory}/stacks
|
||||
Stacks,
|
||||
/// /${root_directory}/builds
|
||||
Builds,
|
||||
/// /${root_directory}/repos
|
||||
Repos,
|
||||
/// If the repo is only cloned
|
||||
/// in the core repo cache (resource sync),
|
||||
/// this isn't relevant.
|
||||
NotApplicable,
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
|
||||
pub struct CloneArgs {
|
||||
pub struct RepoExecutionArgs {
|
||||
/// Resource name (eg Build name, Repo name)
|
||||
pub name: String,
|
||||
/// Git provider domain. Default: `github.com`
|
||||
pub provider: String,
|
||||
/// Use https (vs http).
|
||||
pub https: bool,
|
||||
/// Configure the account used to access repo (if private)
|
||||
pub account: Option<String>,
|
||||
/// Full repo identifier. {namespace}/{repo_name}
|
||||
/// Its optional to force checking and produce error if not defined.
|
||||
pub repo: Option<String>,
|
||||
/// Git Branch. Default: `main`
|
||||
pub branch: String,
|
||||
/// Specific commit hash. Optional
|
||||
pub commit: Option<String>,
|
||||
/// Use PERIPHERY_BUILD_DIR as the parent folder for the clone.
|
||||
pub is_build: bool,
|
||||
/// The clone destination path
|
||||
pub destination: Option<String>,
|
||||
/// Command to run after the repo has been cloned
|
||||
pub on_clone: Option<SystemCommand>,
|
||||
/// Command to run after the repo has been pulled
|
||||
pub on_pull: Option<SystemCommand>,
|
||||
/// Configure the account used to access repo (if private)
|
||||
pub account: Option<String>,
|
||||
/// The default folder to use.
|
||||
/// Depends on the resource type.
|
||||
pub default_folder: DefaultRepoFolder,
|
||||
}
|
||||
|
||||
impl CloneArgs {
|
||||
impl RepoExecutionArgs {
|
||||
pub fn path(&self, root_repo_dir: &Path) -> PathBuf {
|
||||
match &self.destination {
|
||||
Some(destination) => root_repo_dir
|
||||
@@ -519,164 +534,190 @@ impl CloneArgs {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&self::build::Build> for CloneArgs {
|
||||
fn from(build: &self::build::Build) -> CloneArgs {
|
||||
CloneArgs {
|
||||
name: build.name.clone(),
|
||||
provider: optional_string(&build.config.git_provider)
|
||||
.unwrap_or_else(|| String::from("github.com")),
|
||||
repo: optional_string(&build.config.repo),
|
||||
branch: optional_string(&build.config.branch)
|
||||
.unwrap_or_else(|| String::from("main")),
|
||||
commit: optional_string(&build.config.commit),
|
||||
is_build: true,
|
||||
destination: None,
|
||||
on_clone: None,
|
||||
on_pull: None,
|
||||
https: build.config.git_https,
|
||||
account: optional_string(&build.config.git_account),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&self::repo::Repo> for CloneArgs {
|
||||
fn from(repo: &self::repo::Repo) -> CloneArgs {
|
||||
CloneArgs {
|
||||
name: repo.name.clone(),
|
||||
provider: optional_string(&repo.config.git_provider)
|
||||
.unwrap_or_else(|| String::from("github.com")),
|
||||
repo: optional_string(&repo.config.repo),
|
||||
branch: optional_string(&repo.config.branch)
|
||||
.unwrap_or_else(|| String::from("main")),
|
||||
commit: optional_string(&repo.config.commit),
|
||||
is_build: false,
|
||||
destination: optional_string(&repo.config.path),
|
||||
on_clone: repo.config.on_clone.clone().into_option(),
|
||||
on_pull: repo.config.on_pull.clone().into_option(),
|
||||
https: repo.config.git_https,
|
||||
account: optional_string(&repo.config.git_account),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&self::sync::ResourceSync> for CloneArgs {
|
||||
fn from(sync: &self::sync::ResourceSync) -> Self {
|
||||
CloneArgs {
|
||||
name: sync.name.clone(),
|
||||
provider: optional_string(&sync.config.git_provider)
|
||||
.unwrap_or_else(|| String::from("github.com")),
|
||||
repo: optional_string(&sync.config.repo),
|
||||
branch: optional_string(&sync.config.branch)
|
||||
.unwrap_or_else(|| String::from("main")),
|
||||
commit: optional_string(&sync.config.commit),
|
||||
is_build: false,
|
||||
destination: None,
|
||||
on_clone: None,
|
||||
on_pull: None,
|
||||
https: sync.config.git_https,
|
||||
account: optional_string(&sync.config.git_account),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&self::stack::Stack> for CloneArgs {
|
||||
impl From<&self::stack::Stack> for RepoExecutionArgs {
|
||||
fn from(stack: &self::stack::Stack) -> Self {
|
||||
CloneArgs {
|
||||
RepoExecutionArgs {
|
||||
name: stack.name.clone(),
|
||||
provider: optional_string(&stack.config.git_provider)
|
||||
.unwrap_or_else(|| String::from("github.com")),
|
||||
https: stack.config.git_https,
|
||||
account: optional_string(&stack.config.git_account),
|
||||
repo: optional_string(&stack.config.repo),
|
||||
branch: optional_string(&stack.config.branch)
|
||||
.unwrap_or_else(|| String::from("main")),
|
||||
commit: optional_string(&stack.config.commit),
|
||||
is_build: false,
|
||||
destination: optional_string(&stack.config.clone_path),
|
||||
on_clone: None,
|
||||
on_pull: None,
|
||||
https: stack.config.git_https,
|
||||
account: optional_string(&stack.config.git_account),
|
||||
default_folder: DefaultRepoFolder::Stacks,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&self::build::Build> for RepoExecutionArgs {
|
||||
fn from(build: &self::build::Build) -> RepoExecutionArgs {
|
||||
RepoExecutionArgs {
|
||||
name: build.name.clone(),
|
||||
provider: optional_string(&build.config.git_provider)
|
||||
.unwrap_or_else(|| String::from("github.com")),
|
||||
https: build.config.git_https,
|
||||
account: optional_string(&build.config.git_account),
|
||||
repo: optional_string(&build.config.repo),
|
||||
branch: optional_string(&build.config.branch)
|
||||
.unwrap_or_else(|| String::from("main")),
|
||||
commit: optional_string(&build.config.commit),
|
||||
destination: None,
|
||||
default_folder: DefaultRepoFolder::Builds,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&self::repo::Repo> for RepoExecutionArgs {
|
||||
fn from(repo: &self::repo::Repo) -> RepoExecutionArgs {
|
||||
RepoExecutionArgs {
|
||||
name: repo.name.clone(),
|
||||
provider: optional_string(&repo.config.git_provider)
|
||||
.unwrap_or_else(|| String::from("github.com")),
|
||||
https: repo.config.git_https,
|
||||
account: optional_string(&repo.config.git_account),
|
||||
repo: optional_string(&repo.config.repo),
|
||||
branch: optional_string(&repo.config.branch)
|
||||
.unwrap_or_else(|| String::from("main")),
|
||||
commit: optional_string(&repo.config.commit),
|
||||
destination: optional_string(&repo.config.path),
|
||||
default_folder: DefaultRepoFolder::Repos,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&self::sync::ResourceSync> for RepoExecutionArgs {
|
||||
fn from(sync: &self::sync::ResourceSync) -> Self {
|
||||
RepoExecutionArgs {
|
||||
name: sync.name.clone(),
|
||||
provider: optional_string(&sync.config.git_provider)
|
||||
.unwrap_or_else(|| String::from("github.com")),
|
||||
https: sync.config.git_https,
|
||||
account: optional_string(&sync.config.git_account),
|
||||
repo: optional_string(&sync.config.repo),
|
||||
branch: optional_string(&sync.config.branch)
|
||||
.unwrap_or_else(|| String::from("main")),
|
||||
commit: optional_string(&sync.config.commit),
|
||||
destination: None,
|
||||
default_folder: DefaultRepoFolder::NotApplicable,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
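The `From` impls keep args construction uniform across resource types; only the defaulted fields and `default_folder` differ. A hedged sketch of the call sites this enables (the resource module paths are assumed from the impls above):

fn to_args(
  stack: &stack::Stack,
  build: &build::Build,
  repo: &repo::Repo,
) -> [RepoExecutionArgs; 3] {
  // Missing provider/branch fall back to "github.com" / "main";
  // each resource picks up its own DefaultRepoFolder variant
  // (Stacks / Builds / Repos).
  [stack.into(), build.into(), repo.into()]
}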
#[typeshare]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct RepoExecutionResponse {
  /// Response logs
  pub logs: Vec<Log>,
  /// Absolute path to the repo root on the host.
  pub path: PathBuf,
  /// Latest short commit hash, if it could be retrieved
  pub commit_hash: Option<String>,
  /// Latest commit message, if it could be retrieved
  pub commit_message: Option<String>,
}

#[typeshare]
#[derive(
  Serialize,
  Deserialize,
  Debug,
  Display,
  EnumString,
  PartialEq,
  Hash,
  Eq,
  Clone,
  Copy,
  PartialEq,
  Eq,
  Hash,
  Default,
  Serialize,
  Deserialize,
  Display,
  EnumString,
)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum Timelength {
  /// `1-sec`
  #[serde(rename = "1-sec")]
  #[strum(serialize = "1-sec")]
  OneSecond,
  /// `5-sec`
  #[serde(rename = "5-sec")]
  #[strum(serialize = "5-sec")]
  FiveSeconds,
  /// `10-sec`
  #[serde(rename = "10-sec")]
  #[strum(serialize = "10-sec")]
  TenSeconds,
  /// `15-sec`
  #[serde(rename = "15-sec")]
  #[strum(serialize = "15-sec")]
  FifteenSeconds,
  /// `30-sec`
  #[serde(rename = "30-sec")]
  #[strum(serialize = "30-sec")]
  ThirtySeconds,
  #[default]
  /// `1-min`
  #[serde(rename = "1-min")]
  #[strum(serialize = "1-min")]
  OneMinute,
  /// `2-min`
  #[serde(rename = "2-min")]
  #[strum(serialize = "2-min")]
  TwoMinutes,
  /// `5-min`
  #[serde(rename = "5-min")]
  #[strum(serialize = "5-min")]
  FiveMinutes,
  /// `10-min`
  #[serde(rename = "10-min")]
  #[strum(serialize = "10-min")]
  TenMinutes,
  /// `15-min`
  #[serde(rename = "15-min")]
  #[strum(serialize = "15-min")]
  FifteenMinutes,
  /// `30-min`
  #[serde(rename = "30-min")]
  #[strum(serialize = "30-min")]
  ThirtyMinutes,
  /// `1-hr`
  #[serde(rename = "1-hr")]
  #[strum(serialize = "1-hr")]
  OneHour,
  /// `2-hr`
  #[serde(rename = "2-hr")]
  #[strum(serialize = "2-hr")]
  TwoHours,
  /// `6-hr`
  #[serde(rename = "6-hr")]
  #[strum(serialize = "6-hr")]
  SixHours,
  /// `8-hr`
  #[serde(rename = "8-hr")]
  #[strum(serialize = "8-hr")]
  EightHours,
  /// `12-hr`
  #[serde(rename = "12-hr")]
  #[strum(serialize = "12-hr")]
  TwelveHours,
  /// `1-day`
  #[serde(rename = "1-day")]
  #[strum(serialize = "1-day")]
  OneDay,
  /// `3-day`
  #[serde(rename = "3-day")]
  #[strum(serialize = "3-day")]
  ThreeDay,
  /// `1-wk`
  #[serde(rename = "1-wk")]
  #[strum(serialize = "1-wk")]
  OneWeek,
  /// `2-wk`
  #[serde(rename = "2-wk")]
  #[strum(serialize = "2-wk")]
  TwoWeeks,
  /// `30-day`
  #[serde(rename = "30-day")]
  #[strum(serialize = "30-day")]
  ThirtyDays,

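The paired `serde(rename)` / `strum(serialize)` attributes keep the JSON form and the `Display`/`FromStr` form identical, and the newly added `Default` (on `OneMinute`) is what the `Option<Timelength>` env overrides and `#[serde(default)]` fields lean on. A small hedged round-trip sketch:

use std::str::FromStr;

fn main() {
  let t = Timelength::from_str("30-sec").unwrap();
  assert!(matches!(t, Timelength::ThirtySeconds));
  // Display and serde use the same kebab string.
  assert_eq!(t.to_string(), "30-sec");
  assert_eq!(serde_json::to_string(&t).unwrap(), "\"30-sec\"");
  // The new Default lands on OneMinute ("1-min").
  assert!(matches!(Timelength::default(), Timelength::OneMinute));
}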
@@ -37,10 +37,10 @@ pub struct Resource<Config: Default, Info: Default = ()> {
  #[builder(default)]
  pub description: String,

  /// When description last updated
  /// Mark resource as a template
  #[serde(default)]
  #[builder(setter(skip))]
  pub updated_at: I64,
  #[builder(default)]
  pub template: bool,

  /// Tag Ids
  #[serde(default, deserialize_with = "string_list_deserializer")]
@@ -62,6 +62,11 @@ pub struct Resource<Config: Default, Info: Default = ()> {
  #[serde(default)]
  #[builder(default)]
  pub base_permission: PermissionLevelAndSpecifics,

  /// When description last updated
  #[serde(default)]
  #[builder(setter(skip))]
  pub updated_at: I64,
}

impl<C: Default, I: Default> Default for Resource<C, I> {
@@ -70,11 +75,12 @@ impl<C: Default, I: Default> Default for Resource<C, I> {
      id: String::new(),
      name: String::from("temp-resource"),
      description: String::new(),
      updated_at: 0,
      template: Default::default(),
      tags: Vec::new(),
      info: I::default(),
      config: C::default(),
      base_permission: Default::default(),
      updated_at: 0,
    }
  }
}
@@ -89,6 +95,8 @@ pub struct ResourceListItem<Info> {
  pub resource_type: ResourceTargetVariant,
  /// The resource name
  pub name: String,
  /// Whether resource is a template
  pub template: bool,
  /// Tag Ids
  pub tags: Vec<String>,
  /// Resource specific info
@@ -103,19 +111,33 @@ pub struct ResourceListItem<Info> {
pub struct ResourceQuery<T: Default> {
  #[serde(default)]
  pub names: Vec<String>,
  #[serde(default)]
  pub templates: TemplatesQueryBehavior,
  /// Pass Vec of tag ids or tag names
  #[serde(default, deserialize_with = "string_list_deserializer")]
  pub tags: Vec<String>,
  /// 'All' or 'Any'
  #[serde(default)]
  pub tag_behavior: TagBehavior,
  pub tag_behavior: TagQueryBehavior,
  #[serde(default)]
  pub specific: T,
}

#[typeshare]
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub enum TagBehavior {
pub enum TemplatesQueryBehavior {
  /// Include templates in results. Default.
  #[default]
  Include,
  /// Exclude templates from results.
  Exclude,
  /// Results *only* include templates.
  Only,
}

#[typeshare]
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub enum TagQueryBehavior {
  /// Returns resources which have strictly all the tags
  #[default]
  All,
@@ -134,12 +156,23 @@ impl<T: AddFilters + Default> AddFilters for ResourceQuery<T> {
    if !self.names.is_empty() {
      filters.insert("name", doc! { "$in": &self.names });
    }
    match self.templates {
      TemplatesQueryBehavior::Exclude => {
        filters.insert("template", doc! { "$ne": true });
      }
      TemplatesQueryBehavior::Only => {
        filters.insert("template", true);
      }
      TemplatesQueryBehavior::Include => {
        // No query on template field necessary
      }
    };
    if !self.tags.is_empty() {
      match self.tag_behavior {
        TagBehavior::All => {
        TagQueryBehavior::All => {
          filters.insert("tags", doc! { "$all": &self.tags });
        }
        TagBehavior::Any => {
        TagQueryBehavior::Any => {
          let ors = self
            .tags
            .iter()

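The template arm above compiles down to three simple MongoDB filters, with a deliberate asymmetry: `Exclude` uses `$ne: true` so documents that predate the `template` field still match, while `Only` uses strict equality. A sketch with the same `bson::doc!` macro:

use bson::doc;

fn main() {
  // TemplatesQueryBehavior::Exclude — templates filtered out, but
  // documents with no `template` key still match:
  let exclude = doc! { "template": { "$ne": true } };
  // TemplatesQueryBehavior::Only — strict equality:
  let only = doc! { "template": true };
  // TemplatesQueryBehavior::Include — no filter added at all.
  assert_ne!(exclude, only);
}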
@@ -1,5 +1,6 @@
use std::{collections::HashMap, sync::OnceLock};

use anyhow::Context;
use bson::{Document, doc};
use derive_builder::Builder;
use derive_default_builder::DefaultBuilder;
@@ -8,11 +9,14 @@ use serde::{Deserialize, Serialize};
use strum::Display;
use typeshare::typeshare;

use crate::deserializers::{
  env_vars_deserializer, file_contents_deserializer,
  option_env_vars_deserializer, option_file_contents_deserializer,
  option_maybe_string_i64_deserializer,
  option_string_list_deserializer, string_list_deserializer,
use crate::{
  deserializers::{
    env_vars_deserializer, file_contents_deserializer,
    option_env_vars_deserializer, option_file_contents_deserializer,
    option_maybe_string_i64_deserializer,
    option_string_list_deserializer, string_list_deserializer,
  },
  entities::{EnvironmentVar, environment_vars_from_str},
};

use super::{
@@ -476,6 +480,11 @@ impl StackConfig {
  pub fn builder() -> StackConfigBuilder {
    StackConfigBuilder::default()
  }

  pub fn env_vars(&self) -> anyhow::Result<Vec<EnvironmentVar>> {
    environment_vars_from_str(&self.environment)
      .context("Invalid environment")
  }
}

fn default_env_file_path() -> String {

@@ -119,6 +119,10 @@ pub struct ResourceToml<PartialConfig: Default> {
  #[serde(default, skip_serializing_if = "String::is_empty")]
  pub description: String,

  /// Mark resource as a template
  #[serde(default, skip_serializing_if = "is_false")]
  pub template: bool,

  /// Tag ids or names. Optional
  #[serde(default, skip_serializing_if = "Vec::is_empty")]
  pub tags: Vec<String>,

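In resource sync TOML this flag sits beside description and tags, and `skip_serializing_if = "is_false"` keeps it out of exports when unset. A hedged sketch of such a block (the surrounding sync file shape is assumed):

fn template_snippet() -> &'static str {
  // Omitting `template` deserializes as `false` via serde(default).
  r#"
name = "base-server"
description = "Starting point for new servers"
template = true
tags = ["template"]
"#
}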
@@ -1,6 +1,6 @@
{
  "name": "komodo_client",
  "version": "1.18.3",
  "version": "1.18.4",
  "description": "Komodo client package",
  "homepage": "https://komo.do",
  "main": "dist/lib.js",

@@ -204,11 +204,12 @@ export type WriteResponses = {
  UpdatePermissionOnResourceType: Types.UpdatePermissionOnResourceTypeResponse;
  UpdatePermissionOnTarget: Types.UpdatePermissionOnTargetResponse;

  // ==== DESCRIPTION ====
  UpdateDescription: Types.UpdateDescriptionResponse;
  // ==== RESOURCE ====
  UpdateResourceMeta: Types.UpdateResourceMetaResponse;

  // ==== SERVER ====
  CreateServer: Types.Server;
  CopyServer: Types.Server;
  DeleteServer: Types.Server;
  UpdateServer: Types.Server;
  RenameServer: Types.Update;
@@ -302,7 +303,6 @@ export type WriteResponses = {
  DeleteTag: Types.Tag;
  RenameTag: Types.Tag;
  UpdateTagColor: Types.Tag;
  UpdateTagsOnResource: Types.UpdateTagsOnResourceResponse;

  // ==== VARIABLE ====
  CreateVariable: Types.CreateVariableResponse;

@@ -8,8 +8,6 @@ export interface MongoIdObj {

export type MongoId = MongoIdObj;

export type I64 = number;

/** The levels of permission that a User or UserGroup can have on a resource. */
export enum PermissionLevel {
  /** No permissions. */
@@ -27,6 +25,8 @@ export interface PermissionLevelAndSpecifics {
  specific: Array<SpecificPermission>;
}

export type I64 = number;

export interface Resource<Config, Info> {
  /**
   * The Mongo ID of the resource.
@@ -41,8 +41,8 @@ export interface Resource<Config, Info> {
  name: string;
  /** A description for the resource */
  description?: string;
  /** When description last updated */
  updated_at?: I64;
  /** Mark resource as a template */
  template?: boolean;
  /** Tag Ids */
  tags?: string[];
  /** Resource-specific information (not user configurable). */
@@ -54,6 +54,8 @@ export interface Resource<Config, Info> {
   * resource.
   */
  base_permission?: PermissionLevelAndSpecifics | PermissionLevel;
  /** When description last updated */
  updated_at?: I64;
}

export enum ScheduleFormat {
@@ -129,6 +131,8 @@ export interface ResourceListItem<Info> {
  type: ResourceTarget["type"];
  /** The resource name */
  name: string;
  /** Whether resource is a template */
  template: boolean;
  /** Tag Ids */
  tags: string[];
  /** Resource specific info */
@@ -165,7 +169,16 @@ export interface ActionListItemInfo {

export type ActionListItem = ResourceListItem<ActionListItemInfo>;

export enum TagBehavior {
export enum TemplatesQueryBehavior {
  /** Include templates in results. Default. */
  Include = "Include",
  /** Exclude templates from results. */
  Exclude = "Exclude",
  /** Results *only* include templates. */
  Only = "Only",
}

export enum TagQueryBehavior {
  /** Returns resources which have strictly all the tags */
  All = "All",
  /** Returns resources which have one or more of the tags */
@@ -175,10 +188,11 @@ export enum TagBehavior {

/** Passing empty Vec is the same as not filtering by that field */
export interface ResourceQuery<T> {
  names?: string[];
  templates?: TemplatesQueryBehavior;
  /** Pass Vec of tag ids or tag names */
  tags?: string[];
  /** 'All' or 'Any' */
  tag_behavior?: TagBehavior;
  tag_behavior?: TagQueryBehavior;
  specific?: T;
}

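These TS fields mirror the Rust `ResourceQuery` one-to-one, so a typed query looks the same on either side of the wire. A hedged Rust-side sketch (assuming `ResourceQuery`'s `Default` impl, since every field is serde-defaulted):

fn prod_stack_templates() -> ResourceQuery<StackQuerySpecifics> {
  // Only templates, tagged "prod" (Any = one or more of the tags).
  ResourceQuery {
    templates: TemplatesQueryBehavior::Only,
    tags: vec![String::from("prod")],
    tag_behavior: TagQueryBehavior::Any,
    ..Default::default()
  }
}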
@@ -2271,26 +2285,47 @@ export interface SingleDiskUsage {
}

export enum Timelength {
  /** `1-sec` */
  OneSecond = "1-sec",
  /** `5-sec` */
  FiveSeconds = "5-sec",
  /** `10-sec` */
  TenSeconds = "10-sec",
  /** `15-sec` */
  FifteenSeconds = "15-sec",
  /** `30-sec` */
  ThirtySeconds = "30-sec",
  /** `1-min` */
  OneMinute = "1-min",
  /** `2-min` */
  TwoMinutes = "2-min",
  /** `5-min` */
  FiveMinutes = "5-min",
  /** `10-min` */
  TenMinutes = "10-min",
  /** `15-min` */
  FifteenMinutes = "15-min",
  /** `30-min` */
  ThirtyMinutes = "30-min",
  /** `1-hr` */
  OneHour = "1-hr",
  /** `2-hr` */
  TwoHours = "2-hr",
  /** `6-hr` */
  SixHours = "6-hr",
  /** `8-hr` */
  EightHours = "8-hr",
  /** `12-hr` */
  TwelveHours = "12-hr",
  /** `1-day` */
  OneDay = "1-day",
  /** `3-day` */
  ThreeDay = "3-day",
  /** `1-wk` */
  OneWeek = "1-wk",
  /** `2-wk` */
  TwoWeeks = "2-wk",
  /** `30-day` */
  ThirtyDays = "30-day",
}

@@ -3242,6 +3277,25 @@ export type ListActionsResponse = ActionListItem[];

export type ListAlertersResponse = AlerterListItem[];

export enum PortTypeEnum {
  EMPTY = "",
  TCP = "tcp",
  UDP = "udp",
  SCTP = "sctp",
}

/** An open port on a container */
export interface Port {
  /** Host IP address that the container's port is mapped to */
  IP?: string;
  /** Port on the container */
  PrivatePort?: number;
  /** Port exposed on the host */
  PublicPort?: number;
  Type?: PortTypeEnum;
}

/** Container summary returned by container list apis. */
export interface ContainerListItem {
  /** The Server which holds the container. */
  server_id?: string;
@@ -3267,8 +3321,12 @@ export interface ContainerListItem {
  network_mode?: string;
  /** The network names attached to container */
  networks: string[];
  /** Port mappings for the container */
  ports: Port[];
  /** The volume names attached to container */
  volumes: string[];
  /** The container stats, if they can be retrieved. */
  stats?: ContainerStats;
  /**
   * The labels attached to container.
   * It's too big to send with container list,
@@ -3871,8 +3929,6 @@ export interface StackQuerySpecifics {
|
||||
|
||||
export type StackQuery = ResourceQuery<StackQuerySpecifics>;
|
||||
|
||||
export type UpdateDescriptionResponse = NoData;
|
||||
|
||||
export type UpdateDockerRegistryAccountResponse = DockerRegistryAccount;
|
||||
|
||||
export type UpdateGitProviderAccountResponse = GitProviderAccount;
|
||||
@@ -3883,9 +3939,9 @@ export type UpdatePermissionOnTargetResponse = NoData;
|
||||
|
||||
export type UpdateProcedureResponse = Procedure;
|
||||
|
||||
export type UpdateServiceUserDescriptionResponse = User;
|
||||
export type UpdateResourceMetaResponse = NoData;
|
||||
|
||||
export type UpdateTagsOnResourceResponse = NoData;
|
||||
export type UpdateServiceUserDescriptionResponse = User;
|
||||
|
||||
export type UpdateUserAdminResponse = NoData;
|
||||
|
||||
@@ -4250,31 +4306,6 @@ export interface CancelRepoBuild {
|
||||
repo: string;
|
||||
}
|
||||
|
||||
export interface CloneArgs {
|
||||
/** Resource name (eg Build name, Repo name) */
|
||||
name: string;
|
||||
/** Git provider domain. Default: `github.com` */
|
||||
provider: string;
|
||||
/** Use https (vs http). */
|
||||
https: boolean;
|
||||
/** Full repo identifier. {namespace}/{repo_name} */
|
||||
repo?: string;
|
||||
/** Git Branch. Default: `main` */
|
||||
branch: string;
|
||||
/** Specific commit hash. Optional */
|
||||
commit?: string;
|
||||
/** Use PERIPHERY_BUILD_DIR as the parent folder for the clone. */
|
||||
is_build: boolean;
|
||||
/** The clone destination path */
|
||||
destination?: string;
|
||||
/** Command to run after the repo has been cloned */
|
||||
on_clone?: SystemCommand;
|
||||
/** Command to run after the repo has been pulled */
|
||||
on_pull?: SystemCommand;
|
||||
/** Configure the account used to access repo (if private) */
|
||||
account?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clones the target repo. Response: [Update].
|
||||
*
|
||||
@@ -4356,6 +4387,214 @@ export interface ConnectTerminalQuery {
|
||||
terminal: string;
|
||||
}
|
||||
|
||||
/** Blkio stats entry. This type is Linux-specific and omitted for Windows containers. */
|
||||
export interface ContainerBlkioStatEntry {
|
||||
major?: U64;
|
||||
minor?: U64;
|
||||
op?: string;
|
||||
value?: U64;
|
||||
}
|
||||
|
||||
/**
|
||||
* BlkioStats stores all IO service stats for data read and write.
|
||||
* This type is Linux-specific and holds many fields that are specific to cgroups v1.
|
||||
* On a cgroup v2 host, all fields other than `io_service_bytes_recursive` are omitted or `null`.
|
||||
* This type is only populated on Linux and omitted for Windows containers.
|
||||
*/
|
||||
export interface ContainerBlkioStats {
|
||||
io_service_bytes_recursive?: ContainerBlkioStatEntry[];
|
||||
/**
|
||||
* This field is only available when using Linux containers with cgroups v1.
|
||||
* It is omitted or `null` when using cgroups v2.
|
||||
*/
|
||||
io_serviced_recursive?: ContainerBlkioStatEntry[];
|
||||
/**
|
||||
* This field is only available when using Linux containers with cgroups v1.
|
||||
* It is omitted or `null` when using cgroups v2.
|
||||
*/
|
||||
io_queue_recursive?: ContainerBlkioStatEntry[];
|
||||
/**
|
||||
* This field is only available when using Linux containers with cgroups v1.
|
||||
* It is omitted or `null` when using cgroups v2.
|
||||
*/
|
||||
io_service_time_recursive?: ContainerBlkioStatEntry[];
|
||||
/**
|
||||
* This field is only available when using Linux containers with cgroups v1.
|
||||
* It is omitted or `null` when using cgroups v2.
|
||||
*/
|
||||
io_wait_time_recursive?: ContainerBlkioStatEntry[];
|
||||
/**
|
||||
* This field is only available when using Linux containers with cgroups v1.
|
||||
* It is omitted or `null` when using cgroups v2.
|
||||
*/
|
||||
io_merged_recursive?: ContainerBlkioStatEntry[];
|
||||
/**
|
||||
* This field is only available when using Linux containers with cgroups v1.
|
||||
* It is omitted or `null` when using cgroups v2.
|
||||
*/
|
||||
io_time_recursive?: ContainerBlkioStatEntry[];
|
||||
/**
|
||||
* This field is only available when using Linux containers with cgroups v1.
|
||||
* It is omitted or `null` when using cgroups v2.
|
||||
*/
|
||||
sectors_recursive?: ContainerBlkioStatEntry[];
|
||||
}
|
||||
|
||||
/** All CPU stats aggregated since container inception. */
|
||||
export interface ContainerCpuUsage {
|
||||
/** Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). */
|
||||
total_usage?: U64;
|
||||
/**
|
||||
* Total CPU time (in nanoseconds) consumed per core (Linux).
|
||||
* This field is Linux-specific when using cgroups v1.
|
||||
* It is omitted when using cgroups v2 and Windows containers.
|
||||
*/
|
||||
percpu_usage?: U64[];
|
||||
/**
|
||||
* Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux),
|
||||
* or time spent (in 100's of nanoseconds) by all container processes in kernel mode (Windows).
|
||||
* Not populated for Windows containers using Hyper-V isolation.
|
||||
*/
|
||||
usage_in_kernelmode?: U64;
|
||||
/**
|
||||
* Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux),
|
||||
* or time spent (in 100's of nanoseconds) by all container processes in kernel mode (Windows).
|
||||
* Not populated for Windows containers using Hyper-V isolation.
|
||||
*/
|
||||
usage_in_usermode?: U64;
|
||||
}
|
||||
|
||||
/**
|
||||
* CPU throttling stats of the container.
|
||||
* This type is Linux-specific and omitted for Windows containers.
|
||||
*/
|
||||
export interface ContainerThrottlingData {
|
||||
/** Number of periods with throttling active. */
|
||||
periods?: U64;
|
||||
/** Number of periods when the container hit its throttling limit. */
|
||||
throttled_periods?: U64;
|
||||
/** Aggregated time (in nanoseconds) the container was throttled for. */
|
||||
throttled_time?: U64;
|
||||
}
|
||||
|
||||
/** CPU related info of the container */
|
||||
export interface ContainerCpuStats {
|
||||
/** All CPU stats aggregated since container inception. */
|
||||
cpu_usage?: ContainerCpuUsage;
|
||||
/**
|
||||
* System Usage.
|
||||
* This field is Linux-specific and omitted for Windows containers.
|
||||
*/
|
||||
system_cpu_usage?: U64;
|
||||
/**
|
||||
* Number of online CPUs.
|
||||
* This field is Linux-specific and omitted for Windows containers.
|
||||
*/
|
||||
online_cpus?: number;
|
||||
/**
|
||||
* CPU throttling stats of the container.
|
||||
* This type is Linux-specific and omitted for Windows containers.
|
||||
*/
|
||||
throttling_data?: ContainerThrottlingData;
|
||||
}

/**
* Aggregates all memory stats since container inception on Linux.
* Windows returns stats for commit and private working set only.
*/
export interface ContainerMemoryStats {
/**
* Current `res_counter` usage for memory.
* This field is Linux-specific and omitted for Windows containers.
*/
usage?: U64;
/**
* Maximum usage ever recorded.
* This field is Linux-specific and only supported on cgroups v1.
* It is omitted when using cgroups v2 and for Windows containers.
*/
max_usage?: U64;
/**
* All the stats exported via memory.stat when using cgroups v2.
* This field is Linux-specific and omitted for Windows containers.
*/
stats?: Record<string, U64>;
/** Number of times memory usage hits limits. This field is Linux-specific and only supported on cgroups v1. It is omitted when using cgroups v2 and for Windows containers. */
failcnt?: U64;
/** This field is Linux-specific and omitted for Windows containers. */
limit?: U64;
/**
* Committed bytes.
* This field is Windows-specific and omitted for Linux containers.
*/
commitbytes?: U64;
/**
* Peak committed bytes.
* This field is Windows-specific and omitted for Linux containers.
*/
commitpeakbytes?: U64;
/**
* Private working set.
* This field is Windows-specific and omitted for Linux containers.
*/
privateworkingset?: U64;
}
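
For a `docker stats`-style memory percentage on Linux, the usual approach is current usage relative to the limit. A minimal sketch under the same numeric assumption (helper name illustrative):

// Illustrative helper: memory usage as a percentage of the cgroup limit.
// The docker CLI additionally subtracts cache on cgroups v1; omitted here.
function memoryPercent(mem: ContainerMemoryStats): number {
  const usage = mem.usage ?? 0;
  const limit = mem.limit ?? 0;
  return limit > 0 ? (usage / limit) * 100 : 0;
}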

/** Aggregates the network stats of one container */
export interface ContainerNetworkStats {
/** Bytes received. Windows and Linux. */
rx_bytes?: U64;
/** Packets received. Windows and Linux. */
rx_packets?: U64;
/**
* Received errors. Not used on Windows.
* This field is Linux-specific and always zero for Windows containers.
*/
rx_errors?: U64;
/** Incoming packets dropped. Windows and Linux. */
rx_dropped?: U64;
/** Bytes sent. Windows and Linux. */
tx_bytes?: U64;
/** Packets sent. Windows and Linux. */
tx_packets?: U64;
/**
* Sent errors. Not used on Windows.
* This field is Linux-specific and always zero for Windows containers.
*/
tx_errors?: U64;
/** Outgoing packets dropped. Windows and Linux. */
tx_dropped?: U64;
/**
* Endpoint ID. Not used on Linux.
* This field is Windows-specific and omitted for Linux containers.
*/
endpoint_id?: string;
/**
* Instance ID. Not used on Linux.
* This field is Windows-specific and omitted for Linux containers.
*/
instance_id?: string;
}
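
The rx/tx counters are cumulative since container start, so rates come from deltas between two samples. A minimal sketch (illustrative helper; the interval would come from the `read`/`preread` timestamps of two stats samples):

// Illustrative helper: receive/transmit rates in bytes per second,
// from two cumulative samples taken `intervalSecs` apart.
function networkRates(
  prev: ContainerNetworkStats,
  curr: ContainerNetworkStats,
  intervalSecs: number,
): { rxPerSec: number; txPerSec: number } {
  const rx = (curr.rx_bytes ?? 0) - (prev.rx_bytes ?? 0);
  const tx = (curr.tx_bytes ?? 0) - (prev.tx_bytes ?? 0);
  return {
    rxPerSec: intervalSecs > 0 ? rx / intervalSecs : 0,
    txPerSec: intervalSecs > 0 ? tx / intervalSecs : 0,
  };
}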

/** PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). This type is Linux-specific and omitted for Windows containers. */
export interface ContainerPidsStats {
/** Current is the number of PIDs in the cgroup. */
current?: U64;
/** Limit is the hard limit on the number of pids in the cgroup. A \"Limit\" of 0 means that there is no limit. */
limit?: U64;
}

/**
* StorageStats is the disk I/O stats for read/write on Windows.
* This type is Windows-specific and omitted for Linux containers.
*/
export interface ContainerStorageStats {
read_count_normalized?: U64;
read_size_bytes?: U64;
write_count_normalized?: U64;
write_size_bytes?: U64;
}

export interface Conversion {
/** reference on the server. */
local: string;
@@ -4451,6 +4690,17 @@ export interface CopyResourceSync {
id: string;
}

/**
* Creates a new server with given `name` and the configuration
* of the server at the given `id`. Response: [Server].
*/
export interface CopyServer {
/** The name of the new server. */
name: string;
/** The id of the server to copy. */
id: string;
}

/**
* Creates a new stack with given `name` and the configuration
* of the stack at the given `id`. Response: [Stack].
@@ -5287,6 +5537,50 @@ export interface FindUser {
user: string;
}

/** Statistics sample for a container. */
export interface FullContainerStats {
/** Name of the container */
name: string;
/** ID of the container */
id?: string;
/**
* Date and time at which this sample was collected.
* The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds.
*/
read?: string;
/**
* Date and time at which this first sample was collected.
* This field is not propagated if the \"one-shot\" option is set.
* If the \"one-shot\" option is set, this field may be omitted, empty,
* or set to a default date (`0001-01-01T00:00:00Z`).
* The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds.
*/
preread?: string;
/**
* PidsStats contains Linux-specific stats of a container's process-IDs (PIDs).
* This type is Linux-specific and omitted for Windows containers.
*/
pids_stats?: ContainerPidsStats;
/**
* BlkioStats stores all IO service stats for data read and write.
* This type is Linux-specific and holds many fields that are specific to cgroups v1.
* On a cgroup v2 host, all fields other than `io_service_bytes_recursive` are omitted or `null`.
* This type is only populated on Linux and omitted for Windows containers.
*/
blkio_stats?: ContainerBlkioStats;
/**
* The number of processors on the system.
* This field is Windows-specific and always zero for Linux containers.
*/
num_procs?: number;
storage_stats?: ContainerStorageStats;
cpu_stats?: ContainerCpuStats;
precpu_stats?: ContainerCpuStats;
memory_stats?: ContainerMemoryStats;
/** Network statistics for the container per interface. This field is omitted if the container has no networking enabled. */
networks?: ContainerNetworkStats;
}
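
A single sample therefore carries everything needed for a stats row: `precpu_stats` is the previous CPU reading and `read`/`preread` bound the interval. A minimal sketch reusing the illustrative helpers above:

// Illustrative: derive a display row from one FullContainerStats sample.
function statsRow(s: FullContainerStats) {
  const intervalSecs =
    s.read && s.preread
      ? (Date.parse(s.read) - Date.parse(s.preread)) / 1000
      : 0;
  return {
    name: s.name,
    cpuPercent:
      s.cpu_stats && s.precpu_stats
        ? cpuPercent(s.cpu_stats, s.precpu_stats)
        : 0,
    memoryPercent: s.memory_stats ? memoryPercent(s.memory_stats) : 0,
    intervalSecs,
  };
}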

/** Get a specific action. Response: [Action]. */
export interface GetAction {
/** Id or name */
@@ -6498,7 +6792,7 @@ export interface ListSchedules {
/** Pass Vec of tag ids or tag names */
tags?: string[];
/** 'All' or 'Any' */
tag_behavior?: TagBehavior;
tag_behavior?: TagQueryBehavior;
}

/**
@@ -6747,24 +7041,6 @@ export interface PermissionToml {
specific?: Array<SpecificPermission>;
}

export enum PortTypeEnum {
EMPTY = "",
TCP = "tcp",
UDP = "udp",
SCTP = "sctp",
}

/** An open port on a container */
export interface Port {
/** Host IP address that the container's port is mapped to */
IP?: string;
/** Port on the container */
PrivatePort?: number;
/** Port exposed on the host */
PublicPort?: number;
Type?: PortTypeEnum;
}
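
The shape mirrors the Docker Engine port summary, so rendering follows the familiar `IP:PublicPort->PrivatePort/Type` form. A minimal sketch (illustrative helper):

// Illustrative helper: format a Port the way `docker ps` does,
// e.g. "0.0.0.0:8080->80/tcp", or "5432/tcp" when unpublished.
function formatPort(p: Port): string {
  const proto = p.Type && p.Type !== PortTypeEnum.EMPTY ? `/${p.Type}` : "";
  if (p.PublicPort === undefined) return `${p.PrivatePort ?? ""}${proto}`;
  return `${p.IP ?? "0.0.0.0"}:${p.PublicPort}->${p.PrivatePort}${proto}`;
}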

/**
* Prunes the docker buildx cache on the target server. Response: [Update].
*
@@ -7042,11 +7318,66 @@ export interface RenameUserGroup {
name: string;
}

export enum DefaultRepoFolder {
/** /${root_directory}/stacks */
Stacks = "Stacks",
/** /${root_directory}/builds */
Builds = "Builds",
/** /${root_directory}/repos */
Repos = "Repos",
/**
* If the repo is only cloned
* in the core repo cache (resource sync),
* this isn't relevant.
*/
NotApplicable = "NotApplicable",
}

export interface RepoExecutionArgs {
/** Resource name (eg Build name, Repo name) */
name: string;
/** Git provider domain. Default: `github.com` */
provider: string;
/** Use https (vs http). */
https: boolean;
/** Configure the account used to access repo (if private) */
account?: string;
/**
* Full repo identifier. {namespace}/{repo_name}
* It's optional in order to force checking, and produce an error if not defined.
*/
repo?: string;
/** Git Branch. Default: `main` */
branch: string;
/** Specific commit hash. Optional */
commit?: string;
/** The clone destination path */
destination?: string;
/**
* The default folder to use.
* Depends on the resource type.
*/
default_folder: DefaultRepoFolder;
}

export interface RepoExecutionResponse {
/** Response logs */
logs: Log[];
/** Absolute path to the repo root on the host. */
path: string;
/** Latest short commit hash, if it could be retrieved */
commit_hash?: string;
/** Latest commit message, if it could be retrieved */
commit_message?: string;
}
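
For a sense of how the fields combine, here is a plausible args value for a public GitHub repo cloned into the stacks folder (all values are made up for the example):

// Illustrative value only; repo, branch, and name are hypothetical.
const args: RepoExecutionArgs = {
  name: "my-stack",
  provider: "github.com",
  https: true,
  repo: "my-org/my-stack",
  branch: "main",
  default_folder: DefaultRepoFolder.Stacks,
};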

export interface ResourceToml<PartialConfig> {
/** The resource name. Required */
name: string;
/** The resource description. Optional. */
description?: string;
/** Mark resource as a template */
template?: boolean;
/** Tag ids or names. Optional */
tags?: string[];
/**
@@ -7572,17 +7903,6 @@ export interface UpdateDeployment {
config: _PartialDeploymentConfig;
}

/**
* Update a resource's description.
* Response: [NoData].
*/
export interface UpdateDescription {
/** The target resource to set description for. */
target: ResourceTarget;
/** The new description. */
description: string;
}

/**
* **Admin only.** Update a docker registry account.
* Response: [DockerRegistryAccount].
@@ -7668,6 +7988,33 @@ export interface UpdateRepo {
config: _PartialRepoConfig;
}

/**
* Update a resource's common meta fields.
* - description
* - template
* - tags
* Response: [NoData].
*/
export interface UpdateResourceMeta {
/** The target resource to update meta on. */
target: ResourceTarget;
/**
* New description to set,
* or null for no update
*/
description?: string;
/**
* New template value (true or false),
* or null for no update
*/
template?: boolean;
/**
* The exact tags to set,
* or null for no update
*/
tags?: string[];
}
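
Since `UpdateResourceMeta` replaces `UpdateDescription` (and `UpdateTagsOnResource`) in the WriteRequest union below, one call can set description, template flag, and tags together; omitted fields are left unchanged. An illustrative payload (the target id is made up):

// Illustrative WriteRequest payload; the id is hypothetical.
const req: WriteRequest = {
  type: "UpdateResourceMeta",
  params: {
    target: { type: "Server", id: "6643d3a1b2c3d4e5f6a7b8c9" },
    description: "Primary build host",
    template: false,
    // tags omitted -> existing tags are left as they are
  },
};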

/**
* Update the sync at the given id, and return the updated sync.
* Response: [ResourceSync].
@@ -7741,16 +8088,6 @@ export interface UpdateTagColor {
color: TagColor;
}

/**
* Update the tags on a resource.
* Response: [NoData]
*/
export interface UpdateTagsOnResource {
target: ResourceTarget;
/** Tag Ids */
tags: string[];
}

/**
* **Super Admin only.** Updates whether a user is admin.
* Response: [NoData].
@@ -8219,8 +8556,9 @@ export type WriteRequest =
| { type: "UpdateUserBasePermissions", params: UpdateUserBasePermissions }
| { type: "UpdatePermissionOnResourceType", params: UpdatePermissionOnResourceType }
| { type: "UpdatePermissionOnTarget", params: UpdatePermissionOnTarget }
| { type: "UpdateDescription", params: UpdateDescription }
| { type: "UpdateResourceMeta", params: UpdateResourceMeta }
| { type: "CreateServer", params: CreateServer }
| { type: "CopyServer", params: CopyServer }
| { type: "DeleteServer", params: DeleteServer }
| { type: "UpdateServer", params: UpdateServer }
| { type: "RenameServer", params: RenameServer }
@@ -8294,7 +8632,6 @@ export type WriteRequest =
| { type: "DeleteTag", params: DeleteTag }
| { type: "RenameTag", params: RenameTag }
| { type: "UpdateTagColor", params: UpdateTagColor }
| { type: "UpdateTagsOnResource", params: UpdateTagsOnResource }
| { type: "CreateVariable", params: CreateVariable }
| { type: "UpdateVariableValue", params: UpdateVariableValue }
| { type: "UpdateVariableDescription", params: UpdateVariableDescription }
@@ -1,5 +1,5 @@
use komodo_client::entities::{
FileContents, SearchCombinator,
FileContents, RepoExecutionResponse, SearchCombinator,
repo::Repo,
stack::{ComposeProject, Stack, StackServiceNames},
update::Log,
@@ -7,8 +7,6 @@ use komodo_client::entities::{
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};

use super::git::RepoActionResponse;

/// List the compose project names that are on the host.
/// List running `docker compose ls`
///
@@ -115,7 +113,7 @@ pub struct WriteComposeContentsToHost {
/// Write and commit compose contents.
/// Only works with git repo based stacks.
#[derive(Debug, Clone, Serialize, Deserialize, Resolve)]
#[response(RepoActionResponse)]
#[response(RepoExecutionResponse)]
#[error(serror::Error)]
pub struct WriteCommitComposeContents {
/// The stack to write to.
@@ -152,11 +150,19 @@ pub struct ComposePull {
pub git_token: Option<String>,
/// If provided, use it to log in. Otherwise check periphery local registry providers.
pub registry_token: Option<String>,
/// Propagate any secret replacers from core interpolation.
#[serde(default)]
pub replacers: Vec<(String, String)>,
}

/// Response for [ComposePull]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposePullResponse {
/// If any of the required files are missing, they will be here.
pub missing_files: Vec<String>,
/// The error in getting remote file contents at the path, or null
pub remote_errors: Vec<FileContents>,
/// The logs produced by the pull
pub logs: Vec<Log>,
}

@@ -1,7 +1,10 @@
use komodo_client::entities::{
SearchCombinator, TerminationSignal,
deployment::Deployment,
docker::container::{Container, ContainerStats},
docker::{
container::{Container, ContainerStats},
stats::FullContainerStats,
},
update::Log,
};
use resolver_api::Resolve;
@@ -69,6 +72,17 @@ pub struct GetContainerStatsList {}

//

//

#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(FullContainerStats)]
#[error(serror::Error)]
pub struct GetFullContainerStats {
pub name: String,
}

//

// =======
// ACTIONS
// =======

@@ -1,7 +1,8 @@
use std::path::PathBuf;

use komodo_client::entities::{
CloneArgs, EnvironmentVar, LatestCommit, update::Log,
EnvironmentVar, LatestCommit, RepoExecutionArgs,
RepoExecutionResponse, SystemCommand, update::Log,
};
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
@@ -15,19 +16,24 @@ pub struct GetLatestCommit {
pub path: Option<String>,
}

//

#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(RepoActionResponse)]
#[response(PeripheryRepoExecutionResponse)]
#[error(serror::Error)]
pub struct CloneRepo {
pub args: CloneArgs,
#[serde(default)]
pub environment: Vec<EnvironmentVar>,
#[serde(default = "default_env_file_path")]
pub env_file_path: String,
#[serde(default)]
pub skip_secret_interp: bool,
pub args: RepoExecutionArgs,
/// Override git token with one sent from core.
pub git_token: Option<String>,
#[serde(default)]
pub environment: Vec<EnvironmentVar>,
/// Relative to repo root
#[serde(default = "default_env_file_path")]
pub env_file_path: String,
pub on_clone: Option<SystemCommand>,
pub on_pull: Option<SystemCommand>,
#[serde(default)]
pub skip_secret_interp: bool,
/// Propagate any secret replacers from core interpolation.
#[serde(default)]
pub replacers: Vec<(String, String)>,
@@ -40,18 +46,19 @@ fn default_env_file_path() -> String {
//

#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(RepoActionResponse)]
#[response(PeripheryRepoExecutionResponse)]
#[error(serror::Error)]
pub struct PullRepo {
pub args: CloneArgs,
pub args: RepoExecutionArgs,
/// Override git token with one sent from core.
pub git_token: Option<String>,
#[serde(default)]
pub environment: Vec<EnvironmentVar>,
#[serde(default = "default_env_file_path")]
pub env_file_path: String,
pub on_pull: Option<SystemCommand>,
#[serde(default)]
pub skip_secret_interp: bool,
/// Override git token with one sent from core.
pub git_token: Option<String>,
/// Propagate any secret replacers from core interpolation.
#[serde(default)]
pub replacers: Vec<(String, String)>,
@@ -61,18 +68,20 @@ pub struct PullRepo {

/// Either pull or clone depending on whether it exists.
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(RepoActionResponse)]
#[response(PeripheryRepoExecutionResponse)]
#[error(serror::Error)]
pub struct PullOrCloneRepo {
pub args: CloneArgs,
pub args: RepoExecutionArgs,
/// Override git token with one sent from core.
pub git_token: Option<String>,
#[serde(default)]
pub environment: Vec<EnvironmentVar>,
#[serde(default = "default_env_file_path")]
pub env_file_path: String,
pub on_clone: Option<SystemCommand>,
pub on_pull: Option<SystemCommand>,
#[serde(default)]
pub skip_secret_interp: bool,
/// Override git token with one sent from core.
pub git_token: Option<String>,
/// Propagate any secret replacers from core interpolation.
#[serde(default)]
pub replacers: Vec<(String, String)>,
@@ -80,23 +89,16 @@ pub struct PullOrCloneRepo {

//

#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct RepoActionResponse {
/// Response logs
pub logs: Vec<Log>,
/// Absolute path to the repo root on the host.
pub path: PathBuf,
/// Latest short commit hash, if it could be retrieved
pub commit_hash: Option<String>,
/// Latest commit message, if it could be retrieved
pub commit_message: Option<String>,
/// Don't need to send this one to core, it's only needed for calls local to a single periphery
#[serde(skip_serializing)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PeripheryRepoExecutionResponse {
pub res: RepoExecutionResponse,
pub env_file_path: Option<PathBuf>,
}

//

//

#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(Log)]
#[error(serror::Error)]

@@ -156,11 +156,9 @@ async fn connect_websocket(
format!("failed to connect to websocket | url: {url}")
})?
} else {
tokio_tungstenite::connect_async(url)
.await
.with_context(|| {
format!("failed to connect to websocket | url: {url}")
})?
tokio_tungstenite::connect_async(url).await.with_context(
|| format!("failed to connect to websocket | url: {url}"),
)?
};

Ok(stream)

@@ -150,7 +150,7 @@ jwt_secret = ""
## Specify how long a user can stay logged in before they have to log in again.
## All jwts are invalidated on application restart unless `jwt_secret` is set.
## Env: KOMODO_JWT_TTL
## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day
## Options: https://docs.rs/komodo_client/latest/komodo_client/entities/enum.Timelength.html
## Default: 1-day.
jwt_ttl = "1-day"

@@ -251,13 +251,13 @@ github_oauth.secret = ""
## Controls the rate at which servers are polled for health, system stats, and container status.
## This affects network usage, and the size of the stats stored in mongo.
## Env: KOMODO_MONITORING_INTERVAL
## Options: 1-sec, 5-sec, 15-sec, 30-sec, 1-min, 2-min, 5-min, 15-min
## Options: https://docs.rs/komodo_client/latest/komodo_client/entities/enum.Timelength.html
## Default: 15-sec
monitoring_interval = "15-sec"

## Interval at which to poll Resources for any updates / automated actions.
## Env: KOMODO_RESOURCE_POLL_INTERVAL
## Options: `15-min`, `1-hr`, `2-hr`, `6-hr`, `12-hr`, `1-day`
## Options: https://docs.rs/komodo_client/latest/komodo_client/entities/enum.Timelength.html
## Default: 1-hr
resource_poll_interval = "1-hr"

@@ -60,14 +60,19 @@ disable_terminals = false
## Default: false
disable_container_exec = false

## How often Periphery polls the host for system stats,
## like CPU / memory usage. To effectively disable polling,
## set this to something like 1-hr.
## How often Periphery polls the host for system stats, like CPU / memory usage.
## To effectively disable polling, set this to something like 1-hr.
## Env: PERIPHERY_STATS_POLLING_RATE
## Options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min, 5-min, 30-min, 1-hr
## Options: https://docs.rs/komodo_client/latest/komodo_client/entities/enum.Timelength.html
## Default: 5-sec
stats_polling_rate = "5-sec"

## How often Periphery polls the host for container stats.
## Env: PERIPHERY_CONTAINER_STATS_POLLING_RATE
## Options: https://docs.rs/komodo_client/latest/komodo_client/entities/enum.Timelength.html
## Default: 1-min
container_stats_polling_rate = "1-min"

## Whether stack actions should use `docker-compose ...`
## instead of `docker compose ...`.
## Env: PERIPHERY_LEGACY_COMPOSE_CLI

4
frontend/public/client/responses.d.ts
vendored
@@ -152,8 +152,9 @@ export type WriteResponses = {
UpdateUserBasePermissions: Types.UpdateUserBasePermissionsResponse;
UpdatePermissionOnResourceType: Types.UpdatePermissionOnResourceTypeResponse;
UpdatePermissionOnTarget: Types.UpdatePermissionOnTargetResponse;
UpdateDescription: Types.UpdateDescriptionResponse;
UpdateResourceMeta: Types.UpdateResourceMetaResponse;
CreateServer: Types.Server;
CopyServer: Types.Server;
DeleteServer: Types.Server;
UpdateServer: Types.Server;
RenameServer: Types.Update;
@@ -227,7 +228,6 @@ export type WriteResponses = {
DeleteTag: Types.Tag;
RenameTag: Types.Tag;
UpdateTagColor: Types.Tag;
UpdateTagsOnResource: Types.UpdateTagsOnResourceResponse;
CreateVariable: Types.CreateVariableResponse;
UpdateVariableValue: Types.UpdateVariableValueResponse;
UpdateVariableDescription: Types.UpdateVariableDescriptionResponse;

469
frontend/public/client/types.d.ts
vendored
@@ -2,7 +2,6 @@ export interface MongoIdObj {
$oid: string;
}
export type MongoId = MongoIdObj;
export type I64 = number;
/** The levels of permission that a User or UserGroup can have on a resource. */
export declare enum PermissionLevel {
/** No permissions. */
@@ -18,6 +17,7 @@ export interface PermissionLevelAndSpecifics {
level: PermissionLevel;
specific: Array<SpecificPermission>;
}
export type I64 = number;
export interface Resource<Config, Info> {
/**
* The Mongo ID of the resource.
@@ -32,8 +32,8 @@ export interface Resource<Config, Info> {
name: string;
/** A description for the resource */
description?: string;
/** When description last updated */
updated_at?: I64;
/** Mark resource as a template */
template?: boolean;
/** Tag Ids */
tags?: string[];
/** Resource-specific information (not user configurable). */
@@ -45,6 +45,8 @@ export interface Resource<Config, Info> {
* resource.
*/
base_permission?: PermissionLevelAndSpecifics | PermissionLevel;
/** When description last updated */
updated_at?: I64;
}
export declare enum ScheduleFormat {
English = "English",
@@ -115,6 +117,8 @@ export interface ResourceListItem<Info> {
type: ResourceTarget["type"];
/** The resource name */
name: string;
/** Whether resource is a template */
template: boolean;
/** Tag Ids */
tags: string[];
/** Resource specific info */
@@ -147,7 +151,15 @@ export interface ActionListItemInfo {
schedule_error?: string;
}
export type ActionListItem = ResourceListItem<ActionListItemInfo>;
export declare enum TagBehavior {
export declare enum TemplatesQueryBehavior {
/** Include templates in results. Default. */
Include = "Include",
/** Exclude templates from results. */
Exclude = "Exclude",
/** Results *only* include templates. */
Only = "Only"
}
export declare enum TagQueryBehavior {
/** Returns resources which have strictly all the tags */
All = "All",
/** Returns resources which have one or more of the tags */
@@ -156,10 +168,11 @@ export declare enum TagBehavior {
/** Passing empty Vec is the same as not filtering by that field */
export interface ResourceQuery<T> {
names?: string[];
templates?: TemplatesQueryBehavior;
/** Pass Vec of tag ids or tag names */
tags?: string[];
/** 'All' or 'Any' */
tag_behavior?: TagBehavior;
tag_behavior?: TagQueryBehavior;
specific?: T;
}
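
An illustrative query combining the new `templates` filter with tag filtering (tag names are made up):

// Illustrative: match non-template resources carrying both tags.
const query: ResourceQuery<unknown> = {
  templates: TemplatesQueryBehavior.Exclude,
  tags: ["prod", "critical"],
  tag_behavior: TagQueryBehavior.All,
};
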
export interface ActionQuerySpecifics {
@@ -2366,26 +2379,47 @@ export interface SingleDiskUsage {
total_gb: number;
}
export declare enum Timelength {
/** `1-sec` */
OneSecond = "1-sec",
/** `5-sec` */
FiveSeconds = "5-sec",
/** `10-sec` */
TenSeconds = "10-sec",
/** `15-sec` */
FifteenSeconds = "15-sec",
/** `30-sec` */
ThirtySeconds = "30-sec",
/** `1-min` */
OneMinute = "1-min",
/** `2-min` */
TwoMinutes = "2-min",
/** `5-min` */
FiveMinutes = "5-min",
/** `10-min` */
TenMinutes = "10-min",
/** `15-min` */
FifteenMinutes = "15-min",
/** `30-min` */
ThirtyMinutes = "30-min",
/** `1-hr` */
OneHour = "1-hr",
/** `2-hr` */
TwoHours = "2-hr",
/** `6-hr` */
SixHours = "6-hr",
/** `8-hr` */
EightHours = "8-hr",
/** `12-hr` */
TwelveHours = "12-hr",
/** `1-day` */
OneDay = "1-day",
/** `3-day` */
ThreeDay = "3-day",
/** `1-wk` */
OneWeek = "1-wk",
/** `2-wk` */
TwoWeeks = "2-wk",
/** `30-day` */
ThirtyDays = "30-day"
}
/** Realtime system stats data. */
@@ -3259,6 +3293,23 @@ export type InspectStackContainerResponse = Container;
export type JsonValue = any;
export type ListActionsResponse = ActionListItem[];
export type ListAlertersResponse = AlerterListItem[];
export declare enum PortTypeEnum {
EMPTY = "",
TCP = "tcp",
UDP = "udp",
SCTP = "sctp"
}
/** An open port on a container */
export interface Port {
/** Host IP address that the container's port is mapped to */
IP?: string;
/** Port on the container */
PrivatePort?: number;
/** Port exposed on the host */
PublicPort?: number;
Type?: PortTypeEnum;
}
/** Container summary returned by container list apis. */
export interface ContainerListItem {
/** The Server which holds the container. */
server_id?: string;
@@ -3284,8 +3335,12 @@ export interface ContainerListItem {
network_mode?: string;
/** The network names attached to container */
networks: string[];
/** Port mappings for the container */
ports: Port[];
/** The volume names attached to container */
volumes: string[];
/** The container stats, if they can be retrieved. */
stats?: ContainerStats;
/**
* The labels attached to container.
* It's too big to send with container list,
@@ -3797,14 +3852,13 @@ export interface StackQuerySpecifics {
update_available?: boolean;
}
export type StackQuery = ResourceQuery<StackQuerySpecifics>;
export type UpdateDescriptionResponse = NoData;
export type UpdateDockerRegistryAccountResponse = DockerRegistryAccount;
export type UpdateGitProviderAccountResponse = GitProviderAccount;
export type UpdatePermissionOnResourceTypeResponse = NoData;
export type UpdatePermissionOnTargetResponse = NoData;
export type UpdateProcedureResponse = Procedure;
export type UpdateResourceMetaResponse = NoData;
export type UpdateServiceUserDescriptionResponse = User;
export type UpdateTagsOnResourceResponse = NoData;
export type UpdateUserAdminResponse = NoData;
export type UpdateUserBasePermissionsResponse = NoData;
export type UpdateUserPasswordResponse = NoData;
@@ -4124,30 +4178,6 @@ export interface CancelRepoBuild {
/** Can be id or name */
repo: string;
}
export interface CloneArgs {
/** Resource name (eg Build name, Repo name) */
name: string;
/** Git provider domain. Default: `github.com` */
provider: string;
/** Use https (vs http). */
https: boolean;
/** Full repo identifier. {namespace}/{repo_name} */
repo?: string;
/** Git Branch. Default: `main` */
branch: string;
/** Specific commit hash. Optional */
commit?: string;
/** Use PERIPHERY_BUILD_DIR as the parent folder for the clone. */
is_build: boolean;
/** The clone destination path */
destination?: string;
/** Command to run after the repo has been cloned */
on_clone?: SystemCommand;
/** Command to run after the repo has been pulled */
on_pull?: SystemCommand;
/** Configure the account used to access repo (if private) */
account?: string;
}
/**
* Clones the target repo. Response: [Update].
*
@@ -4223,6 +4253,205 @@ export interface ConnectTerminalQuery {
*/
terminal: string;
}
/** Blkio stats entry. This type is Linux-specific and omitted for Windows containers. */
export interface ContainerBlkioStatEntry {
major?: U64;
minor?: U64;
op?: string;
value?: U64;
}
/**
* BlkioStats stores all IO service stats for data read and write.
* This type is Linux-specific and holds many fields that are specific to cgroups v1.
* On a cgroup v2 host, all fields other than `io_service_bytes_recursive` are omitted or `null`.
* This type is only populated on Linux and omitted for Windows containers.
*/
export interface ContainerBlkioStats {
io_service_bytes_recursive?: ContainerBlkioStatEntry[];
/**
* This field is only available when using Linux containers with cgroups v1.
* It is omitted or `null` when using cgroups v2.
*/
io_serviced_recursive?: ContainerBlkioStatEntry[];
/**
* This field is only available when using Linux containers with cgroups v1.
* It is omitted or `null` when using cgroups v2.
*/
io_queue_recursive?: ContainerBlkioStatEntry[];
/**
* This field is only available when using Linux containers with cgroups v1.
* It is omitted or `null` when using cgroups v2.
*/
io_service_time_recursive?: ContainerBlkioStatEntry[];
/**
* This field is only available when using Linux containers with cgroups v1.
* It is omitted or `null` when using cgroups v2.
*/
io_wait_time_recursive?: ContainerBlkioStatEntry[];
/**
* This field is only available when using Linux containers with cgroups v1.
* It is omitted or `null` when using cgroups v2.
*/
io_merged_recursive?: ContainerBlkioStatEntry[];
/**
* This field is only available when using Linux containers with cgroups v1.
* It is omitted or `null` when using cgroups v2.
*/
io_time_recursive?: ContainerBlkioStatEntry[];
/**
* This field is only available when using Linux containers with cgroups v1.
* It is omitted or `null` when using cgroups v2.
*/
sectors_recursive?: ContainerBlkioStatEntry[];
}
/** All CPU stats aggregated since container inception. */
export interface ContainerCpuUsage {
/** Total CPU time consumed in nanoseconds (Linux) or 100's of nanoseconds (Windows). */
total_usage?: U64;
/**
* Total CPU time (in nanoseconds) consumed per core (Linux).
* This field is Linux-specific when using cgroups v1.
* It is omitted when using cgroups v2 and Windows containers.
*/
percpu_usage?: U64[];
/**
* Time (in nanoseconds) spent by tasks of the cgroup in kernel mode (Linux),
* or time spent (in 100's of nanoseconds) by all container processes in kernel mode (Windows).
* Not populated for Windows containers using Hyper-V isolation.
*/
usage_in_kernelmode?: U64;
/**
* Time (in nanoseconds) spent by tasks of the cgroup in user mode (Linux),
* or time spent (in 100's of nanoseconds) by all container processes in user mode (Windows).
* Not populated for Windows containers using Hyper-V isolation.
*/
usage_in_usermode?: U64;
}
/**
* CPU throttling stats of the container.
* This type is Linux-specific and omitted for Windows containers.
*/
export interface ContainerThrottlingData {
/** Number of periods with throttling active. */
periods?: U64;
/** Number of periods when the container hit its throttling limit. */
throttled_periods?: U64;
/** Aggregated time (in nanoseconds) the container was throttled for. */
throttled_time?: U64;
}
/** CPU related info of the container */
export interface ContainerCpuStats {
/** All CPU stats aggregated since container inception. */
cpu_usage?: ContainerCpuUsage;
/**
* System Usage.
* This field is Linux-specific and omitted for Windows containers.
*/
system_cpu_usage?: U64;
/**
* Number of online CPUs.
* This field is Linux-specific and omitted for Windows containers.
*/
online_cpus?: number;
/**
* CPU throttling stats of the container.
* This type is Linux-specific and omitted for Windows containers.
*/
throttling_data?: ContainerThrottlingData;
}
/**
* Aggregates all memory stats since container inception on Linux.
* Windows returns stats for commit and private working set only.
*/
export interface ContainerMemoryStats {
/**
* Current `res_counter` usage for memory.
* This field is Linux-specific and omitted for Windows containers.
*/
usage?: U64;
/**
* Maximum usage ever recorded.
* This field is Linux-specific and only supported on cgroups v1.
* It is omitted when using cgroups v2 and for Windows containers.
*/
max_usage?: U64;
/**
* All the stats exported via memory.stat when using cgroups v2.
* This field is Linux-specific and omitted for Windows containers.
*/
stats?: Record<string, U64>;
/** Number of times memory usage hits limits. This field is Linux-specific and only supported on cgroups v1. It is omitted when using cgroups v2 and for Windows containers. */
failcnt?: U64;
/** This field is Linux-specific and omitted for Windows containers. */
limit?: U64;
/**
* Committed bytes.
* This field is Windows-specific and omitted for Linux containers.
*/
commitbytes?: U64;
/**
* Peak committed bytes.
* This field is Windows-specific and omitted for Linux containers.
*/
commitpeakbytes?: U64;
/**
* Private working set.
* This field is Windows-specific and omitted for Linux containers.
*/
privateworkingset?: U64;
}
/** Aggregates the network stats of one container */
export interface ContainerNetworkStats {
/** Bytes received. Windows and Linux. */
rx_bytes?: U64;
/** Packets received. Windows and Linux. */
rx_packets?: U64;
/**
* Received errors. Not used on Windows.
* This field is Linux-specific and always zero for Windows containers.
*/
rx_errors?: U64;
/** Incoming packets dropped. Windows and Linux. */
rx_dropped?: U64;
/** Bytes sent. Windows and Linux. */
tx_bytes?: U64;
/** Packets sent. Windows and Linux. */
tx_packets?: U64;
/**
* Sent errors. Not used on Windows.
* This field is Linux-specific and always zero for Windows containers.
*/
tx_errors?: U64;
/** Outgoing packets dropped. Windows and Linux. */
tx_dropped?: U64;
/**
* Endpoint ID. Not used on Linux.
* This field is Windows-specific and omitted for Linux containers.
*/
endpoint_id?: string;
/**
* Instance ID. Not used on Linux.
* This field is Windows-specific and omitted for Linux containers.
*/
instance_id?: string;
}
/** PidsStats contains Linux-specific stats of a container's process-IDs (PIDs). This type is Linux-specific and omitted for Windows containers. */
export interface ContainerPidsStats {
/** Current is the number of PIDs in the cgroup. */
current?: U64;
/** Limit is the hard limit on the number of pids in the cgroup. A \"Limit\" of 0 means that there is no limit. */
limit?: U64;
}
/**
* StorageStats is the disk I/O stats for read/write on Windows.
* This type is Windows-specific and omitted for Linux containers.
*/
export interface ContainerStorageStats {
read_count_normalized?: U64;
read_size_bytes?: U64;
write_count_normalized?: U64;
write_size_bytes?: U64;
}
export interface Conversion {
/** reference on the server. */
local: string;
@@ -4309,6 +4538,16 @@ export interface CopyResourceSync {
/** The id of the sync to copy. */
id: string;
}
/**
* Creates a new server with given `name` and the configuration
* of the server at the given `id`. Response: [Server].
*/
export interface CopyServer {
/** The name of the new server. */
name: string;
/** The id of the server to copy. */
id: string;
}
/**
* Creates a new stack with given `name` and the configuration
* of the stack at the given `id`. Response: [Stack].
@@ -5073,6 +5312,49 @@ export interface FindUser {
/** Id or username */
user: string;
}
/** Statistics sample for a container. */
export interface FullContainerStats {
/** Name of the container */
name: string;
/** ID of the container */
id?: string;
/**
* Date and time at which this sample was collected.
* The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds.
*/
read?: string;
/**
* Date and time at which this first sample was collected.
* This field is not propagated if the \"one-shot\" option is set.
* If the \"one-shot\" option is set, this field may be omitted, empty,
* or set to a default date (`0001-01-01T00:00:00Z`).
* The value is formatted as [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) with nano-seconds.
*/
preread?: string;
/**
* PidsStats contains Linux-specific stats of a container's process-IDs (PIDs).
* This type is Linux-specific and omitted for Windows containers.
*/
pids_stats?: ContainerPidsStats;
/**
* BlkioStats stores all IO service stats for data read and write.
* This type is Linux-specific and holds many fields that are specific to cgroups v1.
* On a cgroup v2 host, all fields other than `io_service_bytes_recursive` are omitted or `null`.
* This type is only populated on Linux and omitted for Windows containers.
*/
blkio_stats?: ContainerBlkioStats;
/**
* The number of processors on the system.
* This field is Windows-specific and always zero for Linux containers.
*/
num_procs?: number;
storage_stats?: ContainerStorageStats;
cpu_stats?: ContainerCpuStats;
precpu_stats?: ContainerCpuStats;
memory_stats?: ContainerMemoryStats;
/** Network statistics for the container per interface. This field is omitted if the container has no networking enabled. */
networks?: ContainerNetworkStats;
}
/** Get a specific action. Response: [Action]. */
export interface GetAction {
/** Id or name */
@@ -6154,7 +6436,7 @@ export interface ListSchedules {
/** Pass Vec of tag ids or tag names */
tags?: string[];
/** 'All' or 'Any' */
tag_behavior?: TagBehavior;
tag_behavior?: TagQueryBehavior;
}
/**
* List the available secrets from the core config.
@@ -6380,22 +6662,6 @@ export interface PermissionToml {
/** Any [SpecificPermissions](SpecificPermission) on the resource */
specific?: Array<SpecificPermission>;
}
export declare enum PortTypeEnum {
EMPTY = "",
TCP = "tcp",
UDP = "udp",
SCTP = "sctp"
}
/** An open port on a container */
export interface Port {
/** Host IP address that the container's port is mapped to */
IP?: string;
/** Port on the container */
PrivatePort?: number;
/** Port exposed on the host */
PublicPort?: number;
Type?: PortTypeEnum;
}
/**
* Prunes the docker buildx cache on the target server. Response: [Update].
*
@@ -6644,11 +6910,63 @@ export interface RenameUserGroup {
/** The new name for the UserGroup */
name: string;
}
export declare enum DefaultRepoFolder {
/** /${root_directory}/stacks */
Stacks = "Stacks",
/** /${root_directory}/builds */
Builds = "Builds",
/** /${root_directory}/repos */
Repos = "Repos",
/**
* If the repo is only cloned
* in the core repo cache (resource sync),
* this isn't relevant.
*/
NotApplicable = "NotApplicable"
}
export interface RepoExecutionArgs {
/** Resource name (eg Build name, Repo name) */
name: string;
/** Git provider domain. Default: `github.com` */
provider: string;
/** Use https (vs http). */
https: boolean;
/** Configure the account used to access repo (if private) */
account?: string;
/**
* Full repo identifier. {namespace}/{repo_name}
* It's optional in order to force checking, and produce an error if not defined.
*/
repo?: string;
/** Git Branch. Default: `main` */
branch: string;
/** Specific commit hash. Optional */
commit?: string;
/** The clone destination path */
destination?: string;
/**
* The default folder to use.
* Depends on the resource type.
*/
default_folder: DefaultRepoFolder;
}
export interface RepoExecutionResponse {
/** Response logs */
logs: Log[];
/** Absolute path to the repo root on the host. */
path: string;
/** Latest short commit hash, if it could be retrieved */
commit_hash?: string;
/** Latest commit message, if it could be retrieved */
commit_message?: string;
}
export interface ResourceToml<PartialConfig> {
/** The resource name. Required */
name: string;
/** The resource description. Optional. */
description?: string;
/** Mark resource as a template */
template?: boolean;
/** Tag ids or names. Optional */
tags?: string[];
/**
@@ -7130,16 +7448,6 @@ export interface UpdateDeployment {
/** The partial config update. */
config: _PartialDeploymentConfig;
}
/**
* Update a resource's description.
* Response: [NoData].
*/
export interface UpdateDescription {
/** The target resource to set description for. */
target: ResourceTarget;
/** The new description. */
description: string;
}
/**
* **Admin only.** Update a docker registry account.
* Response: [DockerRegistryAccount].
@@ -7219,6 +7527,32 @@ export interface UpdateRepo {
/** The partial config update to apply. */
config: _PartialRepoConfig;
}
/**
* Update a resource's common meta fields.
* - description
* - template
* - tags
* Response: [NoData].
*/
export interface UpdateResourceMeta {
/** The target resource to update meta on. */
target: ResourceTarget;
/**
* New description to set,
* or null for no update
*/
description?: string;
/**
* New template value (true or false),
* or null for no update
*/
template?: boolean;
/**
* The exact tags to set,
* or null for no update
*/
tags?: string[];
}
/**
* Update the sync at the given id, and return the updated sync.
* Response: [ResourceSync].
@@ -7287,15 +7621,6 @@ export interface UpdateTagColor {
/** The new color for the tag. */
color: TagColor;
}
/**
* Update the tags on a resource.
* Response: [NoData]
*/
export interface UpdateTagsOnResource {
target: ResourceTarget;
/** Tag Ids */
tags: string[];
}
/**
* **Super Admin only.** Updates whether a user is admin.
* Response: [NoData].
@@ -8163,11 +8488,14 @@ export type WriteRequest = {
type: "UpdatePermissionOnTarget";
params: UpdatePermissionOnTarget;
} | {
type: "UpdateDescription";
params: UpdateDescription;
type: "UpdateResourceMeta";
params: UpdateResourceMeta;
} | {
type: "CreateServer";
params: CreateServer;
} | {
type: "CopyServer";
params: CopyServer;
} | {
type: "DeleteServer";
params: DeleteServer;
@@ -8387,9 +8715,6 @@ export type WriteRequest = {
} | {
type: "UpdateTagColor";
params: UpdateTagColor;
} | {
type: "UpdateTagsOnResource";
params: UpdateTagsOnResource;
} | {
type: "CreateVariable";
params: CreateVariable;

@@ -29,13 +29,22 @@ export var ActionState;
/** Currently running */
ActionState["Running"] = "Running";
})(ActionState || (ActionState = {}));
export var TagBehavior;
(function (TagBehavior) {
export var TemplatesQueryBehavior;
(function (TemplatesQueryBehavior) {
/** Include templates in results. Default. */
TemplatesQueryBehavior["Include"] = "Include";
/** Exclude templates from results. */
TemplatesQueryBehavior["Exclude"] = "Exclude";
/** Results *only* include templates. */
TemplatesQueryBehavior["Only"] = "Only";
})(TemplatesQueryBehavior || (TemplatesQueryBehavior = {}));
export var TagQueryBehavior;
(function (TagQueryBehavior) {
/** Returns resources which have strictly all the tags */
TagBehavior["All"] = "All";
TagQueryBehavior["All"] = "All";
/** Returns resources which have one or more of the tags */
TagBehavior["Any"] = "Any";
})(TagBehavior || (TagBehavior = {}));
TagQueryBehavior["Any"] = "Any";
})(TagQueryBehavior || (TagQueryBehavior = {}));
/** Types of maintenance schedules */
export var MaintenanceScheduleType;
(function (MaintenanceScheduleType) {
@@ -238,26 +247,47 @@ export var SeverityLevel;
})(SeverityLevel || (SeverityLevel = {}));
export var Timelength;
(function (Timelength) {
/** `1-sec` */
Timelength["OneSecond"] = "1-sec";
/** `5-sec` */
Timelength["FiveSeconds"] = "5-sec";
/** `10-sec` */
Timelength["TenSeconds"] = "10-sec";
/** `15-sec` */
Timelength["FifteenSeconds"] = "15-sec";
/** `30-sec` */
Timelength["ThirtySeconds"] = "30-sec";
/** `1-min` */
Timelength["OneMinute"] = "1-min";
/** `2-min` */
Timelength["TwoMinutes"] = "2-min";
/** `5-min` */
Timelength["FiveMinutes"] = "5-min";
/** `10-min` */
Timelength["TenMinutes"] = "10-min";
/** `15-min` */
Timelength["FifteenMinutes"] = "15-min";
/** `30-min` */
Timelength["ThirtyMinutes"] = "30-min";
/** `1-hr` */
Timelength["OneHour"] = "1-hr";
/** `2-hr` */
Timelength["TwoHours"] = "2-hr";
/** `6-hr` */
Timelength["SixHours"] = "6-hr";
/** `8-hr` */
Timelength["EightHours"] = "8-hr";
/** `12-hr` */
Timelength["TwelveHours"] = "12-hr";
/** `1-day` */
Timelength["OneDay"] = "1-day";
/** `3-day` */
Timelength["ThreeDay"] = "3-day";
/** `1-wk` */
Timelength["OneWeek"] = "1-wk";
/** `2-wk` */
Timelength["TwoWeeks"] = "2-wk";
/** `30-day` */
Timelength["ThirtyDays"] = "30-day";
})(Timelength || (Timelength = {}));
export var TagColor;
@@ -412,6 +442,13 @@ export var ClusterVolumePublishStatusStateEnum;
ClusterVolumePublishStatusStateEnum["PendingNodeUnpublish"] = "pending-node-unpublish";
ClusterVolumePublishStatusStateEnum["PendingControllerUnpublish"] = "pending-controller-unpublish";
})(ClusterVolumePublishStatusStateEnum || (ClusterVolumePublishStatusStateEnum = {}));
export var PortTypeEnum;
(function (PortTypeEnum) {
PortTypeEnum["EMPTY"] = "";
PortTypeEnum["TCP"] = "tcp";
PortTypeEnum["UDP"] = "udp";
PortTypeEnum["SCTP"] = "sctp";
})(PortTypeEnum || (PortTypeEnum = {}));
export var ProcedureState;
(function (ProcedureState) {
/** Last run successful */
@@ -517,13 +554,21 @@ export var TerminalRecreateMode;
/** Only kill and recreate if the command is different. */
TerminalRecreateMode["DifferentCommand"] = "DifferentCommand";
})(TerminalRecreateMode || (TerminalRecreateMode = {}));
export var PortTypeEnum;
(function (PortTypeEnum) {
PortTypeEnum["EMPTY"] = "";
PortTypeEnum["TCP"] = "tcp";
PortTypeEnum["UDP"] = "udp";
PortTypeEnum["SCTP"] = "sctp";
})(PortTypeEnum || (PortTypeEnum = {}));
export var DefaultRepoFolder;
(function (DefaultRepoFolder) {
/** /${root_directory}/stacks */
DefaultRepoFolder["Stacks"] = "Stacks";
/** /${root_directory}/builds */
DefaultRepoFolder["Builds"] = "Builds";
/** /${root_directory}/repos */
DefaultRepoFolder["Repos"] = "Repos";
/**
* If the repo is only cloned
* in the core repo cache (resource sync),
* this isn't relevant.
*/
DefaultRepoFolder["NotApplicable"] = "NotApplicable";
})(DefaultRepoFolder || (DefaultRepoFolder = {}));
export var SearchCombinator;
(function (SearchCombinator) {
SearchCombinator["Or"] = "Or";

@@ -6,6 +6,7 @@ import {
useWebhookIdOrName,
WebhookIntegration,
useWebhookIntegrations,
useSettingsView,
} from "@lib/hooks";
import { Types } from "komodo_client";
import {
@@ -59,7 +60,6 @@ import {
MonacoEditor,
MonacoLanguage,
} from "@components/monaco";
import { useSettingsView } from "@pages/settings";
import { useNavigate } from "react-router-dom";

export const ConfigItem = ({

@@ -18,7 +18,7 @@ import { ResourceTags } from "./tags";
import { Topbar } from "./topbar";
import { cn, usableResourcePath } from "@lib/utils";
import { Sidebar } from "./sidebar";
import { ResourceName } from "./resources/common";
import { ResourceNameSimple } from "./resources/common";
import { useShiftKeyListener } from "@lib/hooks";

export const Layout = () => {
@@ -238,7 +238,7 @@ export const NewLayout = ({
<DialogTitle>New {entityType}</DialogTitle>
<DialogDescription>Enter a unique name for the new {entityType}.</DialogDescription>
</DialogHeader>
<div className="flex flex-col gap-4 py-8">{children}</div>
<div className="flex flex-col gap-6 py-8">{children}</div>
<DialogFooter>
<Button
variant="secondary"
@@ -274,7 +274,7 @@ export const ResourceCard = ({
<CardHeader className="flex-row justify-between">
<div>
<CardTitle>
<ResourceName type={type} id={id} />
<ResourceNameSimple type={type} id={id} />
</CardTitle>
{/* <CardDescription>
<Components.Description id={id} />
@@ -310,7 +310,7 @@ export const ResourceRow = ({
<Card className="h-full hover:bg-accent/50 group-focus:bg-accent/50 transition-colors">
<CardHeader className="grid grid-cols-4 items-center">
<CardTitle>
<ResourceName type={type} id={id} />
<ResourceNameSimple type={type} id={id} />
</CardTitle>
{Object.entries(Components.Info).map(([key, Info]) => (
<Info key={key} id={id} />

@@ -16,7 +16,8 @@ import { cn, RESOURCE_TARGETS, usableResourcePath } from "@lib/utils";
|
||||
import { Badge } from "@ui/badge";
|
||||
import { ResourceComponents } from "./resources";
|
||||
import { Switch } from "@ui/switch";
|
||||
import { DOCKER_LINK_ICONS } from "./util";
|
||||
import { DOCKER_LINK_ICONS, TemplateMarker } from "./util";
|
||||
import { UsableResource } from "@types";
|
||||
|
||||
export const OmniSearch = ({
|
||||
className,
|
||||
@@ -50,8 +51,10 @@ export const OmniSearch = ({
|
||||
|
||||
type OmniItem = {
|
||||
key: string;
|
||||
type: UsableResource;
|
||||
label: string;
|
||||
icon: ReactNode;
|
||||
template: boolean;
|
||||
onSelect: () => void;
|
||||
};
|
||||
|
||||
@@ -93,7 +96,7 @@ export const OmniDialog = ({
|
||||
<Fragment key={key}>
|
||||
{i !== 0 && <CommandSeparator />}
|
||||
<CommandGroup heading={key ? key : undefined}>
|
||||
{items.map(({ key, label, icon, onSelect }) => (
|
||||
{items.map(({ key, type, label, icon, onSelect, template }) => (
|
||||
<CommandItem
|
||||
key={key}
|
||||
value={key}
|
||||
@@ -102,6 +105,7 @@ export const OmniDialog = ({
|
||||
>
|
||||
{icon}
|
||||
{label}
|
||||
{template && <TemplateMarker type={type} />}
|
||||
</CommandItem>
|
||||
))}
|
||||
</CommandGroup>
|
||||
@@ -131,31 +135,39 @@ const useOmniItems = (
       "": [
         {
           key: "Home",
+          type: "Server" as UsableResource,
           label: "Home",
           icon: <Home className="w-4 h-4" />,
           onSelect: () => nav("/"),
+          template: false,
         },
         ...RESOURCE_TARGETS.map((_type) => {
           const type = _type === "ResourceSync" ? "Sync" : _type;
           const Components = ResourceComponents[_type];
           return {
             key: type + "s",
+            type: _type,
             label: type + "s",
             icon: <Components.Icon />,
             onSelect: () => nav(usableResourcePath(_type)),
+            template: false,
           };
         }),
         {
           key: "Containers",
+          type: "Server" as UsableResource,
           label: "Containers",
           icon: <Box className="w-4 h-4" />,
           onSelect: () => nav("/containers"),
+          template: false,
         },
         (user?.admin && {
           key: "Users",
+          type: "Server" as UsableResource,
           label: "Users",
           icon: <User className="w-4 h-4" />,
           onSelect: () => nav("/users"),
+          template: false,
         }) as OmniItem,
       ]
         .filter((item) => item)
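
Note: the Users entry above is the short-circuit expression user?.admin && { ... }, which leaves a falsy value in the array for non-admins; the trailing .filter((item) => item) then drops that slot. A condensed illustration of the pattern:

type Item = { key: string };

const isAdmin: boolean = false;
const entries = [
  { key: "Home" },
  // false when not admin; the cast keeps the array typed as Item[]
  (isAdmin && { key: "Users" }) as Item,
].filter((item) => item); // -> [{ key: "Home" }]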
@@ -174,8 +186,8 @@ const useOmniItems = (
         return [
           type + "s",
           resources[_type]
-            ?.filter((item) => {
-              const lower_name = item.name.toLowerCase();
+            ?.filter((resource) => {
+              const lower_name = resource.name.toLowerCase();
               return (
                 searchTerms.length === 0 ||
                 searchTerms.every(
@@ -184,12 +196,14 @@ const useOmniItems = (
                 )
               );
             })
-            .map((server) => ({
-              key: type + "-" + server.name,
-              label: server.name,
-              icon: <Components.Icon id={server.id} />,
+            .map((resource) => ({
+              key: type + "-" + resource.name,
+              type: _type,
+              label: resource.name,
+              icon: <Components.Icon id={resource.id} />,
               onSelect: () =>
-                nav(`/${usableResourcePath(_type)}/${server.id}`),
+                nav(`/${usableResourcePath(_type)}/${resource.id}`),
+              template: resource.template,
             })) || [],
         ];
       })

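Note: the filter in the two hunks above matches a resource when every search term occurs in its lower-cased name (an empty query matches everything). The term comparison itself is cut off at the hunk boundary, so this standalone version of the predicate is an approximation:

const matches = (name: string, searchTerms: string[]): boolean => {
  const lower_name = name.toLowerCase();
  return (
    searchTerms.length === 0 ||
    searchTerms.every((term) => lower_name.includes(term.toLowerCase()))
  );
};

// matches("prod-server-1", ["prod", "server"]) -> true
// matches("prod-server-1", ["staging"])        -> false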
@@ -1,14 +1,10 @@
-import {
-  ActionWithDialog,
-  ResourcePageHeader,
-  StatusBadge,
-} from "@components/util";
+import { ActionWithDialog, StatusBadge } from "@components/util";
 import { useExecute, useRead } from "@lib/hooks";
 import { RequiredResourceComponents } from "@types";
 import { Clapperboard, Clock } from "lucide-react";
 import { ActionConfig } from "./config";
 import { ActionTable } from "./table";
-import { DeleteResource, NewResource } from "../common";
+import { DeleteResource, NewResource, ResourcePageHeader } from "../common";
 import {
   action_state_intention,
   stroke_color_class_by_intention,
@@ -152,14 +148,13 @@ export const ActionComponents: RequiredResourceComponents = {

   ResourcePageHeader: ({ id }) => {
     const action = useAction(id);

     return (
       <ResourcePageHeader
         intent={action_state_intention(action?.info.state)}
         icon={<ActionIcon id={id} size={8} />}
-        type="Action"
-        id={id}
-        name={action?.name}
+        resource={action}
         state={action?.info.state}
         status={undefined}
       />

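Note: across the Action, Alerter, and Build hunks, the per-resource props (type / id / name) collapse into a single resource prop on the ResourcePageHeader relocated to ../common. Its definition is not part of this diff; the prop shape below is inferred from the call sites, and the member names and optionality are assumptions:

import { ReactNode } from "react";

// Inferred: the shared header derives type/id/name (and template status)
// from the resource list item itself instead of separate props.
type ResourcePageHeaderProps = {
  intent?: string; // e.g. the value of action_state_intention(...)
  icon: ReactNode;
  resource: { id: string; name: string; template?: boolean } | undefined;
  state: string | undefined;
  status: string | undefined;
};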
@@ -4,10 +4,10 @@ import { AlarmClock, FlaskConical } from "lucide-react";
 import { Link } from "react-router-dom";
 import { Card, CardDescription, CardHeader, CardTitle } from "@ui/card";
 import { AlerterConfig } from "./config";
-import { DeleteResource, NewResource } from "../common";
+import { DeleteResource, NewResource, ResourcePageHeader } from "../common";
 import { AlerterTable } from "./table";
 import { Types } from "komodo_client";
-import { ConfirmButton, ResourcePageHeader } from "@components/util";
+import { ConfirmButton } from "@components/util";
 import { GroupActions } from "@components/group-actions";

 const useAlerter = (id?: string) =>
@@ -95,7 +95,7 @@ export const AlerterComponents: RequiredResourceComponents = {
         icon={<AlarmClock className="w-8" />}
-        type="Alerter"
-        id={id}
-        name={alerter?.name}
+        resource={alerter}
         state={alerter?.info.enabled ? "Enabled" : "Disabled"}
         status={alerter?.info.endpoint_type}
       />

@@ -14,6 +14,7 @@ import {
   DeleteResource,
   NewResource,
   ResourceLink,
+  ResourcePageHeader,
   StandardSource,
 } from "../common";
 import { DeploymentTable } from "../deployment/table";
@@ -28,7 +29,7 @@ import { Tabs, TabsContent, TabsList, TabsTrigger } from "@ui/tabs";
 import { ResourceComponents } from "..";
 import { Types } from "komodo_client";
 import { DashboardPieChart } from "@pages/home/dashboard";
-import { ResourcePageHeader, StatusBadge } from "@components/util";
+import { StatusBadge } from "@components/util";
 import { Card } from "@ui/card";
 import { Badge } from "@ui/badge";
 import { useToast } from "@ui/use-toast";
@@ -285,14 +286,13 @@ export const BuildComponents: RequiredResourceComponents = {

   ResourcePageHeader: ({ id }) => {
     const build = useBuild(id);

     return (
       <ResourcePageHeader
         intent={build_state_intention(build?.info.state)}
         icon={<BuildIcon id={id} size={8} />}
-        type="Build"
-        id={id}
-        name={build?.name}
+        resource={build}
         state={build?.info.state}
         status=""
       />
Some files were not shown because too many files have changed in this diff.