Compare commits

..

14 Commits

Author SHA1 Message Date
mbecker20
84fb603951 1.10 2024-07-01 03:18:26 -07:00
mbecker20
55bac0dd13 check right thing for empty 2024-07-01 03:12:22 -07:00
mbecker20
b143f42363 update mungos 2024-07-01 02:47:06 -07:00
mbecker20
007efd136a 1.10.0 pre 2024-07-01 02:38:24 -07:00
mbecker20
b329767f9e 1.10.0-pre-0 2024-07-01 02:33:01 -07:00
mbecker20
b4231957d5 config for secret args 2024-07-01 02:31:53 -07:00
mbecker20
b4dc446f95 interpolate core variables / secrets into build secret_args 2024-07-01 02:27:03 -07:00
mbecker20
c92515cecc combine into router 2024-07-01 01:44:07 -07:00
mbecker20
f3712feea2 finish periphery clean 2024-07-01 01:39:03 -07:00
mbecker20
0e81d17860 shrink periphery implementation 2024-07-01 01:19:25 -07:00
mbecker20
c3f1557b83 fix mem alert 2024-06-30 00:27:37 -07:00
mbecker20
5f88e4b436 seperate webhook actions 2024-06-25 01:22:38 -07:00
mbecker20
473c6b3867 dont send failed build alert on build cancel 2024-06-24 16:59:34 -07:00
mbecker20
c10edaa5d1 fix builder toml export 2024-06-23 03:00:31 -07:00
65 changed files with 1571 additions and 1501 deletions

174
Cargo.lock generated
View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "alerter"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"axum 0.7.5",
@@ -241,9 +241,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ec2"
version = "1.53.0"
version = "1.55.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db7385f189e99cae1fbadbfa7e0ed58cd89f655e3b3884b8386fecd9d988b4d5"
checksum = "e2fd9674e7f6fd95c7df47694c7826ea34d0c997529b62708df886de4bab6887"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -443,11 +443,11 @@ dependencies = [
"http-body 1.0.0",
"httparse",
"hyper 0.14.28",
"hyper-rustls",
"hyper-rustls 0.24.2",
"once_cell",
"pin-project-lite",
"pin-utils",
"rustls",
"rustls 0.21.12",
"tokio",
"tracing",
]
@@ -944,7 +944,7 @@ dependencies = [
[[package]]
name = "command"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"monitor_client",
"run_command",
@@ -1322,7 +1322,7 @@ dependencies = [
[[package]]
name = "formatting"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"serror",
]
@@ -1451,7 +1451,7 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "git"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"command",
@@ -1685,7 +1685,7 @@ dependencies = [
"httpdate",
"itoa",
"pin-project-lite",
"socket2 0.5.7",
"socket2",
"tokio",
"tower-service",
"tracing",
@@ -1738,10 +1738,27 @@ dependencies = [
"http 0.2.12",
"hyper 0.14.28",
"log",
"rustls",
"rustls 0.21.12",
"rustls-native-certs",
"tokio",
"tokio-rustls",
"tokio-rustls 0.24.1",
]
[[package]]
name = "hyper-rustls"
version = "0.27.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155"
dependencies = [
"futures-util",
"http 1.1.0",
"hyper 1.3.1",
"hyper-util",
"rustls 0.23.10",
"rustls-pki-types",
"tokio",
"tokio-rustls 0.26.0",
"tower-service",
]
[[package]]
@@ -1798,7 +1815,7 @@ dependencies = [
"http-body 1.0.0",
"hyper 1.3.1",
"pin-project-lite",
"socket2 0.5.7",
"socket2",
"tokio",
"tower",
"tower-service",
@@ -1907,7 +1924,7 @@ version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
dependencies = [
"socket2 0.5.7",
"socket2",
"widestring",
"windows-sys 0.48.0",
"winreg 0.50.0",
@@ -2006,7 +2023,7 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "logger"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"monitor_client",
@@ -2075,7 +2092,7 @@ dependencies = [
[[package]]
name = "migrator"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"chrono",
@@ -2127,9 +2144,9 @@ dependencies = [
[[package]]
name = "mongo_indexed"
version = "0.3.0"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e837961dc668ca0d6e298066f22c1c9242bdfe7cc473b18f5e3d960f27e6adb"
checksum = "202ab1775c25b55f035ae5a5e10c6505ddcb8319159bca055ebe3790542478e4"
dependencies = [
"anyhow",
"mongo_indexed_derive",
@@ -2139,9 +2156,9 @@ dependencies = [
[[package]]
name = "mongo_indexed_derive"
version = "0.3.0"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b1bfd94b65c8adb78d87177ccb5754204c175098be1057e7ad25282753dc4e8"
checksum = "177b97b43773866e7ed1aa023acdbe438d973ce181d64d9769c303c002b3eaf5"
dependencies = [
"proc-macro2",
"quote",
@@ -2150,9 +2167,9 @@ dependencies = [
[[package]]
name = "mongodb"
version = "2.8.2"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef206acb1b72389b49bc9985efe7eb1f8a9bb18e5680d262fac26c07f44025f1"
checksum = "bb4af699cbb12a221e978134999b16cbf030d964c3a7cd6c4af2584734190e58"
dependencies = [
"async-trait",
"base64 0.13.1",
@@ -2167,26 +2184,27 @@ dependencies = [
"futures-util",
"hex",
"hmac",
"lazy_static",
"md-5",
"mongodb-internal-macros",
"once_cell",
"pbkdf2",
"percent-encoding",
"rand",
"rustc_version_runtime",
"rustls",
"rustls 0.21.12",
"rustls-pemfile 1.0.4",
"serde",
"serde_bytes",
"serde_with 1.14.0",
"sha-1",
"sha2",
"socket2 0.4.10",
"socket2",
"stringprep",
"strsim 0.10.0",
"take_mut",
"thiserror",
"tokio",
"tokio-rustls",
"tokio-rustls 0.24.1",
"tokio-util",
"trust-dns-proto",
"trust-dns-resolver",
@@ -2195,9 +2213,20 @@ dependencies = [
"webpki-roots",
]
[[package]]
name = "mongodb-internal-macros"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1159486b5e1993138c385369c3c46f5608cb0b1c53734e503538366f3c4cedcc"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "monitor_cli"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"clap",
@@ -2217,7 +2246,7 @@ dependencies = [
[[package]]
name = "monitor_client"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2232,7 +2261,7 @@ dependencies = [
"futures",
"mongo_indexed",
"partial_derive2",
"reqwest 0.12.4",
"reqwest 0.12.5",
"resolver_api",
"serde",
"serde_json",
@@ -2249,7 +2278,7 @@ dependencies = [
[[package]]
name = "monitor_core"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2279,7 +2308,7 @@ dependencies = [
"partial_derive2",
"periphery_client",
"rand",
"reqwest 0.12.4",
"reqwest 0.12.5",
"resolver_api",
"run_command",
"serde",
@@ -2303,7 +2332,7 @@ dependencies = [
[[package]]
name = "monitor_periphery"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2335,15 +2364,16 @@ dependencies = [
[[package]]
name = "mungos"
version = "0.5.6"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ba5223d327a4869d60eee6aaf505e88109c2299b6e4b26ff26f9f2e1d4cb65a"
checksum = "9c89188a577f4e4464d83203336cc57ee5c835bf0e52ce3748f0abc090466ed8"
dependencies = [
"anyhow",
"envy",
"futures",
"mongodb",
"serde",
"serde_derive",
]
[[package]]
@@ -2639,11 +2669,11 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "periphery_client"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"monitor_client",
"reqwest 0.12.4",
"reqwest 0.12.5",
"resolver_api",
"serde",
"serde_json",
@@ -2861,9 +2891,9 @@ dependencies = [
[[package]]
name = "reqwest"
version = "0.12.4"
version = "0.12.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10"
checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37"
dependencies = [
"base64 0.22.1",
"bytes",
@@ -2875,6 +2905,7 @@ dependencies = [
"http-body 1.0.0",
"http-body-util",
"hyper 1.3.1",
"hyper-rustls 0.27.2",
"hyper-tls 0.6.0",
"hyper-util",
"ipnet",
@@ -2889,7 +2920,7 @@ dependencies = [
"serde",
"serde_json",
"serde_urlencoded",
"sync_wrapper 0.1.2",
"sync_wrapper 1.0.1",
"system-configuration",
"tokio",
"tokio-native-tls",
@@ -2913,9 +2944,9 @@ dependencies = [
[[package]]
name = "resolver_api"
version = "1.1.0"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4e8831e742662de68dbce8ca6243d1f0a1a5a295e1b3fa9ad996f215d1645b5"
checksum = "e86d2e3a6e01f92f8f964bc16a5e579028c02fa625612e5e1f159a6474e47969"
dependencies = [
"anyhow",
"resolver_api_derive",
@@ -3014,10 +3045,23 @@ checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
dependencies = [
"log",
"ring",
"rustls-webpki",
"rustls-webpki 0.101.7",
"sct",
]
[[package]]
name = "rustls"
version = "0.23.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402"
dependencies = [
"once_cell",
"rustls-pki-types",
"rustls-webpki 0.102.4",
"subtle",
"zeroize",
]
[[package]]
name = "rustls-native-certs"
version = "0.6.3"
@@ -3065,6 +3109,17 @@ dependencies = [
"untrusted",
]
[[package]]
name = "rustls-webpki"
version = "0.102.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e"
dependencies = [
"ring",
"rustls-pki-types",
"untrusted",
]
[[package]]
name = "rustversion"
version = "1.0.17"
@@ -3177,9 +3232,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.117"
version = "1.0.119"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0"
dependencies = [
"indexmap 2.2.6",
"itoa",
@@ -3361,16 +3416,6 @@ version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "socket2"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "socket2"
version = "0.5.7"
@@ -3581,7 +3626,7 @@ dependencies = [
[[package]]
name = "tests"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"dotenv",
@@ -3687,7 +3732,7 @@ dependencies = [
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2 0.5.7",
"socket2",
"tokio-macros",
"windows-sys 0.48.0",
]
@@ -3729,7 +3774,18 @@ version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [
"rustls",
"rustls 0.21.12",
"tokio",
]
[[package]]
name = "tokio-rustls"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4"
dependencies = [
"rustls 0.23.10",
"rustls-pki-types",
"tokio",
]
@@ -4166,7 +4222,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "update_logger"
version = "1.9.0"
version = "1.10.0"
dependencies = [
"anyhow",
"logger",
@@ -4206,9 +4262,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "uuid"
version = "1.8.0"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0"
checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439"
dependencies = [
"getrandom",
"rand",

View File

@@ -3,7 +3,7 @@ resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.9.0"
version = "1.10.0"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -15,7 +15,7 @@ monitor_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_client = "1.9.0"
monitor_client = "1.10.0"
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
@@ -32,16 +32,16 @@ merge_config_files = "0.1.5"
async_timing_util = "0.1.14"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "0.3.0"
resolver_api = "1.1.0"
mongo_indexed = "1.0.0"
resolver_api = "1.1.1"
toml_pretty = "1.1.2"
parse_csl = "0.1.0"
mungos = "0.5.6"
mungos = "1.0.0"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.38.0", features = ["full"] }
reqwest = { version = "0.12.4", features = ["json"] }
reqwest = { version = "0.12.5", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
futures-util = "0.3.30"
@@ -57,7 +57,7 @@ tokio-tungstenite = "0.23.1"
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.203", features = ["derive"] }
strum = { version = "0.26.2", features = ["derive"] }
serde_json = "1.0.117"
serde_json = "1.0.118"
toml = "0.8.14"
# ERROR
@@ -78,7 +78,7 @@ dotenv = "0.15.0"
envy = "0.4.2"
# CRYPTO
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
uuid = { version = "1.9.1", features = ["v4", "fast-rng", "serde"] }
urlencoding = "2.1.3"
bcrypt = "0.15.1"
base64 = "0.22.1"
@@ -93,8 +93,8 @@ bollard = "0.16.1"
sysinfo = "0.30.12"
# CLOUD
aws-config = "1.5.1"
aws-sdk-ec2 = "1.51.1"
aws-config = "1.5.3"
aws-sdk-ec2 = "1.53.0"
aws-sdk-ecr = "1.33.0"
# MISC

View File

@@ -1,4 +1,4 @@
use std::{collections::HashSet, time::Duration};
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::{format_serror, muted};
@@ -145,8 +145,10 @@ impl Resolve<RunBuild, (User, Update)> for State {
"get builder",
format_serror(&e.context("failed to get builder").into()),
));
return handle_early_return(update, build.id, build.name)
.await;
return handle_early_return(
update, build.id, build.name, false,
)
.await;
}
};
@@ -172,7 +174,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update, build.id, build.name).await
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -196,6 +198,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut secret_replacers_for_log = HashSet::new();
// Interpolate into build args
for arg in &mut build.config.build_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
@@ -214,10 +219,40 @@ impl Resolve<RunBuild, (User, Update)> for State {
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.iter().map(|(_, variable)| variable.clone()),
);
secret_replacers.extend(more_replacers);
arg.value = res;
}
// Interpolate into secret args
for arg in &mut build.config.secret_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.into_iter().map(|(_, variable)| variable),
);
// Secret args don't need to be in replacers sent to periphery.
// The secret args don't end up in the command like build args do.
arg.value = res;
}
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
@@ -229,12 +264,12 @@ impl Resolve<RunBuild, (User, Update)> for State {
.join("\n"),
);
}
if !secret_replacers.is_empty() {
if !secret_replacers_for_log.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
secret_replacers_for_log
.into_iter()
.map(|variable| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
@@ -253,7 +288,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update, build.id, build.name).await
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -288,7 +323,6 @@ impl Resolve<RunBuild, (User, Update)> for State {
"info.last_built_at": monitor_timestamp(),
}
},
None,
)
.await;
}
@@ -354,6 +388,7 @@ async fn handle_early_return(
mut update: Update,
build_id: String,
build_name: String,
is_cancel: bool,
) -> anyhow::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
@@ -371,7 +406,7 @@ async fn handle_early_return(
refresh_build_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success {
if !update.success && !is_cancel {
let target = update.target.clone();
let version = update.version;
let err = update.logs.iter().find(|l| !l.success).cloned();
@@ -406,24 +441,28 @@ pub async fn validate_cancel_build(
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates.find_one(
doc! {
db.updates
.find_one(doc! {
"operation": "RunBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
),
db.updates.find_one(
doc! {
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future(),
db.updates
.find_one(doc! {
"operation": "CancelBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
)
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future()
)?;
match (latest_build, latest_cancel) {

View File

@@ -181,7 +181,6 @@ async fn update_last_pulled_time(repo_name: &str) {
.update_one(
doc! { "name": repo_name },
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
None,
)
.await;
if let Err(e) = res {

View File

@@ -36,12 +36,9 @@ impl Resolve<LaunchServer, (User, Update)> for State {
if db_client()
.await
.servers
.find_one(
doc! {
"name": &name
},
None,
)
.find_one(doc! {
"name": &name
})
.await
.context("failed to query db for servers")?
.is_some()

View File

@@ -80,7 +80,7 @@ impl Resolve<GetAlertersSummary, User> for State {
let total = db_client()
.await
.alerters
.count_documents(query, None)
.count_documents(query.unwrap_or_default())
.await
.context("failed to count all alerter documents")?;
let res = GetAlertersSummaryResponse {

View File

@@ -147,16 +147,13 @@ impl Resolve<GetBuildMonthlyStats, User> for State {
let mut build_updates = db_client()
.await
.updates
.find(
doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
"operation": Operation::RunBuild.to_string(),
.find(doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
None,
)
"operation": Operation::RunBuild.to_string(),
})
.await
.context("failed to get updates cursor")?;

View File

@@ -80,7 +80,7 @@ impl Resolve<GetBuildersSummary, User> for State {
let total = db_client()
.await
.builders
.count_documents(query, None)
.count_documents(query.unwrap_or_default())
.await
.context("failed to count all builder documents")?;
let res = GetBuildersSummaryResponse {

View File

@@ -77,7 +77,7 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
let total = db_client()
.await
.server_templates
.count_documents(query, None)
.count_documents(query.unwrap_or_default())
.await
.context("failed to count all server template documents")?;
let res = GetServerTemplatesSummaryResponse {

View File

@@ -230,7 +230,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
// replace server id of builder
if let BuilderConfig::Server(config) = &mut builder.config {
config.server_id.clone_from(
names.servers.get(&id).unwrap_or(&String::new()),
names
.servers
.get(&config.server_id)
.unwrap_or(&String::new()),
)
}
res

View File

@@ -37,7 +37,7 @@ impl Resolve<GetUserGroup, User> for State {
db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user groups")?
.context("no UserGroup found with given name or id")

View File

@@ -187,7 +187,7 @@ impl Resolve<CreateApiKey, User> for State {
db_client()
.await
.api_keys
.insert_one(api_key, None)
.insert_one(api_key)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
@@ -208,7 +208,7 @@ impl Resolve<DeleteApiKey, User> for State {
let client = db_client().await;
let key = client
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed at db query")?
.context("no api key with key found")?;
@@ -217,7 +217,7 @@ impl Resolve<DeleteApiKey, User> for State {
}
client
.api_keys
.delete_one(doc! { "key": key.key }, None)
.delete_one(doc! { "key": key.key })
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})

View File

@@ -129,8 +129,8 @@ impl Resolve<UpdatePermissionOnTarget, User> for State {
"level": permission.as_ref(),
}
},
UpdateOptions::builder().upsert(true).build(),
)
.with_options(UpdateOptions::builder().upsert(true).build())
.await?;
Ok(UpdatePermissionOnTargetResponse {})
@@ -150,7 +150,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for users")?
.context("no matching user found")?
@@ -165,7 +165,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user_groups")?
.context("no matching user_group found")?
@@ -192,7 +192,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builds
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builds")?
.context("no matching build found")?
@@ -207,7 +207,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builders
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builders")?
.context("no matching builder found")?
@@ -222,7 +222,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.deployments
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for deployments")?
.context("no matching deployment found")?
@@ -237,7 +237,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.servers
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for servers")?
.context("no matching server found")?
@@ -252,7 +252,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.repos
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for repos")?
.context("no matching repo found")?
@@ -267,7 +267,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.alerters
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for alerters")?
.context("no matching alerter found")?
@@ -282,7 +282,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.procedures
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for procedures")?
.context("no matching procedure found")?
@@ -297,7 +297,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.server_templates
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for server templates")?
.context("no matching server template found")?
@@ -312,7 +312,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.resource_syncs
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?

View File

@@ -61,7 +61,7 @@ impl Resolve<CreateServiceUser, User> for State {
user.id = db_client()
.await
.users
.insert_one(&user, None)
.insert_one(&user)
.await
.context("failed to create service user on db")?
.inserted_id
@@ -91,7 +91,7 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
let db = db_client().await;
let service_user = db
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("no user with given username")?;
@@ -102,12 +102,11 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
.update_one(
doc! { "username": &username },
doc! { "$set": { "config.data.description": description } },
None,
)
.await
.context("failed to update user on db")?;
db.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("user with username not found")
@@ -155,7 +154,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
let db = db_client().await;
let api_key = db
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
@@ -168,7 +167,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
return Err(anyhow!("user is not service user"));
};
db.api_keys
.delete_one(doc! { "key": key }, None)
.delete_one(doc! { "key": key })
.await
.context("failed to delete api key on db")?;
Ok(DeleteApiKeyForServiceUserResponse {})

View File

@@ -268,7 +268,6 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
"target.type": "ResourceSync",
"target.id": &id,
},
None,
)
.await
.context("failed to query db for alert")
@@ -290,7 +289,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resolved_ts: None,
};
db.alerts
.insert_one(&alert, None)
.insert_one(&alert)
.await
.context("failed to open existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))

View File

@@ -46,7 +46,7 @@ impl Resolve<CreateTag, User> for State {
tag.id = db_client()
.await
.tags
.insert_one(&tag, None)
.insert_one(&tag)
.await
.context("failed to create tag on db")?
.inserted_id

View File

@@ -35,7 +35,7 @@ impl Resolve<CreateUserGroup, User> for State {
let db = db_client().await;
let id = db
.user_groups
.insert_one(user_group, None)
.insert_one(user_group)
.await
.context("failed to create UserGroup on db")?
.inserted_id
@@ -99,7 +99,7 @@ impl Resolve<DeleteUserGroup, User> for State {
.delete_many(doc! {
"user_target.type": "UserGroup",
"user_target.id": id,
}, None)
})
.await
.context("failed to clean up UserGroups permissions. User Group has been deleted")?;
@@ -125,7 +125,7 @@ impl Resolve<AddUserToUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -138,12 +138,11 @@ impl Resolve<AddUserToUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$addToSet": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -171,7 +170,7 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -184,12 +183,11 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$pull": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -229,15 +227,11 @@ impl Resolve<SetUsersInUserGroup, User> for State {
Err(_) => doc! { "name": &user_group },
};
db.user_groups
.update_one(
filter.clone(),
doc! { "$set": { "users": users } },
None,
)
.update_one(filter.clone(), doc! { "$set": { "users": users } })
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")

View File

@@ -44,7 +44,7 @@ impl Resolve<CreateVariable, User> for State {
db_client()
.await
.variables
.insert_one(&variable, None)
.insert_one(&variable)
.await
.context("failed to create variable on db")?;
@@ -86,7 +86,6 @@ impl Resolve<UpdateVariableValue, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "value": &value } },
None,
)
.await
.context("failed to update variable value on db")?;
@@ -127,7 +126,6 @@ impl Resolve<UpdateVariableDescription, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "description": &description } },
None,
)
.await
.context("failed to update variable description on db")?;
@@ -148,7 +146,7 @@ impl Resolve<DeleteVariable, User> for State {
db_client()
.await
.variables
.delete_one(doc! { "name": &name }, None)
.delete_one(doc! { "name": &name })
.await
.context("failed to delete variable on db")?;

View File

@@ -2,6 +2,7 @@ use anyhow::{anyhow, Context};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::{
monitor_timestamp,
user::{User, UserConfig},
@@ -66,7 +67,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.github_id": &github_id }, None)
.find_one(doc! { "config.data.github_id": &github_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -76,7 +77,7 @@ async fn callback(
None => {
let ts = monitor_timestamp();
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: github_user.login,
@@ -98,7 +99,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -3,6 +3,7 @@ use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::user::{User, UserConfig};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
@@ -75,7 +76,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.google_id": &google_id }, None)
.find_one(doc! { "config.data.google_id": &google_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -85,7 +86,7 @@ async fn callback(
None => {
let ts = unix_timestamp_ms() as i64;
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: google_user
@@ -113,7 +114,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -3,6 +3,7 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::http::HeaderMap;
use mongo_indexed::Document;
use monitor_client::{
api::auth::{
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
@@ -46,7 +47,7 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
let no_users_exist = db_client()
.await
.users
.find_one(None, None)
.find_one(Document::new())
.await?
.is_none();
@@ -72,7 +73,7 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
let user_id = db_client()
.await
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user")?
.inserted_id
@@ -102,7 +103,7 @@ impl Resolve<LoginLocalUser, HeaderMap> for State {
let user = db_client()
.await
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed at db query for users")?
.with_context(|| {

View File

@@ -127,7 +127,7 @@ pub async fn auth_api_key_get_user_id(
let key = db_client()
.await
.api_keys
.find_one(doc! { "key": key }, None)
.find_one(doc! { "key": key })
.await
.context("failed to query db")?
.context("no api key matching key")?;

View File

@@ -112,7 +112,7 @@ impl DbClient {
}
}
async fn resource_collection<T>(
async fn resource_collection<T: Send + Sync>(
db: &Database,
collection_name: &str,
) -> anyhow::Result<Collection<T>> {

View File

@@ -62,7 +62,6 @@ where
}
}
},
None,
)
.await
.context("failed to remove resource from users recently viewed")
@@ -111,7 +110,6 @@ pub async fn create_permission<T>(
resource_target: target.clone(),
level,
},
None,
)
.await
{

View File

@@ -72,7 +72,6 @@ async fn prune_stats() -> anyhow::Result<()> {
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.await?;
info!("deleted {} stats from db", res.deleted_count);
@@ -93,7 +92,6 @@ async fn prune_alerts() -> anyhow::Result<()> {
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.await?;
info!("deleted {} alerts from db", res.deleted_count);

View File

@@ -89,7 +89,7 @@ pub async fn get_tag(id_or_name: &str) -> anyhow::Result<Tag> {
db_client()
.await
.tags
.find_one(query, None)
.find_one(query)
.await
.context("failed to query mongo for tag")?
.with_context(|| format!("no tag found matching {id_or_name}"))
@@ -240,7 +240,7 @@ pub async fn get_variable(name: &str) -> anyhow::Result<Variable> {
db_client()
.await
.variables
.find_one(doc! { "name": &name }, None)
.find_one(doc! { "name": &name })
.await
.context("failed at call to db")?
.with_context(|| {
@@ -256,12 +256,12 @@ pub async fn get_latest_update(
db_client()
.await
.updates
.find_one(
doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
},
.find_one(doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -45,7 +45,7 @@ pub async fn add_update(
update.id = db_client()
.await
.updates
.insert_one(&update, None)
.insert_one(&update)
.await
.context("failed to insert update into db")?
.inserted_id

View File

@@ -1,425 +0,0 @@
use std::sync::{Arc, OnceLock};
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use hex::ToHex;
use hmac::{Hmac, Mac};
use monitor_client::{
api::{execute, write::RefreshResourceSyncPending},
entities::{
build::Build, procedure::Procedure, repo::Repo,
sync::ResourceSync, user::github_user,
},
};
use resolver_api::Resolve;
use serde::Deserialize;
use sha2::Sha256;
use tokio::sync::Mutex;
use tracing::Instrument;
use crate::{
config::core_config,
helpers::{
cache::Cache, random_duration, update::init_execution_update,
},
resource,
state::State,
};
type HmacSha256 = Hmac<Sha256>;
#[derive(Deserialize)]
struct Id {
id: String,
}
#[derive(Deserialize)]
struct IdBranch {
id: String,
branch: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/build/:id",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("build_webhook", id);
async {
let res = handle_build_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run build webook for build {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
),
)
.route(
"/repo/:id/clone",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_clone_webhook", id);
async {
let res = handle_repo_clone_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo clone webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/repo/:id/pull",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_pull_webhook", id);
async {
let res = handle_repo_pull_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo pull webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/procedure/:id/:branch",
post(
|Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("procedure_webhook", id, branch);
async {
let res = handle_procedure_webhook(
id.clone(),
branch,
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run procedure webook for procedure {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/refresh",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_refresh_webhook", id);
async {
let res = handle_sync_refresh_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/sync",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_execute_webhook", id);
async {
let res = handle_sync_execute_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
}
async fn handle_build_webhook(
build_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = build_locks().get_or_insert_default(&build_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let build = resource::get::<Build>(&build_id).await?;
if !build.config.webhook_enabled {
return Err(anyhow!("build does not have webhook enabled"));
}
if request_branch != build.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::RunBuild(
execute::RunBuild { build: build_id },
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_repo_clone_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::CloneRepo(
execute::CloneRepo { repo: repo_id },
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_repo_pull_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::PullRepo(
execute::PullRepo { repo: repo_id },
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_procedure_webhook(
procedure_id: String,
target_branch: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock =
procedure_locks().get_or_insert_default(&procedure_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
if request_branch != target_branch {
return Err(anyhow!("request branch does not match expected"));
}
let procedure = resource::get::<Procedure>(&procedure_id).await?;
if !procedure.config.webhook_enabled {
return Err(anyhow!("procedure does not have webhook enabled"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::RunProcedure(
execute::RunProcedure {
procedure: procedure_id,
},
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::RunProcedure(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_sync_refresh_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
State
.resolve(RefreshResourceSyncPending { sync: sync_id }, user)
.await?;
Ok(())
}
async fn handle_sync_execute_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::RunSync(execute::RunSync {
sync: sync_id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
#[instrument(skip_all)]
async fn verify_gh_signature(
headers: HeaderMap,
body: &str,
) -> anyhow::Result<()> {
// wait random amount of time
tokio::time::sleep(random_duration(0, 500)).await;
let signature = headers.get("x-hub-signature-256");
if signature.is_none() {
return Err(anyhow!("no signature in headers"));
}
let signature = signature.unwrap().to_str();
if signature.is_err() {
return Err(anyhow!("failed to unwrap signature"));
}
let signature = signature.unwrap().replace("sha256=", "");
let mut mac = HmacSha256::new_from_slice(
core_config().github_webhook_secret.as_bytes(),
)
.expect("github webhook | failed to create hmac sha256");
mac.update(body.as_bytes());
let expected = mac.finalize().into_bytes().encode_hex::<String>();
if signature == expected {
Ok(())
} else {
Err(anyhow!("signature does not equal expected"))
}
}
#[derive(Deserialize)]
struct GithubWebhookBody {
#[serde(rename = "ref")]
branch: String,
}
fn extract_branch(body: &str) -> anyhow::Result<String> {
let branch = serde_json::from_str::<GithubWebhookBody>(body)
.context("failed to parse github request body")?
.branch
.replace("refs/heads/", "");
Ok(branch)
}
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;
fn build_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
fn repo_locks() -> &'static ListenerLockCache {
static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
REPO_LOCKS.get_or_init(Default::default)
}
fn procedure_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
fn sync_locks() -> &'static ListenerLockCache {
static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
SYNC_LOCKS.get_or_init(Default::default)
}

View File

@@ -0,0 +1,51 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::RunBuild,
entities::{build::Build, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn build_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
pub async fn handle_build_webhook(
build_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = build_locks().get_or_insert_default(&build_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let build = resource::get::<Build>(&build_id).await?;
if !build.config.webhook_enabled {
return Err(anyhow!("build does not have webhook enabled"));
}
if request_branch != build.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = ExecuteRequest::RunBuild(RunBuild { build: build_id });
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}

View File

@@ -0,0 +1,204 @@
use std::sync::Arc;
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use hex::ToHex;
use hmac::{Hmac, Mac};
use serde::Deserialize;
use sha2::Sha256;
use tokio::sync::Mutex;
use tracing::Instrument;
use crate::{
config::core_config,
helpers::{cache::Cache, random_duration},
};
mod build;
mod procedure;
mod repo;
mod sync;
type HmacSha256 = Hmac<Sha256>;
#[derive(Deserialize)]
struct Id {
id: String,
}
#[derive(Deserialize)]
struct IdBranch {
id: String,
branch: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/build/:id",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("build_webhook", id);
async {
let res = build::handle_build_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run build webook for build {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
),
)
.route(
"/repo/:id/clone",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_clone_webhook", id);
async {
let res = repo::handle_repo_clone_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo clone webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/repo/:id/pull",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_pull_webhook", id);
async {
let res = repo::handle_repo_pull_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo pull webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/procedure/:id/:branch",
post(
|Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("procedure_webhook", id, branch);
async {
let res = procedure::handle_procedure_webhook(
id.clone(),
branch,
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run procedure webook for procedure {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/refresh",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_refresh_webhook", id);
async {
let res = sync::handle_sync_refresh_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/sync",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_execute_webhook", id);
async {
let res = sync::handle_sync_execute_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
}
#[instrument(skip_all)]
async fn verify_gh_signature(
headers: HeaderMap,
body: &str,
) -> anyhow::Result<()> {
// wait random amount of time
tokio::time::sleep(random_duration(0, 500)).await;
let signature = headers.get("x-hub-signature-256");
if signature.is_none() {
return Err(anyhow!("no signature in headers"));
}
let signature = signature.unwrap().to_str();
if signature.is_err() {
return Err(anyhow!("failed to unwrap signature"));
}
let signature = signature.unwrap().replace("sha256=", "");
let mut mac = HmacSha256::new_from_slice(
core_config().github_webhook_secret.as_bytes(),
)
.expect("github webhook | failed to create hmac sha256");
mac.update(body.as_bytes());
let expected = mac.finalize().into_bytes().encode_hex::<String>();
if signature == expected {
Ok(())
} else {
Err(anyhow!("signature does not equal expected"))
}
}
#[derive(Deserialize)]
struct GithubWebhookBody {
#[serde(rename = "ref")]
branch: String,
}
fn extract_branch(body: &str) -> anyhow::Result<String> {
let branch = serde_json::from_str::<GithubWebhookBody>(body)
.context("failed to parse github request body")?
.branch
.replace("refs/heads/", "");
Ok(branch)
}
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;

View File

@@ -0,0 +1,55 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::RunProcedure,
entities::{procedure::Procedure, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn procedure_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
pub async fn handle_procedure_webhook(
procedure_id: String,
target_branch: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock =
procedure_locks().get_or_insert_default(&procedure_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
if request_branch != target_branch {
return Err(anyhow!("request branch does not match expected"));
}
let procedure = resource::get::<Procedure>(&procedure_id).await?;
if !procedure.config.webhook_enabled {
return Err(anyhow!("procedure does not have webhook enabled"));
}
let user = github_user().to_owned();
let req = ExecuteRequest::RunProcedure(RunProcedure {
procedure: procedure_id,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunProcedure(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}

View File

@@ -0,0 +1,86 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::{CloneRepo, PullRepo},
entities::{repo::Repo, user::github_user},
};
use resolver_api::Resolve;
use crate::{
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn repo_locks() -> &'static ListenerLockCache {
static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
REPO_LOCKS.get_or_init(Default::default)
}
pub async fn handle_repo_clone_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo {
repo: repo_id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
pub async fn handle_repo_pull_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::PullRepo(PullRepo {
repo: repo_id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}

View File

@@ -0,0 +1,78 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{sync::ResourceSync, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn sync_locks() -> &'static ListenerLockCache {
static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
SYNC_LOCKS.get_or_init(Default::default)
}
pub async fn handle_sync_refresh_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
State
.resolve(RefreshResourceSyncPending { sync: sync_id }, user)
.await?;
Ok(())
}
pub async fn handle_sync_execute_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = ExecuteRequest::RunSync(RunSync { sync: sync_id });
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}

View File

@@ -74,7 +74,7 @@ pub async fn alert_deployments(
return;
}
send_alerts(&alerts).await;
let res = db_client().await.alerts.insert_many(alerts, None).await;
let res = db_client().await.alerts.insert_many(alerts).await;
if let Err(e) = res {
error!("failed to record deployment status alerts to db | {e:#}");
}

View File

@@ -194,7 +194,7 @@ pub async fn alert_servers(
ts,
resolved: false,
resolved_ts: None,
level: health.cpu,
level: health.mem,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerMem {
id: server_status.id.clone(),
@@ -357,7 +357,7 @@ async fn open_alerts(alerts: &[(Alert, SendAlerts)]) {
let open = || async {
let ids = db
.alerts
.insert_many(alerts.iter().map(|(alert, _)| alert), None)
.insert_many(alerts.iter().map(|(alert, _)| alert))
.await?
.inserted_ids
.into_iter()
@@ -468,7 +468,6 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) {
"resolved_ts": monitor_timestamp()
}
},
None,
)
.await
.context("failed to resolve alerts on db")

View File

@@ -30,8 +30,7 @@ pub async fn record_server_stats(ts: i64) {
})
.collect::<Vec<_>>();
if !records.is_empty() {
let res =
db_client().await.stats.insert_many(records, None).await;
let res = db_client().await.stats.insert_many(records).await;
if let Err(e) = res {
error!("failed to record server stats | {e:#}");
}

View File

@@ -137,7 +137,6 @@ impl super::MonitorResource for Builder {
mungos::update::Update::Set(
doc! { "config.builder.params.builder_id": "" },
),
None,
)
.await
.context("failed to update_many builds on database")?;

View File

@@ -187,7 +187,7 @@ pub async fn get<T: MonitorResource>(
) -> anyhow::Result<Resource<T::Config, T::Info>> {
T::coll()
.await
.find_one(id_or_name_filter(id_or_name), None)
.find_one(id_or_name_filter(id_or_name))
.await
.context("failed to query db for resource")?
.with_context(|| {
@@ -336,7 +336,7 @@ pub async fn create<T: MonitorResource>(
let resource_id = T::coll()
.await
.insert_one(&resource, None)
.insert_one(&resource)
.await
.with_context(|| {
format!("failed to add {} to db", T::resource_type())
@@ -489,7 +489,6 @@ pub async fn update_description<T: MonitorResource>(
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "description": description } },
None,
)
.await?;
Ok(())
@@ -524,7 +523,6 @@ pub async fn update_tags<T: MonitorResource>(
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "tags": tags } },
None,
)
.await?;
Ok(())
@@ -535,7 +533,7 @@ pub async fn remove_tag_from_all<T: MonitorResource>(
) -> anyhow::Result<()> {
T::coll()
.await
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } }, None)
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } })
.await
.context("failed to remove tag from resources")?;
Ok(())
@@ -619,7 +617,6 @@ where
"resource_target.type": variant.as_ref(),
"resource_target.id": &id
},
None,
)
.await
{
@@ -652,7 +649,6 @@ where
recent_field: id
}
},
None,
)
.await
.context("failed to remove resource from users recently viewed")

View File

@@ -337,12 +337,12 @@ async fn get_procedure_state_from_db(id: &str) -> ProcedureState {
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "Procedure",
"target.id": id,
"operation": "RunProcedure"
},
.find_one(doc! {
"target.type": "Procedure",
"target.id": id,
"operation": "RunProcedure"
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -244,15 +244,15 @@ async fn get_repo_state_from_db(id: &str) -> RepoState {
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "Repo",
"target.id": id,
"$or": [
{ "operation": "CloneRepo" },
{ "operation": "PullRepo" },
],
},
.find_one(doc! {
"target.type": "Repo",
"target.id": id,
"$or": [
{ "operation": "CloneRepo" },
{ "operation": "PullRepo" },
],
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -130,7 +130,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.params.server_id": &id },
doc! { "$set": { "config.params.server_id": "" } },
None,
)
.await
.context("failed to detach server from builders")?;
@@ -139,7 +138,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from deployments")?;
@@ -148,7 +146,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from repos")?;
@@ -160,7 +157,6 @@ impl super::MonitorResource for Server {
"resolved": true,
"resolved_ts": monitor_timestamp()
} },
None,
)
.await
.context("failed to detach server from repos")?;

View File

@@ -213,12 +213,12 @@ async fn get_resource_sync_state_from_db(
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "ResourceSync",
"target.id": id,
"operation": "RunSync"
},
.find_one(doc! {
"target.type": "ResourceSync",
"target.id": id,
"operation": "RunSync"
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -198,11 +198,6 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
id: value.id,
name: value.name,
description: value.description,
// permissions: value
// .permissions
// .into_iter()
// .map(|(id, p)| (id, p.into()))
// .collect(),
updated_at: unix_from_monitor_ts(&value.updated_at)?,
tags: Vec::new(),
info: BuildInfo {
@@ -233,6 +228,7 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
build_path,
dockerfile_path,
build_args,
secret_args: Default::default(),
extra_args,
use_buildx,
labels: Default::default(),

View File

@@ -134,6 +134,7 @@ impl From<BuildConfig>
.into_iter()
.map(Into::into)
.collect(),
secret_args: Default::default(),
labels: value.labels.into_iter().map(Into::into).collect(),
extra_args: value.extra_args,
use_buildx: value.use_buildx,

View File

@@ -46,7 +46,7 @@ pub async fn migrate_users(
target_db
.users
.insert_many(users, None)
.insert_many(users)
.await
.context("failed to insert users on target")?;
@@ -91,7 +91,7 @@ pub async fn migrate_servers(
if !new_servers.is_empty() {
target_db
.servers
.insert_many(new_servers, None)
.insert_many(new_servers)
.await
.context("failed to insert servers on target")?;
}
@@ -99,7 +99,7 @@ pub async fn migrate_servers(
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.await
.context("failed to insert server permissions on target")?;
}
@@ -147,7 +147,7 @@ pub async fn migrate_deployments(
if !new_deployments.is_empty() {
target_db
.deployments
.insert_many(new_deployments, None)
.insert_many(new_deployments)
.await
.context("failed to insert deployments on target")?;
}
@@ -155,7 +155,7 @@ pub async fn migrate_deployments(
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.await
.context("failed to insert deployment permissions on target")?;
}
@@ -201,7 +201,7 @@ pub async fn migrate_builds(
if !new_builds.is_empty() {
target_db
.builds
.insert_many(new_builds, None)
.insert_many(new_builds)
.await
.context("failed to insert builds on target")?;
}
@@ -209,7 +209,7 @@ pub async fn migrate_builds(
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.await
.context("failed to insert build permissions on target")?;
}
@@ -239,7 +239,7 @@ pub async fn migrate_updates(
target_db
.updates
.insert_many(updates, None)
.insert_many(updates)
.await
.context("failed to insert updates on target")?;

View File

@@ -34,7 +34,6 @@ pub async fn migrate_deployments_in_place(
.update_one(
doc! { "name": &deployment.name },
doc! { "$set": to_document(&deployment)? },
None,
)
.await
.context("failed to insert deployments on target")?;
@@ -62,7 +61,6 @@ pub async fn migrate_builds_in_place(
.update_one(
doc! { "name": &build.name },
doc! { "$set": to_document(&build)? },
None,
)
.await
.context("failed to insert builds on target")?;

View File

@@ -1,41 +1,200 @@
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
server::docker_image::ImageSummary, update::Log,
};
use periphery_client::api::build::{
Build, GetImageList, PruneImages,
build::{Build, BuildConfig},
get_image_name, optional_string,
server::docker_image::ImageSummary,
to_monitor_name,
update::Log,
EnvironmentVar, Version,
};
use periphery_client::api::build::{self, GetImageList, PruneImages};
use resolver_api::Resolve;
use crate::{
docker::{self, client::docker_client},
config::periphery_config,
docker::docker_client,
helpers::{docker_login, parse_extra_args, parse_labels},
State,
};
impl Resolve<Build> for State {
#[instrument(
name = "Build",
skip(self, registry_token, replacers, aws_ecr)
)]
impl Resolve<build::Build> for State {
#[instrument(name = "Build", skip_all)]
async fn resolve(
&self,
Build {
build::Build {
build,
registry_token,
replacers,
aws_ecr,
}: Build,
registry_token,
replacers: core_replacers,
}: build::Build,
_: (),
) -> anyhow::Result<Vec<Log>> {
docker::build::build(
&build,
registry_token,
replacers,
let Build {
name,
config:
BuildConfig {
version,
skip_secret_interp,
build_path,
dockerfile_path,
build_args,
secret_args,
labels,
extra_args,
use_buildx,
image_registry,
..
},
..
} = &build;
let mut logs = Vec::new();
// Maybe docker login
let should_push = match docker_login(
image_registry,
registry_token.as_deref(),
aws_ecr.as_ref(),
)
.await
{
Ok(should_push) => should_push,
Err(e) => {
logs.push(Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
));
return Ok(logs);
}
};
let name = to_monitor_name(name);
// Get paths
let build_dir =
periphery_config().repo_dir.join(&name).join(build_path);
let dockerfile_path = match optional_string(dockerfile_path) {
Some(dockerfile_path) => dockerfile_path.to_owned(),
None => "Dockerfile".to_owned(),
};
// Get command parts
let image_name = get_image_name(&build, |_| aws_ecr)
.context("failed to make image name")?;
let build_args = parse_build_args(build_args);
let _secret_args =
parse_secret_args(secret_args, *skip_secret_interp)?;
let labels = parse_labels(labels);
let extra_args = parse_extra_args(extra_args);
let buildx = if *use_buildx { " buildx" } else { "" };
let image_tags = image_tags(&image_name, version);
let push_command = should_push
.then(|| {
format!(" && docker image push --all-tags {image_name}")
})
.unwrap_or_default();
// Construct command
let command = format!(
"cd {} && docker{buildx} build{build_args}{_secret_args}{extra_args}{labels}{image_tags} -f {dockerfile_path} .{push_command}",
build_dir.display()
);
if *skip_secret_interp {
let build_log =
run_monitor_command("docker build", command).await;
info!("finished building docker image");
logs.push(build_log);
} else {
// Interpolate any missing secrets
let (command, mut replacers) = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into docker build command",
)?;
replacers.extend(core_replacers);
let mut build_log =
run_monitor_command("docker build", command).await;
build_log.command =
svi::replace_in_string(&build_log.command, &replacers);
build_log.stdout =
svi::replace_in_string(&build_log.stdout, &replacers);
build_log.stderr =
svi::replace_in_string(&build_log.stderr, &replacers);
logs.push(build_log);
}
cleanup_secret_env_vars(secret_args);
Ok(logs)
}
}
fn image_tags(image_name: &str, version: &Version) -> String {
let Version { major, minor, .. } = version;
format!(
" -t {image_name}:latest -t {image_name}:{version} -t {image_name}:{major}.{minor} -t {image_name}:{major}",
)
}
fn parse_build_args(build_args: &[EnvironmentVar]) -> String {
build_args
.iter()
.map(|p| format!(" --build-arg {}=\"{}\"", p.variable, p.value))
.collect::<Vec<_>>()
.join("")
}
fn parse_secret_args(
secret_args: &[EnvironmentVar],
skip_secret_interp: bool,
) -> anyhow::Result<String> {
let periphery_config = periphery_config();
Ok(
secret_args
.iter()
.map(|EnvironmentVar { variable, value }| {
if variable.is_empty() {
return Err(anyhow!("secret variable cannot be empty string"))
} else if variable.contains('=') {
return Err(anyhow!("invalid variable {variable}. variable cannot contain '='"))
}
let value = if skip_secret_interp {
value.to_string()
} else {
svi::interpolate_variables(
value,
&periphery_config.secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate periphery secrets into build secrets",
)?.0
};
std::env::set_var(variable, value);
anyhow::Ok(format!(" --secret id={variable}"))
})
.collect::<anyhow::Result<Vec<_>>>()?
.join(""),
)
}
fn cleanup_secret_env_vars(secret_args: &[EnvironmentVar]) {
secret_args.iter().for_each(
|EnvironmentVar { variable, .. }| std::env::remove_var(variable),
)
}
//
impl Resolve<GetImageList> for State {
@@ -58,6 +217,7 @@ impl Resolve<PruneImages> for State {
_: PruneImages,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::build::prune_images().await)
let command = String::from("docker image prune -a -f");
Ok(run_monitor_command("prune images", command).await)
}
}

View File

@@ -1,13 +1,24 @@
use anyhow::anyhow;
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
deployment::{ContainerSummary, DockerContainerStats},
deployment::{
ContainerSummary, Conversion, Deployment, DeploymentConfig,
DeploymentImage, DockerContainerStats, RestartMode,
TerminationSignal,
},
to_monitor_name,
update::Log,
EnvironmentVar, SearchCombinator,
};
use periphery_client::api::container::*;
use resolver_api::Resolve;
use run_command::async_run_command;
use crate::{
docker::{self, client::docker_client},
config::periphery_config,
docker::docker_client,
helpers::{docker_login, parse_extra_args, parse_labels},
State,
};
@@ -34,10 +45,11 @@ impl Resolve<GetContainerLog> for State {
#[instrument(name = "GetContainerLog", level = "debug", skip(self))]
async fn resolve(
&self,
req: GetContainerLog,
GetContainerLog { name, tail }: GetContainerLog,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::container_log(&req.name, req.tail).await)
let command = format!("docker logs {name} --tail {tail}");
Ok(run_monitor_command("get container log", command).await)
}
}
@@ -59,12 +71,21 @@ impl Resolve<GetContainerLogSearch> for State {
}: GetContainerLogSearch,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::container_log_search(
&name, &terms, combinator, invert,
)
.await,
)
let maybe_invert = invert.then_some(" -v").unwrap_or_default();
let grep = match combinator {
SearchCombinator::Or => {
format!("grep{maybe_invert} -E '{}'", terms.join("|"))
}
SearchCombinator::And => {
format!(
"grep{maybe_invert} -P '^(?=.*{})'",
terms.join(")(?=.*")
)
}
};
let command =
format!("docker logs {name} --tail 5000 2>&1 | {grep}");
Ok(run_monitor_command("get container log grep", command).await)
}
}
@@ -82,8 +103,7 @@ impl Resolve<GetContainerStats> for State {
_: (),
) -> anyhow::Result<DockerContainerStats> {
let error = anyhow!("no stats matching {}", req.name);
let mut stats =
docker::container::container_stats(Some(req.name)).await?;
let mut stats = container_stats(Some(req.name)).await?;
let stats = stats.pop().ok_or(error)?;
Ok(stats)
}
@@ -102,7 +122,7 @@ impl Resolve<GetContainerStatsList> for State {
_: GetContainerStatsList,
_: (),
) -> anyhow::Result<Vec<DockerContainerStats>> {
docker::container::container_stats(None).await
container_stats(None).await
}
}
@@ -112,10 +132,16 @@ impl Resolve<StartContainer> for State {
#[instrument(name = "StartContainer", skip(self))]
async fn resolve(
&self,
req: StartContainer,
StartContainer { name }: StartContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::start_container(&req.name).await)
Ok(
run_monitor_command(
"docker start",
format!("docker start {name}"),
)
.await,
)
}
}
@@ -125,15 +151,26 @@ impl Resolve<StopContainer> for State {
#[instrument(name = "StopContainer", skip(self))]
async fn resolve(
&self,
req: StopContainer,
StopContainer { name, signal, time }: StopContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::stop_container(
&req.name, req.signal, req.time,
)
.await,
)
let command = stop_container_command(&name, signal, time);
let log = run_monitor_command("docker stop", command).await;
if log.stderr.contains("unknown flag: --signal") {
let command = stop_container_command(&name, None, time);
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
Ok(log)
} else {
Ok(log)
}
}
}
@@ -143,15 +180,31 @@ impl Resolve<RemoveContainer> for State {
#[instrument(name = "RemoveContainer", skip(self))]
async fn resolve(
&self,
req: RemoveContainer,
RemoveContainer { name, signal, time }: RemoveContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::stop_and_remove_container(
&req.name, req.signal, req.time,
)
.await,
)
let stop_command = stop_container_command(&name, signal, time);
let command =
format!("{stop_command} && docker container rm {name}");
let log =
run_monitor_command("docker stop and remove", command).await;
if log.stderr.contains("unknown flag: --signal") {
let stop_command = stop_container_command(&name, None, time);
let command =
format!("{stop_command} && docker container rm {name}");
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
Ok(log)
} else {
Ok(log)
}
}
}
@@ -161,16 +214,15 @@ impl Resolve<RenameContainer> for State {
#[instrument(name = "RenameContainer", skip(self))]
async fn resolve(
&self,
req: RenameContainer,
RenameContainer {
curr_name,
new_name,
}: RenameContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::rename_container(
&req.curr_name,
&req.new_name,
)
.await,
)
let new = to_monitor_name(&new_name);
let command = format!("docker rename {curr_name} {new}");
Ok(run_monitor_command("docker rename", command).await)
}
}
@@ -183,14 +235,18 @@ impl Resolve<PruneContainers> for State {
_: PruneContainers,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::prune_containers().await)
let command = String::from("docker container prune -f");
Ok(run_monitor_command("prune containers", command).await)
}
}
//
impl Resolve<Deploy> for State {
#[instrument(name = "Deploy", skip(self, replacers))]
#[instrument(
name = "Deploy",
skip(self, core_replacers, aws_ecr, registry_token)
)]
async fn resolve(
&self,
Deploy {
@@ -198,24 +254,206 @@ impl Resolve<Deploy> for State {
stop_signal,
stop_time,
registry_token,
replacers,
replacers: core_replacers,
aws_ecr,
}: Deploy,
_: (),
) -> anyhow::Result<Log> {
let res = docker::container::deploy(
&deployment,
stop_signal
.unwrap_or(deployment.config.termination_signal)
.into(),
stop_time
.unwrap_or(deployment.config.termination_timeout)
.into(),
registry_token,
replacers,
if let Err(e) = docker_login(
&deployment.config.image_registry,
registry_token.as_deref(),
aws_ecr.as_ref(),
)
.await;
Ok(res)
.await
{
return Ok(Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
));
}
let image = if let DeploymentImage::Image { image } =
&deployment.config.image
{
if image.is_empty() {
return Ok(Log::error(
"get image",
String::from("deployment does not have image attached"),
));
}
image
} else {
return Ok(Log::error(
"get image",
String::from("deployment does not have image attached"),
));
};
let _ = pull_image(image).await;
debug!("image pulled");
let _ = State
.resolve(
RemoveContainer {
name: deployment.name.clone(),
signal: stop_signal,
time: stop_time,
},
(),
)
.await;
debug!("container stopped and removed");
let command = docker_run_command(&deployment, image);
debug!("docker run command: {command}");
if deployment.config.skip_secret_interp {
Ok(run_monitor_command("docker run", command).await)
} else {
let command = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into docker run command",
);
if let Err(e) = command {
return Ok(Log::error("docker run", format!("{e:?}")));
}
let (command, mut replacers) = command.unwrap();
replacers.extend(core_replacers);
let mut log = run_monitor_command("docker run", command).await;
log.command = svi::replace_in_string(&log.command, &replacers);
log.stdout = svi::replace_in_string(&log.stdout, &replacers);
log.stderr = svi::replace_in_string(&log.stderr, &replacers);
Ok(log)
}
}
}
//
fn docker_run_command(
Deployment {
name,
config:
DeploymentConfig {
volumes,
ports,
network,
command,
restart,
environment,
labels,
extra_args,
..
},
..
}: &Deployment,
image: &str,
) -> String {
let name = to_monitor_name(name);
let ports = parse_conversions(ports, "-p");
let volumes = volumes.to_owned();
let volumes = parse_conversions(&volumes, "-v");
let network = parse_network(network);
let restart = parse_restart(restart);
let environment = parse_environment(environment);
let labels = parse_labels(labels);
let command = parse_command(command);
let extra_args = parse_extra_args(extra_args);
format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}")
}
fn parse_conversions(
conversions: &[Conversion],
flag: &str,
) -> String {
conversions
.iter()
.map(|p| format!(" {flag} {}:{}", p.local, p.container))
.collect::<Vec<_>>()
.join("")
}
fn parse_environment(environment: &[EnvironmentVar]) -> String {
environment
.iter()
.map(|p| format!(" --env {}=\"{}\"", p.variable, p.value))
.collect::<Vec<_>>()
.join("")
}
fn parse_network(network: &str) -> String {
format!(" --network {network}")
}
fn parse_restart(restart: &RestartMode) -> String {
let restart = match restart {
RestartMode::OnFailure => "on-failure:10".to_string(),
_ => restart.to_string(),
};
format!(" --restart {restart}")
}
fn parse_command(command: &str) -> String {
if command.is_empty() {
String::new()
} else {
format!(" {command}")
}
}
//
async fn container_stats(
container_name: Option<String>,
) -> anyhow::Result<Vec<DockerContainerStats>> {
let format = "--format \"{{ json . }}\"";
let container_name = match container_name {
Some(name) => format!(" {name}"),
None => "".to_string(),
};
let command =
format!("docker stats{container_name} --no-stream {format}");
let output = async_run_command(&command).await;
if output.success() {
let res = output
.stdout
.split('\n')
.filter(|e| !e.is_empty())
.map(|e| {
let parsed = serde_json::from_str(e)
.context(format!("failed at parsing entry {e}"))?;
Ok(parsed)
})
.collect::<anyhow::Result<Vec<DockerContainerStats>>>()?;
Ok(res)
} else {
Err(anyhow!("{}", output.stderr.replace('\n', "")))
}
}
#[instrument]
async fn pull_image(image: &str) -> Log {
let command = format!("docker pull {image}");
run_monitor_command("docker pull", command).await
}
fn stop_container_command(
container_name: &str,
signal: Option<TerminationSignal>,
time: Option<i32>,
) -> String {
let container_name = to_monitor_name(container_name);
let signal = signal
.map(|signal| format!(" --signal {signal}"))
.unwrap_or_default();
let time = time
.map(|time| format!(" --time {time}"))
.unwrap_or_default();
format!("docker stop{signal}{time} {container_name}")
}

View File

@@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize};
use crate::{
config::{accounts_response, secrets_response},
docker, State,
State,
};
mod build;
@@ -154,6 +154,7 @@ impl Resolve<PruneSystem> for State {
PruneSystem {}: PruneSystem,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::prune_system().await)
let command = String::from("docker system prune -a -f");
Ok(run_monitor_command("prune system", command).await)
}
}

View File

@@ -1,3 +1,4 @@
use command::run_monitor_command;
use monitor_client::entities::{
server::docker_network::DockerNetwork, update::Log,
};
@@ -6,10 +7,7 @@ use periphery_client::api::network::{
};
use resolver_api::Resolve;
use crate::{
docker::{self, client::docker_client},
State,
};
use crate::{docker::docker_client, State};
//
@@ -33,7 +31,12 @@ impl Resolve<CreateNetwork> for State {
CreateNetwork { name, driver }: CreateNetwork,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::network::create_network(&name, driver).await)
let driver = match driver {
Some(driver) => format!(" -d {driver}"),
None => String::new(),
};
let command = format!("docker network create{driver} {name}");
Ok(run_monitor_command("create network", command).await)
}
}
@@ -46,7 +49,8 @@ impl Resolve<DeleteNetwork> for State {
DeleteNetwork { name }: DeleteNetwork,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::network::delete_network(&name).await)
let command = format!("docker network rm {name}");
Ok(run_monitor_command("delete network", command).await)
}
}
@@ -59,6 +63,7 @@ impl Resolve<PruneNetworks> for State {
_: PruneNetworks,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::network::prune_networks().await)
let command = String::from("docker network prune -f");
Ok(run_monitor_command("prune networks", command).await)
}
}

View File

@@ -1,142 +0,0 @@
use anyhow::Context;
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
build::{Build, BuildConfig},
config::core::AwsEcrConfig,
get_image_name, optional_string, to_monitor_name,
update::Log,
EnvironmentVar, Version,
};
use crate::config::periphery_config;
use super::{docker_login, parse_extra_args, parse_labels};
#[instrument]
pub async fn prune_images() -> Log {
let command = String::from("docker image prune -a -f");
run_monitor_command("prune images", command).await
}
#[instrument(skip(registry_token, core_replacers))]
pub async fn build(
build: &Build,
registry_token: Option<String>,
core_replacers: Vec<(String, String)>,
aws_ecr: Option<&AwsEcrConfig>,
) -> anyhow::Result<Vec<Log>> {
let Build {
name,
config:
BuildConfig {
version,
skip_secret_interp,
build_path,
dockerfile_path,
build_args,
labels,
extra_args,
use_buildx,
image_registry,
..
},
..
} = build;
let mut logs = Vec::new();
// Maybe docker login
let should_push = match docker_login(
image_registry,
registry_token.as_deref(),
aws_ecr,
)
.await
{
Ok(should_push) => should_push,
Err(e) => {
logs.push(Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
));
return Ok(logs);
}
};
let name = to_monitor_name(name);
// Get paths
let build_dir =
periphery_config().repo_dir.join(&name).join(build_path);
let dockerfile_path = match optional_string(dockerfile_path) {
Some(dockerfile_path) => dockerfile_path.to_owned(),
None => "Dockerfile".to_owned(),
};
// Get command parts
let image_name = get_image_name(build, |_| aws_ecr.cloned())
.context("failed to make image name")?;
let build_args = parse_build_args(build_args);
let labels = parse_labels(labels);
let extra_args = parse_extra_args(extra_args);
let buildx = if *use_buildx { " buildx" } else { "" };
let image_tags = image_tags(&image_name, version);
let push_command = should_push
.then(|| format!(" && docker image push --all-tags {image_name}"))
.unwrap_or_default();
// Construct command
let command = format!(
"cd {} && docker{buildx} build{build_args}{extra_args}{labels}{image_tags} -f {dockerfile_path} .{push_command}",
build_dir.display()
);
if *skip_secret_interp {
let build_log =
run_monitor_command("docker build", command).await;
info!("finished building docker image");
logs.push(build_log);
} else {
// Interpolate any missing secrets
let (command, mut replacers) = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into docker build command",
)?;
replacers.extend(core_replacers);
let mut build_log =
run_monitor_command("docker build", command).await;
build_log.command =
svi::replace_in_string(&build_log.command, &replacers);
build_log.stdout =
svi::replace_in_string(&build_log.stdout, &replacers);
build_log.stderr =
svi::replace_in_string(&build_log.stderr, &replacers);
logs.push(build_log);
}
Ok(logs)
}
fn image_tags(image_name: &str, version: &Version) -> String {
let Version { major, minor, .. } = version;
format!(
" -t {image_name}:latest -t {image_name}:{version} -t {image_name}:{major}.{minor} -t {image_name}:{major}",
)
}
fn parse_build_args(build_args: &[EnvironmentVar]) -> String {
build_args
.iter()
.map(|p| format!(" --build-arg {}=\"{}\"", p.variable, p.value))
.collect::<Vec<_>>()
.join("")
}

View File

@@ -1,328 +0,0 @@
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
config::core::AwsEcrConfig,
deployment::{
Conversion, Deployment, DeploymentConfig, DeploymentImage,
DockerContainerStats, RestartMode, TerminationSignal,
},
to_monitor_name,
update::Log,
EnvironmentVar, SearchCombinator,
};
use run_command::async_run_command;
use crate::config::periphery_config;
use super::{docker_login, parse_extra_args, parse_labels};
#[instrument(level = "debug")]
pub async fn container_log(container_name: &str, tail: u64) -> Log {
let command = format!("docker logs {container_name} --tail {tail}");
run_monitor_command("get container log", command).await
}
#[instrument(level = "debug")]
pub async fn container_log_search(
container_name: &str,
terms: &[String],
combinator: SearchCombinator,
invert: bool,
) -> Log {
let maybe_invert = invert.then_some(" -v").unwrap_or_default();
let grep = match combinator {
SearchCombinator::Or => {
format!("grep{maybe_invert} -E '{}'", terms.join("|"))
}
SearchCombinator::And => {
format!(
"grep{maybe_invert} -P '^(?=.*{})'",
terms.join(")(?=.*")
)
}
};
let command =
format!("docker logs {container_name} --tail 5000 2>&1 | {grep}");
run_monitor_command("get container log grep", command).await
}
#[instrument(level = "debug")]
pub async fn container_stats(
container_name: Option<String>,
) -> anyhow::Result<Vec<DockerContainerStats>> {
let format = "--format \"{{ json . }}\"";
let container_name = match container_name {
Some(name) => format!(" {name}"),
None => "".to_string(),
};
let command =
format!("docker stats{container_name} --no-stream {format}");
let output = async_run_command(&command).await;
if output.success() {
let res = output
.stdout
.split('\n')
.filter(|e| !e.is_empty())
.map(|e| {
let parsed = serde_json::from_str(e)
.context(format!("failed at parsing entry {e}"))?;
Ok(parsed)
})
.collect::<anyhow::Result<Vec<DockerContainerStats>>>()?;
Ok(res)
} else {
Err(anyhow!("{}", output.stderr.replace('\n', "")))
}
}
#[instrument]
pub async fn prune_containers() -> Log {
let command = String::from("docker container prune -f");
run_monitor_command("prune containers", command).await
}
#[instrument]
pub async fn start_container(container_name: &str) -> Log {
let container_name = to_monitor_name(container_name);
let command = format!("docker start {container_name}");
run_monitor_command("docker start", command).await
}
#[instrument]
pub async fn stop_container(
container_name: &str,
signal: Option<TerminationSignal>,
time: Option<i32>,
) -> Log {
let command = stop_container_command(container_name, signal, time);
let log = run_monitor_command("docker stop", command).await;
if log.stderr.contains("unknown flag: --signal") {
let command = stop_container_command(container_name, None, time);
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
log
} else {
log
}
}
#[instrument]
pub async fn stop_and_remove_container(
container_name: &str,
signal: Option<TerminationSignal>,
time: Option<i32>,
) -> Log {
let stop_command =
stop_container_command(container_name, signal, time);
let command =
format!("{stop_command} && docker container rm {container_name}");
let log =
run_monitor_command("docker stop and remove", command).await;
if log.stderr.contains("unknown flag: --signal") {
let stop_command =
stop_container_command(container_name, None, time);
let command = format!(
"{stop_command} && docker container rm {container_name}"
);
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
log
} else {
log
}
}
fn stop_container_command(
container_name: &str,
signal: Option<TerminationSignal>,
time: Option<i32>,
) -> String {
let container_name = to_monitor_name(container_name);
let signal = signal
.map(|signal| format!(" --signal {signal}"))
.unwrap_or_default();
let time = time
.map(|time| format!(" --time {time}"))
.unwrap_or_default();
format!("docker stop{signal}{time} {container_name}")
}
#[instrument]
pub async fn rename_container(
curr_name: &str,
new_name: &str,
) -> Log {
let curr = to_monitor_name(curr_name);
let new = to_monitor_name(new_name);
let command = format!("docker rename {curr} {new}");
run_monitor_command("docker rename", command).await
}
#[instrument]
async fn pull_image(image: &str) -> Log {
let command = format!("docker pull {image}");
run_monitor_command("docker pull", command).await
}
#[instrument(skip(registry_token, core_replacers))]
pub async fn deploy(
deployment: &Deployment,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
registry_token: Option<String>,
core_replacers: Vec<(String, String)>,
aws_ecr: Option<&AwsEcrConfig>,
) -> Log {
if let Err(e) = docker_login(
&deployment.config.image_registry,
registry_token.as_deref(),
aws_ecr,
)
.await
{
return Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
);
}
let image = if let DeploymentImage::Image { image } =
&deployment.config.image
{
if image.is_empty() {
return Log::error(
"get image",
String::from("deployment does not have image attached"),
);
}
image
} else {
return Log::error(
"get image",
String::from("deployment does not have image attached"),
);
};
let _ = pull_image(image).await;
debug!("image pulled");
let _ = stop_and_remove_container(
&deployment.name,
stop_signal,
stop_time,
)
.await;
debug!("container stopped and removed");
let command = docker_run_command(deployment, image);
debug!("docker run command: {command}");
if deployment.config.skip_secret_interp {
run_monitor_command("docker run", command).await
} else {
let command = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context("failed to interpolate secrets into docker run command");
if let Err(e) = command {
return Log::error("docker run", format!("{e:?}"));
}
let (command, mut replacers) = command.unwrap();
replacers.extend(core_replacers);
let mut log = run_monitor_command("docker run", command).await;
log.command = svi::replace_in_string(&log.command, &replacers);
log.stdout = svi::replace_in_string(&log.stdout, &replacers);
log.stderr = svi::replace_in_string(&log.stderr, &replacers);
log
}
}
pub fn docker_run_command(
Deployment {
name,
config:
DeploymentConfig {
volumes,
ports,
network,
command,
restart,
environment,
labels,
extra_args,
..
},
..
}: &Deployment,
image: &str,
) -> String {
let name = to_monitor_name(name);
let ports = parse_conversions(ports, "-p");
let volumes = volumes.to_owned();
let volumes = parse_conversions(&volumes, "-v");
let network = parse_network(network);
let restart = parse_restart(restart);
let environment = parse_environment(environment);
let labels = parse_labels(labels);
let command = parse_command(command);
let extra_args = parse_extra_args(extra_args);
format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}")
}
fn parse_conversions(
conversions: &[Conversion],
flag: &str,
) -> String {
conversions
.iter()
.map(|p| format!(" {flag} {}:{}", p.local, p.container))
.collect::<Vec<_>>()
.join("")
}
fn parse_environment(environment: &[EnvironmentVar]) -> String {
environment
.iter()
.map(|p| format!(" --env {}=\"{}\"", p.variable, p.value))
.collect::<Vec<_>>()
.join("")
}
fn parse_network(network: &str) -> String {
format!(" --network {network}")
}
fn parse_restart(restart: &RestartMode) -> String {
let restart = match restart {
RestartMode::OnFailure => "on-failure:10".to_string(),
_ => restart.to_string(),
};
format!(" --restart {restart}")
}
fn parse_command(command: &str) -> String {
if command.is_empty() {
String::new()
} else {
format!(" {command}")
}
}

View File

@@ -1,121 +0,0 @@
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use monitor_client::entities::{
build::{CloudRegistryConfig, ImageRegistry},
config::core::AwsEcrConfig,
update::Log,
EnvironmentVar,
};
use run_command::async_run_command;
use crate::helpers::{get_docker_token, get_github_token};
pub mod build;
pub mod client;
pub mod container;
pub mod network;
/// Logs docker into the given image registry so the build result can
/// be pushed with authentication.
///
/// Returns whether the build result should be pushed after build:
/// `false` for [ImageRegistry::None], `true` for all other registries.
///
/// Errors when the registry account is not configured, the registry
/// token cannot be resolved, or the `docker login` command fails.
pub async fn docker_login(
  registry: &ImageRegistry,
  // For local token override from core.
  registry_token: Option<&str>,
  // For local config override from core.
  aws_ecr: Option<&AwsEcrConfig>,
) -> anyhow::Result<bool> {
  match registry {
    ImageRegistry::None(_) => Ok(false),
    ImageRegistry::DockerHub(CloudRegistryConfig {
      account, ..
    }) => {
      if account.is_empty() {
        return Err(anyhow!(
          "Must configure account for DockerHub registry"
        ));
      }
      // Prefer the token passed from core, fall back to local config.
      let registry_token = match registry_token {
        Some(token) => token,
        None => get_docker_token(account)?,
      };
      // NOTE(review): passing the token via `-p` exposes it in the local
      // process list; consider `--password-stdin` in the future.
      let log = async_run_command(&format!(
        "docker login -u {account} -p {registry_token}",
      ))
      .await;
      if log.success() {
        Ok(true)
      } else {
        Err(anyhow!(
          "dockerhub login error: stdout: {} | stderr: {}",
          log.stdout,
          log.stderr
        ))
      }
    }
    ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
      if account.is_empty() {
        return Err(anyhow!(
          "Must configure account for GithubContainerRegistry"
        ));
      }
      let registry_token = match registry_token {
        Some(token) => token,
        None => get_github_token(account)?,
      };
      let log = async_run_command(&format!(
        "docker login ghcr.io -u {account} -p {registry_token}",
      ))
      .await;
      if log.success() {
        Ok(true)
      } else {
        Err(anyhow!(
          "ghcr login error: stdout: {} | stderr: {}",
          log.stdout,
          log.stderr
        ))
      }
    }
    ImageRegistry::AwsEcr(label) => {
      let AwsEcrConfig { region, account_id } = aws_ecr
        .with_context(|| {
          format!("Could not find aws ecr config for label {label}")
        })?;
      // Ecr tokens must always come from core — there is no local fallback.
      let registry_token = registry_token
        .context("aws ecr build missing registry token from core")?;
      let log = async_run_command(&format!("docker login {account_id}.dkr.ecr.{region}.amazonaws.com -u AWS -p {registry_token}")).await;
      if log.success() {
        Ok(true)
      } else {
        Err(anyhow!(
          "aws ecr login error: stdout: {} | stderr: {}",
          log.stdout,
          log.stderr
        ))
      }
    }
    // Was `todo!()`, which panicked the periphery process at runtime
    // whenever a custom registry was configured. Surface a proper
    // error to the caller instead.
    ImageRegistry::Custom(_) => Err(anyhow!(
      "custom image registries are not yet supported"
    )),
  }
}
/// Joins extra cli args with spaces and prefixes the result with a
/// single space, so it can be appended directly to a docker command.
/// Returns an empty string when there are no args.
pub fn parse_extra_args(extra_args: &[String]) -> String {
  let joined = extra_args.join(" ");
  if joined.is_empty() {
    joined
  } else {
    format!(" {joined}")
  }
}
/// Renders each label as ` --label KEY="VALUE"` and concatenates
/// the results into one argument string.
pub fn parse_labels(labels: &[EnvironmentVar]) -> String {
  let mut out = String::new();
  for label in labels {
    out.push_str(&format!(
      " --label {}=\"{}\"",
      label.variable, label.value
    ));
  }
  out
}
/// Removes all unused docker data via `docker system prune -a -f`,
/// returning the log of the command run.
#[instrument]
pub async fn prune_system() -> Log {
  let command = String::from("docker system prune -a -f");
  run_monitor_command("prune system", command).await
}

View File

@@ -1,27 +0,0 @@
use command::run_monitor_command;
use monitor_client::entities::update::Log;
/// Creates a docker network with the given name, passing ` -d {driver}`
/// only when a driver is provided. Returns the log of the command run.
#[instrument]
pub async fn create_network(
  name: &str,
  driver: Option<String>,
) -> Log {
  // Render the optional driver flag; empty when not provided.
  let driver = driver
    .map(|driver| format!(" -d {driver}"))
    .unwrap_or_default();
  let command = format!("docker network create{driver} {name}");
  run_monitor_command("create network", command).await
}
/// Removes the named docker network via `docker network rm`,
/// returning the log of the command run.
#[instrument]
pub async fn delete_network(name: &str) -> Log {
  let command = format!("docker network rm {name}");
  run_monitor_command("delete network", command).await
}
/// Removes all unused docker networks via `docker network prune -f`,
/// returning the log of the command run.
#[instrument]
pub async fn prune_networks() -> Log {
  let command = String::from("docker network prune -f");
  run_monitor_command("prune networks", command).await
}

View File

@@ -1,69 +0,0 @@
use std::net::SocketAddr;
use anyhow::{anyhow, Context};
use axum::{
body::Body,
extract::ConnectInfo,
http::{Request, StatusCode},
middleware::Next,
response::Response,
};
use serror::{AddStatusCode, AddStatusCodeError};
use crate::config::periphery_config;
/// Axum middleware: rejects the request with 401 UNAUTHORIZED unless
/// its `authorization` header matches one of the configured passkeys.
/// An empty passkey list disables the check entirely.
#[instrument(level = "debug")]
pub async fn guard_request_by_passkey(
  req: Request<Body>,
  next: Next,
) -> serror::Result<Response> {
  // No passkeys configured -> the guard is a no-op.
  if periphery_config().passkeys.is_empty() {
    return Ok(next.run(req).await);
  }
  let Some(req_passkey) = req.headers().get("authorization") else {
    return Err(
      anyhow!("request was not sent with passkey")
        .status_code(StatusCode::UNAUTHORIZED),
    );
  };
  // Non-utf8 header values are rejected rather than compared.
  let req_passkey = req_passkey
    .to_str()
    .context("failed to convert passkey to str")
    .status_code(StatusCode::UNAUTHORIZED)?;
  if periphery_config()
    .passkeys
    .iter()
    .any(|passkey| passkey == req_passkey)
  {
    Ok(next.run(req).await)
  } else {
    Err(
      anyhow!("request passkey invalid")
        .status_code(StatusCode::UNAUTHORIZED),
    )
  }
}
/// Axum middleware: rejects the request with 401 UNAUTHORIZED unless
/// the peer ip is in the configured `allowed_ips` list.
/// An empty list disables the check entirely.
#[instrument(level = "debug")]
pub async fn guard_request_by_ip(
  req: Request<Body>,
  next: Next,
) -> serror::Result<Response> {
  if periphery_config().allowed_ips.is_empty() {
    return Ok(next.run(req).await);
  }
  // ConnectInfo is only present when the server is built with
  // `into_make_service_with_connect_info::<SocketAddr>()`.
  let ConnectInfo(socket_addr) = req
    .extensions()
    .get::<ConnectInfo<SocketAddr>>()
    .context("could not get ConnectionInfo of request")
    .status_code(StatusCode::UNAUTHORIZED)?;
  let ip = socket_addr.ip();
  if periphery_config().allowed_ips.contains(&ip) {
    Ok(next.run(req).await)
  } else {
    Err(
      anyhow!("requesting ip {ip} not allowed")
        .status_code(StatusCode::UNAUTHORIZED),
    )
  }
}

View File

@@ -1,53 +0,0 @@
use std::time::Instant;
use anyhow::{anyhow, Context};
use axum_extra::{headers::ContentType, TypedHeader};
use resolver_api::Resolver;
use serror::Json;
use uuid::Uuid;
use crate::State;
/// Top-level request handler: spawns resolution of the request on a
/// separate tokio task and responds with the serialized result as json.
pub async fn handler(
  Json(request): Json<crate::api::PeripheryRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
  // Tag each request with an id so spawn / resolve errors can be
  // correlated in the logs.
  let req_id = Uuid::new_v4();
  let res = tokio::spawn(task(req_id, request))
    .await
    .context("task handler spawn error");
  if let Err(e) = &res {
    warn!("request {req_id} spawn error: {e:#}");
  }
  // `res??` surfaces both the join error and the inner task error.
  Ok((TypedHeader(ContentType::json()), res??))
}
/// Resolves a single periphery api request, logging any error and
/// the time taken to resolve.
#[instrument(name = "PeripheryHandler")]
async fn task(
  req_id: Uuid,
  request: crate::api::PeripheryRequest,
) -> anyhow::Result<String> {
  let timer = Instant::now();
  let res =
    State
      .resolve_request(request, ())
      .await
      // Flatten the resolver error into anyhow, distinguishing
      // serialization failures from handler failures.
      .map_err(|e| match e {
        resolver_api::Error::Serialization(e) => {
          anyhow!("{e:?}").context("response serialization error")
        }
        resolver_api::Error::Inner(e) => e,
      });
  if let Err(e) = &res {
    warn!("request {req_id} error: {e:#}");
  }
  let elapsed = timer.elapsed();
  debug!("request {req_id} | resolve time: {elapsed:?}");
  res
}

View File

@@ -1,4 +1,10 @@
use anyhow::Context;
use anyhow::{anyhow, Context};
use monitor_client::entities::{
build::{CloudRegistryConfig, ImageRegistry},
config::core::AwsEcrConfig,
EnvironmentVar,
};
use run_command::async_run_command;
use crate::config::periphery_config;
@@ -19,3 +25,103 @@ pub fn get_docker_token(
.get(docker_account)
.with_context(|| format!("did not find token in config for docker account {docker_account}"))
}
/// Joins extra cli args with spaces and prefixes the result with a
/// single space, so it can be appended directly to a docker command.
/// Returns an empty string when there are no args.
pub fn parse_extra_args(extra_args: &[String]) -> String {
  let joined = extra_args.join(" ");
  if joined.is_empty() {
    joined
  } else {
    format!(" {joined}")
  }
}
/// Renders each label as ` --label KEY="VALUE"` and concatenates
/// the results into one argument string.
pub fn parse_labels(labels: &[EnvironmentVar]) -> String {
  let mut out = String::new();
  for label in labels {
    out.push_str(&format!(
      " --label {}=\"{}\"",
      label.variable, label.value
    ));
  }
  out
}
/// Logs docker into the given image registry so the build result can
/// be pushed with authentication.
///
/// Returns whether the build result should be pushed after build:
/// `false` for [ImageRegistry::None], `true` for all other registries.
///
/// Errors when the registry account is not configured, the registry
/// token cannot be resolved, or the `docker login` command fails.
#[instrument(skip(registry_token))]
pub async fn docker_login(
  registry: &ImageRegistry,
  // For local token override from core.
  registry_token: Option<&str>,
  // For local config override from core.
  aws_ecr: Option<&AwsEcrConfig>,
) -> anyhow::Result<bool> {
  match registry {
    ImageRegistry::None(_) => Ok(false),
    ImageRegistry::DockerHub(CloudRegistryConfig {
      account, ..
    }) => {
      if account.is_empty() {
        return Err(anyhow!(
          "Must configure account for DockerHub registry"
        ));
      }
      // Prefer the token passed from core, fall back to local config.
      let registry_token = match registry_token {
        Some(token) => token,
        None => get_docker_token(account)?,
      };
      // NOTE(review): passing the token via `-p` exposes it in the local
      // process list; consider `--password-stdin` in the future.
      let log = async_run_command(&format!(
        "docker login -u {account} -p {registry_token}",
      ))
      .await;
      if log.success() {
        Ok(true)
      } else {
        Err(anyhow!(
          "dockerhub login error: stdout: {} | stderr: {}",
          log.stdout,
          log.stderr
        ))
      }
    }
    ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
      if account.is_empty() {
        return Err(anyhow!(
          "Must configure account for GithubContainerRegistry"
        ));
      }
      let registry_token = match registry_token {
        Some(token) => token,
        None => get_github_token(account)?,
      };
      let log = async_run_command(&format!(
        "docker login ghcr.io -u {account} -p {registry_token}",
      ))
      .await;
      if log.success() {
        Ok(true)
      } else {
        Err(anyhow!(
          "ghcr login error: stdout: {} | stderr: {}",
          log.stdout,
          log.stderr
        ))
      }
    }
    ImageRegistry::AwsEcr(label) => {
      let AwsEcrConfig { region, account_id } = aws_ecr
        .with_context(|| {
          format!("Could not find aws ecr config for label {label}")
        })?;
      // Ecr tokens must always come from core — there is no local fallback.
      let registry_token = registry_token
        .context("aws ecr build missing registry token from core")?;
      let log = async_run_command(&format!("docker login {account_id}.dkr.ecr.{region}.amazonaws.com -u AWS -p {registry_token}")).await;
      if log.success() {
        Ok(true)
      } else {
        Err(anyhow!(
          "aws ecr login error: stdout: {} | stderr: {}",
          log.stdout,
          log.stderr
        ))
      }
    }
    // Was `todo!()`, which panicked the periphery process at runtime
    // whenever a custom registry was configured. Surface a proper
    // error to the caller instead.
    ImageRegistry::Custom(_) => Err(anyhow!(
      "custom image registries are not yet supported"
    )),
  }
}

View File

@@ -4,14 +4,12 @@ extern crate tracing;
use std::{net::SocketAddr, str::FromStr};
use anyhow::Context;
use axum::{middleware, routing::post, Router};
mod api;
mod config;
mod docker;
mod guard;
mod handler;
mod helpers;
mod router;
mod stats;
struct State;
@@ -29,20 +27,16 @@ async fn app() -> anyhow::Result<()> {
SocketAddr::from_str(&format!("0.0.0.0:{}", config.port))
.context("failed to parse socket addr")?;
let app = Router::new()
.route("/", post(handler::handler))
.layer(middleware::from_fn(guard::guard_request_by_ip))
.layer(middleware::from_fn(guard::guard_request_by_passkey));
info!("starting server on {}", socket_addr);
let listener = tokio::net::TcpListener::bind(&socket_addr)
.await
.context("failed to bind tcp listener")?;
info!("monitor core started on {}", socket_addr);
axum::serve(
listener,
app.into_make_service_with_connect_info::<SocketAddr>(),
router::router()
.into_make_service_with_connect_info::<SocketAddr>(),
)
.await?;

125
bin/periphery/src/router.rs Normal file
View File

@@ -0,0 +1,125 @@
use std::{net::SocketAddr, time::Instant};
use anyhow::{anyhow, Context};
use axum::{
body::Body,
extract::ConnectInfo,
http::{Request, StatusCode},
middleware::{self, Next},
response::Response,
routing::post,
Router,
};
use axum_extra::{headers::ContentType, TypedHeader};
use resolver_api::Resolver;
use serror::{AddStatusCode, AddStatusCodeError, Json};
use uuid::Uuid;
use crate::{config::periphery_config, State};
/// Builds the periphery router: a single `POST /` endpoint, with
/// every request passing through the passkey and ip-allowlist
/// guards before reaching the handler.
pub fn router() -> Router {
  Router::new()
    .route("/", post(handler))
    .layer(middleware::from_fn(guard_request_by_ip))
    .layer(middleware::from_fn(guard_request_by_passkey))
}
/// Handles `POST /`: spawns resolution of the request on a separate
/// tokio task and responds with the serialized result as json.
async fn handler(
  Json(request): Json<crate::api::PeripheryRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
  // Tag each request with an id so spawn / resolve errors can be
  // correlated in the logs.
  let req_id = Uuid::new_v4();
  let res = tokio::spawn(task(req_id, request))
    .await
    .context("task handler spawn error");
  if let Err(e) = &res {
    warn!("request {req_id} spawn error: {e:#}");
  }
  // `res??` surfaces both the join error and the inner task error.
  Ok((TypedHeader(ContentType::json()), res??))
}
/// Resolves a single periphery api request, logging any error and
/// the time taken to resolve.
#[instrument(name = "PeripheryHandler")]
async fn task(
  req_id: Uuid,
  request: crate::api::PeripheryRequest,
) -> anyhow::Result<String> {
  let timer = Instant::now();
  let res =
    State
      .resolve_request(request, ())
      .await
      // Flatten the resolver error into anyhow, distinguishing
      // serialization failures from handler failures.
      .map_err(|e| match e {
        resolver_api::Error::Serialization(e) => {
          anyhow!("{e:?}").context("response serialization error")
        }
        resolver_api::Error::Inner(e) => e,
      });
  if let Err(e) = &res {
    warn!("request {req_id} error: {e:#}");
  }
  let elapsed = timer.elapsed();
  debug!("request {req_id} | resolve time: {elapsed:?}");
  res
}
/// Axum middleware: rejects the request with 401 UNAUTHORIZED unless
/// its `authorization` header matches one of the configured passkeys.
/// An empty passkey list disables the check entirely.
#[instrument(level = "debug")]
async fn guard_request_by_passkey(
  req: Request<Body>,
  next: Next,
) -> serror::Result<Response> {
  // No passkeys configured -> the guard is a no-op.
  if periphery_config().passkeys.is_empty() {
    return Ok(next.run(req).await);
  }
  let Some(req_passkey) = req.headers().get("authorization") else {
    return Err(
      anyhow!("request was not sent with passkey")
        .status_code(StatusCode::UNAUTHORIZED),
    );
  };
  // Non-utf8 header values are rejected rather than compared.
  let req_passkey = req_passkey
    .to_str()
    .context("failed to convert passkey to str")
    .status_code(StatusCode::UNAUTHORIZED)?;
  if periphery_config()
    .passkeys
    .iter()
    .any(|passkey| passkey == req_passkey)
  {
    Ok(next.run(req).await)
  } else {
    Err(
      anyhow!("request passkey invalid")
        .status_code(StatusCode::UNAUTHORIZED),
    )
  }
}
/// Axum middleware: rejects the request with 401 UNAUTHORIZED unless
/// the peer ip is in the configured `allowed_ips` list.
/// An empty list disables the check entirely.
#[instrument(level = "debug")]
async fn guard_request_by_ip(
  req: Request<Body>,
  next: Next,
) -> serror::Result<Response> {
  if periphery_config().allowed_ips.is_empty() {
    return Ok(next.run(req).await);
  }
  // ConnectInfo is only present when the server is built with
  // `into_make_service_with_connect_info::<SocketAddr>()`.
  let ConnectInfo(socket_addr) = req
    .extensions()
    .get::<ConnectInfo<SocketAddr>>()
    .context("could not get ConnectionInfo of request")
    .status_code(StatusCode::UNAUTHORIZED)?;
  let ip = socket_addr.ip();
  if periphery_config().allowed_ips.contains(&ip) {
    Ok(next.run(req).await)
  } else {
    Err(
      anyhow!("requesting ip {ip} not allowed")
        .status_code(StatusCode::UNAUTHORIZED),
    )
  }
}

View File

@@ -59,7 +59,7 @@ pub struct DeleteAlerter {
/// Update the alerter at the given id, and return the updated alerter. Response: [Alerter].
///
/// Note. This method updates only the fields which are set in the [PartialAlerterConfig],
/// Note. This method updates only the fields which are set in the [PartialAlerterConfig][crate::entities::alerter::PartialAlerterConfig],
/// effectively merging diffs into the final document. This is helpful when multiple users are using
/// the same resources concurrently by ensuring no unintentional
/// field changes occur from out of date local state.

View File

@@ -142,7 +142,9 @@ pub struct BuildConfig {
#[builder(default)]
pub extra_args: Vec<String>,
/// Docker build arguments
/// Docker build arguments.
///
/// These values are visible in the final image by running `docker inspect`.
#[serde(
default,
deserialize_with = "super::env_vars_deserializer"
@@ -154,6 +156,27 @@ pub struct BuildConfig {
#[builder(default)]
pub build_args: Vec<EnvironmentVar>,
/// Secret arguments.
///
/// These values remain hidden in the final image by using
  /// docker secret mounts. See <https://docs.docker.com/build/building/secrets>.
///
/// The values can be used in RUN commands:
  /// ```sh
  /// RUN --mount=type=secret,id=SECRET_KEY \
  ///   SECRET_KEY=$(cat /run/secrets/SECRET_KEY) ...
  /// ```
#[serde(
default,
deserialize_with = "super::env_vars_deserializer"
)]
#[partial_attr(serde(
default,
deserialize_with = "super::option_env_vars_deserializer"
))]
#[builder(default)]
pub secret_args: Vec<EnvironmentVar>,
/// Docker labels
#[serde(
default,
@@ -203,6 +226,7 @@ impl Default for BuildConfig {
build_path: default_build_path(),
dockerfile_path: default_dockerfile_path(),
build_args: Default::default(),
secret_args: Default::default(),
labels: Default::default(),
extra_args: Default::default(),
use_buildx: Default::default(),

View File

@@ -387,8 +387,25 @@ export interface BuildConfig {
webhook_enabled: boolean;
/** Any extra docker cli arguments to be included in the build command */
extra_args?: string[];
/** Docker build arguments */
/**
* Docker build arguments.
*
* These values are visible in the final image by running `docker inspect`.
*/
build_args?: EnvironmentVar[] | string;
/**
* Secret arguments.
*
* These values remain hidden in the final image by using
* docker secret mounts. See `<https://docs.docker.com/build/building/secrets>`.
*
* To use the values, add commands like this in the Dockerfile:
* ```
* RUN --mount=type=secret,id=SECRET_KEY \
* SECRET_VALUE=$(cat /run/secrets/SECRET_KEY) ...
* ```
*/
secret_args?: EnvironmentVar[] | string;
/** Docker labels */
labels?: EnvironmentVar[] | string;
}
@@ -2993,7 +3010,7 @@ export interface DeleteAlerter {
/**
* Update the alerter at the given id, and return the updated alerter. Response: [Alerter].
*
* Note. This method updates only the fields which are set in the [PartialAlerterConfig],
* Note. This method updates only the fields which are set in the [PartialAlerterConfig][crate::entities::alerter::PartialAlerterConfig],
* effectively merging diffs into the final document. This is helpful when multiple users are using
* the same resources concurrently by ensuring no unintentional
* field changes occur from out of date local state.

View File

@@ -267,7 +267,32 @@ export const BuildConfig = ({
const args =
typeof vars === "object" ? env_to_text(vars) : vars;
return (
<BuildArgs args={args ?? ""} set={set} disabled={disabled} />
<Args
type="build"
args={args ?? ""}
set={set}
disabled={disabled}
/>
);
},
skip_secret_interp: true,
},
},
],
"Secret Args": [
{
label: "Secret Args",
components: {
secret_args: (vars, set) => {
const args =
typeof vars === "object" ? env_to_text(vars) : vars;
return (
<Args
type="secret"
args={args ?? ""}
set={set}
disabled={disabled}
/>
);
},
skip_secret_interp: true,
@@ -279,17 +304,19 @@ export const BuildConfig = ({
);
};
const BuildArgs = ({
const Args = ({
type,
args,
set,
disabled,
}: {
type: "build" | "secret";
args: string;
set: (input: Partial<Types.BuildConfig>) => void;
disabled: boolean;
}) => {
const ref = createRef<HTMLTextAreaElement>();
const setArgs = (build_args: string) => set({ build_args });
const setArgs = (args: string) => set({ [`${type}_args`]: args });
return (
<ConfigItem className="flex-col gap-4 items-start">

View File

@@ -67,7 +67,7 @@ pub async fn pull(
logs
}
#[tracing::instrument(skip(repo_dir, github_token))]
#[tracing::instrument(skip(github_token))]
pub async fn clone<T>(
clone_args: T,
repo_dir: &Path,