Compare commits

...

13 Commits

Author SHA1 Message Date
Maxwell Becker
aea5441466 1.16.9 (#172)
* BatchDestroyDeployment

* periphery image pull api

* Add Pull apis

* Add PullStack / PullDeployment

* improve init deploy from container

* stacks + deployments update_available source

* Fix deploy / destroy stack service

* updates available indicator

* add poll for updates and auto update options

* use interval to handle waiting between resource refresh

* stack auto update deploy whole stack

* format

* clean up the docs

* update available alerts

* update alerting format

* fix most clippy
2024-11-04 20:28:31 -08:00
mbecker20
97ced3b2cb frontend allow Alerter configure StackStateChanged, include Stacks and Repos in whitelist 2024-11-02 21:00:38 -04:00
Maxwell Becker
1f79987c58 1.16.8 (#170)
* update configs

* bump to 1.16.8
2024-11-01 14:35:12 -07:00
Maxwell Becker
e859a919c5 1.16.8 (#169)
* use this to extract from path

* Fix references to __ALL__
2024-11-01 14:33:41 -07:00
mbecker20
2a1270dd74 webhook check will return better status codes 2024-11-01 15:57:36 -04:00
Maxwell Becker
f5a59b0333 1.16.7 (#167)
* 1.16.7

* increase builder max poll to allow User Data more time to setup periphery

* rework to KOMODO_OIDC_REDIRECT_HOST
2024-10-31 21:06:01 -07:00
mbecker20
cacea235f9 replace networks empty with network_mode, replace container: network mode 2024-10-30 02:58:27 -04:00
mbecker20
54ba31dca9 gen ts types 2024-10-30 02:18:57 -04:00
Maxwell Becker
17d7ecb419 1.16.6 (#163)
* remove instrument from validate_cancel_build

* use type safe AllResources map - Action not showing omnisearch

* Stack support replicated services

* server docker nested tables

* fix container networks which use network of another container

* bump version

* add 'address' to ServerListItemInfo

* secrets list on variables page wraps

* fix user data script

* update default template user data

* improve sidebar layout styling

* fix network names shown on containers

* improve stack service / container page

* deleted resource log records Toml backup for later reference

* align all the tables

* add Url Builder type
2024-10-29 23:17:10 -07:00
mbecker20
38f3448790 add Procedures and Actions page 2024-10-28 00:57:42 -04:00
Maxwell Becker
ec88a6fa5a 1.16.5 (#159)
* repo pull lock

* implement BatchRunAction

* other batch methods
2024-10-27 20:56:56 -07:00
mbecker20
3820cd0ca2 make Build Organization configurable with custom value 2024-10-27 14:42:06 -04:00
mbecker20
419aa87bbb update resource toml examples to latest standard 2024-10-26 16:22:12 -04:00
139 changed files with 6585 additions and 2097 deletions

Cargo.lock generated
View File

@@ -41,7 +41,7 @@ dependencies = [
[[package]]
name = "alerter"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"axum",
@@ -120,9 +120,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.90"
version = "1.0.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95"
checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8"
[[package]]
name = "arc-swap"
@@ -201,9 +201,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "aws-config"
version = "1.5.8"
version = "1.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7198e6f03240fdceba36656d8be440297b6b82270325908c7381f37d826a74f6"
checksum = "2d6448cfb224dd6a9b9ac734f58622dd0d4751f3589f3b777345745f46b2eb14"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -268,9 +268,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ec2"
version = "1.79.0"
version = "1.83.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95559916ae9d6ef69b104546098a4b4c57082db5d11571917916e9a69234c6ea"
checksum = "59ef9cdd731373735434b79a33fd1049525d78674c49b1e278d56544e388fe01"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -292,9 +292,9 @@ dependencies = [
[[package]]
name = "aws-sdk-sso"
version = "1.46.0"
version = "1.47.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dc2faec3205d496c7e57eff685dd944203df7ce16a4116d0281c44021788a7b"
checksum = "a8776850becacbd3a82a4737a9375ddb5c6832a51379f24443a98e61513f852c"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -314,9 +314,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ssooidc"
version = "1.47.0"
version = "1.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c93c241f52bc5e0476e259c953234dab7e2a35ee207ee202e86c0095ec4951dc"
checksum = "0007b5b8004547133319b6c4e87193eee2a0bcb3e4c18c75d09febe9dab7b383"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -336,9 +336,9 @@ dependencies = [
[[package]]
name = "aws-sdk-sts"
version = "1.46.0"
version = "1.47.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b259429be94a3459fa1b00c5684faee118d74f9577cc50aebadc36e507c63b5f"
checksum = "9fffaa356e7f1c725908b75136d53207fa714e348f365671df14e95a60530ad3"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -432,9 +432,9 @@ dependencies = [
[[package]]
name = "aws-smithy-runtime"
version = "1.7.2"
version = "1.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a065c0fe6fdbdf9f11817eb68582b2ab4aff9e9c39e986ae48f7ec576c6322db"
checksum = "be28bd063fa91fd871d131fc8b68d7cd4c5fa0869bea68daca50dcb1cbd76be2"
dependencies = [
"aws-smithy-async",
"aws-smithy-http",
@@ -476,9 +476,9 @@ dependencies = [
[[package]]
name = "aws-smithy-types"
version = "1.2.7"
version = "1.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147100a7bea70fa20ef224a6bad700358305f5dc0f84649c53769761395b355b"
checksum = "07c9cdc179e6afbf5d391ab08c85eac817b51c87e1892a5edb5f7bbdc64314b4"
dependencies = [
"base64-simd",
"bytes",
@@ -845,6 +845,14 @@ dependencies = [
"either",
]
[[package]]
name = "cache"
version = "1.16.9"
dependencies = [
"anyhow",
"tokio",
]
[[package]]
name = "cc"
version = "1.1.30"
@@ -943,7 +951,7 @@ dependencies = [
[[package]]
name = "command"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"komodo_client",
"run_command",
@@ -1355,7 +1363,7 @@ dependencies = [
[[package]]
name = "environment_file"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"thiserror",
]
@@ -1439,7 +1447,7 @@ dependencies = [
[[package]]
name = "formatting"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"serror",
]
@@ -1571,9 +1579,10 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "git"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"cache",
"command",
"formatting",
"komodo_client",
@@ -2191,7 +2200,7 @@ dependencies = [
[[package]]
name = "komodo_cli"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"clap",
@@ -2207,7 +2216,7 @@ dependencies = [
[[package]]
name = "komodo_client"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2238,7 +2247,7 @@ dependencies = [
[[package]]
name = "komodo_core"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2249,6 +2258,7 @@ dependencies = [
"axum-server",
"base64 0.22.1",
"bcrypt",
"cache",
"command",
"dashmap",
"derive_variants",
@@ -2292,11 +2302,12 @@ dependencies = [
"typeshare",
"urlencoding",
"uuid",
"wildcard",
]
[[package]]
name = "komodo_periphery"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2304,6 +2315,7 @@ dependencies = [
"axum-extra",
"axum-server",
"bollard",
"cache",
"clap",
"command",
"derive_variants",
@@ -2383,7 +2395,7 @@ dependencies = [
[[package]]
name = "logger"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"komodo_client",
@@ -3089,7 +3101,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "periphery_client"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"komodo_client",
@@ -3307,9 +3319,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
@@ -3993,9 +4005,9 @@ dependencies = [
[[package]]
name = "serror"
version = "0.4.6"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8f432d878d404110352cfbaa031d8a6878a166cb7f50e00ab87d0508f8f68a0"
checksum = "715a997753611604c722411afbe11f83a89e00e39323dc9016db96a86cc04fc8"
dependencies = [
"anyhow",
"axum",
@@ -4327,18 +4339,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.64"
version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84"
checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.64"
version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602"
dependencies = [
"proc-macro2",
"quote",
@@ -4791,9 +4803,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "typeshare"
version = "1.0.3"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04f17399b76c2e743d58eac0635d7686e9c00f48cd4776f00695d9882a7d3187"
checksum = "19be0f411120091e76e13e5a0186d8e2bcc3e7e244afdb70152197f1a8486ceb"
dependencies = [
"chrono",
"serde",
@@ -4864,7 +4876,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "update_logger"
version = "1.16.4"
version = "1.16.9"
dependencies = [
"anyhow",
"komodo_client",
@@ -5073,6 +5085,15 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311"
[[package]]
name = "wildcard"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36241ad0795516b55e3b60e55c7f979d4f324e4aaea4c70d56b548b9164ee4d2"
dependencies = [
"thiserror",
]
[[package]]
name = "winapi"
version = "0.3.9"

View File

@@ -9,7 +9,7 @@ members = [
]
[workspace.package]
version = "1.16.4"
version = "1.16.9"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -28,11 +28,12 @@ environment_file = { path = "lib/environment_file" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
logger = { path = "lib/logger" }
cache = { path = "lib/cache" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.4.6", default-features = false }
serror = { version = "0.4.7", default-features = false }
slack = { version = "0.2.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
@@ -69,8 +70,8 @@ serde_yaml = "0.9.34"
toml = "0.8.19"
# ERROR
anyhow = "1.0.90"
thiserror = "1.0.64"
anyhow = "1.0.91"
thiserror = "1.0.65"
# LOGGING
opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio"] }
@@ -104,14 +105,15 @@ bollard = "0.17.1"
sysinfo = "0.32.0"
# CLOUD
aws-config = "1.5.8"
aws-sdk-ec2 = "1.77.0"
aws-config = "1.5.9"
aws-sdk-ec2 = "1.83.0"
# MISC
derive_builder = "0.20.2"
typeshare = "1.0.3"
typeshare = "1.0.4"
octorust = "0.7.0"
dashmap = "6.1.0"
wildcard = "0.2.0"
colored = "2.1.0"
regex = "1.11.0"
regex = "1.11.1"
bson = "2.13.0"

View File

@@ -1,13 +1,21 @@
use std::time::Duration;
use colored::Colorize;
use komodo_client::api::execute::Execution;
use komodo_client::{
api::execute::{BatchExecutionResponse, Execution},
entities::update::Update,
};
use crate::{
helpers::wait_for_enter,
state::{cli_args, komodo_client},
};
pub enum ExecutionResult {
Single(Update),
Batch(BatchExecutionResponse),
}
pub async fn run(execution: Execution) -> anyhow::Result<()> {
if matches!(execution, Execution::None(_)) {
println!("Got 'none' execution. Doing nothing...");
@@ -24,18 +32,33 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunAction(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchRunAction(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunProcedure(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchRunProcedure(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchRunBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Deploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDeploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -54,15 +77,27 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::DestroyDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDestroyDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchCloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchPullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchBuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelRepoBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -138,9 +173,18 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::DeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStackIfChanged(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDeployStackIfChanged(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -159,6 +203,9 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::DestroyStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDestroyStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Sleep(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -171,144 +218,242 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
info!("Running Execution...");
let res = match execution {
Execution::RunAction(request) => {
komodo_client().execute(request).await
}
Execution::RunProcedure(request) => {
komodo_client().execute(request).await
}
Execution::RunBuild(request) => {
komodo_client().execute(request).await
}
Execution::CancelBuild(request) => {
komodo_client().execute(request).await
}
Execution::Deploy(request) => {
komodo_client().execute(request).await
}
Execution::StartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::RestartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::PauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::StopDeployment(request) => {
komodo_client().execute(request).await
}
Execution::DestroyDeployment(request) => {
komodo_client().execute(request).await
}
Execution::CloneRepo(request) => {
komodo_client().execute(request).await
}
Execution::PullRepo(request) => {
komodo_client().execute(request).await
}
Execution::BuildRepo(request) => {
komodo_client().execute(request).await
}
Execution::CancelRepoBuild(request) => {
komodo_client().execute(request).await
}
Execution::StartContainer(request) => {
komodo_client().execute(request).await
}
Execution::RestartContainer(request) => {
komodo_client().execute(request).await
}
Execution::PauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::StopContainer(request) => {
komodo_client().execute(request).await
}
Execution::DestroyContainer(request) => {
komodo_client().execute(request).await
}
Execution::StartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::RestartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::StopAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PruneContainers(request) => {
komodo_client().execute(request).await
}
Execution::DeleteNetwork(request) => {
komodo_client().execute(request).await
}
Execution::PruneNetworks(request) => {
komodo_client().execute(request).await
}
Execution::DeleteImage(request) => {
komodo_client().execute(request).await
}
Execution::PruneImages(request) => {
komodo_client().execute(request).await
}
Execution::DeleteVolume(request) => {
komodo_client().execute(request).await
}
Execution::PruneVolumes(request) => {
komodo_client().execute(request).await
}
Execution::PruneDockerBuilders(request) => {
komodo_client().execute(request).await
}
Execution::PruneBuildx(request) => {
komodo_client().execute(request).await
}
Execution::PruneSystem(request) => {
komodo_client().execute(request).await
}
Execution::RunSync(request) => {
komodo_client().execute(request).await
}
Execution::CommitSync(request) => {
komodo_client().write(request).await
}
Execution::DeployStack(request) => {
komodo_client().execute(request).await
}
Execution::DeployStackIfChanged(request) => {
komodo_client().execute(request).await
}
Execution::StartStack(request) => {
komodo_client().execute(request).await
}
Execution::RestartStack(request) => {
komodo_client().execute(request).await
}
Execution::PauseStack(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseStack(request) => {
komodo_client().execute(request).await
}
Execution::StopStack(request) => {
komodo_client().execute(request).await
}
Execution::DestroyStack(request) => {
komodo_client().execute(request).await
}
Execution::RunAction(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchRunAction(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::RunProcedure(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchRunProcedure(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::RunBuild(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchRunBuild(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::CancelBuild(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::Deploy(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchDeploy(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::PullDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StartDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::RestartDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PauseDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::UnpauseDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StopDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::DestroyDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchDestroyDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::CloneRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchCloneRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::PullRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchPullRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::BuildRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchBuildRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::CancelRepoBuild(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StartContainer(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::RestartContainer(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PauseContainer(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::UnpauseContainer(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StopContainer(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::DestroyContainer(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StartAllContainers(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::RestartAllContainers(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PauseAllContainers(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::UnpauseAllContainers(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StopAllContainers(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PruneContainers(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::DeleteNetwork(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PruneNetworks(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::DeleteImage(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PruneImages(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::DeleteVolume(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PruneVolumes(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PruneDockerBuilders(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PruneBuildx(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PruneSystem(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::RunSync(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::CommitSync(request) => komodo_client()
.write(request)
.await
.map(ExecutionResult::Single),
Execution::DeployStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchDeployStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::DeployStackIfChanged(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchDeployStackIfChanged(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::PullStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StartStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::RestartStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::PauseStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::UnpauseStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::StopStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::DestroyStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchDestroyStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::Sleep(request) => {
let duration =
Duration::from_millis(request.duration_ms as u64);
@@ -320,7 +465,12 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
};
match res {
Ok(update) => println!("\n{}: {update:#?}", "SUCCESS".green()),
Ok(ExecutionResult::Single(update)) => {
println!("\n{}: {update:#?}", "SUCCESS".green())
}
Ok(ExecutionResult::Batch(update)) => {
println!("\n{}: {update:#?}", "SUCCESS".green())
}
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
}

View File

@@ -21,6 +21,7 @@ environment_file.workspace = true
formatting.workspace = true
command.workspace = true
logger.workspace = true
cache.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
@@ -48,13 +49,14 @@ serde_json.workspace = true
serde_yaml.workspace = true
typeshare.workspace = true
octorust.workspace = true
wildcard.workspace = true
dashmap.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
nom_pem.workspace = true
anyhow.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
bcrypt.workspace = true
base64.workspace = true
tokio.workspace = true

View File

@@ -22,7 +22,7 @@ pub async fn send_alert(
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | *{name}*{region} is now *reachable*\n{link}"
"{level} | **{name}**{region} is now **reachable**\n{link}"
)
}
SeverityLevel::Critical => {
@@ -31,7 +31,7 @@ pub async fn send_alert(
.map(|e| format!("\n**error**: {e:#?}"))
.unwrap_or_default();
format!(
"{level} | *{name}*{region} is *unreachable* ❌\n{link}{err}"
"{level} | **{name}**{region} is **unreachable**\n{link}{err}"
)
}
_ => unreachable!(),
@@ -46,7 +46,7 @@ pub async fn send_alert(
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
format!(
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%*\n{link}"
"{level} | **{name}**{region} cpu usage at **{percentage:.1}%**\n{link}"
)
}
AlertData::ServerMem {
@@ -60,7 +60,7 @@ pub async fn send_alert(
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾\n\nUsing *{used_gb:.1} GiB* / *{total_gb:.1} GiB*\n{link}"
"{level} | **{name}**{region} memory usage at **{percentage:.1}%** 💾\n\nUsing **{used_gb:.1} GiB** / **{total_gb:.1} GiB**\n{link}"
)
}
AlertData::ServerDisk {
@@ -75,7 +75,7 @@ pub async fn send_alert(
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* 💿\nmount point: `{path:?}`\nusing *{used_gb:.1} GiB* / *{total_gb:.1} GiB*\n{link}"
"{level} | **{name}**{region} disk usage at **{percentage:.1}%** 💿\nmount point: `{path:?}`\nusing **{used_gb:.1} GiB** / **{total_gb:.1} GiB**\n{link}"
)
}
AlertData::ContainerStateChange {
@@ -88,7 +88,17 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to = fmt_docker_container_state(to);
format!("📦 Deployment *{name}* is now {to}\nserver: {server_name}\nprevious: {from}\n{link}")
format!("📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!("⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}")
}
AlertData::StackStateChange {
id,
@@ -100,28 +110,39 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to = fmt_stack_state(to);
format!("🥞 Stack *{name}* is now {to}\nserver: {server_name}\nprevious: {from}\n{link}")
format!("🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
}
AlertData::StackImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!("⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}")
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!("{level} | Failed to terminated AWS builder instance\ninstance id: *{instance_id}*\n{message}")
format!("{level} | Failed to terminated AWS builder instance\ninstance id: **{instance_id}**\n{message}")
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
resource_link(ResourceTargetVariant::ResourceSync, id);
format!(
"{level} | Pending resource sync updates on *{name}*\n{link}"
"{level} | Pending resource sync updates on **{name}**\n{link}"
)
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!("{level} | Build *{name}* failed\nversion: v{version}\n{link}")
format!("{level} | Build **{name}** failed\nversion: **v{version}**\n{link}")
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);
format!("{level} | Repo build for *{name}* failed\n{link}")
format!("{level} | Repo build for **{name}** failed\n{link}")
}
AlertData::None {} => Default::default(),
};

View File

@@ -182,7 +182,7 @@ pub async fn send_alert(
..
} => {
let to = fmt_docker_container_state(to);
let text = format!("📦 Container *{name}* is now {to}");
let text = format!("📦 Container *{name}* is now *{to}*");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
@@ -195,6 +195,27 @@ pub async fn send_alert(
];
(text, blocks.into())
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
server_name,
server_id: _server_id,
image,
} => {
let text =
format!("⬆ Deployment *{name}* has an update available");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: *{server_name}*\nimage: *{image}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
)),
];
(text, blocks.into())
}
AlertData::StackStateChange {
name,
server_name,
@@ -204,11 +225,32 @@ pub async fn send_alert(
..
} => {
let to = fmt_stack_state(to);
let text = format!("🥞 Stack *{name}* is now {to}");
let text = format!("🥞 Stack *{name}* is now *{to}*");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: {server_name}\nprevious: {from}",
"server: *{server_name}*\nprevious: *{from}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Stack,
id,
)),
];
(text, blocks.into())
}
AlertData::StackImageUpdateAvailable {
id,
name,
server_name,
server_id: _server_id,
service,
image,
} => {
let text = format!("⬆ Stack *{name}* has an update available");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: *{server_name}*\nservice: *{service}*\nimage: *{image}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Stack,
@@ -233,8 +275,9 @@ pub async fn send_alert(
(text, blocks.into())
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let text =
format!("{level} | Pending resource sync updates on {name}");
let text = format!(
"{level} | Pending resource sync updates on *{name}*"
);
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
@@ -252,20 +295,21 @@ pub async fn send_alert(
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"build id: *{id}*\nbuild name: *{name}*\nversion: v{version}",
"build name: *{name}*\nversion: *v{version}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Build,
id,
)),
Block::section(resource_link(ResourceTargetVariant::Build, id))
];
(text, blocks.into())
}
AlertData::RepoBuildFailed { id, name } => {
let text =
format!("{level} | Repo build for {name} has failed");
format!("{level} | Repo build for *{name}* has *failed*");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"repo id: *{id}*\nrepo name: *{name}*",
)),
Block::section(format!("repo name: *{name}*",)),
Block::section(resource_link(
ResourceTargetVariant::Repo,
id,

View File

@@ -9,7 +9,7 @@ use anyhow::Context;
use command::run_komodo_command;
use komodo_client::{
api::{
execute::RunAction,
execute::{BatchExecutionResponse, BatchRunAction, RunAction},
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
},
entities::{
@@ -25,6 +25,7 @@ use resolver_api::Resolve;
use tokio::fs;
use crate::{
api::execute::ExecuteRequest,
config::core_config,
helpers::{
interpolate::{
@@ -39,7 +40,26 @@ use crate::{
state::{action_states, db_client, State},
};
impl super::BatchExecute for BatchRunAction {
type Resource = Action;
fn single_request(action: String) -> ExecuteRequest {
ExecuteRequest::RunAction(RunAction { action })
}
}
impl Resolve<BatchRunAction, (User, Update)> for State {
#[instrument(name = "BatchRunAction", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchRunAction { pattern }: BatchRunAction,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchRunAction>(&pattern, &user).await
}
}
impl Resolve<RunAction, (User, Update)> for State {
#[instrument(name = "RunAction", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunAction { action }: RunAction,

View File

@@ -4,7 +4,10 @@ use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
api::execute::{CancelBuild, Deploy, RunBuild},
api::execute::{
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
RunBuild,
},
entities::{
alert::{Alert, AlertData, SeverityLevel},
all_logs_success,
@@ -51,6 +54,24 @@ use crate::{
use super::ExecuteRequest;
impl super::BatchExecute for BatchRunBuild {
type Resource = Build;
fn single_request(build: String) -> ExecuteRequest {
ExecuteRequest::RunBuild(RunBuild { build })
}
}
impl Resolve<BatchRunBuild, (User, Update)> for State {
#[instrument(name = "BatchRunBuild", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchRunBuild { pattern }: BatchRunBuild,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchRunBuild>(&pattern, &user).await
}
}
impl Resolve<RunBuild, (User, Update)> for State {
#[instrument(name = "RunBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -438,7 +459,6 @@ async fn handle_early_return(
Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {

View File

@@ -1,6 +1,7 @@
use std::collections::HashSet;
use std::{collections::HashSet, sync::OnceLock};
use anyhow::{anyhow, Context};
use cache::TimeoutCache;
use formatting::format_serror;
use komodo_client::{
api::execute::*,
@@ -9,7 +10,7 @@ use komodo_client::{
deployment::{
extract_registry_domain, Deployment, DeploymentImage,
},
get_image_name,
get_image_name, komodo_timestamp, optional_string,
permission::PermissionLevel,
server::Server,
update::{Log, Update},
@@ -37,6 +38,30 @@ use crate::{
state::{action_states, State},
};
use super::ExecuteRequest;
impl super::BatchExecute for BatchDeploy {
type Resource = Deployment;
fn single_request(deployment: String) -> ExecuteRequest {
ExecuteRequest::Deploy(Deploy {
deployment,
stop_signal: None,
stop_time: None,
})
}
}
impl Resolve<BatchDeploy, (User, Update)> for State {
#[instrument(name = "BatchDeploy", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDeploy { pattern }: BatchDeploy,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDeploy>(&pattern, &user).await
}
}
async fn setup_deployment_execution(
deployment: &str,
user: &User,
@@ -49,12 +74,16 @@ async fn setup_deployment_execution(
.await?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
return Err(anyhow!("Deployment has no Server configured"));
}
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
if !server.config.enabled {
return Err(anyhow!("Attached Server is not enabled"));
}
Ok((deployment, server))
}
@@ -86,13 +115,6 @@ impl Resolve<Deploy, (User, Update)> for State {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
periphery
.health_check()
.await
.context("Failed server health check, stopping run.")?;
// This block resolves the attached Build to an actual versioned image
let (version, registry_token) = match &deployment.config.image {
DeploymentImage::Build { build_id, version } => {
@@ -104,12 +126,7 @@ impl Resolve<Deploy, (User, Update)> for State {
} else {
*version
};
// Remove ending patch if it is 0, this means use latest patch.
let version_str = if version.patch == 0 {
format!("{}.{}", version.major, version.minor)
} else {
version.to_string()
};
let version_str = version.to_string();
// Potentially add the build image_tag postfix
let version_str = if build.config.image_tag.is_empty() {
version_str
@@ -217,7 +234,7 @@ impl Resolve<Deploy, (User, Update)> for State {
update.version = version;
update_update(update.clone()).await?;
match periphery
match periphery_client(&server)?
.request(api::container::Deploy {
deployment,
stop_signal,
@@ -230,10 +247,8 @@ impl Resolve<Deploy, (User, Update)> for State {
Ok(log) => update.logs.push(log),
Err(e) => {
update.push_error_log(
"deploy container",
format_serror(
&e.context("failed to deploy container").into(),
),
"Deploy Container",
format_serror(&e.into()),
);
}
};
@@ -247,6 +262,155 @@ impl Resolve<Deploy, (User, Update)> for State {
}
}
/// Wait this long after a pull to allow another pull through
const PULL_TIMEOUT: i64 = 5_000;
type ServerId = String;
type Image = String;
type PullCache = TimeoutCache<(ServerId, Image), Log>;
fn pull_cache() -> &'static PullCache {
static PULL_CACHE: OnceLock<PullCache> = OnceLock::new();
PULL_CACHE.get_or_init(Default::default)
}
pub async fn pull_deployment_inner(
deployment: Deployment,
server: &Server,
) -> anyhow::Result<Log> {
let (image, account, token) = match deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(&build_id).await?;
let image_name = get_image_name(&build)
.context("failed to create image name")?;
let version = if version.is_none() {
build.config.version.to_string()
} else {
version.to_string()
};
// Potentially add the build image_tag postfix
let version = if build.config.image_tag.is_empty() {
version
} else {
format!("{version}-{}", build.config.image_tag)
};
// replace image with corresponding build image.
let image = format!("{image_name}:{version}");
if build.config.image_registry.domain.is_empty() {
(image, None, None)
} else {
let ImageRegistryConfig {
domain, account, ..
} = build.config.image_registry;
let account =
if deployment.config.image_registry_account.is_empty() {
account
} else {
deployment.config.image_registry_account
};
let token = if !account.is_empty() {
registry_token(&domain, &account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {account}"),
)?
} else {
None
};
(image, optional_string(&account), token)
}
}
DeploymentImage::Image { image } => {
let domain = extract_registry_domain(&image)?;
let token = if !deployment
.config
.image_registry_account
.is_empty()
{
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
)?
} else {
None
};
(
image,
optional_string(&deployment.config.image_registry_account),
token,
)
}
};
// Acquire the pull lock for this image on the server
let lock = pull_cache()
.get_lock((server.id.clone(), image.clone()))
.await;
// Lock the cache entry. This ensures simultaneous pulls of the
// same image wait for the first to finish, then check its
// cached result instead of pulling again.
let mut locked = lock.lock().await;
// Early return from cache if last pulled within PULL_TIMEOUT
if locked.last_ts + PULL_TIMEOUT > komodo_timestamp() {
return locked.clone_res();
}
let res = async {
let log = match periphery_client(server)?
.request(api::image::PullImage {
name: image,
account,
token,
})
.await
{
Ok(log) => log,
Err(e) => Log::error("Pull image", format_serror(&e.into())),
};
update_cache_for_server(server).await;
anyhow::Ok(log)
}
.await;
// Set the cache with results. Any other calls waiting on the lock will
// then immediately also use this same result.
locked.set(&res, komodo_timestamp());
res
}
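The pull lock above relies on the new `cache` crate's TimeoutCache: one shared, lockable entry per (server, image) key, with the last result and its timestamp cached behind the lock. A minimal sketch of the assumed interface, inferred purely from its usage in pull_deployment_inner (the real lib/cache implementation may differ):

use std::{collections::HashMap, hash::Hash, sync::Arc};
use anyhow::anyhow;
use tokio::sync::Mutex;

/// Sketch only: per-key entry holding the last result and its timestamp.
pub struct TimeoutCacheEntry<V> {
  pub last_ts: i64,
  res: Option<Result<V, String>>,
}

impl<V> Default for TimeoutCacheEntry<V> {
  fn default() -> Self {
    Self { last_ts: 0, res: None }
  }
}

impl<V: Clone> TimeoutCacheEntry<V> {
  /// Clone the cached result (errors are cloned by message).
  pub fn clone_res(&self) -> anyhow::Result<V> {
    match &self.res {
      Some(Ok(v)) => Ok(v.clone()),
      Some(Err(e)) => Err(anyhow!("{e}")),
      None => Err(anyhow!("no cached result")),
    }
  }
  /// Store the latest result along with its completion timestamp.
  pub fn set(&mut self, res: &anyhow::Result<V>, ts: i64) {
    self.last_ts = ts;
    self.res = Some(match res {
      Ok(v) => Ok(v.clone()),
      Err(e) => Err(format!("{e:#}")),
    });
  }
}

/// Sketch only: maps each key to a shared, lockable cache entry.
pub struct TimeoutCache<K, V> {
  inner: Mutex<HashMap<K, Arc<Mutex<TimeoutCacheEntry<V>>>>>,
}

impl<K, V> Default for TimeoutCache<K, V> {
  fn default() -> Self {
    Self { inner: Mutex::new(HashMap::new()) }
  }
}

impl<K: Eq + Hash, V> TimeoutCache<K, V> {
  /// Get (or insert) the lock guarding the entry for this key.
  pub async fn get_lock(
    &self,
    key: K,
  ) -> Arc<Mutex<TimeoutCacheEntry<V>>> {
    let mut map = self.inner.lock().await;
    map.entry(key).or_default().clone()
  }
}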
impl Resolve<PullDeployment, (User, Update)> for State {
async fn resolve(
&self,
PullDeployment { deployment }: PullDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pulling = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = pull_deployment_inner(deployment, &server).await?;
update.logs.push(log);
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
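The `_action_guard` used above is an RAII pattern: `action_state.update(...)` checks that the Deployment isn't already busy, flips the relevant flag (here `pulling`) under one lock, and returns a guard whose Drop impl restores the default state even on early return or error. A simplified sketch of the idea, with hypothetical types standing in for the real action-state machinery:

use std::sync::{Arc, Mutex};

// Hypothetical stand-in for the real per-deployment action state.
#[derive(Default)]
struct ActionState {
  pulling: bool,
  deploying: bool,
}

impl ActionState {
  fn busy(&self) -> bool {
    self.pulling || self.deploying
  }
}

// Guard that resets the state to default when dropped.
struct ActionGuard(Arc<Mutex<ActionState>>);

impl Drop for ActionGuard {
  fn drop(&mut self) {
    *self.0.lock().unwrap() = ActionState::default();
  }
}

// Check-then-set under one lock; Err if already busy.
fn update(
  state: &Arc<Mutex<ActionState>>,
  f: impl FnOnce(&mut ActionState),
) -> anyhow::Result<ActionGuard> {
  let mut s = state.lock().unwrap();
  if s.busy() {
    anyhow::bail!("deployment busy");
  }
  f(&mut s);
  Ok(ActionGuard(state.clone()))
}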
impl Resolve<StartDeployment, (User, Update)> for State {
#[instrument(name = "StartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -271,9 +435,7 @@ impl Resolve<StartDeployment, (User, Update)> for State {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
let log = match periphery_client(&server)?
.request(api::container::StartContainer {
name: deployment.name,
})
@@ -319,9 +481,7 @@ impl Resolve<RestartDeployment, (User, Update)> for State {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
let log = match periphery_client(&server)?
.request(api::container::RestartContainer {
name: deployment.name,
})
@@ -369,9 +529,7 @@ impl Resolve<PauseDeployment, (User, Update)> for State {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
let log = match periphery_client(&server)?
.request(api::container::PauseContainer {
name: deployment.name,
})
@@ -417,9 +575,7 @@ impl Resolve<UnpauseDeployment, (User, Update)> for State {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
let log = match periphery_client(&server)?
.request(api::container::UnpauseContainer {
name: deployment.name,
})
@@ -471,9 +627,7 @@ impl Resolve<StopDeployment, (User, Update)> for State {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
let log = match periphery_client(&server)?
.request(api::container::StopContainer {
name: deployment.name,
signal: signal
@@ -501,6 +655,29 @@ impl Resolve<StopDeployment, (User, Update)> for State {
}
}
impl super::BatchExecute for BatchDestroyDeployment {
type Resource = Deployment;
fn single_request(deployment: String) -> ExecuteRequest {
ExecuteRequest::DestroyDeployment(DestroyDeployment {
deployment,
signal: None,
time: None,
})
}
}
impl Resolve<BatchDestroyDeployment, (User, Update)> for State {
#[instrument(name = "BatchDestroyDeployment", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDestroyDeployment { pattern }: BatchDestroyDeployment,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDestroyDeployment>(&pattern, &user)
.await
}
}
impl Resolve<DestroyDeployment, (User, Update)> for State {
#[instrument(name = "DestroyDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -529,9 +706,7 @@ impl Resolve<DestroyDeployment, (User, Update)> for State {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
let log = match periphery_client(&server)?
.request(api::container::RemoveContainer {
name: deployment.name,
signal: signal

View File

@@ -2,13 +2,16 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
update::{Log, Update},
user::User,
Operation,
},
};
use mungos::by_id::find_one_by_id;
@@ -21,6 +24,7 @@ use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
resource::{list_full_for_user_using_pattern, KomodoResource},
state::{db_client, State},
};
@@ -34,6 +38,10 @@ mod server_template;
mod stack;
mod sync;
pub use {
deployment::pull_deployment_inner, stack::pull_stack_inner,
};
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolver, EnumVariants,
@@ -68,38 +76,51 @@ pub enum ExecuteRequest {
// ==== DEPLOYMENT ====
Deploy(Deploy),
BatchDeploy(BatchDeploy),
PullDeployment(PullDeployment),
StartDeployment(StartDeployment),
RestartDeployment(RestartDeployment),
PauseDeployment(PauseDeployment),
UnpauseDeployment(UnpauseDeployment),
StopDeployment(StopDeployment),
DestroyDeployment(DestroyDeployment),
BatchDestroyDeployment(BatchDestroyDeployment),
// ==== STACK ====
DeployStack(DeployStack),
BatchDeployStack(BatchDeployStack),
DeployStackIfChanged(DeployStackIfChanged),
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
PullStack(PullStack),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
DestroyStack(DestroyStack),
BatchDestroyStack(BatchDestroyStack),
// ==== BUILD ====
RunBuild(RunBuild),
BatchRunBuild(BatchRunBuild),
CancelBuild(CancelBuild),
// ==== REPO ====
CloneRepo(CloneRepo),
BatchCloneRepo(BatchCloneRepo),
PullRepo(PullRepo),
BatchPullRepo(BatchPullRepo),
BuildRepo(BuildRepo),
BatchBuildRepo(BatchBuildRepo),
CancelRepoBuild(CancelRepoBuild),
// ==== PROCEDURE ====
RunProcedure(RunProcedure),
BatchRunProcedure(BatchRunProcedure),
// ==== ACTION ====
RunAction(RunAction),
BatchRunAction(BatchRunAction),
// ==== SERVER TEMPLATE ====
LaunchServer(LaunchServer),
@@ -117,7 +138,25 @@ pub fn router() -> Router {
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ExecuteRequest>,
) -> serror::Result<Json<Update>> {
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let res = match inner_handler(request, user).await? {
ExecutionResult::Single(update) => serde_json::to_string(&update)
.context("Failed to serialize Update")?,
ExecutionResult::Batch(res) => res,
};
Ok((TypedHeader(ContentType::json()), res))
}
pub enum ExecutionResult {
Single(Update),
/// The batch contents will be pre-serialized here
Batch(String),
}
pub async fn inner_handler(
request: ExecuteRequest,
user: User,
) -> anyhow::Result<ExecutionResult> {
let req_id = Uuid::new_v4();
// need to validate no cancel is active before any update is created.
@@ -125,6 +164,17 @@ async fn handler(
let update = init_execution_update(&request, &user).await?;
// This will be the case for the Batch executions,
// since they don't have their own updates.
// The batch calls also call "inner_handler" themselves,
// and in their case will spawn tasks, so that isn't necessary
// here either.
if update.operation == Operation::None {
return Ok(ExecutionResult::Batch(
task(req_id, request, user, update).await?,
));
}
let handle =
tokio::spawn(task(req_id, request, user, update.clone()));
@@ -160,7 +210,7 @@ async fn handler(
}
});
Ok(Json(update))
Ok(ExecutionResult::Single(update))
}
#[instrument(
@@ -200,3 +250,40 @@ async fn task(
res
}
trait BatchExecute {
type Resource: KomodoResource;
fn single_request(name: String) -> ExecuteRequest;
}
async fn batch_execute<E: BatchExecute>(
pattern: &str,
user: &User,
) -> anyhow::Result<BatchExecutionResponse> {
let resources = list_full_for_user_using_pattern::<E::Resource>(
pattern,
Default::default(),
user,
&[],
)
.await?;
let futures = resources.into_iter().map(|resource| {
let user = user.clone();
async move {
inner_handler(E::single_request(resource.name.clone()), user)
.await
.map(|r| {
let ExecutionResult::Single(update) = r else {
unreachable!()
};
update
})
.map_err(|e| BatchExecutionResponseItemErr {
name: resource.name,
error: e.into(),
})
.into()
}
});
Ok(join_all(futures).await)
}
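From a client's perspective, each Batch* request carries only a `pattern`, and the response contains one entry per matched resource: either the resulting Update or a per-resource { name, error } pair, as seen in the CLI changes above. A hedged sketch of a call site, assuming a wildcard-style pattern (the pattern syntax lives in list_full_for_user_using_pattern and isn't shown in this diff; `komodo_client()` is the CLI's client accessor):

use komodo_client::api::execute::BatchDeploy;

async fn deploy_prod() -> anyhow::Result<()> {
  // Deploy every Deployment the user can Execute that matches the pattern.
  let res = komodo_client()
    .execute(BatchDeploy {
      pattern: String::from("prod-*"),
    })
    .await?;
  // One item per matched Deployment: an Update, or a per-resource error.
  println!("{res:#?}");
  Ok(())
}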

View File

@@ -2,7 +2,9 @@ use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use komodo_client::{
api::execute::RunProcedure,
api::execute::{
BatchExecutionResponse, BatchRunProcedure, RunProcedure,
},
entities::{
permission::PermissionLevel, procedure::Procedure,
update::Update, user::User,
@@ -18,6 +20,26 @@ use crate::{
state::{action_states, db_client, State},
};
use super::ExecuteRequest;
impl super::BatchExecute for BatchRunProcedure {
type Resource = Procedure;
fn single_request(procedure: String) -> ExecuteRequest {
ExecuteRequest::RunProcedure(RunProcedure { procedure })
}
}
impl Resolve<BatchRunProcedure, (User, Update)> for State {
#[instrument(name = "BatchRunProcedure", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchRunProcedure { pattern }: BatchRunProcedure,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchRunProcedure>(&pattern, &user).await
}
}
impl Resolve<RunProcedure, (User, Update)> for State {
#[instrument(name = "RunProcedure", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(

View File

@@ -47,6 +47,24 @@ use crate::{
use super::ExecuteRequest;
impl super::BatchExecute for BatchCloneRepo {
type Resource = Repo;
fn single_request(repo: String) -> ExecuteRequest {
ExecuteRequest::CloneRepo(CloneRepo { repo })
}
}
impl Resolve<BatchCloneRepo, (User, Update)> for State {
#[instrument(name = "BatchCloneRepo", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchCloneRepo { pattern }: BatchCloneRepo,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchCloneRepo>(&pattern, &user).await
}
}
impl Resolve<CloneRepo, (User, Update)> for State {
#[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -138,6 +156,24 @@ impl Resolve<CloneRepo, (User, Update)> for State {
}
}
impl super::BatchExecute for BatchPullRepo {
type Resource = Repo;
fn single_request(repo: String) -> ExecuteRequest {
ExecuteRequest::PullRepo(PullRepo { repo })
}
}
impl Resolve<BatchPullRepo, (User, Update)> for State {
#[instrument(name = "BatchPullRepo", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchPullRepo { pattern }: BatchPullRepo,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchPullRepo>(&pattern, &user).await
}
}
impl Resolve<PullRepo, (User, Update)> for State {
#[instrument(name = "PullRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -271,6 +307,24 @@ async fn update_last_pulled_time(repo_name: &str) {
}
}
impl super::BatchExecute for BatchBuildRepo {
type Resource = Repo;
fn single_request(repo: String) -> ExecuteRequest {
ExecuteRequest::BuildRepo(BuildRepo { repo })
}
}
impl Resolve<BatchBuildRepo, (User, Update)> for State {
#[instrument(name = "BatchBuildRepo", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchBuildRepo { pattern }: BatchBuildRepo,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchBuildRepo>(&pattern, &user).await
}
}
impl Resolve<BuildRepo, (User, Update)> for State {
#[instrument(name = "BuildRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(

View File

@@ -6,8 +6,9 @@ use komodo_client::{
api::{execute::*, write::RefreshStackCache},
entities::{
permission::PermissionLevel,
server::Server,
stack::{Stack, StackInfo},
update::Update,
update::{Log, Update},
user::User,
},
};
@@ -36,11 +37,39 @@ use crate::{
state::{action_states, db_client, State},
};
use super::ExecuteRequest;
impl super::BatchExecute for BatchDeployStack {
type Resource = Stack;
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DeployStack(DeployStack {
stack,
service: None,
stop_time: None,
})
}
}
impl Resolve<BatchDeployStack, (User, Update)> for State {
#[instrument(name = "BatchDeployStack", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDeployStack { pattern }: BatchDeployStack,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDeployStack>(&pattern, &user).await
}
}
impl Resolve<DeployStack, (User, Update)> for State {
#[instrument(name = "DeployStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DeployStack { stack, stop_time }: DeployStack,
DeployStack {
stack,
service,
stop_time,
}: DeployStack,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (mut stack, server) = get_stack_and_server(
@@ -62,6 +91,13 @@ impl Resolve<DeployStack, (User, Update)> for State {
update_update(update.clone()).await?;
if let Some(service) = &service {
update.logs.push(Log::simple(
&format!("Service: {service}"),
format!("Execution requested for Stack service {service}"),
))
}
let git_token = crate::helpers::git_token(
&stack.config.git_provider,
&stack.config.git_account,
@@ -135,7 +171,7 @@ impl Resolve<DeployStack, (User, Update)> for State {
} = periphery_client(&server)?
.request(ComposeUp {
stack: stack.clone(),
service: None,
service,
git_token,
registry_token,
replacers: secret_replacers.into_iter().collect(),
@@ -246,7 +282,30 @@ impl Resolve<DeployStack, (User, Update)> for State {
}
}
impl super::BatchExecute for BatchDeployStackIfChanged {
type Resource = Stack;
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DeployStackIfChanged(DeployStackIfChanged {
stack,
stop_time: None,
})
}
}
impl Resolve<BatchDeployStackIfChanged, (User, Update)> for State {
#[instrument(name = "BatchDeployStackIfChanged", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDeployStackIfChanged { pattern }: BatchDeployStackIfChanged,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDeployStackIfChanged>(&pattern, &user)
.await
}
}
impl Resolve<DeployStackIfChanged, (User, Update)> for State {
#[instrument(name = "DeployStackIfChanged", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DeployStackIfChanged { stack, stop_time }: DeployStackIfChanged,
@@ -309,6 +368,7 @@ impl Resolve<DeployStackIfChanged, (User, Update)> for State {
.resolve(
DeployStack {
stack: stack.name,
service: None,
stop_time,
},
(user, update),
@@ -317,6 +377,87 @@ impl Resolve<DeployStackIfChanged, (User, Update)> for State {
}
}
pub async fn pull_stack_inner(
mut stack: Stack,
service: Option<String>,
server: &Server,
update: Option<&mut Update>,
) -> anyhow::Result<ComposePullResponse> {
if let (Some(service), Some(update)) = (&service, update) {
update.logs.push(Log::simple(
&format!("Service: {service}"),
format!("Execution requested for Stack service {service}"),
))
}
let git_token = crate::helpers::git_token(
&stack.config.git_provider,
&stack.config.git_account,
|https| stack.config.git_https = https,
).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {}", stack.config.git_provider, stack.config.git_account),
)?;
let registry_token = crate::helpers::registry_token(
&stack.config.registry_provider,
&stack.config.registry_account,
).await.with_context(
|| format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account),
)?;
let res = periphery_client(server)?
.request(ComposePull {
stack,
service,
git_token,
registry_token,
})
.await?;
// Ensure the cached stack state is up to date by updating the server cache
update_cache_for_server(server).await;
Ok(res)
}
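`pull_stack_inner` resolves the git and registry tokens with anyhow's lazy `with_context`, so the provider/account diagnostics are only formatted on the error path. The same pattern in a self-contained sketch (assuming the `anyhow` crate; the db lookup is stubbed):

use anyhow::{anyhow, Context, Result};

// Stand-in for the token lookup against the db.
fn fetch_token(provider: &str, account: &str) -> Result<String> {
    Err(anyhow!("no token configured for {account} at {provider}"))
}

fn main() {
    let (provider, account) = ("github.com", "my-account");
    let err = fetch_token(provider, account)
        // The closure only runs when the Result is an Err.
        .with_context(|| {
            format!("Failed to get git token in call to db. Stopping run. | {provider} | {account}")
        })
        .unwrap_err();
    // Alternate-form printing walks the whole context chain.
    println!("{err:#}");
}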
impl Resolve<PullStack, (User, Update)> for State {
#[instrument(name = "PullStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PullStack { stack, service }: PullStack,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (stack, server) = get_stack_and_server(
&stack,
&user,
PermissionLevel::Execute,
true,
)
.await?;
// get the action state for the stack (or insert default).
let action_state =
action_states().stack.get_or_insert_default(&stack.id).await;
// Check that the stack is not already busy before updating, and return Err if it is.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pulling = true)?;
update_update(update.clone()).await?;
let res =
pull_stack_inner(stack, service, &server, Some(&mut update))
.await?;
update.logs.extend(res.logs);
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StartStack, (User, Update)> for State {
#[instrument(name = "StartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -418,12 +559,36 @@ impl Resolve<StopStack, (User, Update)> for State {
}
}
impl super::BatchExecute for BatchDestroyStack {
type Resource = Stack;
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DestroyStack(DestroyStack {
stack,
service: None,
remove_orphans: false,
stop_time: None,
})
}
}
impl Resolve<BatchDestroyStack, (User, Update)> for State {
#[instrument(name = "BatchDestroyStack", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDestroyStack { pattern }: BatchDestroyStack,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDestroyStack>(&pattern, &user).await
}
}
impl Resolve<DestroyStack, (User, Update)> for State {
#[instrument(name = "DestroyStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DestroyStack {
stack,
service,
remove_orphans,
stop_time,
}: DestroyStack,
@@ -431,7 +596,7 @@ impl Resolve<DestroyStack, (User, Update)> for State {
) -> anyhow::Result<Update> {
execute_compose::<DestroyStack>(
&stack,
None,
service,
&user,
|state| state.destroying = true,
update,

View File

@@ -339,6 +339,7 @@ impl Resolve<ListSecrets, User> for State {
ResourceTarget::Server(id) => Some(id),
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Url(_) => None,
BuilderConfig::Server(config) => Some(config.server_id),
BuilderConfig::Aws(config) => {
secrets.extend(config.secrets);
@@ -387,6 +388,7 @@ impl Resolve<ListGitProvidersFromConfig, User> for State {
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Url(_) => {}
BuilderConfig::Server(config) => {
merge_git_providers_for_server(
&mut providers,
@@ -485,6 +487,7 @@ impl Resolve<ListDockerRegistriesFromConfig, User> for State {
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Url(_) => {}
BuilderConfig::Server(config) => {
merge_docker_registries_for_server(
&mut registries,

View File

@@ -539,20 +539,21 @@ impl Resolve<GetResourceMatchingContainer, User> for State {
for StackServiceNames {
service_name,
container_name,
..
} in stack
.info
.deployed_services
.unwrap_or(stack.info.latest_services)
{
let is_match = match compose_container_match_regex(&container_name)
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
{
Ok(regex) => regex,
Err(e) => {
warn!("{e:#}");
continue;
}
}.is_match(&container);
if is_match {
return Ok(GetResourceMatchingContainerResponse {
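The exact pattern built by `compose_container_match_regex` is not shown in this diff; the sketch below assumes compose-style naming where a configured container name may gain a numeric replica suffix (`name`, `name-1`, ...), which is also what lets replicated services match multiple containers. Assumed behavior only, using the `regex` crate:

use regex::Regex;

// Hypothetical stand-in for compose_container_match_regex: match the
// configured container name with an optional numeric replica suffix.
fn container_match_regex(container_name: &str) -> Result<Regex, regex::Error> {
    Regex::new(&format!("^{}(-[0-9]+)?$", regex::escape(container_name)))
}

fn main() {
    let re = container_match_regex("komodo-core").unwrap();
    assert!(re.is_match("komodo-core"));
    assert!(re.is_match("komodo-core-2"));
    assert!(!re.is_match("komodo-core-periphery"));
}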

View File

@@ -2,10 +2,14 @@ use anyhow::{anyhow, Context};
use komodo_client::{
api::write::*,
entities::{
deployment::{Deployment, DeploymentState},
deployment::{
Deployment, DeploymentImage, DeploymentState,
PartialDeploymentConfig, RestartMode,
},
docker::container::RestartPolicyNameEnum,
komodo_timestamp,
permission::PermissionLevel,
server::Server,
server::{Server, ServerState},
to_komodo_name,
update::Update,
user::User,
@@ -13,7 +17,7 @@ use komodo_client::{
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use periphery_client::api::{self, container::InspectContainer};
use resolver_api::Resolve;
use crate::{
@@ -23,7 +27,7 @@ use crate::{
update::{add_update, make_update},
},
resource,
state::{action_states, db_client, State},
state::{action_states, db_client, server_status_cache, State},
};
impl Resolve<CreateDeployment, User> for State {
@@ -55,6 +59,97 @@ impl Resolve<CopyDeployment, User> for State {
}
}
impl Resolve<CreateDeploymentFromContainer, User> for State {
#[instrument(
name = "CreateDeploymentFromContainer",
skip(self, user)
)]
async fn resolve(
&self,
CreateDeploymentFromContainer { name, server }: CreateDeploymentFromContainer,
user: User,
) -> anyhow::Result<Deployment> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Write,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect container: server is {:?}",
cache.state
));
}
let container = periphery_client(&server)?
.request(InspectContainer { name: name.clone() })
.await
.context("Failed to inspect container")?;
let mut config = PartialDeploymentConfig {
server_id: server.id.into(),
..Default::default()
};
if let Some(container_config) = container.config {
config.image = container_config
.image
.map(|image| DeploymentImage::Image { image });
config.command = container_config.cmd.join(" ").into();
config.environment = container_config
.env
.into_iter()
.map(|env| format!(" {env}"))
.collect::<Vec<_>>()
.join("\n")
.into();
config.labels = container_config
.labels
.into_iter()
.map(|(key, val)| format!(" {key}: {val}"))
.collect::<Vec<_>>()
.join("\n")
.into();
}
if let Some(host_config) = container.host_config {
config.volumes = host_config
.binds
.into_iter()
.map(|bind| format!(" {bind}"))
.collect::<Vec<_>>()
.join("\n")
.into();
config.network = host_config.network_mode;
config.ports = host_config
.port_bindings
.into_iter()
.filter_map(|(container, mut host)| {
let host = host.pop()?.host_port?;
Some(format!(" {host}:{}", container.replace("/tcp", "")))
})
.collect::<Vec<_>>()
.join("\n")
.into();
config.restart = host_config.restart_policy.map(|restart| {
match restart.name {
RestartPolicyNameEnum::Always => RestartMode::Always,
RestartPolicyNameEnum::No
| RestartPolicyNameEnum::Empty => RestartMode::NoRestart,
RestartPolicyNameEnum::UnlessStopped => {
RestartMode::UnlessStopped
}
RestartPolicyNameEnum::OnFailure => RestartMode::OnFailure,
}
});
}
resource::create::<Deployment>(&name, config, &user).await
}
}
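`CreateDeploymentFromContainer` folds `docker inspect` output back into Komodo's multiline config strings (one indented entry per line) and maps docker's restart policy onto `RestartMode`. A standalone sketch of both conversions with simplified stand-in types (indentation width and the policy strings are illustrative):

// Simplified stand-ins for the docker / Komodo types used above.
#[derive(Debug, PartialEq)]
enum RestartMode {
    Always,
    NoRestart,
    UnlessStopped,
    OnFailure,
}

// One indented `KEY=value` entry per line, as in the environment field.
fn env_to_multiline(env: Vec<String>) -> String {
    env.into_iter()
        .map(|e| format!("  {e}"))
        .collect::<Vec<_>>()
        .join("\n")
}

fn restart_mode(policy: &str) -> RestartMode {
    match policy {
        "always" => RestartMode::Always,
        "unless-stopped" => RestartMode::UnlessStopped,
        "on-failure" => RestartMode::OnFailure,
        // Docker's empty policy and "no" both mean no restart.
        _ => RestartMode::NoRestart,
    }
}

fn main() {
    let env = vec!["PORT=8080".into(), "MODE=prod".into()];
    assert_eq!(env_to_multiline(env), "  PORT=8080\n  MODE=prod");
    assert_eq!(restart_mode(""), RestartMode::NoRestart);
}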
impl Resolve<DeleteDeployment, User> for State {
#[instrument(name = "DeleteDeployment", skip(self, user))]
async fn resolve(

View File

@@ -80,6 +80,7 @@ pub enum WriteRequest {
// ==== DEPLOYMENT ====
CreateDeployment(CreateDeployment),
CopyDeployment(CopyDeployment),
CreateDeploymentFromContainer(CreateDeploymentFromContainer),
DeleteDeployment(DeleteDeployment),
UpdateDeployment(UpdateDeployment),
RenameDeployment(RenameDeployment),

View File

@@ -23,6 +23,7 @@ use periphery_client::api::compose::{
use resolver_api::Resolve;
use crate::{
api::execute::pull_stack_inner,
config::core_config,
helpers::{
git_token, periphery_client,
@@ -32,7 +33,7 @@ use crate::{
resource,
stack::{
get_stack_and_server,
remote::{get_remote_compose_contents, RemoteComposeContents},
remote::{get_repo_compose_contents, RemoteComposeContents},
services::extract_services_into_res,
},
state::{db_client, github_client, State},
@@ -258,54 +259,56 @@ impl Resolve<RefreshStackCache, User> for State {
// =============
// FILES ON HOST
// =============
if stack.config.server_id.is_empty() {
(vec![], None, None, None, None)
let (server, state) = if stack.config.server_id.is_empty() {
(None, ServerState::Disabled)
} else {
let (server, status) =
let (server, state) =
get_server_with_state(&stack.config.server_id).await?;
if status != ServerState::Ok {
(vec![], None, None, None, None)
} else {
let GetComposeContentsOnHostResponse { contents, errors } =
match periphery_client(&server)?
.request(GetComposeContentsOnHost {
file_paths: stack.file_paths().to_vec(),
name: stack.name.clone(),
run_directory: stack.config.run_directory.clone(),
})
.await
.context(
"failed to get compose file contents from host",
) {
Ok(res) => res,
Err(e) => GetComposeContentsOnHostResponse {
contents: Default::default(),
errors: vec![FileContents {
path: stack.config.run_directory.clone(),
contents: format_serror(&e.into()),
}],
},
};
(Some(server), state)
};
if state != ServerState::Ok {
(vec![], None, None, None, None)
} else if let Some(server) = server {
let GetComposeContentsOnHostResponse { contents, errors } =
match periphery_client(&server)?
.request(GetComposeContentsOnHost {
file_paths: stack.file_paths().to_vec(),
name: stack.name.clone(),
run_directory: stack.config.run_directory.clone(),
})
.await
.context("failed to get compose file contents from host")
{
Ok(res) => res,
Err(e) => GetComposeContentsOnHostResponse {
contents: Default::default(),
errors: vec![FileContents {
path: stack.config.run_directory.clone(),
contents: format_serror(&e.into()),
}],
},
};
let project_name = stack.project_name(true);
let mut services = Vec::new();
for contents in &contents {
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
&mut services,
) {
warn!(
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
stack.name
);
}
}
(services, Some(contents), Some(errors), None, None)
} else {
(vec![], None, None, None, None)
}
} else if !repo_empty {
// ================
@@ -317,9 +320,8 @@ impl Resolve<RefreshStackCache, User> for State {
hash: latest_hash,
message: latest_message,
..
} =
get_remote_compose_contents(&stack, Some(&mut missing_files))
.await?;
} = get_repo_compose_contents(&stack, Some(&mut missing_files))
.await?;
let project_name = stack.project_name(true);
@@ -357,21 +359,21 @@ impl Resolve<RefreshStackCache, User> for State {
&mut services,
) {
warn!(
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
"Failed to extract Stack services for {}, things may not work correctly. | {e:#}",
stack.name
);
services.extend(stack.info.latest_services);
services.extend(stack.info.latest_services.clone());
};
(services, None, None, None, None)
};
let info = StackInfo {
missing_files,
deployed_services: stack.info.deployed_services,
deployed_project_name: stack.info.deployed_project_name,
deployed_contents: stack.info.deployed_contents,
deployed_hash: stack.info.deployed_hash,
deployed_message: stack.info.deployed_message,
deployed_services: stack.info.deployed_services.clone(),
deployed_project_name: stack.info.deployed_project_name.clone(),
deployed_contents: stack.info.deployed_contents.clone(),
deployed_hash: stack.info.deployed_hash.clone(),
deployed_message: stack.info.deployed_message.clone(),
latest_services,
remote_contents,
remote_errors,
@@ -391,6 +393,23 @@ impl Resolve<RefreshStackCache, User> for State {
.await
.context("failed to update stack info on db")?;
if (stack.config.poll_for_updates || stack.config.auto_update)
&& !stack.config.server_id.is_empty()
{
let (server, state) =
get_server_with_state(&stack.config.server_id).await?;
if state == ServerState::Ok {
let name = stack.name.clone();
if let Err(e) =
pull_stack_inner(stack, None, &server, None).await
{
warn!(
"Failed to pull latest images for Stack {name} | {e:#}",
);
}
}
}
Ok(NoData {})
}
}

View File

@@ -92,13 +92,19 @@ async fn login(
);
let config = core_config();
let redirect = if !config.oidc_redirect.is_empty() {
Redirect::to(
auth_url
.as_str()
.replace(&config.oidc_provider, &config.oidc_redirect)
.as_str(),
)
let redirect = if !config.oidc_redirect_host.is_empty() {
let auth_url = auth_url.as_str();
let (protocol, rest) = auth_url
.split_once("://")
.context("Invalid URL: Missing protocol (eg 'https://')")?;
let host = rest
.split_once(['/', '?'])
.map(|(host, _)| host)
.unwrap_or(rest);
Redirect::to(&auth_url.replace(
&format!("{protocol}://{host}"),
&config.oidc_redirect_host,
))
} else {
Redirect::to(auth_url.as_str())
};
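The reworked `KOMODO_OIDC_REDIRECT_HOST` handling swaps only the `scheme://host` prefix of the provider's auth URL and keeps the path and query intact. The same string logic restated as a standalone, testable sketch:

use anyhow::{Context, Result};

fn replace_auth_host(auth_url: &str, redirect_host: &str) -> Result<String> {
    let (protocol, rest) = auth_url
        .split_once("://")
        .context("Invalid URL: Missing protocol (e.g. 'https://')")?;
    // The host ends at the first '/' or '?', or at the end of the string.
    let host = rest
        .split_once(['/', '?'])
        .map(|(host, _)| host)
        .unwrap_or(rest);
    Ok(auth_url.replace(&format!("{protocol}://{host}"), redirect_host))
}

fn main() -> Result<()> {
    let url = "https://internal-sso.local/auth?client_id=abc";
    assert_eq!(
        replace_auth_host(url, "https://sso.example.com")?,
        "https://sso.example.com/auth?client_id=abc"
    );
    Ok(())
}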

View File

@@ -78,7 +78,7 @@ pub fn core_config() -> &'static CoreConfig {
},
oidc_enabled: env.komodo_oidc_enabled.unwrap_or(config.oidc_enabled),
oidc_provider: env.komodo_oidc_provider.unwrap_or(config.oidc_provider),
oidc_redirect: env.komodo_oidc_redirect.unwrap_or(config.oidc_redirect),
oidc_redirect_host: env.komodo_oidc_redirect_host.unwrap_or(config.oidc_redirect_host),
oidc_client_id: maybe_read_item_from_file(env.komodo_oidc_client_id_file,env
.komodo_oidc_client_id)
.unwrap_or(config.oidc_client_id),

View File

@@ -31,7 +31,7 @@ use crate::{
use super::periphery_client;
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
const BUILDER_POLL_MAX_TRIES: usize = 60;
#[instrument(skip_all, fields(builder_id = builder.id, update_id = update.id))]
pub async fn get_builder_periphery(
@@ -42,9 +42,34 @@ pub async fn get_builder_periphery(
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
match builder.config {
BuilderConfig::Url(config) => {
if config.address.is_empty() {
return Err(anyhow!(
"Builder has not yet configured an address"
));
}
let periphery = PeripheryClient::new(
config.address,
if config.passkey.is_empty() {
core_config().passkey.clone()
} else {
config.passkey
},
);
periphery
.health_check()
.await
.context("Url Builder failed health check")?;
Ok((
periphery,
BuildCleanupData::Server {
repo_name: resource_name,
},
))
}
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
return Err(anyhow!("builder has not configured a server"));
return Err(anyhow!("Builder has not configured a server"));
}
let server = resource::get::<Server>(&config.server_id).await?;
let periphery = periphery_client(&server)?;
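Doubling `BUILDER_POLL_MAX_TRIES` from 30 to 60 at a 2-second poll rate raises the wait for a freshly launched builder from roughly 60s to roughly 120s, the extra headroom the changelog mentions for User Data to install periphery. A minimal sketch of such a poll loop, with the health check stubbed out:

use std::time::Duration;

const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 60;

// Stand-in for PeripheryClient::health_check: ready after N attempts.
async fn health_check(ready_after: usize, attempt: usize) -> Result<(), String> {
    if attempt >= ready_after {
        Ok(())
    } else {
        Err("not ready".into())
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    for attempt in 0..BUILDER_POLL_MAX_TRIES {
        if health_check(5, attempt).await.is_ok() {
            println!("builder ready after {attempt} polls");
            return Ok(());
        }
        tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS)).await;
    }
    Err("builder did not come up within the max poll tries".into())
}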

View File

@@ -4,9 +4,14 @@ use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use komodo_client::{
api::execute::Execution,
api::execute::*,
entities::{
action::Action,
build::Build,
deployment::Deployment,
procedure::Procedure,
repo::Repo,
stack::Stack,
update::{Log, Update},
user::procedure_user,
},
@@ -17,6 +22,7 @@ use tokio::sync::Mutex;
use crate::{
api::execute::ExecuteRequest,
resource::{list_full_for_user_using_pattern, KomodoResource},
state::{db_client, State},
};
@@ -79,11 +85,94 @@ pub async fn execute_procedure(
#[allow(dependency_on_unit_never_type_fallback)]
#[instrument(skip(update))]
async fn execute_stage(
executions: Vec<Execution>,
_executions: Vec<Execution>,
parent_id: &str,
parent_name: &str,
update: &Mutex<Update>,
) -> anyhow::Result<()> {
let mut executions = Vec::with_capacity(_executions.capacity());
for execution in _executions {
match execution {
Execution::BatchRunAction(exec) => {
extend_batch_execution::<BatchRunAction>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchRunProcedure(exec) => {
extend_batch_execution::<BatchRunProcedure>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchRunBuild(exec) => {
extend_batch_execution::<BatchRunBuild>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchCloneRepo(exec) => {
extend_batch_execution::<BatchCloneRepo>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchPullRepo(exec) => {
extend_batch_execution::<BatchPullRepo>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchBuildRepo(exec) => {
extend_batch_execution::<BatchBuildRepo>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDeploy(exec) => {
extend_batch_execution::<BatchDeploy>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDestroyDeployment(exec) => {
extend_batch_execution::<BatchDestroyDeployment>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDeployStack(exec) => {
extend_batch_execution::<BatchDeployStack>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDeployStackIfChanged(exec) => {
extend_batch_execution::<BatchDeployStackIfChanged>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDestroyStack(exec) => {
extend_batch_execution::<BatchDestroyStack>(
&exec.pattern,
&mut executions,
)
.await?;
}
execution => executions.push(execution),
}
}
let futures = executions.into_iter().map(|execution| async move {
let now = Instant::now();
add_line_to_update(
@@ -146,6 +235,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchRunProcedure(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchRunProcedure not implemented correctly"
));
}
Execution::RunAction(req) => {
let req = ExecuteRequest::RunAction(req);
let update = init_execution_update(&req, &user).await?;
@@ -162,6 +257,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchRunAction(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchRunAction not implemented correctly"
));
}
Execution::RunBuild(req) => {
let req = ExecuteRequest::RunBuild(req);
let update = init_execution_update(&req, &user).await?;
@@ -178,6 +279,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchRunBuild(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchRunBuild not implemented correctly"
));
}
Execution::CancelBuild(req) => {
let req = ExecuteRequest::CancelBuild(req);
let update = init_execution_update(&req, &user).await?;
@@ -210,6 +317,28 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDeploy(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDeploy not implemented correctly"
));
}
Execution::PullDeployment(req) => {
let req = ExecuteRequest::PullDeployment(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PullDeployment(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("Failed at PullDeployment"),
&update_id,
)
.await?
}
Execution::StartDeployment(req) => {
let req = ExecuteRequest::StartDeployment(req);
let update = init_execution_update(&req, &user).await?;
@@ -306,6 +435,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDestroyDeployment(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDestroyDeployment not implemented correctly"
));
}
Execution::CloneRepo(req) => {
let req = ExecuteRequest::CloneRepo(req);
let update = init_execution_update(&req, &user).await?;
@@ -322,6 +457,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchCloneRepo(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchCloneRepo not implemented correctly"
));
}
Execution::PullRepo(req) => {
let req = ExecuteRequest::PullRepo(req);
let update = init_execution_update(&req, &user).await?;
@@ -338,6 +479,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchPullRepo(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchPullRepo not implemented correctly"
));
}
Execution::BuildRepo(req) => {
let req = ExecuteRequest::BuildRepo(req);
let update = init_execution_update(&req, &user).await?;
@@ -354,6 +501,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchBuildRepo(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchBuildRepo not implemented correctly"
));
}
Execution::CancelRepoBuild(req) => {
let req = ExecuteRequest::CancelRepoBuild(req);
let update = init_execution_update(&req, &user).await?;
@@ -743,6 +896,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDeployStack(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDeployStack not implemented correctly"
));
}
Execution::DeployStackIfChanged(req) => {
let req = ExecuteRequest::DeployStackIfChanged(req);
let update = init_execution_update(&req, &user).await?;
@@ -759,6 +918,28 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDeployStackIfChanged(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDeployStackIfChanged not implemented correctly"
));
}
Execution::PullStack(req) => {
let req = ExecuteRequest::PullStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PullStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("Failed at PullStack"),
&update_id,
)
.await?
}
Execution::StartStack(req) => {
let req = ExecuteRequest::StartStack(req);
let update = init_execution_update(&req, &user).await?;
@@ -855,6 +1036,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDestroyStack(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDestroyStack not implemented correctly"
));
}
Execution::Sleep(req) => {
let duration = Duration::from_millis(req.duration_ms as u64);
tokio::time::sleep(duration).await;
@@ -912,3 +1099,122 @@ async fn add_line_to_update(update: &Mutex<Update>, line: &str) {
error!("Failed to update an update during procedure | {e:#}");
};
}
async fn extend_batch_execution<E: ExtendBatch>(
pattern: &str,
executions: &mut Vec<Execution>,
) -> anyhow::Result<()> {
let more = list_full_for_user_using_pattern::<E::Resource>(
pattern,
Default::default(),
procedure_user(),
&[],
)
.await?
.into_iter()
.map(|resource| E::single_execution(resource.name));
executions.extend(more);
Ok(())
}
trait ExtendBatch {
type Resource: KomodoResource;
fn single_execution(name: String) -> Execution;
}
impl ExtendBatch for BatchRunProcedure {
type Resource = Procedure;
fn single_execution(procedure: String) -> Execution {
Execution::RunProcedure(RunProcedure { procedure })
}
}
impl ExtendBatch for BatchRunAction {
type Resource = Action;
fn single_execution(action: String) -> Execution {
Execution::RunAction(RunAction { action })
}
}
impl ExtendBatch for BatchRunBuild {
type Resource = Build;
fn single_execution(build: String) -> Execution {
Execution::RunBuild(RunBuild { build })
}
}
impl ExtendBatch for BatchCloneRepo {
type Resource = Repo;
fn single_execution(repo: String) -> Execution {
Execution::CloneRepo(CloneRepo { repo })
}
}
impl ExtendBatch for BatchPullRepo {
type Resource = Repo;
fn single_execution(repo: String) -> Execution {
Execution::PullRepo(PullRepo { repo })
}
}
impl ExtendBatch for BatchBuildRepo {
type Resource = Repo;
fn single_execution(repo: String) -> Execution {
Execution::BuildRepo(BuildRepo { repo })
}
}
impl ExtendBatch for BatchDeploy {
type Resource = Deployment;
fn single_execution(deployment: String) -> Execution {
Execution::Deploy(Deploy {
deployment,
stop_signal: None,
stop_time: None,
})
}
}
impl ExtendBatch for BatchDestroyDeployment {
type Resource = Deployment;
fn single_execution(deployment: String) -> Execution {
Execution::DestroyDeployment(DestroyDeployment {
deployment,
signal: None,
time: None,
})
}
}
impl ExtendBatch for BatchDeployStack {
type Resource = Stack;
fn single_execution(stack: String) -> Execution {
Execution::DeployStack(DeployStack {
stack,
service: None,
stop_time: None,
})
}
}
impl ExtendBatch for BatchDeployStackIfChanged {
type Resource = Stack;
fn single_execution(stack: String) -> Execution {
Execution::DeployStackIfChanged(DeployStackIfChanged {
stack,
stop_time: None,
})
}
}
impl ExtendBatch for BatchDestroyStack {
type Resource = Stack;
fn single_execution(stack: String) -> Execution {
Execution::DestroyStack(DestroyStack {
stack,
service: None,
remove_orphans: false,
stop_time: None,
})
}
}
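`execute_stage` now flattens every `Batch*` execution into its single-resource executions before the stage runs concurrently, and the `ExtendBatch` implementations above tie each batch variant to a resource type and a single-execution constructor; the matching `Batch*` arms in `execute_execution` are unreachable by design and return errors. A condensed standalone model of the expansion step (pattern matching reduced to a prefix check for brevity):

#[derive(Debug)]
enum Execution {
    PullRepo(String),
    BatchPullRepo(String), // holds the pattern
}

// Expand batch executions in place; pass everything else through.
fn expand(executions: Vec<Execution>, known_repos: &[&str]) -> Vec<Execution> {
    let mut out = Vec::with_capacity(executions.len());
    for execution in executions {
        match execution {
            Execution::BatchPullRepo(pattern) => {
                let prefix = pattern.trim_end_matches('*');
                out.extend(
                    known_repos
                        .iter()
                        .filter(|repo| repo.starts_with(prefix))
                        .map(|repo| Execution::PullRepo(repo.to_string())),
                );
            }
            other => out.push(other),
        }
    }
    out
}

fn main() {
    let stage = vec![
        Execution::BatchPullRepo("komodo-*".into()),
        Execution::PullRepo("unrelated".into()),
    ];
    // Two expanded PullRepo executions plus the passthrough.
    let expanded = expand(stage, &["komodo-core", "komodo-docs"]);
    assert_eq!(expanded.len(), 3);
    println!("{expanded:?}");
}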

View File

@@ -103,7 +103,7 @@ pub fn get_stack_state_from_containers(
})
.collect::<Vec<_>>();
let containers = containers.iter().filter(|container| {
services.iter().any(|StackServiceNames { service_name, container_name }| {
services.iter().any(|StackServiceNames { service_name, container_name, .. }| {
match compose_container_match_regex(container_name)
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
{
@@ -118,7 +118,7 @@ pub fn get_stack_state_from_containers(
if containers.is_empty() {
return StackState::Down;
}
if services.len() != containers.len() {
if services.len() > containers.len() {
return StackState::Unhealthy;
}
let running = containers.iter().all(|container| {
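The comparison change from `!=` to `>` narrows the Unhealthy condition: with replicated services a single service can legitimately match several containers, so only missing containers should degrade the state. A condensed model of the derivation (the full version also weighs individual container states beyond the all-running check):

#[derive(Debug, PartialEq)]
enum StackState {
    Running,
    Unhealthy,
    Down,
}

#[derive(Debug, PartialEq)]
enum ContainerState {
    Running,
    Exited,
}

fn stack_state(service_count: usize, containers: &[ContainerState]) -> StackState {
    if containers.is_empty() {
        return StackState::Down;
    }
    // Fewer matched containers than services means something is missing;
    // more is fine, since replicated services match multiple containers.
    if service_count > containers.len() {
        return StackState::Unhealthy;
    }
    if containers.iter().all(|c| *c == ContainerState::Running) {
        StackState::Running
    } else {
        StackState::Unhealthy
    }
}

fn main() {
    use ContainerState::*;
    assert_eq!(stack_state(2, &[]), StackState::Down);
    assert_eq!(stack_state(2, &[Running]), StackState::Unhealthy);
    assert_eq!(stack_state(1, &[Exited]), StackState::Unhealthy);
    // Replicas: one service, two running containers, still Running.
    assert_eq!(stack_state(1, &[Running, Running]), StackState::Running);
}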

View File

@@ -261,6 +261,15 @@ pub async fn init_execution_update(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::BatchDeploy(_data) => {
return Ok(Default::default())
}
ExecuteRequest::PullDeployment(data) => (
Operation::PullDeployment,
ResourceTarget::Deployment(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::StartDeployment(data) => (
Operation::StartDeployment,
ResourceTarget::Deployment(
@@ -297,6 +306,9 @@ pub async fn init_execution_update(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::BatchDestroyDeployment(_data) => {
return Ok(Default::default())
}
// Build
ExecuteRequest::RunBuild(data) => (
@@ -305,6 +317,9 @@ pub async fn init_execution_update(
resource::get::<Build>(&data.build).await?.id,
),
),
ExecuteRequest::BatchRunBuild(_data) => {
return Ok(Default::default())
}
ExecuteRequest::CancelBuild(data) => (
Operation::CancelBuild,
ResourceTarget::Build(
@@ -319,18 +334,27 @@ pub async fn init_execution_update(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::BatchCloneRepo(_data) => {
return Ok(Default::default())
}
ExecuteRequest::PullRepo(data) => (
Operation::PullRepo,
ResourceTarget::Repo(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::BatchPullRepo(_data) => {
return Ok(Default::default())
}
ExecuteRequest::BuildRepo(data) => (
Operation::BuildRepo,
ResourceTarget::Repo(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::BatchBuildRepo(_data) => {
return Ok(Default::default())
}
ExecuteRequest::CancelRepoBuild(data) => (
Operation::CancelRepoBuild,
ResourceTarget::Repo(
@@ -345,6 +369,9 @@ pub async fn init_execution_update(
resource::get::<Procedure>(&data.procedure).await?.id,
),
),
ExecuteRequest::BatchRunProcedure(_) => {
return Ok(Default::default())
}
// Action
ExecuteRequest::RunAction(data) => (
@@ -353,6 +380,9 @@ pub async fn init_execution_update(
resource::get::<Action>(&data.action).await?.id,
),
),
ExecuteRequest::BatchRunAction(_) => {
return Ok(Default::default())
}
// Server template
ExecuteRequest::LaunchServer(data) => (
@@ -374,17 +404,27 @@ pub async fn init_execution_update(
// Stack
ExecuteRequest::DeployStack(data) => (
Operation::DeployStack,
if data.service.is_some() {
Operation::DeployStackService
} else {
Operation::DeployStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::BatchDeployStack(_data) => {
return Ok(Default::default())
}
ExecuteRequest::DeployStackIfChanged(data) => (
Operation::DeployStack,
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::BatchDeployStackIfChanged(_data) => {
return Ok(Default::default())
}
ExecuteRequest::StartStack(data) => (
if data.service.is_some() {
Operation::StartStackService
@@ -395,6 +435,16 @@ pub async fn init_execution_update(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::PullStack(data) => (
if data.service.is_some() {
Operation::PullStackService
} else {
Operation::PullStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::RestartStack(data) => (
if data.service.is_some() {
Operation::RestartStackService
@@ -436,11 +486,18 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::DestroyStack(data) => (
Operation::DestroyStack,
if data.service.is_some() {
Operation::DestroyStackService
} else {
Operation::DestroyStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::BatchDestroyStack(_data) => {
return Ok(Default::default())
}
};
let mut update = make_update(target, operation, user);
update.in_progress();

View File

@@ -132,11 +132,6 @@ impl RepoExecution for BuildRepo {
}
}
#[derive(Deserialize)]
pub struct RepoWebhookPath {
pub option: RepoWebhookOption,
}
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RepoWebhookOption {
@@ -220,6 +215,7 @@ impl StackExecution for DeployStack {
if stack.config.webhook_force_deploy {
let req = ExecuteRequest::DeployStack(DeployStack {
stack: stack.id,
service: None,
stop_time: None,
});
let update = init_execution_update(&req, &user).await?;
@@ -244,11 +240,6 @@ impl StackExecution for DeployStack {
}
}
#[derive(Deserialize)]
pub struct StackWebhookPath {
pub option: StackWebhookOption,
}
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum StackWebhookOption {
@@ -340,11 +331,6 @@ impl SyncExecution for RunSync {
}
}
#[derive(Deserialize)]
pub struct SyncWebhookPath {
pub option: SyncWebhookOption,
}
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SyncWebhookOption {
@@ -410,7 +396,7 @@ fn procedure_locks() -> &'static ListenerLockCache {
pub async fn handle_procedure_webhook<B: super::VerifyBranch>(
procedure: Procedure,
target_branch: String,
target_branch: &str,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
@@ -425,7 +411,7 @@ pub async fn handle_procedure_webhook<B: super::VerifyBranch>(
}
if target_branch != ANY_BRANCH {
B::verify_branch(&body, &target_branch)?;
B::verify_branch(&body, target_branch)?;
}
let user = git_webhook_user().to_owned();
@@ -457,7 +443,7 @@ fn action_locks() -> &'static ListenerLockCache {
pub async fn handle_action_webhook<B: super::VerifyBranch>(
action: Action,
target_branch: String,
target_branch: &str,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
@@ -471,7 +457,7 @@ pub async fn handle_action_webhook<B: super::VerifyBranch>(
}
if target_branch != ANY_BRANCH {
B::verify_branch(&body, &target_branch)?;
B::verify_branch(&body, target_branch)?;
}
let user = git_webhook_user().to_owned();

View File

@@ -3,7 +3,9 @@ use komodo_client::entities::{
action::Action, build::Build, procedure::Procedure, repo::Repo,
resource::Resource, stack::Stack, sync::ResourceSync,
};
use reqwest::StatusCode;
use serde::Deserialize;
use serror::AddStatusCode;
use tracing::Instrument;
use crate::resource::KomodoResource;
@@ -12,8 +14,8 @@ use super::{
resources::{
handle_action_webhook, handle_build_webhook,
handle_procedure_webhook, handle_repo_webhook,
handle_stack_webhook, handle_sync_webhook, RepoWebhookPath,
StackWebhookPath, SyncWebhookPath,
handle_stack_webhook, handle_sync_webhook, RepoWebhookOption,
StackWebhookOption, SyncWebhookOption,
},
CustomSecret, VerifyBranch, VerifySecret,
};
@@ -24,7 +26,14 @@ struct Id {
}
#[derive(Deserialize)]
struct Branch {
struct IdAndOption<T> {
id: String,
option: T,
}
#[derive(Deserialize)]
struct IdAndBranch {
id: String,
#[serde(default = "default_branch")]
branch: String,
}
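The old routes stacked two `Path` extractors (`Id` plus a per-resource `*WebhookPath`) on one handler; the generic `IdAndOption<T>` pulls both the `:id` and `:option` segments in a single extraction. A minimal sketch with axum and serde (route syntax as in the source; the enum variants are assumed for illustration, since the full `RepoWebhookOption` definition is truncated above):

use axum::{extract::Path, routing::post, Router};
use serde::Deserialize;

#[derive(Deserialize)]
struct IdAndOption<T> {
    id: String,
    option: T,
}

// Variants assumed for illustration; serde lowercases them to match
// path segments like /repo/:id/pull.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "lowercase")]
enum RepoWebhookOption {
    Clone,
    Pull,
    Build,
}

async fn repo_webhook(
    Path(IdAndOption::<RepoWebhookOption> { id, option }): Path<IdAndOption<RepoWebhookOption>>,
) -> String {
    format!("repo {id}, option {option:?}")
}

fn router() -> Router {
    // Both path segments deserialize into the one struct.
    Router::new().route("/repo/:id/:option", post(repo_webhook))
}

fn main() {
    let _app = router();
}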
@@ -64,7 +73,7 @@ pub fn router<P: VerifySecret + VerifyBranch>() -> Router {
.route(
"/repo/:id/:option",
post(
|Path(Id { id }), Path(RepoWebhookPath { option }), headers: HeaderMap, body: String| async move {
|Path(IdAndOption::<RepoWebhookOption> { id, option }), headers: HeaderMap, body: String| async move {
let repo =
auth_webhook::<P, Repo>(&id, headers, &body).await?;
tokio::spawn(async move {
@@ -90,7 +99,7 @@ pub fn router<P: VerifySecret + VerifyBranch>() -> Router {
.route(
"/stack/:id/:option",
post(
|Path(Id { id }), Path(StackWebhookPath { option }), headers: HeaderMap, body: String| async move {
|Path(IdAndOption::<StackWebhookOption> { id, option }), headers: HeaderMap, body: String| async move {
let stack =
auth_webhook::<P, Stack>(&id, headers, &body).await?;
tokio::spawn(async move {
@@ -116,7 +125,7 @@ pub fn router<P: VerifySecret + VerifyBranch>() -> Router {
.route(
"/sync/:id/:option",
post(
|Path(Id { id }), Path(SyncWebhookPath { option }), headers: HeaderMap, body: String| async move {
|Path(IdAndOption::<SyncWebhookOption> { id, option }), headers: HeaderMap, body: String| async move {
let sync =
auth_webhook::<P, ResourceSync>(&id, headers, &body).await?;
tokio::spawn(async move {
@@ -142,19 +151,19 @@ pub fn router<P: VerifySecret + VerifyBranch>() -> Router {
.route(
"/procedure/:id/:branch",
post(
|Path(Id { id }), Path(Branch { branch }), headers: HeaderMap, body: String| async move {
|Path(IdAndBranch { id, branch }), headers: HeaderMap, body: String| async move {
let procedure =
auth_webhook::<P, Procedure>(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("ProcedureWebhook", id);
async {
let res = handle_procedure_webhook::<P>(
procedure, branch, body,
procedure, &branch, body,
)
.await;
if let Err(e) = res {
warn!(
"Failed at running webhook for procedure {id} | {e:#}"
"Failed at running webhook for procedure {id} | target branch: {branch} | {e:#}"
);
}
}
@@ -168,19 +177,19 @@ pub fn router<P: VerifySecret + VerifyBranch>() -> Router {
.route(
"/action/:id/:branch",
post(
|Path(Id { id }), Path(Branch { branch }), headers: HeaderMap, body: String| async move {
|Path(IdAndBranch { id, branch }), headers: HeaderMap, body: String| async move {
let action =
auth_webhook::<P, Action>(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("ActionWebhook", id);
async {
let res = handle_action_webhook::<P>(
action, branch, body,
action, &branch, body,
)
.await;
if let Err(e) = res {
warn!(
"Failed at running webhook for action {id} | {e:#}"
"Failed at running webhook for action {id} | target branch: {branch} | {e:#}"
);
}
}
@@ -202,7 +211,10 @@ where
P: VerifySecret,
R: KomodoResource + CustomSecret,
{
let resource = crate::resource::get::<R>(id).await?;
P::verify_secret(headers, body, R::custom_secret(&resource))?;
let resource = crate::resource::get::<R>(id)
.await
.status_code(StatusCode::BAD_REQUEST)?;
P::verify_secret(headers, body, R::custom_secret(&resource))
.status_code(StatusCode::UNAUTHORIZED)?;
Ok(resource)
}
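`auth_webhook` now distinguishes its failure modes for the caller: an unknown resource id maps to 400 and a failed secret check to 401, instead of both surfacing as a generic 500, which is the "better status codes" change noted in the changelog. One way such a `status_code` combinator can be shaped (a hypothetical equivalent; `serror::AddStatusCode`'s real signature may differ):

use anyhow::anyhow;

#[derive(Debug)]
struct StatusError {
    code: u16,
    error: anyhow::Error,
}

// Hypothetical extension trait attaching an HTTP status to an error.
trait AddStatusCode<T> {
    fn status_code(self, code: u16) -> Result<T, StatusError>;
}

impl<T> AddStatusCode<T> for anyhow::Result<T> {
    fn status_code(self, code: u16) -> Result<T, StatusError> {
        self.map_err(|error| StatusError { code, error })
    }
}

fn main() {
    let res: anyhow::Result<()> = Err(anyhow!("no resource with that id"));
    let err = res.status_code(400).unwrap_err();
    assert_eq!(err.code, 400);
    println!("{}: {:#}", err.code, err.error);
}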

View File

@@ -41,6 +41,7 @@ pub async fn insert_deployments_status_unknown(
id: deployment.id,
state: DeploymentState::Unknown,
container: None,
update_available: false,
},
prev,
}

View File

@@ -62,6 +62,7 @@ pub struct CachedDeploymentStatus {
pub id: String,
pub state: DeploymentState,
pub container: Option<ContainerListItem>,
pub update_available: bool,
}
#[derive(Default, Clone, Debug)]
@@ -117,12 +118,13 @@ async fn refresh_server_cache(ts: i64) {
#[instrument(level = "debug")]
pub async fn update_cache_for_server(server: &Server) {
let (deployments, repos, stacks) = tokio::join!(
let (deployments, builds, repos, stacks) = tokio::join!(
find_collect(
&db_client().deployments,
doc! { "config.server_id": &server.id },
None,
),
find_collect(&db_client().builds, doc! {}, None,),
find_collect(
&db_client().repos,
doc! { "config.server_id": &server.id },
@@ -136,6 +138,7 @@ pub async fn update_cache_for_server(server: &Server) {
);
let deployments = deployments.inspect_err(|e| error!("failed to get deployments list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
let builds = builds.inspect_err(|e| error!("failed to get builds list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
let repos = repos.inspect_err(|e| error!("failed to get repos list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
let stacks = stacks.inspect_err(|e| error!("failed to get stacks list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
@@ -211,8 +214,19 @@ pub async fn update_cache_for_server(server: &Server) {
container.server_id = Some(server.id.clone())
});
tokio::join!(
resources::update_deployment_cache(deployments, &containers),
resources::update_stack_cache(stacks, &containers),
resources::update_deployment_cache(
server.name.clone(),
deployments,
&containers,
&images,
&builds,
),
resources::update_stack_cache(
server.name.clone(),
stacks,
&containers,
&images
),
);
insert_server_status(
server,

View File

@@ -1,24 +1,53 @@
use std::{
collections::HashSet,
sync::{Mutex, OnceLock},
};
use anyhow::Context;
use komodo_client::entities::{
deployment::{Deployment, DeploymentState},
docker::container::ContainerListItem,
stack::{Stack, StackService, StackServiceNames},
use komodo_client::{
api::execute::{Deploy, DeployStack},
entities::{
alert::{Alert, AlertData, SeverityLevel},
build::Build,
deployment::{Deployment, DeploymentImage, DeploymentState},
docker::{
container::{ContainerListItem, ContainerStateStatusEnum},
image::ImageListItem,
},
komodo_timestamp,
stack::{Stack, StackService, StackServiceNames, StackState},
user::auto_redeploy_user,
ResourceTarget,
},
};
use crate::{
alert::send_alerts,
api::execute::{self, ExecuteRequest},
helpers::query::get_stack_state_from_containers,
stack::{
compose_container_match_regex,
services::extract_services_from_stack,
},
state::{deployment_status_cache, stack_status_cache},
state::{
action_states, db_client, deployment_status_cache,
stack_status_cache,
},
};
use super::{CachedDeploymentStatus, CachedStackStatus, History};
fn deployment_alert_sent_cache() -> &'static Mutex<HashSet<String>> {
static CACHE: OnceLock<Mutex<HashSet<String>>> = OnceLock::new();
CACHE.get_or_init(Default::default)
}
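The `OnceLock<Mutex<HashSet<_>>>` above is a process-global send-once set: an id is recorded before the alert goes out and removed once the update-available condition clears, so each state transition produces exactly one alert. The pattern in isolation:

use std::collections::HashSet;
use std::sync::{Mutex, OnceLock};

fn sent_cache() -> &'static Mutex<HashSet<String>> {
    static CACHE: OnceLock<Mutex<HashSet<String>>> = OnceLock::new();
    CACHE.get_or_init(Default::default)
}

// True the first time an id is seen; false until it is cleared again.
fn should_send(id: &str) -> bool {
    sent_cache().lock().unwrap().insert(id.to_string())
}

fn clear(id: &str) {
    sent_cache().lock().unwrap().remove(id);
}

fn main() {
    assert!(should_send("deployment-1")); // first alert fires
    assert!(!should_send("deployment-1")); // suppressed while it persists
    clear("deployment-1"); // condition resolved
    assert!(should_send("deployment-1")); // next occurrence alerts again
}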
pub async fn update_deployment_cache(
server_name: String,
deployments: Vec<Deployment>,
containers: &[ContainerListItem],
images: &[ImageListItem],
builds: &[Build],
) {
let deployment_status_cache = deployment_status_cache();
for deployment in deployments {
@@ -34,6 +63,109 @@ pub async fn update_deployment_cache(
.as_ref()
.map(|c| c.state.into())
.unwrap_or(DeploymentState::NotDeployed);
let image = match deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let (build_name, build_version) = builds
.iter()
.find(|build| build.id == build_id)
.map(|b| (b.name.as_ref(), b.config.version))
.unwrap_or(("Unknown", Default::default()));
let version = if version.is_none() {
build_version.to_string()
} else {
version.to_string()
};
format!("{build_name}:{version}")
}
DeploymentImage::Image { image } => image,
};
let update_available = if let Some(ContainerListItem {
image_id: Some(curr_image_id),
..
}) = &container
{
images
.iter()
.find(|i| i.name == image)
.map(|i| &i.id != curr_image_id)
.unwrap_or_default()
} else {
false
};
if update_available {
if deployment.config.auto_update {
if state == DeploymentState::Running
&& !action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await
.busy()
.unwrap_or(true)
{
let deployment = deployment.name.clone();
tokio::spawn(async move {
if let Err(e) = execute::inner_handler(
ExecuteRequest::Deploy(Deploy {
deployment: deployment.clone(),
stop_time: None,
stop_signal: None,
}),
auto_redeploy_user().to_owned(),
)
.await
{
warn!(
"Failed to auto update Deployment {deployment} | {e:#}"
)
}
});
}
} else if state == DeploymentState::Running
&& deployment.config.send_alerts
&& !deployment_alert_sent_cache()
.lock()
.unwrap()
.contains(&deployment.id)
{
// Mark the alert as sent in the cache, so a duplicate won't be sent.
deployment_alert_sent_cache()
.lock()
.unwrap()
.insert(deployment.id.clone());
let ts = komodo_timestamp();
let alert = Alert {
id: Default::default(),
ts,
resolved: true,
resolved_ts: ts.into(),
level: SeverityLevel::Ok,
target: ResourceTarget::Deployment(deployment.id.clone()),
data: AlertData::DeploymentImageUpdateAvailable {
id: deployment.id.clone(),
name: deployment.name,
server_name: server_name.clone(),
server_id: deployment.config.server_id,
image,
},
};
let res = db_client().alerts.insert_one(&alert).await;
if let Err(e) = res {
error!(
"Failed to record Deployment update avaialable to db | {e:#}"
);
}
send_alerts(&[alert]).await;
}
} else {
// If there is no longer an update available, remove the entry
// from the sent cache, so that on the next `update_available = true`
// a fresh alert will be sent.
deployment_alert_sent_cache()
.lock()
.unwrap()
.remove(&deployment.id);
}
deployment_status_cache
.insert(
deployment.id.clone(),
@@ -42,6 +174,7 @@ pub async fn update_deployment_cache(
id: deployment.id,
state,
container,
update_available,
},
prev,
}
@@ -51,38 +184,139 @@ pub async fn update_deployment_cache(
}
}
/// (StackId, Service)
fn stack_alert_sent_cache(
) -> &'static Mutex<HashSet<(String, String)>> {
static CACHE: OnceLock<Mutex<HashSet<(String, String)>>> =
OnceLock::new();
CACHE.get_or_init(Default::default)
}
pub async fn update_stack_cache(
server_name: String,
stacks: Vec<Stack>,
containers: &[ContainerListItem],
images: &[ImageListItem],
) {
let stack_status_cache = stack_status_cache();
for stack in stacks {
let services = match extract_services_from_stack(&stack, false)
.await
{
Ok(services) => services,
Err(e) => {
warn!("failed to extract services for stack {}. cannot match services to containers. (update status cache) | {e:?}", stack.name);
continue;
let services = extract_services_from_stack(&stack);
let mut services_with_containers = services.iter().map(|StackServiceNames { service_name, container_name, image }| {
let container = containers.iter().find(|container| {
match compose_container_match_regex(container_name)
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
{
Ok(regex) => regex,
Err(e) => {
warn!("{e:#}");
return false
}
}.is_match(&container.name)
}).cloned();
let update_available = if let Some(ContainerListItem { image_id: Some(curr_image_id), .. }) = &container {
images
.iter()
.find(|i| &i.name == image)
.map(|i| &i.id != curr_image_id)
.unwrap_or_default()
} else {
false
};
if update_available {
if !stack.config.auto_update
&& stack.config.send_alerts
&& container.is_some()
&& container.as_ref().unwrap().state == ContainerStateStatusEnum::Running
&& !stack_alert_sent_cache()
.lock()
.unwrap()
.contains(&(stack.id.clone(), service_name.clone()))
{
stack_alert_sent_cache()
.lock()
.unwrap()
.insert((stack.id.clone(), service_name.clone()));
let ts = komodo_timestamp();
let alert = Alert {
id: Default::default(),
ts,
resolved: true,
resolved_ts: ts.into(),
level: SeverityLevel::Ok,
target: ResourceTarget::Stack(stack.id.clone()),
data: AlertData::StackImageUpdateAvailable {
id: stack.id.clone(),
name: stack.name.clone(),
server_name: server_name.clone(),
server_id: stack.config.server_id.clone(),
service: service_name.clone(),
image: image.clone(),
},
};
tokio::spawn(async move {
let res = db_client().alerts.insert_one(&alert).await;
if let Err(e) = res {
error!(
"Failed to record Stack update avaialable to db | {e:#}"
);
}
send_alerts(&[alert]).await;
});
}
} else {
stack_alert_sent_cache()
.lock()
.unwrap()
.remove(&(stack.id.clone(), service_name.clone()));
}
};
let mut services_with_containers = services.iter().map(|StackServiceNames { service_name, container_name }| {
let container = containers.iter().find(|container| {
match compose_container_match_regex(container_name)
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
{
Ok(regex) => regex,
Err(e) => {
warn!("{e:#}");
return false
}
}.is_match(&container.name)
}).cloned();
StackService {
service: service_name.clone(),
container,
}
}).collect::<Vec<_>>();
StackService {
service: service_name.clone(),
image: image.to_string(),
container,
update_available,
}
}).collect::<Vec<_>>();
let update_available =
services_with_containers.iter().any(|service| {
service.update_available
// Only consider running services with available updates
&& service
.container
.as_ref()
.map(|c| c.state == ContainerStateStatusEnum::Running)
.unwrap_or_default()
});
let state = get_stack_state_from_containers(
&stack.config.ignore_services,
&services,
containers,
);
if update_available
&& stack.config.auto_update
&& state == StackState::Running
&& !action_states()
.stack
.get_or_insert_default(&stack.id)
.await
.busy()
.unwrap_or(true)
{
let stack = stack.name.clone();
tokio::spawn(async move {
if let Err(e) = execute::inner_handler(
ExecuteRequest::DeployStack(DeployStack {
stack: stack.clone(),
service: None,
stop_time: None,
}),
auto_redeploy_user().to_owned(),
)
.await
{
warn!("Failed auto update Stack {stack} | {e:#}")
}
});
}
services_with_containers
.sort_by(|a, b| a.service.cmp(&b.service));
let prev = stack_status_cache
@@ -91,11 +325,7 @@ pub async fn update_stack_cache(
.map(|s| s.curr.state);
let status = CachedStackStatus {
id: stack.id.clone(),
state: get_stack_state_from_containers(
&stack.config.ignore_services,
&services,
containers,
),
state,
services: services_with_containers,
};
stack_status_cache

View File

@@ -40,6 +40,9 @@ impl super::KomodoResource for Builder {
builder: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let (builder_type, instance_type) = match builder.config {
BuilderConfig::Url(_) => {
(BuilderConfigVariant::Url.to_string(), None)
}
BuilderConfig::Server(config) => (
BuilderConfigVariant::Server.to_string(),
Some(config.server_id),

View File

@@ -72,6 +72,19 @@ impl super::KomodoResource for Deployment {
}
DeploymentImage::Image { image } => (image, None),
};
let (image, update_available) = status
.as_ref()
.and_then(|s| {
s.curr.container.as_ref().map(|c| {
(
c.image
.clone()
.unwrap_or_else(|| String::from("Unknown")),
s.curr.update_available,
)
})
})
.unwrap_or((build_image, false));
DeploymentListItem {
name: deployment.name,
id: deployment.id,
@@ -85,16 +98,8 @@ impl super::KomodoResource for Deployment {
status: status.as_ref().and_then(|s| {
s.curr.container.as_ref().and_then(|c| c.status.to_owned())
}),
image: status
.as_ref()
.and_then(|s| {
s.curr.container.as_ref().map(|c| {
c.image
.clone()
.unwrap_or_else(|| String::from("Unknown"))
})
})
.unwrap_or(build_image),
image,
update_available,
server_id: deployment.config.server_id,
build_id,
},

View File

@@ -7,7 +7,7 @@ use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::{future::join_all, FutureExt};
use komodo_client::{
api::write::CreateTag,
api::{read::ExportResourcesToToml, write::CreateTag},
entities::{
komodo_timestamp,
permission::PermissionLevel,
@@ -18,6 +18,7 @@ use komodo_client::{
user::{system_user, User},
Operation, ResourceTarget, ResourceTargetVariant,
},
parsers::parse_string_list,
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
@@ -242,6 +243,79 @@ pub async fn get_check_permissions<T: KomodoResource>(
}
}
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource<T: KomodoResource>(
user: &User,
resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
if user.admin {
return Ok(PermissionLevel::Write);
}
let resource_type = T::resource_type();
// Start with base of Read or None
let mut base = if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
};
// Add in the resource level global base permission
let resource_base = get::<T>(resource_id).await?.base_permission;
if resource_base > base {
base = resource_base;
}
// Overlay users base on resource variant
if let Some(level) = user.all.get(&resource_type).cloned() {
if level > base {
base = level;
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any user groups base on resource variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(level) = group.all.get(&resource_type).cloned() {
if level > base {
base = level;
}
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any specific permissions
let permission = find_collect(
&db_client().permissions,
doc! {
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"resource_target.id": resource_id
},
None,
)
.await
.context("failed to query db for permissions")?
.into_iter()
// get the max permission user has between personal / any user groups
.fold(base, |level, permission| {
if permission.level > level {
permission.level
} else {
level
}
});
Ok(permission)
}
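The moved `get_user_permission_on_resource` resolves a permission by overlaying sources in increasing specificity (transparent-mode base, resource base permission, user and user-group per-variant levels, then explicit permission documents), always keeping the maximum and short-circuiting once Write is reached. Because `PermissionLevel` is ordered, the whole overlay is a max-fold; a condensed sketch:

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum PermissionLevel {
    None,
    Read,
    Execute,
    Write,
}

fn resolve_permission(
    transparent_mode: bool,
    resource_base: PermissionLevel,
    overlays: &[PermissionLevel],
) -> PermissionLevel {
    let base = if transparent_mode {
        PermissionLevel::Read
    } else {
        PermissionLevel::None
    };
    // Keep the maximum across the base and every overlay; Write is the ceiling.
    overlays
        .iter()
        .copied()
        .chain([resource_base])
        .fold(base, |acc, level| acc.max(level))
}

fn main() {
    assert_eq!(
        resolve_permission(false, PermissionLevel::Read, &[PermissionLevel::Execute]),
        PermissionLevel::Execute
    );
    // Transparent mode grants at least Read.
    assert_eq!(
        resolve_permission(true, PermissionLevel::None, &[]),
        PermissionLevel::Read
    );
}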
// ======
// LIST
// ======
@@ -325,79 +399,6 @@ pub async fn get_resource_ids_for_user<T: KomodoResource>(
Ok(Some(ids.into_iter().collect()))
}
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource<T: KomodoResource>(
user: &User,
resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
if user.admin {
return Ok(PermissionLevel::Write);
}
let resource_type = T::resource_type();
// Start with base of Read or None
let mut base = if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
};
// Add in the resource level global base permission
let resource_base = get::<T>(resource_id).await?.base_permission;
if resource_base > base {
base = resource_base;
}
// Overlay users base on resource variant
if let Some(level) = user.all.get(&resource_type).cloned() {
if level > base {
base = level;
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any user groups base on resource variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(level) = group.all.get(&resource_type).cloned() {
if level > base {
base = level;
}
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any specific permissions
let permission = find_collect(
&db_client().permissions,
doc! {
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"resource_target.id": resource_id
},
None,
)
.await
.context("failed to query db for permissions")?
.into_iter()
// get the max permission user has between personal / any user groups
.fold(base, |level, permission| {
if permission.level > level {
permission.level
} else {
level
}
});
Ok(permission)
}
#[instrument(level = "debug")]
pub async fn list_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
@@ -410,6 +411,23 @@ pub async fn list_for_user<T: KomodoResource>(
list_for_user_using_document::<T>(filters, user).await
}
#[instrument(level = "debug")]
pub async fn list_for_user_using_pattern<T: KomodoResource>(
pattern: &str,
query: ResourceQuery<T::QuerySpecifics>,
user: &User,
all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
let list = list_full_for_user_using_pattern::<T>(
pattern, query, user, all_tags,
)
.await?
.into_iter()
.map(|resource| T::to_list_item(resource));
Ok(join_all(list).await)
}
#[instrument(level = "debug")]
pub async fn list_for_user_using_document<T: KomodoResource>(
filters: Document,
user: &User,
@@ -421,6 +439,55 @@ pub async fn list_for_user_using_document<T: KomodoResource>(
Ok(join_all(list).await)
}
/// Lists full resources matching wildcard syntax,
/// or regex if wrapped with "\\"
///
/// ## Example
/// ```
/// let items = list_full_for_user_using_pattern::<Build>("foo-*", Default::default(), user, all_tags).await?;
/// let items = list_full_for_user_using_pattern::<Build>("\\^foo-.*$\\", Default::default(), user, all_tags).await?;
/// ```
#[instrument(level = "debug")]
pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
pattern: &str,
query: ResourceQuery<T::QuerySpecifics>,
user: &User,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
let resources =
list_full_for_user::<T>(query, user, all_tags).await?;
let patterns = parse_string_list(pattern);
let mut names = HashSet::<String>::new();
for pattern in patterns {
if pattern.starts_with('\\') && pattern.ends_with('\\') {
let regex = regex::Regex::new(&pattern[1..(pattern.len() - 1)])
.context("Regex matching string invalid")?;
for resource in &resources {
if regex.is_match(&resource.name) {
names.insert(resource.name.clone());
}
}
} else {
let wildcard = wildcard::Wildcard::new(pattern.as_bytes())
.context("Wildcard matching string invalid")?;
for resource in &resources {
if wildcard.is_match(resource.name.as_bytes()) {
names.insert(resource.name.clone());
}
}
};
}
Ok(
resources
.into_iter()
.filter(|resource| names.contains(resource.name.as_str()))
.collect(),
)
}
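As the doc comment above describes, each comma-separated pattern is treated as wildcard syntax unless wrapped in backslashes, in which case the inner text compiles as a regex; matched names accumulate in a `HashSet` so overlapping patterns don't duplicate results. The per-pattern dispatch isolated into one function (assuming the `regex` and `wildcard` crates the source uses):

use std::collections::HashSet;

use anyhow::{Context, Result};

// Match `name` against one pattern: regex when wrapped in backslashes
// ("\\^foo-.*$\\"), wildcard syntax otherwise ("foo-*").
fn pattern_matches(pattern: &str, name: &str) -> Result<bool> {
    if pattern.starts_with('\\') && pattern.ends_with('\\') {
        let regex = regex::Regex::new(&pattern[1..(pattern.len() - 1)])
            .context("Regex matching string invalid")?;
        Ok(regex.is_match(name))
    } else {
        let wildcard = wildcard::Wildcard::new(pattern.as_bytes())
            .context("Wildcard matching string invalid")?;
        Ok(wildcard.is_match(name.as_bytes()))
    }
}

fn main() -> Result<()> {
    let names = ["foo-core", "foo-docs", "bar"];
    let mut matched = HashSet::new();
    for pattern in ["foo-*", "\\^bar$\\"] {
        for name in names {
            if pattern_matches(pattern, name)? {
                matched.insert(name);
            }
        }
    }
    assert_eq!(matched.len(), 3);
    Ok(())
}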
#[instrument(level = "debug")]
pub async fn list_full_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
@@ -831,6 +898,16 @@ pub async fn delete<T: KomodoResource>(
}
let target = resource_target::<T>(resource.id.clone());
let toml = State
.resolve(
ExportResourcesToToml {
targets: vec![target.clone()],
..Default::default()
},
user.clone(),
)
.await?
.toml;
let mut update =
make_update(target.clone(), T::delete_operation(), user);
@@ -843,13 +920,14 @@ pub async fn delete<T: KomodoResource>(
delete_one_by_id(T::coll(), &resource.id, None)
.await
.with_context(|| {
format!("failed to delete {} from database", T::resource_type())
format!("Failed to delete {} from database", T::resource_type())
})?;
update.push_simple_log(
&format!("delete {}", T::resource_type()),
format!("deleted {} {}", T::resource_type(), resource.name),
&format!("Delete {}", T::resource_type()),
format!("Deleted {} {}", T::resource_type(), resource.name),
);
update.push_simple_log("Deleted Toml", toml);
if let Err(e) = T::post_delete(&resource, &mut update).await {
update.push_error_log("post delete", format_serror(&e.into()));

View File

@@ -178,6 +178,13 @@ async fn validate_config(
}
params.procedure = procedure.id;
}
Execution::BatchRunProcedure(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::RunAction(params) => {
let action = super::get_check_permissions::<Action>(
&params.action,
@@ -187,6 +194,13 @@ async fn validate_config(
.await?;
params.action = action.id;
}
Execution::BatchRunAction(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::RunBuild(params) => {
let build = super::get_check_permissions::<Build>(
&params.build,
@@ -196,6 +210,13 @@ async fn validate_config(
.await?;
params.build = build.id;
}
Execution::BatchRunBuild(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::CancelBuild(params) => {
let build = super::get_check_permissions::<Build>(
&params.build,
@@ -215,6 +236,23 @@ async fn validate_config(
.await?;
params.deployment = deployment.id;
}
Execution::BatchDeploy(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::PullDeployment(params) => {
let deployment =
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
}
Execution::StartDeployment(params) => {
let deployment =
super::get_check_permissions::<Deployment>(
@@ -275,6 +313,13 @@ async fn validate_config(
.await?;
params.deployment = deployment.id;
}
Execution::BatchDestroyDeployment(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::CloneRepo(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -284,6 +329,13 @@ async fn validate_config(
.await?;
params.repo = repo.id;
}
Execution::BatchCloneRepo(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::PullRepo(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -293,6 +345,13 @@ async fn validate_config(
.await?;
params.repo = repo.id;
}
Execution::BatchPullRepo(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::BuildRepo(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -302,6 +361,13 @@ async fn validate_config(
.await?;
params.repo = repo.id;
}
Execution::BatchBuildRepo(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::CancelRepoBuild(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -528,6 +594,13 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::BatchDeployStack(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::DeployStackIfChanged(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
@@ -537,6 +610,22 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::BatchDeployStackIfChanged(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::PullStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
}
Execution::StartStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
@@ -591,6 +680,13 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::BatchDestroyStack(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::Sleep(_) => {}
}
}
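Every `Batch*` arm above repeats the same admin gate. A hypothetical helper that would express the check once, assuming the file's existing imports (the diff deliberately inlines it per variant):

```rust
// Hypothetical — not in the diff. Batch executions expand to many targets,
// so only admins may configure them.
fn require_admin_for_batch(user: &User) -> anyhow::Result<()> {
  if !user.admin {
    return Err(anyhow!(
      "Non admin user cannot configure Batch executions"
    ));
  }
  Ok(())
}
```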

View File

@@ -1,4 +1,6 @@
use async_timing_util::{wait_until_timelength, Timelength};
use std::time::Duration;
use async_timing_util::{get_timelength_in_ms, Timelength};
use komodo_client::{
api::write::{
RefreshBuildCache, RefreshRepoCache, RefreshResourceSyncPending,
@@ -10,6 +12,7 @@ use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
api::execute::pull_deployment_inner,
config::core_config,
state::{db_client, State},
};
@@ -20,9 +23,11 @@ pub fn spawn_resource_refresh_loop() {
.try_into()
.expect("Invalid resource poll interval");
tokio::spawn(async move {
refresh_all().await;
let mut interval = tokio::time::interval(Duration::from_millis(
get_timelength_in_ms(interval) as u64,
));
loop {
wait_until_timelength(interval, 3000).await;
interval.tick().await;
refresh_all().await;
}
});
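The refresh loop now drives itself with a plain `tokio::time::interval` instead of `wait_until_timelength`, so refreshes are spaced by the configured `Timelength` from startup rather than aligned to wall-clock boundaries. A minimal sketch of the pattern, assuming a `refresh_all` like the one below:

```rust
use std::time::Duration;

// Note: tokio's interval completes its first tick immediately,
// then waits `every` between subsequent ticks.
async fn poll_loop(every: Duration) {
  let mut interval = tokio::time::interval(every);
  loop {
    interval.tick().await;
    refresh_all().await;
  }
}
```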
@@ -30,6 +35,7 @@ pub fn spawn_resource_refresh_loop() {
async fn refresh_all() {
refresh_stacks().await;
refresh_deployments().await;
refresh_builds().await;
refresh_repos().await;
refresh_syncs().await;
@@ -60,6 +66,43 @@ async fn refresh_stacks() {
}
}
async fn refresh_deployments() {
let servers = find_collect(&db_client().servers, None, None)
.await
.inspect_err(|e| {
warn!(
"Failed to get Servers from database in refresh task | {e:#}"
)
})
.unwrap_or_default();
let Ok(deployments) = find_collect(&db_client().deployments, None, None)
.await
.inspect_err(|e| {
warn!(
"Failed to get Deployments from database in refresh task | {e:#}"
)
})
else {
return;
};
for deployment in deployments {
if deployment.config.poll_for_updates
|| deployment.config.auto_update
{
if let Some(server) =
servers.iter().find(|s| s.id == deployment.config.server_id)
{
let name = deployment.name.clone();
if let Err(e) =
pull_deployment_inner(deployment, server).await
{
warn!("Failed to pull latest image for Deployment {name} | {e:#}");
}
}
}
}
}
async fn refresh_builds() {
let Ok(builds) = find_collect(&db_client().builds, None, None)
.await

View File

@@ -47,6 +47,7 @@ impl super::KomodoResource for Server {
info: ServerListItemInfo {
state: status.map(|s| s.state).unwrap_or_default(),
region: server.config.region,
address: server.config.address,
send_unreachable_alerts: server
.config
.send_unreachable_alerts,

View File

@@ -9,7 +9,7 @@ use komodo_client::{
stack::{
PartialStackConfig, Stack, StackConfig, StackConfigDiff,
StackInfo, StackListItem, StackListItemInfo,
StackQuerySpecifics, StackState,
StackQuerySpecifics, StackServiceWithUpdate, StackState,
},
update::Update,
user::{stack_user, User},
@@ -56,21 +56,21 @@ impl super::KomodoResource for Stack {
let state =
status.as_ref().map(|s| s.curr.state).unwrap_or_default();
let project_name = stack.project_name(false);
let services = match (
state,
stack.info.deployed_services,
stack.info.latest_services,
) {
// Always use latest if its down.
(StackState::Down, _, latest_services) => latest_services,
// Also use latest if deployed services is empty.
(_, Some(deployed_services), _) => deployed_services,
// Otherwise use deployed services
(_, _, latest_services) => latest_services,
}
.into_iter()
.map(|service| service.service_name)
.collect();
let services = status
.as_ref()
.map(|s| {
s.curr
.services
.iter()
.map(|service| StackServiceWithUpdate {
service: service.service.clone(),
image: service.image.clone(),
update_available: service.update_available,
})
.collect::<Vec<_>>()
})
.unwrap_or_default();
// This is only true if it is KNOWN to be true, so other cases are false.
let (project_missing, status) =
if stack.config.server_id.is_empty()
@@ -98,6 +98,7 @@ impl super::KomodoResource for Stack {
} else {
(false, None)
};
StackListItem {
id: stack.id,
name: stack.name,

View File

@@ -56,7 +56,7 @@ pub async fn execute_compose<T: ExecuteCompose>(
if let Some(service) = &service {
update.logs.push(Log::simple(
&format!("Service: {service}"),
format!("Execution requested for service stack {service}"),
format!("Execution requested for Stack service {service}"),
))
}

View File

@@ -17,7 +17,7 @@ pub struct RemoteComposeContents {
}
/// Returns Result<(read paths, error paths, logs, short hash, commit message)>
pub async fn get_remote_compose_contents(
pub async fn get_repo_compose_contents(
stack: &Stack,
// Collect any files which are missing in the repo.
mut missing_files: Option<&mut Vec<String>>,

View File

@@ -1,64 +1,30 @@
use anyhow::Context;
use komodo_client::entities::{
stack::{ComposeFile, ComposeService, Stack, StackServiceNames},
FileContents,
use komodo_client::entities::stack::{
ComposeFile, ComposeService, ComposeServiceDeploy, Stack,
StackServiceNames,
};
use super::remote::{
get_remote_compose_contents, RemoteComposeContents,
};
/// Passing fresh will re-extract services from compose file, whether local or remote (repo)
pub async fn extract_services_from_stack(
pub fn extract_services_from_stack(
stack: &Stack,
fresh: bool,
) -> anyhow::Result<Vec<StackServiceNames>> {
if !fresh {
if let Some(services) = &stack.info.deployed_services {
return Ok(services.clone());
} else {
return Ok(stack.info.latest_services.clone());
}
}
let compose_contents = if stack.config.file_contents.is_empty() {
let RemoteComposeContents {
successful,
errored,
..
} = get_remote_compose_contents(stack, None).await.context(
"failed to get remote compose files to extract services",
)?;
if !errored.is_empty() {
let mut e = anyhow::Error::msg("Trace root");
for err in errored {
e = e.context(format!("{}: {}", err.path, err.contents));
) -> Vec<StackServiceNames> {
if let Some(mut services) = stack.info.deployed_services.clone() {
if services.iter().any(|service| service.image.is_empty()) {
for service in
services.iter_mut().filter(|s| s.image.is_empty())
{
service.image = stack
.info
.latest_services
.iter()
.find(|s| s.service_name == service.service_name)
.map(|s| s.image.clone())
.unwrap_or_default();
}
return Err(
e.context("Failed to read one or more remote compose files"),
);
}
successful
services
} else {
vec![FileContents {
path: String::from("compose.yaml"),
contents: stack.config.file_contents.clone(),
}]
};
let mut res = Vec::new();
for FileContents { path, contents } in &compose_contents {
extract_services_into_res(
&stack.project_name(true),
contents,
&mut res,
)
.with_context(|| {
format!("failed to extract services from file at path: {path}")
})?;
stack.info.latest_services.clone()
}
Ok(res)
}
pub fn extract_services_into_res(
@@ -69,16 +35,43 @@ pub fn extract_services_into_res(
let compose = serde_yaml::from_str::<ComposeFile>(compose_contents)
.context("failed to parse service names from compose contents")?;
let services = compose.services.into_iter().map(
|(service_name, ComposeService { container_name, .. })| {
StackServiceNames {
container_name: container_name.unwrap_or_else(|| {
format!("{project_name}-{service_name}")
}),
service_name,
}
let mut services = Vec::with_capacity(compose.services.capacity());
for (
service_name,
ComposeService {
container_name,
deploy,
image,
},
);
) in compose.services
{
let image = image.unwrap_or_default();
match deploy {
Some(ComposeServiceDeploy {
replicas: Some(replicas),
}) if replicas > 1 => {
for i in 1..1 + replicas {
services.push(StackServiceNames {
container_name: format!(
"{project_name}-{service_name}-{i}"
),
service_name: format!("{service_name}-{i}"),
image: image.clone(),
});
}
}
_ => {
services.push(StackServiceNames {
container_name: container_name.unwrap_or_else(|| {
format!("{project_name}-{service_name}")
}),
service_name,
image,
});
}
}
}
res.extend(services);
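To make the replicated-service expansion concrete: a compose service `web` with `deploy.replicas: 3` in project `proj` expands to services `web-1`, `web-2`, `web-3` with container names `proj-web-1` through `proj-web-3`. A self-contained sketch of just the naming rule:

```rust
// Returns (service_name, container_name) pairs for replicas > 1,
// matching the loop above.
fn replica_names(
  project_name: &str,
  service_name: &str,
  replicas: u64,
) -> Vec<(String, String)> {
  (1..=replicas)
    .map(|i| {
      (
        format!("{service_name}-{i}"),
        format!("{project_name}-{service_name}-{i}"),
      )
    })
    .collect()
}
```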

View File

@@ -97,6 +97,7 @@ pub async fn deploy_from_cache(
ResourceTarget::Stack(name) => {
let req = ExecuteRequest::DeployStack(DeployStack {
stack: name.to_string(),
service: None,
stop_time: None,
});

View File

@@ -360,6 +360,7 @@ impl ResourceSyncTrait for Procedure {
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::BatchRunProcedure(_config) => {}
Execution::RunAction(config) => {
config.action = resources
.actions
@@ -367,6 +368,7 @@ impl ResourceSyncTrait for Procedure {
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::BatchRunAction(_config) => {}
Execution::RunBuild(config) => {
config.build = resources
.builds
@@ -374,6 +376,7 @@ impl ResourceSyncTrait for Procedure {
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::BatchRunBuild(_config) => {}
Execution::CancelBuild(config) => {
config.build = resources
.builds
@@ -388,6 +391,14 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchDeploy(_config) => {}
Execution::PullDeployment(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartDeployment(config) => {
config.deployment = resources
.deployments
@@ -430,6 +441,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchDestroyDeployment(_config) => {}
Execution::CloneRepo(config) => {
config.repo = resources
.repos
@@ -437,6 +449,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchCloneRepo(_config) => {}
Execution::PullRepo(config) => {
config.repo = resources
.repos
@@ -444,6 +457,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchPullRepo(_config) => {}
Execution::BuildRepo(config) => {
config.repo = resources
.repos
@@ -451,6 +465,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchBuildRepo(_config) => {}
Execution::CancelRepoBuild(config) => {
config.repo = resources
.repos
@@ -626,6 +641,7 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::BatchDeployStack(_config) => {}
Execution::DeployStackIfChanged(config) => {
config.stack = resources
.stacks
@@ -633,6 +649,14 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::BatchDeployStackIfChanged(_config) => {}
Execution::PullStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::StartStack(config) => {
config.stack = resources
.stacks
@@ -675,6 +699,7 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::BatchDestroyStack(_config) => {}
Execution::Sleep(_) => {}
}
}

View File

@@ -390,6 +390,7 @@ impl ToToml for Builder {
let empty_params = match resource.config {
PartialBuilderConfig::Aws(config) => config.is_none(),
PartialBuilderConfig::Server(config) => config.is_none(),
PartialBuilderConfig::Url(config) => config.is_none(),
};
if empty_params {
// toml_pretty will remove empty map
@@ -414,6 +415,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchRunProcedure(_exec) => {}
Execution::RunAction(exec) => exec.action.clone_from(
all
.actions
@@ -421,6 +423,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchRunAction(_exec) => {}
Execution::RunBuild(exec) => exec.build.clone_from(
all
.builds
@@ -428,6 +431,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchRunBuild(_exec) => {}
Execution::CancelBuild(exec) => exec.build.clone_from(
all
.builds
@@ -442,6 +446,16 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchDeploy(_exec) => {}
Execution::PullDeployment(exec) => {
exec.deployment.clone_from(
all
.deployments
.get(&exec.deployment)
.map(|r| &r.name)
.unwrap_or(&String::new()),
)
}
Execution::StartDeployment(exec) => {
exec.deployment.clone_from(
all
@@ -496,6 +510,7 @@ impl ToToml for Procedure {
.unwrap_or(&String::new()),
)
}
Execution::BatchDestroyDeployment(_exec) => {}
Execution::CloneRepo(exec) => exec.repo.clone_from(
all
.repos
@@ -503,6 +518,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchCloneRepo(_exec) => {}
Execution::PullRepo(exec) => exec.repo.clone_from(
all
.repos
@@ -510,6 +526,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchPullRepo(_exec) => {}
Execution::BuildRepo(exec) => exec.repo.clone_from(
all
.repos
@@ -517,6 +534,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchBuildRepo(_exec) => {}
Execution::CancelRepoBuild(exec) => exec.repo.clone_from(
all
.repos
@@ -710,6 +728,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchDeployStack(_exec) => {}
Execution::DeployStackIfChanged(exec) => {
exec.stack.clone_from(
all
@@ -719,6 +738,14 @@ impl ToToml for Procedure {
.unwrap_or(&String::new()),
)
}
Execution::BatchDeployStackIfChanged(_exec) => {}
Execution::PullStack(exec) => exec.stack.clone_from(
all
.stacks
.get(&exec.stack)
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::StartStack(exec) => exec.stack.clone_from(
all
.stacks
@@ -761,6 +788,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchDestroyStack(_exec) => {}
Execution::Sleep(_) | Execution::None(_) => {}
}
}

View File

@@ -21,6 +21,7 @@ environment_file.workspace = true
formatting.workspace = true
command.workspace = true
logger.workspace = true
cache.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }

View File

@@ -1,25 +1,22 @@
use std::path::PathBuf;
use std::{fmt::Write, path::PathBuf};
use anyhow::{anyhow, Context};
use command::run_komodo_command;
use formatting::format_serror;
use git::{write_commit_file, GitRes};
use komodo_client::entities::{
stack::ComposeProject, to_komodo_name, update::Log, CloneArgs,
FileContents,
};
use periphery_client::api::{
compose::*,
git::{PullOrCloneRepo, RepoActionResponse},
stack::ComposeProject, to_komodo_name, update::Log, FileContents,
};
use periphery_client::api::{compose::*, git::RepoActionResponse};
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
use tokio::fs;
use crate::{
compose::{compose_up, docker_compose},
compose::{compose_up, docker_compose, write_stack, WriteStackRes},
config::periphery_config,
helpers::log_grep,
docker::docker_login,
helpers::{log_grep, pull_or_clone_stack},
State,
};
@@ -249,59 +246,7 @@ impl Resolve<WriteCommitComposeContents> for State {
}: WriteCommitComposeContents,
_: (),
) -> anyhow::Result<RepoActionResponse> {
if stack.config.files_on_host {
return Err(anyhow!(
"Wrong method called for files on host stack"
));
}
if stack.config.repo.is_empty() {
return Err(anyhow!("Repo is not configured"));
}
let root = periphery_config()
.stack_dir
.join(to_komodo_name(&stack.name));
let mut args: CloneArgs = (&stack).into();
// Set the clone destination to the one created for this run
args.destination = Some(root.display().to_string());
let git_token = match git_token {
Some(token) => Some(token),
None => {
if !stack.config.git_account.is_empty() {
match crate::helpers::git_token(
&stack.config.git_provider,
&stack.config.git_account,
) {
Ok(token) => Some(token.to_string()),
Err(e) => {
return Err(
e.context("Failed to find required git token"),
);
}
}
} else {
None
}
}
};
State
.resolve(
PullOrCloneRepo {
args,
git_token,
environment: vec![],
env_file_path: stack.config.env_file_path.clone(),
skip_secret_interp: stack.config.skip_secret_interp,
// repo replacer only needed for on_clone / on_pull,
// which aren't available for stacks
replacers: Default::default(),
},
(),
)
.await?;
let root = pull_or_clone_stack(&stack, git_token).await?;
let file_path = stack
.config
@@ -334,6 +279,119 @@ impl Resolve<WriteCommitComposeContents> for State {
//
impl<'a> WriteStackRes for &'a mut ComposePullResponse {
fn logs(&mut self) -> &mut Vec<Log> {
&mut self.logs
}
}
impl Resolve<ComposePull> for State {
#[instrument(
name = "ComposePull",
skip(self, git_token, registry_token)
)]
async fn resolve(
&self,
ComposePull {
stack,
service,
git_token,
registry_token,
}: ComposePull,
_: (),
) -> anyhow::Result<ComposePullResponse> {
let mut res = ComposePullResponse::default();
let (run_directory, env_file_path) =
write_stack(&stack, git_token, &mut res).await?;
// Canonicalize the path to ensure it exists, and is the cleanest path to the run directory.
let run_directory = run_directory.canonicalize().context(
"Failed to validate run directory on host after stack write (canonicalize error)",
)?;
let file_paths = stack
.file_paths()
.iter()
.map(|path| {
(
path,
// This will remove any intermediate unneeded '/./' in the path
run_directory.join(path).components().collect::<PathBuf>(),
)
})
.collect::<Vec<_>>();
for (path, full_path) in &file_paths {
if !full_path.exists() {
return Err(anyhow!("Missing compose file at {path}"));
}
}
let docker_compose = docker_compose();
let service_arg = service
.as_ref()
.map(|service| format!(" {service}"))
.unwrap_or_default();
let file_args = if stack.config.file_paths.is_empty() {
String::from("compose.yaml")
} else {
stack.config.file_paths.join(" -f ")
};
// Login to the registry to pull private images, if provider / account are set
if !stack.config.registry_provider.is_empty()
&& !stack.config.registry_account.is_empty()
{
docker_login(
&stack.config.registry_provider,
&stack.config.registry_account,
registry_token.as_deref(),
)
.await
.with_context(|| {
format!(
"domain: {} | account: {}",
stack.config.registry_provider,
stack.config.registry_account
)
})
.context("failed to login to image registry")?;
}
let env_file = env_file_path
.map(|path| format!(" --env-file {path}"))
.unwrap_or_default();
let additional_env_files = stack
.config
.additional_env_files
.iter()
.fold(String::new(), |mut output, file| {
let _ = write!(output, " --env-file {file}");
output
});
let project_name = stack.project_name(false);
let log = run_komodo_command(
"compose pull",
run_directory.as_ref(),
format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} pull{service_arg}",
),
false,
)
.await;
res.logs.push(log);
Ok(res)
}
}
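For reference, with the default file path, one env file, and a targeted service, the format string above renders as follows (all values hypothetical):

```rust
fn main() {
  let docker_compose = "docker compose";
  let project_name = "my-stack";
  let file_args = "compose.yaml";
  let env_file = " --env-file .env";
  let additional_env_files = "";
  let service_arg = " app";
  let cmd = format!(
    "{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} pull{service_arg}"
  );
  assert_eq!(
    cmd,
    "docker compose -p my-stack -f compose.yaml --env-file .env pull app"
  );
}
```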
//
impl Resolve<ComposeUp> for State {
#[instrument(
name = "ComposeUp",

View File

@@ -1,12 +1,20 @@
use std::sync::OnceLock;
use cache::TimeoutCache;
use command::run_komodo_command;
use komodo_client::entities::{
deployment::extract_registry_domain,
docker::image::{Image, ImageHistoryResponseItem},
komodo_timestamp,
update::Log,
};
use periphery_client::api::image::*;
use resolver_api::Resolve;
use crate::{docker::docker_client, State};
use crate::{
docker::{docker_client, docker_login},
State,
};
//
@@ -36,6 +44,68 @@ impl Resolve<ImageHistory> for State {
//
/// Wait this long after a pull to allow another pull through
const PULL_TIMEOUT: i64 = 5_000;
fn pull_cache() -> &'static TimeoutCache<String, Log> {
static PULL_CACHE: OnceLock<TimeoutCache<String, Log>> =
OnceLock::new();
PULL_CACHE.get_or_init(Default::default)
}
impl Resolve<PullImage> for State {
#[instrument(name = "PullImage", skip(self))]
async fn resolve(
&self,
PullImage {
name,
account,
token,
}: PullImage,
_: (),
) -> anyhow::Result<Log> {
// Acquire the image lock
let lock = pull_cache().get_lock(name.clone()).await;
// Lock the per-image lock. Simultaneous pulls wait for the
// first to finish, then check the cached result.
let mut locked = lock.lock().await;
// Early return from cache if last pulled within PULL_TIMEOUT
if locked.last_ts + PULL_TIMEOUT > komodo_timestamp() {
return locked.clone_res();
}
let res = async {
docker_login(
&extract_registry_domain(&name)?,
account.as_deref().unwrap_or_default(),
token.as_deref(),
)
.await?;
anyhow::Ok(
run_komodo_command(
"docker pull",
None,
format!("docker pull {name}"),
false,
)
.await,
)
}
.await;
// Set the cache with results. Any other calls waiting on the lock will
// then immediately also use this same result.
locked.set(&res, komodo_timestamp());
res
}
}
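The handler above is a per-key debounce: one lock per image name serializes concurrent pulls, and a call arriving within `PULL_TIMEOUT` of the last pull reuses the cached `Log`. Condensed shape of the pattern (`do_pull` is a hypothetical stand-in for the `docker_login` + `run_komodo_command` body):

```rust
async fn debounced_pull(name: String) -> anyhow::Result<Log> {
  let lock = pull_cache().get_lock(name.clone()).await;
  // Serialize concurrent pulls of the same image.
  let mut locked = lock.lock().await;
  if locked.last_ts + PULL_TIMEOUT > komodo_timestamp() {
    // Fresh enough: reuse the cached result.
    return locked.clone_res();
  }
  let res = do_pull(&name).await;
  // Publish for any callers still waiting on the lock.
  locked.set(&res, komodo_timestamp());
  res
}
```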
//
impl Resolve<DeleteImage> for State {
#[instrument(name = "DeleteImage", skip(self))]
async fn resolve(

View File

@@ -82,6 +82,7 @@ pub enum PeripheryRequest {
// Compose (Write)
WriteComposeContentsToHost(WriteComposeContentsToHost),
WriteCommitComposeContents(WriteCommitComposeContents),
ComposePull(ComposePull),
ComposeUp(ComposeUp),
ComposeExecution(ComposeExecution),
@@ -121,6 +122,7 @@ pub enum PeripheryRequest {
ImageHistory(ImageHistory),
// Image (Write)
PullImage(PullImage),
DeleteImage(DeleteImage),
PruneImages(PruneImages),

View File

@@ -43,7 +43,7 @@ pub async fn compose_up(
// Will also set additional fields on the response.
// Use the env_file_path in the compose command.
let (run_directory, env_file_path) =
write_stack(&stack, git_token, res)
write_stack(&stack, git_token, &mut *res)
.await
.context("Failed to write / clone compose file")?;
@@ -206,7 +206,7 @@ pub async fn compose_up(
"compose pull",
run_directory.as_ref(),
format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file} pull{service_arg}",
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} pull{service_arg}",
),
false,
)
@@ -289,7 +289,7 @@ pub async fn compose_up(
// Run compose up
let extra_args = parse_extra_args(&stack.config.extra_args);
let command = format!(
"{docker_compose} -p {project_name} -f {file_args}{env_file} up -d{extra_args}{service_arg}",
"{docker_compose} -p {project_name} -f {file_args}{env_file}{additional_env_files} up -d{extra_args}{service_arg}",
);
let log = if stack.config.skip_secret_interp {
@@ -330,13 +330,35 @@ pub async fn compose_up(
Ok(())
}
pub trait WriteStackRes {
fn logs(&mut self) -> &mut Vec<Log>;
fn add_remote_error(&mut self, _contents: FileContents) {}
fn set_commit_hash(&mut self, _hash: Option<String>) {}
fn set_commit_message(&mut self, _message: Option<String>) {}
}
impl<'a> WriteStackRes for &'a mut ComposeUpResponse {
fn logs(&mut self) -> &mut Vec<Log> {
&mut self.logs
}
fn add_remote_error(&mut self, contents: FileContents) {
self.remote_errors.push(contents);
}
fn set_commit_hash(&mut self, hash: Option<String>) {
self.commit_hash = hash;
}
fn set_commit_message(&mut self, message: Option<String>) {
self.commit_message = message;
}
}
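The `WriteStackRes` trait lets `write_stack` serve both `ComposeUp` (which records remote errors and commit info) and `ComposePull` (logs only); the default no-op methods mean each response type implements only what it records. A hypothetical third consumer for illustration:

```rust
// Hypothetical — a logs-only consumer, analogous to ComposePullResponse.
struct LogsOnly {
  logs: Vec<Log>,
}

impl WriteStackRes for &mut LogsOnly {
  fn logs(&mut self) -> &mut Vec<Log> {
    &mut self.logs
  }
  // add_remote_error / set_commit_hash / set_commit_message
  // fall through to the trait's no-op defaults.
}
```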
/// Either writes the stack file_contents to a file, or clones the repo.
/// Returns (run_directory, env_file_path)
async fn write_stack<'a>(
stack: &'a Stack,
pub async fn write_stack(
stack: &Stack,
git_token: Option<String>,
res: &mut ComposeUpResponse,
) -> anyhow::Result<(PathBuf, Option<&'a str>)> {
mut res: impl WriteStackRes,
) -> anyhow::Result<(PathBuf, Option<&str>)> {
let root = periphery_config()
.stack_dir
.join(to_komodo_name(&stack.name));
@@ -361,7 +383,7 @@ async fn write_stack<'a>(
.skip_secret_interp
.then_some(&periphery_config().secrets),
run_directory.as_ref(),
&mut res.logs,
res.logs(),
)
.await
{
@@ -399,7 +421,7 @@ async fn write_stack<'a>(
.skip_secret_interp
.then_some(&periphery_config().secrets),
run_directory.as_ref(),
&mut res.logs,
res.logs(),
)
.await
{
@@ -452,9 +474,9 @@ async fn write_stack<'a>(
Err(e) => {
let error = format_serror(&e.into());
res
.logs
.logs()
.push(Log::error("no git token", error.clone()));
res.remote_errors.push(FileContents {
res.add_remote_error(FileContents {
path: Default::default(),
contents: error,
});
@@ -523,8 +545,10 @@ async fn write_stack<'a>(
let error = format_serror(
&e.context("failed to pull stack repo").into(),
);
res.logs.push(Log::error("pull stack repo", error.clone()));
res.remote_errors.push(FileContents {
res
.logs()
.push(Log::error("pull stack repo", error.clone()));
res.add_remote_error(FileContents {
path: Default::default(),
contents: error,
});
@@ -534,11 +558,11 @@ async fn write_stack<'a>(
}
};
res.logs.extend(logs);
res.commit_hash = commit_hash;
res.commit_message = commit_message;
res.logs().extend(logs);
res.set_commit_hash(commit_hash);
res.set_commit_message(commit_message);
if !all_logs_success(&res.logs) {
if !all_logs_success(res.logs()) {
return Err(anyhow!("Stopped after repo pull failure"));
}

View File

@@ -1,4 +1,4 @@
use std::sync::OnceLock;
use std::{collections::HashMap, sync::OnceLock};
use anyhow::{anyhow, Context};
use bollard::{
@@ -40,7 +40,7 @@ impl DockerClient {
pub async fn list_containers(
&self,
) -> anyhow::Result<Vec<ContainerListItem>> {
self
let mut containers = self
.docker
.list_containers(Some(ListContainersOptions::<String> {
all: true,
@@ -48,8 +48,8 @@ impl DockerClient {
}))
.await?
.into_iter()
.map(|container| {
Ok(ContainerListItem {
.flat_map(|container| {
anyhow::Ok(ContainerListItem {
server_id: None,
name: container
.names
@@ -75,9 +75,12 @@ impl DockerClient {
networks: container
.network_settings
.and_then(|settings| {
settings
.networks
.map(|networks| networks.into_keys().collect())
settings.networks.map(|networks| {
let mut keys =
networks.into_keys().collect::<Vec<_>>();
keys.sort();
keys
})
})
.unwrap_or_default(),
volumes: container
@@ -92,7 +95,26 @@ impl DockerClient {
labels: container.labels.unwrap_or_default(),
})
})
.collect()
.collect::<Vec<_>>();
let container_id_to_network = containers
.iter()
.filter_map(|c| Some((c.id.clone()?, c.network_mode.clone()?)))
.collect::<HashMap<_, _>>();
// Fix containers which use `container:container_id` network_mode,
// by replacing with the referenced network mode.
containers.iter_mut().for_each(|container| {
let Some(network_name) = &container.network_mode else {
return;
};
let Some(container_id) =
network_name.strip_prefix("container:")
else {
return;
};
container.network_mode =
container_id_to_network.get(container_id).cloned();
});
Ok(containers)
}
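Example of the `network_mode` rewrite above: if container `b` was started with `--network container:<id-of-a>`, Docker reports `container:<id>` as its network mode, and the pass resolves that to `a`'s actual network:

```rust
use std::collections::HashMap;

fn main() {
  // a ("aaa111") is on "bridge"; b was started with `--network container:aaa111`.
  let id_to_network: HashMap<String, String> =
    [("aaa111".to_string(), "bridge".to_string())].into();
  let mode = Some("container:aaa111".to_string());
  let fixed = mode
    .as_deref()
    .and_then(|m| m.strip_prefix("container:"))
    .and_then(|id| id_to_network.get(id).cloned());
  assert_eq!(fixed, Some("bridge".to_string()));
}
```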
pub async fn inspect_container(
@@ -519,7 +541,7 @@ impl DockerClient {
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<NetworkListItem>> {
self
let networks = self
.docker
.list_networks::<String>(None)
.await?
@@ -545,7 +567,7 @@ impl DockerClient {
}),
None => false,
};
Ok(NetworkListItem {
NetworkListItem {
name: network.name,
id: network.id,
created: network.created,
@@ -559,9 +581,10 @@ impl DockerClient {
attachable: network.attachable,
ingress: network.ingress,
in_use,
})
}
})
.collect()
.collect();
Ok(networks)
}
pub async fn inspect_network(
@@ -628,7 +651,7 @@ impl DockerClient {
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<ImageListItem>> {
self
let images = self
.docker
.list_images::<String>(None)
.await?
@@ -641,7 +664,7 @@ impl DockerClient {
.map(|id| id == &image.id)
.unwrap_or_default()
});
Ok(ImageListItem {
ImageListItem {
name: image
.repo_tags
.into_iter()
@@ -652,9 +675,10 @@ impl DockerClient {
created: image.created,
size: image.size,
in_use,
})
}
})
.collect()
.collect();
Ok(images)
}
pub async fn inspect_image(
@@ -761,7 +785,7 @@ impl DockerClient {
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<VolumeListItem>> {
self
let volumes = self
.docker
.list_volumes::<String>(None)
.await?
@@ -786,7 +810,7 @@ impl DockerClient {
let in_use = containers.iter().any(|container| {
container.volumes.iter().any(|name| &volume.name == name)
});
Ok(VolumeListItem {
VolumeListItem {
name: volume.name,
driver: volume.driver,
mountpoint: volume.mountpoint,
@@ -794,9 +818,10 @@ impl DockerClient {
size: volume.usage_data.map(|data| data.size),
scope,
in_use,
})
}
})
.collect()
.collect();
Ok(volumes)
}
pub async fn inspect_volume(

View File

@@ -1,10 +1,17 @@
use anyhow::Context;
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use komodo_client::{
entities::{EnvironmentVar, SearchCombinator},
entities::{
stack::Stack, to_komodo_name, CloneArgs, EnvironmentVar,
SearchCombinator,
},
parsers::QUOTE_PATTERN,
};
use periphery_client::api::git::PullOrCloneRepo;
use resolver_api::Resolve;
use crate::config::periphery_config;
use crate::{config::periphery_config, State};
pub fn git_token(
domain: &str,
@@ -89,3 +96,65 @@ pub fn interpolate_variables(
true,
)
}
/// Returns path to root directory of the stack repo.
pub async fn pull_or_clone_stack(
stack: &Stack,
git_token: Option<String>,
) -> anyhow::Result<PathBuf> {
if stack.config.files_on_host {
return Err(anyhow!(
"Wrong method called for files on host stack"
));
}
if stack.config.repo.is_empty() {
return Err(anyhow!("Repo is not configured"));
}
let root = periphery_config()
.stack_dir
.join(to_komodo_name(&stack.name));
let mut args: CloneArgs = stack.into();
// Set the clone destination to the one created for this run
args.destination = Some(root.display().to_string());
let git_token = match git_token {
Some(token) => Some(token),
None => {
if !stack.config.git_account.is_empty() {
match crate::helpers::git_token(
&stack.config.git_provider,
&stack.config.git_account,
) {
Ok(token) => Some(token.to_string()),
Err(e) => {
return Err(
e.context("Failed to find required git token"),
);
}
}
} else {
None
}
}
};
State
.resolve(
PullOrCloneRepo {
args,
git_token,
environment: vec![],
env_file_path: stack.config.env_file_path.clone(),
skip_secret_interp: stack.config.skip_secret_interp,
// repo replacer only needed for on_clone / on_pull,
// which aren't available for stacks
replacers: Default::default(),
},
(),
)
.await?;
Ok(root)
}
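With the clone/pull logic factored out, callers like `WriteCommitComposeContents` (earlier in this diff) reduce to a single call. A sketch, assuming the default compose file path:

```rust
// Resolve the stack's repo root once, then work under it.
let root = pull_or_clone_stack(&stack, git_token).await?; // <stack_dir>/<komodo_name>
let compose_path = root.join("compose.yaml"); // assumption: default file path
```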

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Runs the target Action. Response: [Update]
#[typeshare]
@@ -26,3 +26,31 @@ pub struct RunAction {
/// Id or name
pub action: String,
}
/// Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchRunAction {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* actions
/// foo-*
/// # add some more
/// extra-action-1, extra-action-2
/// ```
pub pattern: String,
}

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
//
@@ -36,6 +36,36 @@ pub struct RunBuild {
//
/// Runs multiple Builds in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchRunBuild {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* builds
/// foo-*
/// # add some more
/// extra-build-1, extra-build-2
/// ```
pub pattern: String,
}
//
/// Cancels the target build.
/// Only does anything if the build is `building` when called.
/// Response: [Update]

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::{update::Update, TerminationSignal};
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Deploys the container for the target deployment. Response: [Update].
///
@@ -41,6 +41,57 @@ pub struct Deploy {
//
/// Deploys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Serialize,
Deserialize,
Debug,
Clone,
PartialEq,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDeploy {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* deployments
/// foo-*
/// # add some more
/// extra-deployment-1, extra-deployment-2
/// ```
pub pattern: String,
}
//
/// Pulls the image for the target deployment. Response: [Update]
#[typeshare]
#[derive(
Serialize,
Deserialize,
Debug,
Clone,
PartialEq,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(Update)]
pub struct PullDeployment {
/// Name or id
pub deployment: String,
}
//
/// Starts the container for the target deployment. Response: [Update]
///
/// 1. Runs `docker start ${container_name}`.
@@ -187,3 +238,33 @@ pub struct DestroyDeployment {
/// Override the default termination max time.
pub time: Option<i32>,
}
//
/// Destroys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Serialize,
Deserialize,
Debug,
Clone,
PartialEq,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDestroyDeployment {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* deployments
/// foo-*
/// # add some more
/// extra-deployment-1, extra-deployment-2
/// ```
pub pattern: String,
}

View File

@@ -27,7 +27,7 @@ pub use sync::*;
use crate::{
api::write::CommitSync,
entities::{NoData, I64},
entities::{update::Update, NoData, _Serror, I64},
};
pub trait KomodoExecuteRequest: HasResponse {}
@@ -59,27 +59,36 @@ pub enum Execution {
// ACTION
RunAction(RunAction),
BatchRunAction(BatchRunAction),
// PROCEDURE
RunProcedure(RunProcedure),
BatchRunProcedure(BatchRunProcedure),
// BUILD
RunBuild(RunBuild),
BatchRunBuild(BatchRunBuild),
CancelBuild(CancelBuild),
// DEPLOYMENT
Deploy(Deploy),
BatchDeploy(BatchDeploy),
PullDeployment(PullDeployment),
StartDeployment(StartDeployment),
RestartDeployment(RestartDeployment),
PauseDeployment(PauseDeployment),
UnpauseDeployment(UnpauseDeployment),
StopDeployment(StopDeployment),
DestroyDeployment(DestroyDeployment),
BatchDestroyDeployment(BatchDestroyDeployment),
// REPO
CloneRepo(CloneRepo),
BatchCloneRepo(BatchCloneRepo),
PullRepo(PullRepo),
BatchPullRepo(BatchPullRepo),
BuildRepo(BuildRepo),
BatchBuildRepo(BatchBuildRepo),
CancelRepoBuild(CancelRepoBuild),
// SERVER (Container)
@@ -113,13 +122,17 @@ pub enum Execution {
// STACK
DeployStack(DeployStack),
BatchDeployStack(BatchDeployStack),
DeployStackIfChanged(DeployStackIfChanged),
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
PullStack(PullStack),
StartStack(StartStack),
RestartStack(RestartStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
StopStack(StopStack),
DestroyStack(DestroyStack),
BatchDestroyStack(BatchDestroyStack),
// SLEEP
Sleep(Sleep),
@@ -131,3 +144,34 @@ pub struct Sleep {
#[serde(default)]
pub duration_ms: I64,
}
#[typeshare]
pub type BatchExecutionResponse = Vec<BatchExecutionResponseItem>;
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "status", content = "data")]
pub enum BatchExecutionResponseItem {
Ok(Update),
Err(BatchExecutionResponseItemErr),
}
impl From<Result<Update, BatchExecutionResponseItemErr>>
for BatchExecutionResponseItem
{
fn from(
value: Result<Update, BatchExecutionResponseItemErr>,
) -> Self {
match value {
Ok(update) => Self::Ok(update),
Err(e) => Self::Err(e),
}
}
}
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchExecutionResponseItemErr {
pub name: String,
pub error: _Serror,
}
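With `#[serde(tag = "status", content = "data")]`, each item in a batch response serializes as an adjacently tagged JSON object. Sketched shapes (field contents elided):

```rust
// Ok item:  {"status": "Ok",  "data": { /* Update */ }}
// Err item: {"status": "Err", "data": {"name": "my-stack", "error": { /* _Serror */ }}}
```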

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Runs the target Procedure. Response: [Update]
#[typeshare]
@@ -26,3 +26,31 @@ pub struct RunProcedure {
/// Id or name
pub procedure: String,
}
/// Runs multiple Procedures in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchRunProcedure {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* procedures
/// foo-*
/// # add some more
/// extra-procedure-1, extra-procedure-2
/// ```
pub pattern: String,
}

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
//
@@ -39,6 +39,36 @@ pub struct CloneRepo {
//
/// Clones multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchCloneRepo {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* repos
/// foo-*
/// # add some more
/// extra-repo-1, extra-repo-2
/// ```
pub pattern: String,
}
//
/// Pulls the target repo. Response: [Update].
///
/// Note. Repo must have server attached at `server_id`.
@@ -65,6 +95,36 @@ pub struct PullRepo {
//
/// Pulls multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchPullRepo {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* repos
/// foo-*
/// # add some more
/// extra-repo-1, extra-repo-2
/// ```
pub pattern: String,
}
//
/// Builds the target repo, using the attached builder. Response: [Update].
///
/// Note. Repo must have builder attached at `builder_id`.
@@ -95,6 +155,36 @@ pub struct BuildRepo {
//
/// Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchBuildRepo {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* repos
/// foo-*
/// # add some more
/// extra-repo-1, extra-repo-2
/// ```
pub pattern: String,
}
//
/// Cancels the target repo build.
/// Only does anything if the repo build is `building` when called.
/// Response: [Update]

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Deploys the target stack. `docker compose up`. Response: [Update]
#[typeshare]
@@ -25,11 +25,45 @@ use super::KomodoExecuteRequest;
pub struct DeployStack {
/// Id or name
pub stack: String,
/// Optionally specify a specific service to "compose up"
pub service: Option<String>,
/// Override the default termination max time.
/// Only used if the stack needs to be taken down first.
pub stop_time: Option<i32>,
}
//
/// Deploys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Serialize,
Deserialize,
Debug,
Clone,
PartialEq,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDeployStack {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* stacks
/// foo-*
/// # add some more
/// extra-stack-1, extra-stack-2
/// ```
pub pattern: String,
}
//
/// Checks deployed contents vs latest contents,
/// and only runs `docker compose up`
/// if any changes are found. Response: [Update]
@@ -56,6 +90,59 @@ pub struct DeployStackIfChanged {
//
/// Deploys multiple Stacks in parallel that match pattern, each only if changed. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Serialize,
Deserialize,
Debug,
Clone,
PartialEq,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDeployStackIfChanged {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* stacks
/// foo-*
/// # add some more
/// extra-stack-1, extra-stack-2
/// ```
pub pattern: String,
}
//
/// Pulls images for the target stack. `docker compose pull`. Response: [Update]
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(Update)]
pub struct PullStack {
/// Id or name
pub stack: String,
/// Optionally specify a specific service to pull
pub service: Option<String>,
}
//
/// Starts the target stack. `docker compose start`. Response: [Update]
#[typeshare]
#[derive(
@@ -192,9 +279,41 @@ pub struct StopStack {
pub struct DestroyStack {
/// Id or name
pub stack: String,
/// Optionally specify a specific service to destroy
pub service: Option<String>,
/// Pass `--remove-orphans`
#[serde(default)]
pub remove_orphans: bool,
/// Override the default termination max time.
pub stop_time: Option<i32>,
}
//
/// Destroys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
Serialize,
Deserialize,
Debug,
Clone,
PartialEq,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDestroyStack {
/// Id or name or wildcard pattern or regex.
/// Supports multiline and comma-delimited combinations of the above.
///
/// Example:
/// ```
/// # match all foo-* stacks
/// foo-*
/// # add some more
/// extra-stack-1, extra-stack-2
/// ```
pub pattern: String,
}

View File

@@ -179,7 +179,7 @@ impl GetBuildMonthlyStatsResponse {
/// Retrieve versions of the build that were built in the past and available for deployment,
/// sorted by most recent first.
/// Response: [GetBuildVersionsResponse].
/// Response: [ListBuildVersionsResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits,

View File

@@ -170,7 +170,7 @@ pub type SearchDeploymentLogResponse = Log;
//
/// Get the deployment container's stats using `docker stats`.
/// Response: [DockerContainerStats].
/// Response: [GetDeploymentStatsResponse].
///
/// Note. This call will hit the underlying server directly for most up to date stats.
#[typeshare]

View File

@@ -27,7 +27,7 @@ pub type GetGitProviderAccountResponse = GitProviderAccount;
//
/// List git provider accounts matching optional query.
/// Response: [ListGitProvidersResponse].
/// Response: [ListGitProviderAccountsResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits,
@@ -64,7 +64,7 @@ pub type GetDockerRegistryAccountResponse = DockerRegistryAccount;
//
/// List docker registry accounts matching optional query.
/// Response: [ListDockerRegistrysResponse].
/// Response: [ListDockerRegistryAccountsResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits,

View File

@@ -373,7 +373,7 @@ pub type SearchContainerLogResponse = Log;
//
/// Inspect a docker container on the server. Response: [Container].
/// Find the attached resource for a container. Either Deployment or Stack. Response: [GetResourceMatchingContainerResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Request, EmptyTraits,
@@ -388,6 +388,7 @@ pub struct GetResourceMatchingContainer {
pub container: String,
}
/// Response for [GetResourceMatchingContainer]. Resource is either Deployment, Stack, or None.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetResourceMatchingContainerResponse {

View File

@@ -51,7 +51,7 @@ pub type ListStackServicesResponse = Vec<StackService>;
//
/// Get a stack service's log. Response: [GetStackContainersResponse].
/// Get a stack service's log. Response: [GetStackServiceLogResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Request, EmptyTraits,

View File

@@ -46,6 +46,22 @@ pub struct CopyDeployment {
//
/// Create a Deployment from an existing container. Response: [Deployment].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Request, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(Deployment)]
pub struct CreateDeploymentFromContainer {
/// The name or id of the existing container.
pub name: String,
/// The server id or name on which container exists.
pub server: String,
}
//
/// Deletes the deployment at the given id, and returns the deleted deployment.
/// Response: [Deployment].
///

View File

@@ -47,7 +47,7 @@ pub type UpdateGitProviderAccountResponse = GitProviderAccount;
//
/// **Admin only.** Delete a git provider account.
/// Response: [User].
/// Response: [DeleteGitProviderAccountResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Request, EmptyTraits,

View File

@@ -0,0 +1,213 @@
use serde::{de::Visitor, Deserializer};
pub fn maybe_string_i64_deserializer<'de, D>(
deserializer: D,
) -> Result<i64, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(MaybeStringI64Visitor)
}
pub fn option_maybe_string_i64_deserializer<'de, D>(
deserializer: D,
) -> Result<Option<i64>, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(OptionMaybeStringI64Visitor)
}
struct MaybeStringI64Visitor;
impl<'de> Visitor<'de> for MaybeStringI64Visitor {
type Value = i64;
fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
write!(formatter, "number or string number")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
v.parse::<i64>().map_err(E::custom)
}
fn visit_f32<E>(self, v: f32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_i8<E>(self, v: i8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_i16<E>(self, v: i16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_i32<E>(self, v: i32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v)
}
fn visit_u8<E>(self, v: u8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_u16<E>(self, v: u16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(v as i64)
}
}
struct OptionMaybeStringI64Visitor;
impl<'de> Visitor<'de> for OptionMaybeStringI64Visitor {
type Value = Option<i64>;
fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
write!(formatter, "null or number or string number")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
MaybeStringI64Visitor.visit_str(v).map(Some)
}
fn visit_f32<E>(self, v: f32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i8<E>(self, v: i8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i16<E>(self, v: i16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i32<E>(self, v: i32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v))
}
fn visit_u8<E>(self, v: u8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_u16<E>(self, v: u16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(None)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(None)
}
}
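Hypothetical usage of the two deserializers above, for fields a remote API may report as either numbers or numeric strings (struct and field names are illustrative):

```rust
#[derive(serde::Deserialize)]
struct DiskUsage {
  // Accepts 42, 42.0, or "42".
  #[serde(deserialize_with = "maybe_string_i64_deserializer")]
  size: i64,
  // Additionally accepts null or a missing field.
  #[serde(default, deserialize_with = "option_maybe_string_i64_deserializer")]
  max_size: Option<i64>,
}
```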

View File

@@ -4,6 +4,7 @@ mod conversion;
mod environment;
mod file_contents;
mod labels;
mod maybe_string_i64;
mod string_list;
mod term_signal_labels;
@@ -11,5 +12,6 @@ pub use conversion::*;
pub use environment::*;
pub use file_contents::*;
pub use labels::*;
pub use maybe_string_i64::*;
pub use string_list::*;
pub use term_signal_labels::*;

View File

@@ -144,6 +144,20 @@ pub enum AlertData {
to: DeploymentState,
},
/// A Deployment has an image update available.
DeploymentImageUpdateAvailable {
/// The id of the deployment
id: String,
/// The name of the deployment
name: String,
/// The server id of the server that the deployment is on
server_id: String,
/// The server name
server_name: String,
/// The image with update
image: String,
},
/// A stack's state has changed unexpectedly.
StackStateChange {
/// The id of the stack
@@ -160,6 +174,22 @@ pub enum AlertData {
to: StackState,
},
/// A Stack has an image update available.
StackImageUpdateAvailable {
/// The id of the stack
id: String,
/// The name of the stack
name: String,
/// The server id of the server that the stack is on
server_id: String,
/// The server name
server_name: String,
/// The service name to update
service: String,
/// The image with update
image: String,
},
/// An AWS builder failed to terminate.
AwsBuilderTerminationFailed {
/// The id of the aws instance which failed to terminate

View File

@@ -252,10 +252,10 @@ pub struct BuildConfig {
/// Secret arguments.
///
/// These values remain hidden in the final image by using
/// docker secret mounts. See [https://docs.docker.com/build/building/secrets].
/// docker secret mounts. See <https://docs.docker.com/build/building/secrets>.
///
/// The values can be used in RUN commands:
/// ```
/// ```sh
/// RUN --mount=type=secret,id=SECRET_KEY \
/// SECRET_KEY=$(cat /run/secrets/SECRET_KEY) ...
/// ```

View File

@@ -48,10 +48,13 @@ pub struct BuilderListItemInfo {
#[serde(tag = "type", content = "params")]
#[allow(clippy::large_enum_variant)]
pub enum BuilderConfig {
/// Use a connected server an image builder.
/// Use a Periphery address as a Builder.
Url(UrlBuilderConfig),
/// Use a connected server as a Builder.
Server(ServerBuilderConfig),
/// Use EC2 instances spawned on demand as an image builder.
/// Use EC2 instances spawned on demand as a Builder.
Aws(AwsBuilderConfig),
}
@@ -76,19 +79,21 @@ impl Default for BuilderConfig {
#[serde(tag = "type", content = "params")]
#[allow(clippy::large_enum_variant)]
pub enum PartialBuilderConfig {
Url(#[serde(default)] _PartialUrlBuilderConfig),
Server(#[serde(default)] _PartialServerBuilderConfig),
Aws(#[serde(default)] _PartialAwsBuilderConfig),
}
impl Default for PartialBuilderConfig {
fn default() -> Self {
Self::Aws(Default::default())
Self::Url(Default::default())
}
}
impl MaybeNone for PartialBuilderConfig {
fn is_none(&self) -> bool {
match self {
PartialBuilderConfig::Url(config) => config.is_none(),
PartialBuilderConfig::Server(config) => config.is_none(),
PartialBuilderConfig::Aws(config) => config.is_none(),
}
@@ -98,6 +103,7 @@ impl MaybeNone for PartialBuilderConfig {
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BuilderConfigDiff {
Url(UrlBuilderConfigDiff),
Server(ServerBuilderConfigDiff),
Aws(AwsBuilderConfigDiff),
}
@@ -105,6 +111,9 @@ pub enum BuilderConfigDiff {
impl From<BuilderConfigDiff> for PartialBuilderConfig {
fn from(value: BuilderConfigDiff) -> Self {
match value {
BuilderConfigDiff::Url(diff) => {
PartialBuilderConfig::Url(diff.into())
}
BuilderConfigDiff::Server(diff) => {
PartialBuilderConfig::Server(diff.into())
}
@@ -120,6 +129,9 @@ impl Diff for BuilderConfigDiff {
&self,
) -> impl Iterator<Item = partial_derive2::FieldDiff> {
match self {
BuilderConfigDiff::Url(diff) => {
diff.iter_field_diffs().collect::<Vec<_>>().into_iter()
}
BuilderConfigDiff::Server(diff) => {
diff.iter_field_diffs().collect::<Vec<_>>().into_iter()
}
@@ -138,10 +150,27 @@ impl PartialDiff<PartialBuilderConfig, BuilderConfigDiff>
partial: PartialBuilderConfig,
) -> BuilderConfigDiff {
match self {
BuilderConfig::Url(original) => match partial {
PartialBuilderConfig::Url(partial) => {
BuilderConfigDiff::Url(original.partial_diff(partial))
}
PartialBuilderConfig::Server(partial) => {
let default = ServerBuilderConfig::default();
BuilderConfigDiff::Server(default.partial_diff(partial))
}
PartialBuilderConfig::Aws(partial) => {
let default = AwsBuilderConfig::default();
BuilderConfigDiff::Aws(default.partial_diff(partial))
}
},
BuilderConfig::Server(original) => match partial {
PartialBuilderConfig::Server(partial) => {
BuilderConfigDiff::Server(original.partial_diff(partial))
}
PartialBuilderConfig::Url(partial) => {
let default = UrlBuilderConfig::default();
BuilderConfigDiff::Url(default.partial_diff(partial))
}
PartialBuilderConfig::Aws(partial) => {
let default = AwsBuilderConfig::default();
BuilderConfigDiff::Aws(default.partial_diff(partial))
@@ -151,6 +180,10 @@ impl PartialDiff<PartialBuilderConfig, BuilderConfigDiff>
PartialBuilderConfig::Aws(partial) => {
BuilderConfigDiff::Aws(original.partial_diff(partial))
}
PartialBuilderConfig::Url(partial) => {
let default = UrlBuilderConfig::default();
BuilderConfigDiff::Url(default.partial_diff(partial))
}
PartialBuilderConfig::Server(partial) => {
let default = ServerBuilderConfig::default();
BuilderConfigDiff::Server(default.partial_diff(partial))
@@ -163,6 +196,7 @@ impl PartialDiff<PartialBuilderConfig, BuilderConfigDiff>
impl MaybeNone for BuilderConfigDiff {
fn is_none(&self) -> bool {
match self {
BuilderConfigDiff::Url(config) => config.is_none(),
BuilderConfigDiff::Server(config) => config.is_none(),
BuilderConfigDiff::Aws(config) => config.is_none(),
}
@@ -172,6 +206,9 @@ impl MaybeNone for BuilderConfigDiff {
impl From<PartialBuilderConfig> for BuilderConfig {
fn from(value: PartialBuilderConfig) -> BuilderConfig {
match value {
PartialBuilderConfig::Url(server) => {
BuilderConfig::Url(server.into())
}
PartialBuilderConfig::Server(server) => {
BuilderConfig::Server(server.into())
}
@@ -185,6 +222,9 @@ impl From<PartialBuilderConfig> for BuilderConfig {
impl From<BuilderConfig> for PartialBuilderConfig {
fn from(value: BuilderConfig) -> Self {
match value {
BuilderConfig::Url(config) => {
PartialBuilderConfig::Url(config.into())
}
BuilderConfig::Server(config) => {
PartialBuilderConfig::Server(config.into())
}
@@ -202,6 +242,16 @@ impl MergePartial for BuilderConfig {
partial: PartialBuilderConfig,
) -> BuilderConfig {
match partial {
PartialBuilderConfig::Url(partial) => match self {
BuilderConfig::Url(config) => {
let config = UrlBuilderConfig {
address: partial.address.unwrap_or(config.address),
passkey: partial.passkey.unwrap_or(config.passkey),
};
BuilderConfig::Url(config)
}
_ => BuilderConfig::Url(partial.into()),
},
PartialBuilderConfig::Server(partial) => match self {
BuilderConfig::Server(config) => {
let config = ServerBuilderConfig {
@@ -252,6 +302,42 @@ impl MergePartial for BuilderConfig {
}
}
#[typeshare(serialized_as = "Partial<UrlBuilderConfig>")]
pub type _PartialUrlBuilderConfig = PartialUrlBuilderConfig;
/// Configuration for a Komodo Url Builder.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Builder, Partial)]
#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)]
#[partial(skip_serializing_none, from, diff)]
pub struct UrlBuilderConfig {
/// The address of the Periphery agent
#[serde(default = "default_address")]
pub address: String,
/// A custom passkey to use. Otherwise, use the default passkey.
#[serde(default)]
pub passkey: String,
}
fn default_address() -> String {
String::from("https://periphery:8120")
}
impl Default for UrlBuilderConfig {
fn default() -> Self {
Self {
address: default_address(),
passkey: Default::default(),
}
}
}
impl UrlBuilderConfig {
pub fn builder() -> UrlBuilderConfigBuilder {
UrlBuilderConfigBuilder::default()
}
}
#[typeshare(serialized_as = "Partial<ServerBuilderConfig>")]
pub type _PartialServerBuilderConfig = PartialServerBuilderConfig;
@@ -264,11 +350,17 @@ pub type _PartialServerBuilderConfig = PartialServerBuilderConfig;
#[partial(skip_serializing_none, from, diff)]
pub struct ServerBuilderConfig {
/// The server id of the builder
#[serde(alias = "server")]
#[serde(default, alias = "server")]
#[partial_attr(serde(alias = "server"))]
pub server_id: String,
}
impl ServerBuilderConfig {
pub fn builder() -> ServerBuilderConfigBuilder {
ServerBuilderConfigBuilder::default()
}
}
#[typeshare(serialized_as = "Partial<AwsBuilderConfig>")]
pub type _PartialAwsBuilderConfig = PartialAwsBuilderConfig;
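The new `Url` variant lets a Builder point directly at a Periphery agent by address, without registering it as a Server. As a minimal sketch of the resource TOML (the builder name and address here are illustrative, following the sync conventions shown later in this PR):

```toml
[[builder]]
name = "periphery-builder"
config.type = "Url"

[builder.config.params]
# Address of the Periphery agent; defaults to https://periphery:8120
address = "https://periphery.example.internal:8120"
# Optional custom passkey; the default passkey is used when left empty
passkey = ""
```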

View File

@@ -108,8 +108,8 @@ pub struct Env {
pub komodo_oidc_enabled: Option<bool>,
/// Override `oidc_provider`
pub komodo_oidc_provider: Option<String>,
/// Override `oidc_redirect`
pub komodo_oidc_redirect: Option<String>,
/// Override `oidc_redirect_host`
pub komodo_oidc_redirect_host: Option<String>,
/// Override `oidc_client_id`
pub komodo_oidc_client_id: Option<String>,
/// Override `oidc_client_id` from file
@@ -325,18 +325,22 @@ pub struct CoreConfig {
/// Configure OIDC provider address for
/// communication directly with Komodo Core.
///
/// Note. Needs to be reachable from Komodo Core.
/// Eg. `https://accounts.example.internal/application/o/komodo`
///
/// `https://accounts.example.internal/application/o/komodo`
#[serde(default)]
pub oidc_provider: String,
/// Configure OIDC user redirect address.
/// This is the address users are redirected to in their browser,
/// and may be different from `oidc_provider`.
/// If not provided, the `oidc_provider` will be used.
/// Eg. `https://accounts.example.external/application/o/komodo`
/// Configure OIDC user redirect host.
///
/// This is the host address users are redirected to in their browser,
/// and may be different from `oidc_provider` host.
/// DO NOT include the `path` part; it will be inferred.
/// If not provided, the host will be the same as `oidc_provider`.
/// Eg. `https://accounts.example.external`
#[serde(default)]
pub oidc_redirect: String,
pub oidc_redirect_host: String,
/// Set OIDC client id
#[serde(default)]
@@ -580,7 +584,7 @@ impl CoreConfig {
local_auth: config.local_auth,
oidc_enabled: config.oidc_enabled,
oidc_provider: config.oidc_provider,
oidc_redirect: config.oidc_redirect,
oidc_redirect_host: config.oidc_redirect_host,
oidc_client_id: empty_or_redacted(&config.oidc_client_id),
oidc_client_secret: empty_or_redacted(
&config.oidc_client_secret,

View File

@@ -41,6 +41,8 @@ pub struct DeploymentListItemInfo {
pub status: Option<String>,
/// The image attached to the deployment.
pub image: String,
/// Whether there is a newer image available at the same tag.
pub update_available: bool,
/// The server that deployment sits on.
pub server_id: String,
/// An attached Komodo Build, if it exists.
@@ -87,6 +89,19 @@ pub struct DeploymentConfig {
#[builder(default)]
pub redeploy_on_build: bool,
/// Whether to poll for any updates to the image.
#[serde(default)]
#[builder(default)]
pub poll_for_updates: bool,
/// Whether to automatically redeploy when
/// a newer image is found. Will implicitly
/// enable `poll_for_updates`; you don't need to
/// enable both.
#[serde(default)]
#[builder(default)]
pub auto_update: bool,
/// Whether to send ContainerStateChange alerts for this deployment.
#[serde(default = "default_send_alerts")]
#[builder(default = "default_send_alerts()")]
@@ -217,6 +232,8 @@ impl Default for DeploymentConfig {
image_registry_account: Default::default(),
skip_secret_interp: Default::default(),
redeploy_on_build: Default::default(),
poll_for_updates: Default::default(),
auto_update: Default::default(),
term_signal_labels: Default::default(),
termination_signal: Default::default(),
termination_timeout: default_termination_timeout(),
@@ -417,6 +434,7 @@ pub fn term_signal_labels_from_str(
#[typeshare]
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub struct DeploymentActionState {
pub pulling: bool,
pub deploying: bool,
pub starting: bool,
pub restarting: bool,
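For reference, turning on the new polling behavior for a Deployment through resource TOML is a single flag — a minimal sketch with illustrative names:

```toml
[[deployment]]
name = "test-logger-01"

[deployment.config]
server_id = "server-01"
# Periodically check the registry for a newer image at the same tag.
# Pair with `auto_update = true` to also redeploy automatically.
poll_for_updates = true
```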

View File

@@ -46,7 +46,7 @@ pub mod logger;
pub mod permission;
/// Subtypes of [Procedure][procedure::Procedure].
pub mod procedure;
/// Subtypes of [ProviderAccount][provider::ProviderAccount]
/// Subtypes of [GitProviderAccount][provider::GitProviderAccount] and [DockerRegistryAccount][provider::DockerRegistryAccount]
pub mod provider;
/// Subtypes of [Repo][repo::Repo].
pub mod repo;
@@ -168,7 +168,7 @@ pub fn get_image_name(
pub fn to_komodo_name(name: &str) -> String {
name
.to_lowercase()
.replace([' ', '.'], "_")
.replace([' ', '.', ',', '\n'], "_")
.trim()
.to_string()
}
@@ -392,7 +392,7 @@ pub struct CloneArgs {
pub provider: String,
/// Use https (vs http).
pub https: bool,
/// Full repo identifier. <namespace>/<repo_name>
/// Full repo identifier. {namespace}/{repo_name}
pub repo: Option<String>,
/// Git Branch. Default: `main`
pub branch: String,
@@ -677,6 +677,7 @@ pub enum Operation {
DeleteStack,
WriteStackContents,
RefreshStackCache,
PullStack,
DeployStack,
StartStack,
RestartStack,
@@ -686,11 +687,14 @@ pub enum Operation {
DestroyStack,
// stack (service)
DeployStackService,
PullStackService,
StartStackService,
RestartStackService,
PauseStackService,
UnpauseStackService,
StopStackService,
DestroyStackService,
// deployment
CreateDeployment,
@@ -698,6 +702,7 @@ pub enum Operation {
RenameDeployment,
DeleteDeployment,
Deploy,
PullDeployment,
StartDeployment,
RestartDeployment,
PauseDeployment,

View File

@@ -87,7 +87,7 @@ pub struct DockerRegistryAccount {
///
/// For docker registry, this can include 'http://...',
/// however this is not recommended and won't work unless "insecure registries" are enabled
/// on your hosts. See [https://docs.docker.com/reference/cli/dockerd/#insecure-registries].
/// on your hosts. See <https://docs.docker.com/reference/cli/dockerd/#insecure-registries>.
#[cfg_attr(feature = "mongo", index)]
#[serde(default = "default_registry_domain")]
#[partial_default(default_registry_domain())]

View File

@@ -27,6 +27,8 @@ pub struct ServerListItemInfo {
pub state: ServerState,
/// Region of the server.
pub region: String,
/// Address of the server.
pub address: String,
/// Whether server is configured to send unreachable alerts.
pub send_unreachable_alerts: bool,
/// Whether server is configured to send cpu alerts.

View File

@@ -127,7 +127,7 @@ apt upgrade -y
curl -fsSL https://get.docker.com | sh
systemctl enable docker.service
systemctl enable containerd.service
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | HOME=/root python3
systemctl enable periphery.service")
}

View File

@@ -121,7 +121,7 @@ runcmd:
- curl -fsSL https://get.docker.com | sh
- systemctl enable docker.service
- systemctl enable containerd.service
- curl -sSL 'https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py' | python3
- curl -sSL 'https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py' | HOME=/root python3
- systemctl enable periphery.service")
}

View File

@@ -11,6 +11,7 @@ use typeshare::typeshare;
use crate::deserializers::{
env_vars_deserializer, file_contents_deserializer,
option_env_vars_deserializer, option_file_contents_deserializer,
option_maybe_string_i64_deserializer,
option_string_list_deserializer, string_list_deserializer,
};
@@ -77,10 +78,10 @@ pub struct StackListItemInfo {
pub state: StackState,
/// A string given by docker conveying the status of the stack.
pub status: Option<String>,
/// The service names that are part of the stack.
/// The services that are part of the stack.
/// If deployed, will be `deployed_services`.
/// Otherwise, its `latest_services`
pub services: Vec<String>,
pub services: Vec<StackServiceWithUpdate>,
/// Whether the compose project is missing on the host.
/// Ie, it does not show up in `docker compose ls`.
/// If true, and the stack is not Down, this is an unhealthy state.
@@ -94,6 +95,16 @@ pub struct StackListItemInfo {
pub latest_hash: Option<String>,
}
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StackServiceWithUpdate {
pub service: String,
/// The service's image
pub image: String,
/// Whether there is a newer image available for this service
pub update_available: bool,
}
#[typeshare]
#[derive(
Debug,
@@ -223,6 +234,19 @@ pub struct StackConfig {
#[builder(default)]
pub run_build: bool,
/// Whether to poll for any updates to the images.
#[serde(default)]
#[builder(default)]
pub poll_for_updates: bool,
/// Whether to automatically redeploy when
/// newer images are found. Will implicitly
/// enable `poll_for_updates`; you don't need to
/// enable both.
#[serde(default)]
#[builder(default)]
pub auto_update: bool,
/// Whether to run `docker compose down` before `compose up`.
#[serde(default)]
#[builder(default)]
@@ -461,6 +485,8 @@ impl Default for StackConfig {
registry_account: Default::default(),
file_contents: Default::default(),
auto_pull: default_auto_pull(),
poll_for_updates: Default::default(),
auto_update: Default::default(),
ignore_services: Default::default(),
pre_deploy: Default::default(),
extra_args: Default::default(),
@@ -520,6 +546,9 @@ pub struct StackServiceNames {
/// This stores only 1. and 2., ie stacko-mongo.
/// Containers will be matched via regex like `^container_name-?[0-9]*$`
pub container_name: String,
/// The service's image.
#[serde(default)]
pub image: String,
}
#[typeshare]
@@ -527,13 +556,18 @@ pub struct StackServiceNames {
pub struct StackService {
/// The service name
pub service: String,
/// The service image
pub image: String,
/// The container
pub container: Option<ContainerListItem>,
/// Whether there is an update available for this service's image.
pub update_available: bool,
}
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]
pub struct StackActionState {
pub pulling: bool,
pub deploying: bool,
pub starting: bool,
pub restarting: bool,
@@ -563,8 +597,8 @@ impl super::resource::AddFilters for StackQuerySpecifics {
}
}
/// Keeping this minimal for now as its only needed to parse the service names / container names
#[typeshare]
/// Keeping this minimal for now as it's only needed to parse the service names / container names,
/// and replica count. Not a typeshared type.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposeFile {
/// If not provided, will default to the parent folder holding the compose file.
@@ -573,9 +607,18 @@ pub struct ComposeFile {
pub services: HashMap<String, ComposeService>,
}
#[typeshare]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposeService {
pub image: Option<String>,
pub container_name: Option<String>,
pub deploy: Option<ComposeServiceDeploy>,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposeServiceDeploy {
#[serde(
default,
deserialize_with = "option_maybe_string_i64_deserializer"
)]
pub replicas: Option<i64>,
}
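Stacks get the same pair of flags. A sketch of enabling full auto-update on a Stack in resource TOML (names illustrative):

```toml
[[stack]]
name = "test-stack"

[stack.config]
server_id = "server-prod"
file_paths = ["compose.yaml"]
# Redeploy automatically whenever a newer service image is found.
# This implicitly enables `poll_for_updates`.
auto_update = true
```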

View File

@@ -55,13 +55,13 @@ pub fn komodo_client() -> &'static KomodoClient {
/// Default environment variables for the [KomodoClient].
#[derive(Deserialize)]
struct KomodoEnv {
pub struct KomodoEnv {
/// KOMODO_ADDRESS
komodo_address: String,
pub komodo_address: String,
/// KOMODO_API_KEY
komodo_api_key: String,
pub komodo_api_key: String,
/// KOMODO_API_SECRET
komodo_api_secret: String,
pub komodo_api_secret: String,
}
/// Client to interface with [Komodo](https://komo.do/docs/api#rust-client)

View File

@@ -1,6 +1,6 @@
{
"name": "komodo_client",
"version": "1.16.4",
"version": "1.16.9",
"description": "Komodo client package",
"homepage": "https://komo.do",
"main": "dist/lib.js",

View File

@@ -219,6 +219,7 @@ export type WriteResponses = {
// ==== DEPLOYMENT ====
CreateDeployment: Types.Deployment;
CopyDeployment: Types.Deployment;
CreateDeploymentFromContainer: Types.Deployment;
DeleteDeployment: Types.Deployment;
UpdateDeployment: Types.Deployment;
RenameDeployment: Types.Update;
@@ -349,28 +350,37 @@ export type ExecuteResponses = {
// ==== DEPLOYMENT ====
Deploy: Types.Update;
BatchDeploy: Types.BatchExecutionResponse;
PullDeployment: Types.Update;
StartDeployment: Types.Update;
RestartDeployment: Types.Update;
PauseDeployment: Types.Update;
UnpauseDeployment: Types.Update;
StopDeployment: Types.Update;
DestroyDeployment: Types.Update;
BatchDestroyDeployment: Types.BatchExecutionResponse;
// ==== BUILD ====
RunBuild: Types.Update;
BatchRunBuild: Types.BatchExecutionResponse;
CancelBuild: Types.Update;
// ==== REPO ====
CloneRepo: Types.Update;
BatchCloneRepo: Types.BatchExecutionResponse;
PullRepo: Types.Update;
BatchPullRepo: Types.BatchExecutionResponse;
BuildRepo: Types.Update;
BatchBuildRepo: Types.BatchExecutionResponse;
CancelRepoBuild: Types.Update;
// ==== PROCEDURE ====
RunProcedure: Types.Update;
BatchRunProcedure: Types.BatchExecutionResponse;
// ==== ACTION ====
RunAction: Types.Update;
BatchRunAction: Types.BatchExecutionResponse;
// ==== SERVER TEMPLATE ====
LaunchServer: Types.Update;
@@ -380,13 +390,17 @@ export type ExecuteResponses = {
// ==== STACK ====
DeployStack: Types.Update;
BatchDeployStack: Types.BatchExecutionResponse;
DeployStackIfChanged: Types.Update;
BatchDeployStackIfChanged: Types.BatchExecutionResponse;
PullStack: Types.Update;
StartStack: Types.Update;
RestartStack: Types.Update;
StopStack: Types.Update;
PauseStack: Types.Update;
UnpauseStack: Types.Update;
DestroyStack: Types.Update;
BatchDestroyStack: Types.BatchExecutionResponse;
// ==== STACK Service ====
DeployStackService: Types.Update;

View File

@@ -1,5 +1,5 @@
/*
Generated by typeshare 1.11.0
Generated by typeshare 1.12.0
*/
export interface MongoIdObj {
@@ -198,6 +198,12 @@ export interface AlerterQuerySpecifics {
export type AlerterQuery = ResourceQuery<AlerterQuerySpecifics>;
export type BatchExecutionResponseItem =
| { status: "Ok", data: Update }
| { status: "Err", data: BatchExecutionResponseItemErr };
export type BatchExecutionResponse = BatchExecutionResponseItem[];
export interface Version {
major: number;
minor: number;
@@ -313,10 +319,10 @@ export interface BuildConfig {
* Secret arguments.
*
* These values remain hidden in the final image by using
* docker secret mounts. See [https://docs.docker.com/build/building/secrets].
* docker secret mounts. See <https://docs.docker.com/build/building/secrets>.
*
* The values can be used in RUN commands:
* ```
* ```sh
* RUN --mount=type=secret,id=SECRET_KEY \
* SECRET_KEY=$(cat /run/secrets/SECRET_KEY) ...
* ```
@@ -389,9 +395,11 @@ export interface BuildQuerySpecifics {
export type BuildQuery = ResourceQuery<BuildQuerySpecifics>;
export type BuilderConfig =
/** Use a connected server an image builder. */
/** Use a Periphery address as a Builder. */
| { type: "Url", params: UrlBuilderConfig }
/** Use a connected server as a Builder. */
| { type: "Server", params: ServerBuilderConfig }
/** Use EC2 instances spawned on demand as an image builder. */
/** Use EC2 instances spawned on demand as a Builder. */
| { type: "Aws", params: AwsBuilderConfig };
export type Builder = Resource<BuilderConfig, undefined>;
@@ -418,19 +426,28 @@ export type Execution =
/** The "null" execution. Does nothing. */
| { type: "None", params: NoData }
| { type: "RunAction", params: RunAction }
| { type: "BatchRunAction", params: BatchRunAction }
| { type: "RunProcedure", params: RunProcedure }
| { type: "BatchRunProcedure", params: BatchRunProcedure }
| { type: "RunBuild", params: RunBuild }
| { type: "BatchRunBuild", params: BatchRunBuild }
| { type: "CancelBuild", params: CancelBuild }
| { type: "Deploy", params: Deploy }
| { type: "BatchDeploy", params: BatchDeploy }
| { type: "PullDeployment", params: PullDeployment }
| { type: "StartDeployment", params: StartDeployment }
| { type: "RestartDeployment", params: RestartDeployment }
| { type: "PauseDeployment", params: PauseDeployment }
| { type: "UnpauseDeployment", params: UnpauseDeployment }
| { type: "StopDeployment", params: StopDeployment }
| { type: "DestroyDeployment", params: DestroyDeployment }
| { type: "BatchDestroyDeployment", params: BatchDestroyDeployment }
| { type: "CloneRepo", params: CloneRepo }
| { type: "BatchCloneRepo", params: BatchCloneRepo }
| { type: "PullRepo", params: PullRepo }
| { type: "BatchPullRepo", params: BatchPullRepo }
| { type: "BuildRepo", params: BuildRepo }
| { type: "BatchBuildRepo", params: BatchBuildRepo }
| { type: "CancelRepoBuild", params: CancelRepoBuild }
| { type: "StartContainer", params: StartContainer }
| { type: "RestartContainer", params: RestartContainer }
@@ -456,13 +473,17 @@ export type Execution =
| { type: "RunSync", params: RunSync }
| { type: "CommitSync", params: CommitSync }
| { type: "DeployStack", params: DeployStack }
| { type: "BatchDeployStack", params: BatchDeployStack }
| { type: "DeployStackIfChanged", params: DeployStackIfChanged }
| { type: "BatchDeployStackIfChanged", params: BatchDeployStackIfChanged }
| { type: "PullStack", params: PullStack }
| { type: "StartStack", params: StartStack }
| { type: "RestartStack", params: RestartStack }
| { type: "PauseStack", params: PauseStack }
| { type: "UnpauseStack", params: UnpauseStack }
| { type: "StopStack", params: StopStack }
| { type: "DestroyStack", params: DestroyStack }
| { type: "BatchDestroyStack", params: BatchDestroyStack }
| { type: "Sleep", params: Sleep };
/** Allows to enable / disabled procedures in the sequence / parallel vec on the fly */
@@ -540,7 +561,7 @@ export interface DockerRegistryAccount {
*
* For docker registry, this can include 'http://...',
* however this is not recommended and won't work unless "insecure registries" are enabled
* on your hosts. See [https://docs.docker.com/reference/cli/dockerd/#insecure-registries].
* on your hosts. See <https://docs.docker.com/reference/cli/dockerd/#insecure-registries>.
*/
domain: string;
/** The account username */
@@ -762,6 +783,13 @@ export interface DeploymentConfig {
skip_secret_interp?: boolean;
/** Whether to redeploy the deployment whenever the attached build finishes. */
redeploy_on_build?: boolean;
/** Whether to poll for any updates to the image. */
poll_for_updates?: boolean;
/**
* Whether to automatically redeploy when a
* newer image is found.
*/
auto_update?: boolean;
/** Whether to send ContainerStateChange alerts for this deployment. */
send_alerts: boolean;
/** Configure quick links that are displayed in the resource header */
@@ -840,6 +868,8 @@ export interface DeploymentListItemInfo {
status?: string;
/** The image attached to the deployment. */
image: string;
/** Whether there is a newer image available at the same tag. */
update_available: boolean;
/** The server that deployment sits on. */
server_id: string;
/** An attached Komodo Build, if it exists. */
@@ -957,6 +987,19 @@ export type AlertData =
from: DeploymentState;
/** The current container state */
to: DeploymentState;
}}
/** A Deployment has an image update available */
| { type: "DeploymentImageUpdateAvailable", data: {
/** The id of the deployment */
id: string;
/** The name of the deployment */
name: string;
/** The server id of server that the deployment is on */
server_id: string;
/** The server name */
server_name: string;
/** The image with update */
image: string;
}}
/** A stack's state has changed unexpectedly. */
| { type: "StackStateChange", data: {
@@ -972,6 +1015,21 @@ export type AlertData =
from: StackState;
/** The current stack state */
to: StackState;
}}
/** A Stack has an image update available */
| { type: "StackImageUpdateAvailable", data: {
/** The id of the stack */
id: string;
/** The name of the stack */
name: string;
/** The server id of server that the stack is on */
server_id: string;
/** The server name */
server_name: string;
/** The service name to update */
service: string;
/** The image with update */
image: string;
}}
/** An AWS builder failed to terminate. */
| { type: "AwsBuilderTerminationFailed", data: {
@@ -1061,6 +1119,7 @@ export interface Log {
export type GetContainerLogResponse = Log;
export interface DeploymentActionState {
pulling: boolean;
deploying: boolean;
starting: boolean;
restarting: boolean;
@@ -1450,6 +1509,7 @@ export type ServerTemplate = Resource<ServerTemplateConfig, undefined>;
export type GetServerTemplateResponse = ServerTemplate;
export interface StackActionState {
pulling: boolean;
deploying: boolean;
starting: boolean;
restarting: boolean;
@@ -1486,6 +1546,13 @@ export interface StackConfig {
* Combine with build_extra_args for custom behaviors.
*/
run_build?: boolean;
/** Whether to poll for any updates to the images. */
poll_for_updates?: boolean;
/**
* Whether to automatically redeploy when
* newer images are found.
*/
auto_update?: boolean;
/** Whether to run `docker compose down` before `compose up`. */
destroy_before_deploy?: boolean;
/** Whether to skip secret interpolation into the stack environment variables. */
@@ -1624,6 +1691,8 @@ export interface StackServiceNames {
* Containers will be matched via regex like `^container_name-?[0-9]*$`
*/
container_name: string;
/** The service's image. */
image?: string;
}
export interface StackInfo {
@@ -1803,6 +1872,7 @@ export enum Operation {
DeleteStack = "DeleteStack",
WriteStackContents = "WriteStackContents",
RefreshStackCache = "RefreshStackCache",
PullStack = "PullStack",
DeployStack = "DeployStack",
StartStack = "StartStack",
RestartStack = "RestartStack",
@@ -1810,16 +1880,20 @@ export enum Operation {
UnpauseStack = "UnpauseStack",
StopStack = "StopStack",
DestroyStack = "DestroyStack",
DeployStackService = "DeployStackService",
PullStackService = "PullStackService",
StartStackService = "StartStackService",
RestartStackService = "RestartStackService",
PauseStackService = "PauseStackService",
UnpauseStackService = "UnpauseStackService",
StopStackService = "StopStackService",
DestroyStackService = "DestroyStackService",
CreateDeployment = "CreateDeployment",
UpdateDeployment = "UpdateDeployment",
RenameDeployment = "RenameDeployment",
DeleteDeployment = "DeleteDeployment",
Deploy = "Deploy",
PullDeployment = "PullDeployment",
StartDeployment = "StartDeployment",
RestartDeployment = "RestartDeployment",
PauseDeployment = "PauseDeployment",
@@ -3142,6 +3216,8 @@ export interface ServerListItemInfo {
state: ServerState;
/** Region of the server. */
region: string;
/** Address of the server. */
address: string;
/** Whether server is configured to send unreachable alerts. */
send_unreachable_alerts: boolean;
/** Whether server is configured to send cpu alerts. */
@@ -3159,8 +3235,12 @@ export type ListServersResponse = ServerListItem[];
export interface StackService {
/** The service name */
service: string;
/** The service image */
image: string;
/** The container */
container?: ContainerListItem;
/** Whether there is an update available for this service's image. */
update_available: boolean;
}
export type ListStackServicesResponse = StackService[];
@@ -3188,6 +3268,14 @@ export enum StackState {
Unknown = "unknown",
}
export interface StackServiceWithUpdate {
service: string;
/** The service's image */
image: string;
/** Whether there is a newer image available for this service */
update_available: boolean;
}
export interface StackListItemInfo {
/** The server that stack is deployed on. */
server_id: string;
@@ -3206,11 +3294,11 @@ export interface StackListItemInfo {
/** A string given by docker conveying the status of the stack. */
status?: string;
/**
* The service names that are part of the stack.
* The services that are part of the stack.
* If deployed, will be `deployed_services`.
* Otherwise, it's `latest_services`
*/
services: string[];
services: StackServiceWithUpdate[];
/**
* Whether the compose project is missing on the host.
* Ie, it does not show up in `docker compose ls`.
@@ -3387,6 +3475,8 @@ export type _PartialStackConfig = Partial<StackConfig>;
export type _PartialTag = Partial<Tag>;
export type _PartialUrlBuilderConfig = Partial<UrlBuilderConfig>;
export interface __Serror {
error: string;
trace: string[];
@@ -3512,6 +3602,198 @@ export interface AwsServerTemplateConfig {
user_data: string;
}
/** Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchBuildRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Clones multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchCloneRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Deploys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDeploy {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDeployStack {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks if changed in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDeployStackIfChanged {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Destroys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDestroyDeployment {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Destroys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDestroyStack {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
export interface BatchExecutionResponseItemErr {
name: string;
error: _Serror;
}
/** Pulls multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchPullRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResponse] */
export interface BatchRunAction {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* actions
* foo-*
* # add some more
* extra-action-1, extra-action-2
* ```
*/
pattern: string;
}
/** Runs multiple builds in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchRunBuild {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* builds
* foo-*
* # add some more
* extra-build-1, extra-build-2
* ```
*/
pattern: string;
}
/** Runs multiple Procedures in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchRunProcedure {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* procedures
* foo-*
* # add some more
* extra-procedure-1, extra-procedure-2
* ```
*/
pattern: string;
}
/**
* Builds the target repo, using the attached builder. Response: [Update].
*
@@ -3563,7 +3845,7 @@ export interface CloneArgs {
provider: string;
/** Use https (vs http). */
https: boolean;
/** Full repo identifier. <namespace>/<repo_name> */
/** Full repo identifier. {namespace}/{repo_name} */
repo?: string;
/** Git Branch. Default: `main` */
branch: string;
@@ -3605,18 +3887,6 @@ export interface CommitSync {
sync: string;
}
export interface ComposeService {
image?: string;
container_name?: string;
}
/** Keeping this minimal for now as its only needed to parse the service names / container names */
export interface ComposeFile {
/** If not provided, will default to the parent folder holding the compose file. */
name?: string;
services?: Record<string, ComposeService>;
}
export interface Conversion {
/** reference on the server. */
local: string;
@@ -3811,6 +4081,7 @@ export interface CreateBuildWebhook {
/** Partial representation of [BuilderConfig] */
export type PartialBuilderConfig =
| { type: "Url", params: _PartialUrlBuilderConfig }
| { type: "Server", params: _PartialServerBuilderConfig }
| { type: "Aws", params: _PartialAwsBuilderConfig };
@@ -3830,6 +4101,14 @@ export interface CreateDeployment {
config?: _PartialDeploymentConfig;
}
/** Create a Deployment from an existing container. Response: [Deployment]. */
export interface CreateDeploymentFromContainer {
/** The name or id of the existing container. */
name: string;
/** The server id or name on which container exists. */
server: string;
}
/**
* **Admin only.** Create a docker registry account.
* Response: [DockerRegistryAccount].
@@ -4116,7 +4395,7 @@ export interface DeleteDockerRegistryAccount {
/**
* **Admin only.** Delete a git provider account.
* Response: [User].
* Response: [DeleteGitProviderAccountResponse].
*/
export interface DeleteGitProviderAccount {
/** The id of the git provider to delete */
@@ -4305,6 +4584,8 @@ export interface Deploy {
export interface DeployStack {
/** Id or name */
stack: string;
/** Optionally specify a specific service to "compose up" */
service?: string;
/**
* Override the default termination max time.
* Only used if the stack needs to be taken down first.
@@ -4363,6 +4644,8 @@ export interface DestroyDeployment {
export interface DestroyStack {
/** Id or name */
stack: string;
/** Optionally specify a specific service to destroy */
service?: string;
/** Pass `--remove-orphans` */
remove_orphans?: boolean;
/** Override the default termination max time. */
@@ -4695,7 +4978,7 @@ export interface GetDeploymentLog {
/**
* Get the deployment container's stats using `docker stats`.
* Response: [DockerContainerStats].
* Response: [GetDeploymentStatsResponse].
*
* Note. This call will hit the underlying server directly for most up to date stats.
*/
@@ -4925,7 +5208,7 @@ export interface GetReposSummaryResponse {
unknown: number;
}
/** Inspect a docker container on the server. Response: [Container]. */
/** Find the attached resource for a container. Either Deployment or Stack. Response: [GetResourceMatchingContainerResponse]. */
export interface GetResourceMatchingContainer {
/** Id or name */
server: string;
@@ -4933,6 +5216,7 @@ export interface GetResourceMatchingContainer {
container: string;
}
/** Response for [GetResourceMatchingContainer]. Resource is either Deployment, Stack, or None. */
export interface GetResourceMatchingContainerResponse {
resource?: ResourceTarget;
}
@@ -5046,7 +5330,7 @@ export interface GetStackActionState {
stack: string;
}
/** Get a stack service's log. Response: [GetStackContainersResponse]. */
/** Get a stack service's log. Response: [GetStackServiceLogResponse]. */
export interface GetStackServiceLog {
/** Id or name */
stack: string;
@@ -5462,7 +5746,7 @@ export interface ListApiKeysForServiceUser {
/**
* Retrieve versions of the build that were built in the past and available for deployment,
* sorted by most recent first.
* Response: [GetBuildVersionsResponse].
* Response: [ListBuildVersionsResponse].
*/
export interface ListBuildVersions {
/** Id or name */
@@ -5593,7 +5877,7 @@ export interface ListDockerRegistriesFromConfig {
/**
* List docker registry accounts matching optional query.
* Response: [ListDockerRegistrysResponse].
* Response: [ListDockerRegistryAccountsResponse].
*/
export interface ListDockerRegistryAccounts {
/** Optionally filter by accounts with a specific domain. */
@@ -5680,7 +5964,7 @@ export interface ListFullStacks {
/**
* List git provider accounts matching optional query.
* Response: [ListGitProvidersResponse].
* Response: [ListGitProviderAccountsResponse].
*/
export interface ListGitProviderAccounts {
/** Optionally filter by accounts with a specific domain. */
@@ -6041,6 +6325,12 @@ export interface PruneVolumes {
server: string;
}
/** Pulls the image for the target deployment. Response: [Update] */
export interface PullDeployment {
/** Name or id */
deployment: string;
}
/**
* Pulls the target repo. Response: [Update].
*
@@ -6054,6 +6344,14 @@ export interface PullRepo {
repo: string;
}
/** Pulls images for the target stack. `docker compose pull`. Response: [Update] */
export interface PullStack {
/** Id or name */
stack: string;
* Optionally specify a specific service to pull
service?: string;
}
/**
* Push a resource to the front of the users 10 most recently viewed resources.
* Response: [NoData].
@@ -6452,7 +6750,7 @@ export interface SearchStackServiceLog {
/** Configuration for a Komodo Server Builder. */
export interface ServerBuilderConfig {
/** The server id of the builder */
server_id: string;
server_id?: string;
}
/** The health of a part of the server. */
@@ -6977,6 +7275,14 @@ export interface UpdateVariableValue {
value: string;
}
/** Configuration for a Komodo Url Builder. */
export interface UrlBuilderConfig {
/** The address of the Periphery agent */
address: string;
/** A custom passkey to use. Otherwise, use the default passkey. */
passkey?: string;
}
/** Update file contents in Files on Server or Git Repo mode. Response: [Update]. */
export interface WriteStackFileContents {
/** The name or id of the target Stack. */
@@ -7035,28 +7341,41 @@ export type ExecuteRequest =
| { type: "PruneBuildx", params: PruneBuildx }
| { type: "PruneSystem", params: PruneSystem }
| { type: "Deploy", params: Deploy }
| { type: "BatchDeploy", params: BatchDeploy }
| { type: "PullDeployment", params: PullDeployment }
| { type: "StartDeployment", params: StartDeployment }
| { type: "RestartDeployment", params: RestartDeployment }
| { type: "PauseDeployment", params: PauseDeployment }
| { type: "UnpauseDeployment", params: UnpauseDeployment }
| { type: "StopDeployment", params: StopDeployment }
| { type: "DestroyDeployment", params: DestroyDeployment }
| { type: "BatchDestroyDeployment", params: BatchDestroyDeployment }
| { type: "DeployStack", params: DeployStack }
| { type: "BatchDeployStack", params: BatchDeployStack }
| { type: "DeployStackIfChanged", params: DeployStackIfChanged }
| { type: "BatchDeployStackIfChanged", params: BatchDeployStackIfChanged }
| { type: "PullStack", params: PullStack }
| { type: "StartStack", params: StartStack }
| { type: "RestartStack", params: RestartStack }
| { type: "StopStack", params: StopStack }
| { type: "PauseStack", params: PauseStack }
| { type: "UnpauseStack", params: UnpauseStack }
| { type: "DestroyStack", params: DestroyStack }
| { type: "BatchDestroyStack", params: BatchDestroyStack }
| { type: "RunBuild", params: RunBuild }
| { type: "BatchRunBuild", params: BatchRunBuild }
| { type: "CancelBuild", params: CancelBuild }
| { type: "CloneRepo", params: CloneRepo }
| { type: "BatchCloneRepo", params: BatchCloneRepo }
| { type: "PullRepo", params: PullRepo }
| { type: "BatchPullRepo", params: BatchPullRepo }
| { type: "BuildRepo", params: BuildRepo }
| { type: "BatchBuildRepo", params: BatchBuildRepo }
| { type: "CancelRepoBuild", params: CancelRepoBuild }
| { type: "RunProcedure", params: RunProcedure }
| { type: "BatchRunProcedure", params: BatchRunProcedure }
| { type: "RunAction", params: RunAction }
| { type: "BatchRunAction", params: BatchRunAction }
| { type: "LaunchServer", params: LaunchServer }
| { type: "RunSync", params: RunSync };
@@ -7220,6 +7539,7 @@ export type WriteRequest =
| { type: "CreateNetwork", params: CreateNetwork }
| { type: "CreateDeployment", params: CreateDeployment }
| { type: "CopyDeployment", params: CopyDeployment }
| { type: "CreateDeploymentFromContainer", params: CreateDeploymentFromContainer }
| { type: "DeleteDeployment", params: DeleteDeployment }
| { type: "UpdateDeployment", params: UpdateDeployment }
| { type: "RenameDeployment", params: RenameDeployment }

View File

@@ -122,7 +122,29 @@ pub struct WriteCommitComposeContents {
//
/// Rewrites the compose directory, pulls any images, takes down existing containers,
/// and runs docker compose up.
/// and runs docker compose up. Response: [ComposePullResponse]
#[derive(Debug, Clone, Serialize, Deserialize, Request)]
#[response(ComposePullResponse)]
pub struct ComposePull {
/// The stack to pull
pub stack: Stack,
/// Only pull one service
pub service: Option<String>,
/// If provided, use it to log in. Otherwise check periphery local git providers.
pub git_token: Option<String>,
/// If provided, use it to log in. Otherwise check periphery local registries.
pub registry_token: Option<String>,
}
/// Response for [ComposePull]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposePullResponse {
pub logs: Vec<Log>,
}
//
/// docker compose up.
#[derive(Debug, Clone, Serialize, Deserialize, Request)]
#[response(ComposeUpResponse)]
pub struct ComposeUp {

View File

@@ -23,6 +23,19 @@ pub struct ImageHistory {
//
#[derive(Debug, Clone, Serialize, Deserialize, Request)]
#[response(Log)]
pub struct PullImage {
/// The name of the image.
pub name: String,
/// Optional account to use to pull the image
pub account: Option<String>,
/// Override registry token for account with one sent from core.
pub token: Option<String>,
}
//
#[derive(Serialize, Deserialize, Debug, Clone, Request)]
#[response(Log)]
pub struct DeleteImage {

View File

@@ -78,8 +78,9 @@ KOMODO_JWT_TTL="1-day"
KOMODO_OIDC_ENABLED=false
## Must be reachable from the Komodo Core container
# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo
## Must be reachable by users (optional if it is the same as above).
# KOMODO_OIDC_REDIRECT=https://oidc.provider.external/application/o/komodo
## Change the host to one reachable by users (optional if it is the same as above).
## DO NOT include the `path` part of the URL.
# KOMODO_OIDC_REDIRECT_HOST=https://oidc.provider.external
## Your client credentials
# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE
# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE

View File

@@ -152,15 +152,18 @@ oidc_enabled = false
## Optional, no default.
oidc_provider = "https://oidc.provider.internal/application/o/komodo"
## Configure OIDC user redirect address.
## Configure OIDC user redirect host.
##
## This is the address users are redirected to in their browser,
## and may be different from `oidc_provider` depending on your networking.
## This is the host address users are redirected to in their browser,
## and may be different from `oidc_provider` host depending on your networking.
## If not provided (or empty string ""), the `oidc_provider` will be used.
##
## Env: KOMODO_OIDC_REDIRECT
## Note. DO NOT include the `path` part of the URL.
## Example: `https://oidc.provider.external`
##
## Env: KOMODO_OIDC_REDIRECT_HOST
## Optional, no default.
oidc_redirect = ""
oidc_redirect_host = ""
## Give the OIDC Client ID.
## Env: KOMODO_OIDC_CLIENT_ID or KOMODO_OIDC_CLIENT_ID_FILE

View File

@@ -22,7 +22,7 @@ apt upgrade -y
curl -fsSL https://get.docker.com | sh
systemctl enable docker.service
systemctl enable containerd.service
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | HOME=/root python3
systemctl enable periphery.service
```

View File

@@ -0,0 +1,86 @@
# Procedures and Actions
For orchestrations involving multiple Resources, Komodo offers the `Procedure` and `Action` resource types.
## Procedures
`Procedures` are compositions of many executions, such as `RunBuild` and `DeployStack`.
The executions are grouped into a series of `Stages`, where each `Stage` contains one or more executions
to run **_all at once_**. The Procedure will wait until all of the executions in a `Stage` are complete before moving
on to the next stage. In short, the executions in a `Stage` are run **_in parallel_**, and the stages themselves are
executed **_sequentially_**.
### Batch Executions
Many executions have a `Batch` version you can select, for example [**BatchDeployStackIfChanged**](https://docs.rs/komodo_client/latest/komodo_client/api/execute/struct.BatchDeployStackIfChanged.html). With this, you can match multiple Stacks by name
using [**wildcard syntax**](https://docs.rs/wildcard/latest/wildcard) and [**regex**](https://docs.rs/regex/latest/regex).
### TOML Example
Like all Resources, `Procedures` have a TOML representation, and can be managed in `ResourceSyncs`.
```toml
[[procedure]]
name = "pull-deploy"
description = "Pulls stack-repo, deploys stacks"
[[procedure.config.stage]]
name = "Pull Repo"
executions = [
{ execution.type = "PullRepo", execution.params.pattern = "stack-repo" },
]
[[procedure.config.stage]]
name = "Deploy if changed"
executions = [
# Uses the Batch version, which matches many stacks by pattern
# This one matches all stacks prefixed with `foo-` (wildcard) and `bar-` (regex).
{ execution.type = "BatchDeployStackIfChanged", execution.params.pattern = "foo-* , \\^bar-.*$\\" },
]
```
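Since this release also adds the `PullStack` and `PullDeployment` executions, a stage can pre-pull images ahead of a deploy. A sketch, with illustrative resource names:

```toml
[[procedure.config.stage]]
name = "Pre-pull images"
executions = [
  # `docker compose pull` for the whole stack; pass execution.params.service to limit to one service
  { execution.type = "PullStack", execution.params.stack = "my-stack" },
  # Pull the image for a single deployment
  { execution.type = "PullDeployment", execution.params.deployment = "my-deployment" },
]
```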
## Actions
`Actions` give users the power of TypeScript to write calls to the Komodo API.
For example, an `Action` script like this will align the versions and branches of many `Builds`.
```ts
const VERSION = "1.16.5";
const BRANCH = "dev/" + VERSION;
const APPS = ["core", "periphery"];
const ARCHS = ["x86", "aarch64"];
await komodo.write("UpdateVariableValue", {
name: "KOMODO_DEV_VERSION",
value: VERSION,
});
console.log("Updated KOMODO_DEV_VERSION to " + VERSION);
for (const app of APPS) {
for (const arch of ARCHS) {
const name = `komodo-${app}-${arch}-dev`;
await komodo.write("UpdateBuild", {
id: name,
config: {
version: VERSION as any,
branch: BRANCH,
},
});
console.log(
`Updated Build ${name} to version ${VERSION} and branch ${BRANCH}`,
);
}
}
for (const arch of ARCHS) {
const name = `periphery-bin-${arch}-dev`;
await komodo.write("UpdateRepo", {
id: name,
config: {
branch: BRANCH,
},
});
console.log(`Updated Repo ${name} to branch ${BRANCH}`);
}
```

View File

@@ -23,9 +23,10 @@ automatically execute syncs upon pushes to the configured branch.
name = "server-prod"
description = "the prod server"
tags = ["prod"]
config.address = "http://localhost:8120"
config.region = "AshburnDc1"
config.enabled = true # default: false
[server.config]
address = "http://localhost:8120"
region = "AshburnDc1"
enabled = true # default: false
```
### Builder and build
@@ -38,14 +39,15 @@ config.enabled = true # default: false
name = "builder-01"
tags = []
config.type = "Aws"
config.params.region = "us-east-2"
config.params.ami_id = "ami-0e9bd154667944680"
[builder.config.params]
region = "us-east-2"
ami_id = "ami-0e9bd154667944680"
# These things come from your specific setup
config.params.subnet_id = "subnet-xxxxxxxxxxxxxxxxxx"
config.params.key_pair_name = "xxxxxxxx"
config.params.assign_public_ip = true
config.params.use_public_ip = true
config.params.security_group_ids = [
subnet_id = "subnet-xxxxxxxxxxxxxxxxxx"
key_pair_name = "xxxxxxxx"
assign_public_ip = true
use_public_ip = true
security_group_ids = [
"sg-xxxxxxxxxxxxxxxxxx",
"sg-xxxxxxxxxxxxxxxxxx"
]
@@ -56,19 +58,21 @@ config.params.security_group_ids = [
name = "test_logger"
description = "Logs randomly at INFO, WARN, ERROR levels to test logging setups"
tags = ["test"]
config.builder_id = "builder-01"
config.repo = "mbecker20/test_logger"
config.branch = "master"
config.git_account = "mbecker20"
config.image_registry.type = "Standard"
config.image_registry.params.domain = "github.com" # or your custom domain
config.image_registry.params.account = "your_username"
config.image_registry.params.organization = "your_organization" # optional
[build.config]
builder_id = "builder-01"
repo = "mbecker20/test_logger"
branch = "master"
git_account = "mbecker20"
image_registry.type = "Standard"
image_registry.params.domain = "github.com" # or your custom domain
image_registry.params.account = "your_username"
image_registry.params.organization = "your_organization" # optional
# Set docker labels
config.labels = """
labels = """
org.opencontainers.image.source = https://github.com/mbecker20/test_logger
org.opencontainers.image.description = Logs randomly at INFO, WARN, ERROR levels to test logging setups
org.opencontainers.image.licenses = GPL-3.0"""
org.opencontainers.image.licenses = GPL-3.0
"""
```
### Deployments
@@ -76,7 +80,8 @@ org.opencontainers.image.licenses = GPL-3.0"""
- [Deployment config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/deployment/struct.DeploymentConfig.html)
```toml
[[variable]] # Declare variables
# Declare variables
[[variable]]
name = "OTLP_ENDPOINT"
value = "http://localhost:4317"
@@ -91,20 +96,26 @@ tags = ["test"]
# - has relevant config updates.
# - the attached build has new version.
deploy = true
config.server_id = "server-01"
config.image.type = "Build"
config.image.params.build = "test_logger"
[deployment.config]
server_id = "server-01"
image.type = "Build"
image.params.build = "test_logger"
# set the volumes / bind mounts
config.volumes = """
volumes = """
# Supports comments
/data/logs = /etc/logs
/data/config = /etc/config"""
# And other formats (eg yaml list)
- "/data/config:/etc/config"
"""
# Set the environment variables
config.environment = """
OTLP_ENDPOINT = [[OTLP_ENDPOINT]] # interpolate variables into the envs. (they also support comments using '#')
environment = """
# Comments supported
OTLP_ENDPOINT = [[OTLP_ENDPOINT]] # interpolate variables into the envs.
VARIABLE_1 = value_1
VARIABLE_2 = value_2"""
VARIABLE_2 = value_2
"""
# Set Docker labels
config.labels = "deployment.type = logger"
labels = "deployment.type = logger"
##
@@ -116,17 +127,19 @@ deploy = true
# Create a dependency on test-logger-01. This deployment will only be deployed after test-logger-01 is deployed.
# Additionally, any sync deploy of test-logger-01 will also trigger sync deploy of this deployment.
after = ["test-logger-01"]
config.server_id = "server-01"
config.image.type = "Build"
config.image.params.build = "test_logger"
config.volumes = """
[deployment.config]
server_id = "server-01"
image.type = "Build"
image.params.build = "test_logger"
volumes = """
/data/logs = /etc/logs
/data/config = /etc/config"""
config.environment = """
environment = """
VARIABLE_1 = value_1
VARIABLE_2 = value_2"""
VARIABLE_2 = value_2
"""
# Set Docker labels
config.labels = "deployment.type = logger"
labels = "deployment.type = logger"
```
### Stack
@@ -140,11 +153,12 @@ description = "stack test"
deploy = true
after = ["test-logger-01"] # Stacks can depend on deployments, and vice versa.
tags = ["test"]
config.server_id = "server-prod"
config.file_paths = ["mongo.yaml", "redis.yaml"]
config.git_provider = "git.mogh.tech"
config.git_account = "mbecker20" # clone private repo by specifying account
config.repo = "mbecker20/stack_test"
[stack.config]
server_id = "server-prod"
file_paths = ["mongo.yaml", "redis.yaml"]
git_provider = "git.mogh.tech"
git_account = "mbecker20" # clone private repo by specifying account
repo = "mbecker20/stack_test"
```
### Procedure
@@ -157,28 +171,28 @@ name = "test-procedure"
description = "Do some things in a specific order"
tags = ["test"]
# Each stage will be executed one after the other (in sequence)
[[procedure.config.stage]]
name = "Build stuff"
enabled = true
# The executions within a stage will be run in parallel. The stage completes when all executions finish.
executions = [
{ execution.type = "RunBuild", execution.params.build = "test_logger", enabled = true },
{ execution.type = "PullRepo", execution.params.repo = "komodo-periphery", enabled = true },
{ execution.type = "RunBuild", execution.params.build = "test_logger" },
# Uses the Batch version, which matches many builds by pattern
# This one matches all builds prefixed with `foo-` (wildcard) and `bar-` (regex).
{ execution.type = "BatchRunBuild", execution.params.pattern = "foo-* , \\^bar-.*$\\" },
{ execution.type = "PullRepo", execution.params.repo = "komodo-periphery" },
]
[[procedure.config.stage]]
name = "Deploy test logger 1"
enabled = true
executions = [
{ execution.type = "Deploy", execution.params.deployment = "test-logger-01", enabled = true }
{ execution.type = "Deploy", execution.params.deployment = "test-logger-01" },
{ execution.type = "Deploy", execution.params.deployment = "test-logger-03", enabled = false },
]
[[procedure.config.stage]]
name = "Deploy test logger 2"
enabled = true
enabled = false
executions = [
{ execution.type = "Deploy", execution.params.deployment = "test-logger-02", enabled = true }
{ execution.type = "Deploy", execution.params.deployment = "test-logger-02" }
]
```
@@ -191,15 +205,19 @@ executions = [
name = "komodo-periphery"
description = "Builds new versions of the periphery binary. Requires Rust installed on the host."
tags = ["komodo"]
config.server_id = "server-01"
config.git_provider = "git.mogh.tech" # use an alternate git provider (default is github.com)
config.git_account = "mbecker20"
config.repo = "mbecker20/komodo"
[repo.config]
server_id = "server-01"
git_provider = "git.mogh.tech" # use an alternate git provider (default is github.com)
git_account = "mbecker20"
repo = "mbecker20/komodo"
# Run an action after the repo is pulled
config.on_pull.path = "."
config.on_pull.command = """
/root/.cargo/bin/cargo build -p komodo_periphery --release && \
cp ./target/release/periphery /root/periphery"""
on_pull.path = "."
on_pull.command = """
# Supports comments
/root/.cargo/bin/cargo build -p komodo_periphery --release
# Multiple lines will be combined together using '&&'
cp ./target/release/periphery /root/periphery
"""
```
### User Group:

View File

@@ -30,7 +30,7 @@ https://${HOST}/listener/${AUTH_TYPE}/${RESOURCE_TYPE}/${ID_OR_NAME}/${EXECUTION
- **`EXECUTION`**:
- Which executions are available depends on the `RESOURCE_TYPE`. Builds only have the `/build` action.
Repos can select between `/pull`, `/clone`, or `/build`. Stacks have `/deploy` and `/refresh`, and Resource Syncs have `/sync` and `/refresh`.
- For **Procedures and Actions**, this will be the **branch to listen to for pushes**, or `__ALL__` to trigger
- For **Procedures and Actions**, this will be the **branch to listen to for pushes**, or `__ANY__` to trigger
on pushes to any branch.
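For example, a Procedure named `pull-deploy` (an illustrative name) that should run on pushes to any branch would use a listener URL along the lines of `https://${HOST}/listener/${AUTH_TYPE}/procedure/pull-deploy/__ANY__`, substituting your host and auth type as above.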
## Create the webhook on the Git Provider

View File

@@ -59,6 +59,7 @@ const sidebars: SidebarsConfig = {
},
"docker-compose",
"variables",
"procedures",
"permissioning",
"sync-resources",
"webhooks",

View File

@@ -160,6 +160,7 @@ export type WriteResponses = {
CreateNetwork: Types.Update;
CreateDeployment: Types.Deployment;
CopyDeployment: Types.Deployment;
CreateDeploymentFromContainer: Types.Deployment;
DeleteDeployment: Types.Deployment;
UpdateDeployment: Types.Deployment;
RenameDeployment: Types.Update;
@@ -262,30 +263,43 @@ export type ExecuteResponses = {
PruneBuildx: Types.Update;
PruneSystem: Types.Update;
Deploy: Types.Update;
BatchDeploy: Types.BatchExecutionResponse;
PullDeployment: Types.Update;
StartDeployment: Types.Update;
RestartDeployment: Types.Update;
PauseDeployment: Types.Update;
UnpauseDeployment: Types.Update;
StopDeployment: Types.Update;
DestroyDeployment: Types.Update;
BatchDestroyDeployment: Types.BatchExecutionResponse;
RunBuild: Types.Update;
BatchRunBuild: Types.BatchExecutionResponse;
CancelBuild: Types.Update;
CloneRepo: Types.Update;
BatchCloneRepo: Types.BatchExecutionResponse;
PullRepo: Types.Update;
BatchPullRepo: Types.BatchExecutionResponse;
BuildRepo: Types.Update;
BatchBuildRepo: Types.BatchExecutionResponse;
CancelRepoBuild: Types.Update;
RunProcedure: Types.Update;
BatchRunProcedure: Types.BatchExecutionResponse;
RunAction: Types.Update;
BatchRunAction: Types.BatchExecutionResponse;
LaunchServer: Types.Update;
RunSync: Types.Update;
DeployStack: Types.Update;
BatchDeployStack: Types.BatchExecutionResponse;
DeployStackIfChanged: Types.Update;
BatchDeployStackIfChanged: Types.BatchExecutionResponse;
PullStack: Types.Update;
StartStack: Types.Update;
RestartStack: Types.Update;
StopStack: Types.Update;
PauseStack: Types.Update;
UnpauseStack: Types.Update;
DestroyStack: Types.Update;
BatchDestroyStack: Types.BatchExecutionResponse;
DeployStackService: Types.Update;
StartStackService: Types.Update;
RestartStackService: Types.Update;

View File

@@ -203,6 +203,14 @@ export interface AlerterQuerySpecifics {
types: AlerterEndpoint["type"][];
}
export type AlerterQuery = ResourceQuery<AlerterQuerySpecifics>;
export type BatchExecutionResponseItem = {
status: "Ok";
data: Update;
} | {
status: "Err";
data: BatchExecutionResponseItemErr;
};
export type BatchExecutionResponse = BatchExecutionResponseItem[];
export interface Version {
major: number;
minor: number;
@@ -315,10 +323,10 @@ export interface BuildConfig {
* Secret arguments.
*
* These values remain hidden in the final image by using
* docker secret mounts. See [https://docs.docker.com/build/building/secrets].
* docker secret mounts. See <https://docs.docker.com/build/building/secrets>.
*
* The values can be used in RUN commands:
* ```
* ```sh
* RUN --mount=type=secret,id=SECRET_KEY \
* SECRET_KEY=$(cat /run/secrets/SECRET_KEY) ...
* ```
@@ -383,12 +391,17 @@ export interface BuildQuerySpecifics {
}
export type BuildQuery = ResourceQuery<BuildQuerySpecifics>;
export type BuilderConfig =
/** Use a connected server an image builder. */
/** Use a Periphery address as a Builder. */
{
type: "Url";
params: UrlBuilderConfig;
}
/** Use a connected server as a Builder. */
| {
type: "Server";
params: ServerBuilderConfig;
}
/** Use EC2 instances spawned on demand as an image builder. */
/** Use EC2 instances spawned on demand as a Builder. */
| {
type: "Aws";
params: AwsBuilderConfig;
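
A sketch of the new `Url` variant in use; the address and passkey are placeholders (the frontend later in this diff suggests `https://periphery:8120` as the placeholder address):

```ts
import { Types } from "komodo_client";

// Sketch: a Builder that talks to a Periphery agent directly by address.
const builder: Types.BuilderConfig = {
  type: "Url",
  params: {
    address: "https://periphery:8120",
    // Optional; the default passkey is used when omitted.
    passkey: "my-custom-passkey",
  },
};
```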
@@ -416,18 +429,33 @@ export type Execution =
} | {
type: "RunAction";
params: RunAction;
} | {
type: "BatchRunAction";
params: BatchRunAction;
} | {
type: "RunProcedure";
params: RunProcedure;
} | {
type: "BatchRunProcedure";
params: BatchRunProcedure;
} | {
type: "RunBuild";
params: RunBuild;
} | {
type: "BatchRunBuild";
params: BatchRunBuild;
} | {
type: "CancelBuild";
params: CancelBuild;
} | {
type: "Deploy";
params: Deploy;
} | {
type: "BatchDeploy";
params: BatchDeploy;
} | {
type: "PullDeployment";
params: PullDeployment;
} | {
type: "StartDeployment";
params: StartDeployment;
@@ -446,15 +474,27 @@ export type Execution =
} | {
type: "DestroyDeployment";
params: DestroyDeployment;
} | {
type: "BatchDestroyDeployment";
params: BatchDestroyDeployment;
} | {
type: "CloneRepo";
params: CloneRepo;
} | {
type: "BatchCloneRepo";
params: BatchCloneRepo;
} | {
type: "PullRepo";
params: PullRepo;
} | {
type: "BatchPullRepo";
params: BatchPullRepo;
} | {
type: "BuildRepo";
params: BuildRepo;
} | {
type: "BatchBuildRepo";
params: BatchBuildRepo;
} | {
type: "CancelRepoBuild";
params: CancelRepoBuild;
@@ -530,9 +570,18 @@ export type Execution =
} | {
type: "DeployStack";
params: DeployStack;
} | {
type: "BatchDeployStack";
params: BatchDeployStack;
} | {
type: "DeployStackIfChanged";
params: DeployStackIfChanged;
} | {
type: "BatchDeployStackIfChanged";
params: BatchDeployStackIfChanged;
} | {
type: "PullStack";
params: PullStack;
} | {
type: "StartStack";
params: StartStack;
@@ -551,6 +600,9 @@ export type Execution =
} | {
type: "DestroyStack";
params: DestroyStack;
} | {
type: "BatchDestroyStack";
params: BatchDestroyStack;
} | {
type: "Sleep";
params: Sleep;
@@ -620,7 +672,7 @@ export interface DockerRegistryAccount {
*
* For docker registry, this can include 'http://...',
* however this is not recommended and won't work unless "insecure registries" are enabled
* on your hosts. See [https://docs.docker.com/reference/cli/dockerd/#insecure-registries].
* on your hosts. See <https://docs.docker.com/reference/cli/dockerd/#insecure-registries>.
*/
domain: string;
/** The account username */
@@ -833,6 +885,13 @@ export interface DeploymentConfig {
skip_secret_interp?: boolean;
/** Whether to redeploy the deployment whenever the attached build finishes. */
redeploy_on_build?: boolean;
/** Whether to poll for any updates to the image. */
poll_for_updates?: boolean;
/**
* Whether to automatically redeploy when a
* newer image is found.
*/
auto_update?: boolean;
/** Whether to send ContainerStateChange alerts for this deployment. */
send_alerts: boolean;
/** Configure quick links that are displayed in the resource header */
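
A sketch of enabling the new update options through the generated partial config type (field names from `DeploymentConfig` above; the surrounding update request is omitted):

```ts
import { Types } from "komodo_client";

// Sketch: poll for newer images at the same tag, and redeploy
// automatically when one is found.
const patch: Types._PartialDeploymentConfig = {
  poll_for_updates: true,
  auto_update: true,
};
```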
@@ -908,6 +967,8 @@ export interface DeploymentListItemInfo {
status?: string;
/** The image attached to the deployment. */
image: string;
/** Whether there is a newer image available at the same tag. */
update_available: boolean;
/** The server that deployment sits on. */
server_id: string;
/** An attached Komodo Build, if it exists. */
@@ -1030,6 +1091,22 @@ export type AlertData =
to: DeploymentState;
};
}
/** A Deployment has an image update available */
| {
type: "DeploymentImageUpdateAvailable";
data: {
/** The id of the deployment */
id: string;
/** The name of the deployment */
name: string;
        /** The server id of the server that the deployment is on */
server_id: string;
/** The server name */
server_name: string;
/** The image with update */
image: string;
};
}
/** A stack's state has changed unexpectedly. */
| {
type: "StackStateChange";
@@ -1048,6 +1125,24 @@ export type AlertData =
to: StackState;
};
}
/** A Stack has an image update available */
| {
type: "StackImageUpdateAvailable";
data: {
/** The id of the stack */
id: string;
/** The name of the stack */
name: string;
        /** The server id of the server that the stack is on */
server_id: string;
/** The server name */
server_name: string;
/** The service name to update */
service: string;
/** The image with update */
image: string;
};
}
/** An AWS builder failed to terminate. */
| {
type: "AwsBuilderTerminationFailed";
@@ -1138,6 +1233,7 @@ export interface Log {
}
export type GetContainerLogResponse = Log;
export interface DeploymentActionState {
pulling: boolean;
deploying: boolean;
starting: boolean;
restarting: boolean;
@@ -1504,6 +1600,7 @@ export type ServerTemplateConfig =
export type ServerTemplate = Resource<ServerTemplateConfig, undefined>;
export type GetServerTemplateResponse = ServerTemplate;
export interface StackActionState {
pulling: boolean;
deploying: boolean;
starting: boolean;
restarting: boolean;
@@ -1538,6 +1635,13 @@ export interface StackConfig {
* Combine with build_extra_args for custom behaviors.
*/
run_build?: boolean;
/** Whether to poll for any updates to the images. */
poll_for_updates?: boolean;
/**
 * Whether to automatically redeploy when
 * newer images are found.
*/
auto_update?: boolean;
/** Whether to run `docker compose down` before `compose up`. */
destroy_before_deploy?: boolean;
/** Whether to skip secret interpolation into the stack environment variables. */
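
The same pair of options now exists on Stacks; a sketch with the generated partial type:

```ts
import { Types } from "komodo_client";

// Sketch: poll a Stack's images and redeploy automatically on updates.
const stackPatch: Types._PartialStackConfig = {
  poll_for_updates: true,
  auto_update: true,
};
```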
@@ -1674,6 +1778,8 @@ export interface StackServiceNames {
 * Containers will be matched via regex like `^container_name-?[0-9]*$`
*/
container_name: string;
/** The services image. */
image?: string;
}
export interface StackInfo {
/**
@@ -1840,6 +1946,7 @@ export declare enum Operation {
DeleteStack = "DeleteStack",
WriteStackContents = "WriteStackContents",
RefreshStackCache = "RefreshStackCache",
PullStack = "PullStack",
DeployStack = "DeployStack",
StartStack = "StartStack",
RestartStack = "RestartStack",
@@ -1847,16 +1954,20 @@ export declare enum Operation {
UnpauseStack = "UnpauseStack",
StopStack = "StopStack",
DestroyStack = "DestroyStack",
DeployStackService = "DeployStackService",
PullStackService = "PullStackService",
StartStackService = "StartStackService",
RestartStackService = "RestartStackService",
PauseStackService = "PauseStackService",
UnpauseStackService = "UnpauseStackService",
StopStackService = "StopStackService",
DestroyStackService = "DestroyStackService",
CreateDeployment = "CreateDeployment",
UpdateDeployment = "UpdateDeployment",
RenameDeployment = "RenameDeployment",
DeleteDeployment = "DeleteDeployment",
Deploy = "Deploy",
PullDeployment = "PullDeployment",
StartDeployment = "StartDeployment",
RestartDeployment = "RestartDeployment",
PauseDeployment = "PauseDeployment",
@@ -3049,6 +3160,8 @@ export interface ServerListItemInfo {
state: ServerState;
/** Region of the server. */
region: string;
/** Address of the server. */
address: string;
/** Whether server is configured to send unreachable alerts. */
send_unreachable_alerts: boolean;
/** Whether server is configured to send cpu alerts. */
@@ -3063,8 +3176,12 @@ export type ListServersResponse = ServerListItem[];
export interface StackService {
/** The service name */
service: string;
/** The service image */
image: string;
/** The container */
container?: ContainerListItem;
/** Whether there is an update available for this services image. */
update_available: boolean;
}
export type ListStackServicesResponse = StackService[];
export declare enum StackState {
@@ -3089,6 +3206,13 @@ export declare enum StackState {
/** Server not reachable */
Unknown = "unknown"
}
export interface StackServiceWithUpdate {
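    /** The service name */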
service: string;
/** The service's image */
image: string;
/** Whether there is a newer image available for this service */
update_available: boolean;
}
export interface StackListItemInfo {
/** The server that stack is deployed on. */
server_id: string;
@@ -3107,11 +3231,11 @@ export interface StackListItemInfo {
/** A string given by docker conveying the status of the stack. */
status?: string;
/**
* The service names that are part of the stack.
* The services that are part of the stack.
* If deployed, will be `deployed_services`.
 * Otherwise, it's `latest_services`.
*/
services: string[];
services: StackServiceWithUpdate[];
/**
* Whether the compose project is missing on the host.
* Ie, it does not show up in `docker compose ls`.
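
Since `services` is now `StackServiceWithUpdate[]` rather than `string[]`, list views can surface the update indicator directly. A minimal sketch:

```ts
import { Types } from "komodo_client";

// Sketch: one label per service, flagging available image updates.
function serviceLabels(info: Types.StackListItemInfo): string[] {
  return info.services.map(
    (s) => `${s.service} (${s.image})${s.update_available ? " [update]" : ""}`
  );
}
```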
@@ -3227,6 +3351,7 @@ export type _PartialServerBuilderConfig = Partial<ServerBuilderConfig>;
export type _PartialServerConfig = Partial<ServerConfig>;
export type _PartialStackConfig = Partial<StackConfig>;
export type _PartialTag = Partial<Tag>;
export type _PartialUrlBuilderConfig = Partial<UrlBuilderConfig>;
export interface __Serror {
error: string;
trace: string[];
@@ -3345,6 +3470,186 @@ export interface AwsServerTemplateConfig {
/** The user data to deploy the instance with. */
user_data: string;
}
/** Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchBuildRepo {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Clones multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchCloneRepo {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Deploys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDeploy {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDeployStack {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks if changed in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDeployStackIfChanged {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Destroys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDestroyDeployment {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Destroys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchDestroyStack {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
     *
     * Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
export interface BatchExecutionResponseItemErr {
name: string;
error: _Serror;
}
/** Pulls multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchPullRepo {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchRunAction {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* actions
* foo-*
* # add some more
* extra-action-1, extra-action-2
* ```
*/
pattern: string;
}
/** Runs multiple builds in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchRunBuild {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* builds
* foo-*
* # add some more
* extra-build-1, extra-build-2
* ```
*/
pattern: string;
}
/** Runs multiple Procedures in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchRunProcedure {
/**
* Id or name or wildcard pattern or regex.
     * Supports multiline and comma delimited combinations of the above.
*
* Example:
* ```
* # match all foo-* procedures
* foo-*
* # add some more
* extra-procedure-1, extra-procedure-2
* ```
*/
pattern: string;
}
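
All of the `Batch*` requests share the same `pattern` semantics; a sketch of one request body using the multiline syntax from the doc comments above (names are placeholders):

```ts
import { Types } from "komodo_client";

// Sketch: deploy every Deployment matching the pattern, in parallel.
const batchDeploy: Types.BatchDeploy = {
  pattern: [
    "# match all foo-* deployments",
    "foo-*",
    "# add some more",
    "extra-deployment-1, extra-deployment-2",
  ].join("\n"),
};
```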
/**
* Builds the target repo, using the attached builder. Response: [Update].
*
@@ -3392,7 +3697,7 @@ export interface CloneArgs {
provider: string;
/** Use https (vs http). */
https: boolean;
/** Full repo identifier. <namespace>/<repo_name> */
/** Full repo identifier. {namespace}/{repo_name} */
repo?: string;
/** Git Branch. Default: `main` */
branch: string;
@@ -3431,16 +3736,6 @@ export interface CommitSync {
/** Id or name */
sync: string;
}
export interface ComposeService {
image?: string;
container_name?: string;
}
/** Keeping this minimal for now as it's only needed to parse the service names / container names */
export interface ComposeFile {
/** If not provided, will default to the parent folder holding the compose file. */
name?: string;
services?: Record<string, ComposeService>;
}
export interface Conversion {
/** reference on the server. */
local: string;
@@ -3617,6 +3912,9 @@ export interface CreateBuildWebhook {
}
/** Partial representation of [BuilderConfig] */
export type PartialBuilderConfig = {
type: "Url";
params: _PartialUrlBuilderConfig;
} | {
type: "Server";
params: _PartialServerBuilderConfig;
} | {
@@ -3637,6 +3935,13 @@ export interface CreateDeployment {
/** Optional partial config to initialize the deployment with. */
config?: _PartialDeploymentConfig;
}
/** Create a Deployment from an existing container. Response: [Deployment]. */
export interface CreateDeploymentFromContainer {
/** The name or id of the existing container. */
name: string;
/** The server id or name on which container exists. */
server: string;
}
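
A sketch of the new request; both values are placeholders:

```ts
import { Types } from "komodo_client";

// Sketch: import an existing container as a managed Deployment.
const req: Types.CreateDeploymentFromContainer = {
  name: "my-existing-container",
  server: "my-server",
};
```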
/**
* **Admin only.** Create a docker registry account.
* Response: [DockerRegistryAccount].
@@ -3895,7 +4200,7 @@ export interface DeleteDockerRegistryAccount {
}
/**
* **Admin only.** Delete a git provider account.
* Response: [User].
* Response: [DeleteGitProviderAccountResponse].
*/
export interface DeleteGitProviderAccount {
/** The id of the git provider to delete */
@@ -4066,6 +4371,8 @@ export interface Deploy {
export interface DeployStack {
/** Id or name */
stack: string;
/** Optionally specify a specific service to "compose up" */
service?: string;
/**
* Override the default termination max time.
* Only used if the stack needs to be taken down first.
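
With the new optional `service` field, a single service can be brought up without deploying the whole Stack. A sketch (names are placeholders):

```ts
import { Types } from "komodo_client";

// Sketch: `compose up` only the "web" service of "my-stack".
const deployOne: Types.DeployStack = {
  stack: "my-stack",
  service: "web",
};
```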
@@ -4120,6 +4427,8 @@ export interface DestroyDeployment {
export interface DestroyStack {
/** Id or name */
stack: string;
/** Optionally specify a specific service to destroy */
service?: string;
/** Pass `--remove-orphans` */
remove_orphans?: boolean;
/** Override the default termination max time. */
@@ -4416,7 +4725,7 @@ export interface GetDeploymentLog {
}
/**
* Get the deployment container's stats using `docker stats`.
* Response: [DockerContainerStats].
* Response: [GetDeploymentStatsResponse].
*
* Note. This call will hit the underlying server directly for most up to date stats.
*/
@@ -4623,13 +4932,14 @@ export interface GetReposSummaryResponse {
/** The number of repos with unknown state. */
unknown: number;
}
/** Inspect a docker container on the server. Response: [Container]. */
/** Find the attached resource for a container. Either Deployment or Stack. Response: [GetResourceMatchingContainerResponse]. */
export interface GetResourceMatchingContainer {
/** Id or name */
server: string;
/** The container name */
container: string;
}
/** Response for [GetResourceMatchingContainer]. Resource is either Deployment, Stack, or None. */
export interface GetResourceMatchingContainerResponse {
resource?: ResourceTarget;
}
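
A sketch of reading the response; `resource` is absent when no Deployment or Stack matches (the `type`/`id` shape of `ResourceTarget` is assumed from its use elsewhere in the API):

```ts
import { Types } from "komodo_client";

// Sketch: describe which resource owns a container, if any.
// Assumes ResourceTarget carries `type` and `id` fields.
function describeMatch(
  res: Types.GetResourceMatchingContainerResponse
): string {
  if (!res.resource) return "no Deployment or Stack matches this container";
  return `${res.resource.type} ${res.resource.id}`;
}
```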
@@ -4727,7 +5037,7 @@ export interface GetStackActionState {
/** Id or name */
stack: string;
}
/** Get a stack service's log. Response: [GetStackContainersResponse]. */
/** Get a stack service's log. Response: [GetStackServiceLogResponse]. */
export interface GetStackServiceLog {
/** Id or name */
stack: string;
@@ -5107,7 +5417,7 @@ export interface ListApiKeysForServiceUser {
/**
* Retrieve versions of the build that were built in the past and available for deployment,
* sorted by most recent first.
* Response: [GetBuildVersionsResponse].
* Response: [ListBuildVersionsResponse].
*/
export interface ListBuildVersions {
/** Id or name */
@@ -5224,7 +5534,7 @@ export interface ListDockerRegistriesFromConfig {
}
/**
* List docker registry accounts matching optional query.
* Response: [ListDockerRegistrysResponse].
* Response: [ListDockerRegistryAccountsResponse].
*/
export interface ListDockerRegistryAccounts {
/** Optionally filter by accounts with a specific domain. */
@@ -5298,7 +5608,7 @@ export interface ListFullStacks {
}
/**
* List git provider accounts matching optional query.
* Response: [ListGitProvidersResponse].
* Response: [ListGitProviderAccountsResponse].
*/
export interface ListGitProviderAccounts {
/** Optionally filter by accounts with a specific domain. */
@@ -5623,6 +5933,11 @@ export interface PruneVolumes {
/** Id or name */
server: string;
}
/** Pulls the image for the target deployment. Response: [Update]. */
export interface PullDeployment {
/** Name or id */
deployment: string;
}
/**
* Pulls the target repo. Response: [Update].
*
@@ -5635,6 +5950,13 @@ export interface PullRepo {
/** Id or name */
repo: string;
}
/** Pulls images for the target stack. `docker compose pull`. Response: [Update]. */
export interface PullStack {
/** Id or name */
stack: string;
    /** Optionally specify a specific service to pull */
service?: string;
}
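
The two new pull executions side by side; the ids/names and the service are placeholders:

```ts
import { Types } from "komodo_client";

// Sketch: pull a Deployment's image, and pull one service of a Stack.
const pullDeployment: Types.PullDeployment = { deployment: "my-deployment" };
const pullStack: Types.PullStack = { stack: "my-stack", service: "web" };
```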
/**
* Push a resource to the front of the users 10 most recently viewed resources.
* Response: [NoData].
@@ -5999,7 +6321,7 @@ export interface SearchStackServiceLog {
/** Configuration for a Komodo Server Builder. */
export interface ServerBuilderConfig {
/** The server id of the builder */
server_id: string;
server_id?: string;
}
/** The health of a part of the server. */
export interface ServerHealthState {
@@ -6478,6 +6800,13 @@ export interface UpdateVariableValue {
/** The value to set. */
value: string;
}
/** Configuration for a Komodo Url Builder. */
export interface UrlBuilderConfig {
/** The address of the Periphery agent */
address: string;
/** A custom passkey to use. Otherwise, use the default passkey. */
passkey?: string;
}
/** Update file contents in Files on Server or Git Repo mode. Response: [Update]. */
export interface WriteStackFileContents {
/** The name or id of the target Stack. */
@@ -6586,6 +6915,12 @@ export type ExecuteRequest = {
} | {
type: "Deploy";
params: Deploy;
} | {
type: "BatchDeploy";
params: BatchDeploy;
} | {
type: "PullDeployment";
params: PullDeployment;
} | {
type: "StartDeployment";
params: StartDeployment;
@@ -6604,12 +6939,24 @@ export type ExecuteRequest = {
} | {
type: "DestroyDeployment";
params: DestroyDeployment;
} | {
type: "BatchDestroyDeployment";
params: BatchDestroyDeployment;
} | {
type: "DeployStack";
params: DeployStack;
} | {
type: "BatchDeployStack";
params: BatchDeployStack;
} | {
type: "DeployStackIfChanged";
params: DeployStackIfChanged;
} | {
type: "BatchDeployStackIfChanged";
params: BatchDeployStackIfChanged;
} | {
type: "PullStack";
params: PullStack;
} | {
type: "StartStack";
params: StartStack;
@@ -6628,30 +6975,51 @@ export type ExecuteRequest = {
} | {
type: "DestroyStack";
params: DestroyStack;
} | {
type: "BatchDestroyStack";
params: BatchDestroyStack;
} | {
type: "RunBuild";
params: RunBuild;
} | {
type: "BatchRunBuild";
params: BatchRunBuild;
} | {
type: "CancelBuild";
params: CancelBuild;
} | {
type: "CloneRepo";
params: CloneRepo;
} | {
type: "BatchCloneRepo";
params: BatchCloneRepo;
} | {
type: "PullRepo";
params: PullRepo;
} | {
type: "BatchPullRepo";
params: BatchPullRepo;
} | {
type: "BuildRepo";
params: BuildRepo;
} | {
type: "BatchBuildRepo";
params: BatchBuildRepo;
} | {
type: "CancelRepoBuild";
params: CancelRepoBuild;
} | {
type: "RunProcedure";
params: RunProcedure;
} | {
type: "BatchRunProcedure";
params: BatchRunProcedure;
} | {
type: "RunAction";
params: RunAction;
} | {
type: "BatchRunAction";
params: BatchRunAction;
} | {
type: "LaunchServer";
params: LaunchServer;
@@ -7117,6 +7485,9 @@ export type WriteRequest = {
} | {
type: "CopyDeployment";
params: CopyDeployment;
} | {
type: "CreateDeploymentFromContainer";
params: CreateDeploymentFromContainer;
} | {
type: "DeleteDeployment";
params: DeleteDeployment;

View File

@@ -1,5 +1,5 @@
/*
Generated by typeshare 1.11.0
Generated by typeshare 1.12.0
*/
/** The levels of permission that a User or UserGroup can have on a resource. */
export var PermissionLevel;
@@ -145,6 +145,7 @@ export var Operation;
Operation["DeleteStack"] = "DeleteStack";
Operation["WriteStackContents"] = "WriteStackContents";
Operation["RefreshStackCache"] = "RefreshStackCache";
Operation["PullStack"] = "PullStack";
Operation["DeployStack"] = "DeployStack";
Operation["StartStack"] = "StartStack";
Operation["RestartStack"] = "RestartStack";
@@ -152,16 +153,20 @@ export var Operation;
Operation["UnpauseStack"] = "UnpauseStack";
Operation["StopStack"] = "StopStack";
Operation["DestroyStack"] = "DestroyStack";
Operation["DeployStackService"] = "DeployStackService";
Operation["PullStackService"] = "PullStackService";
Operation["StartStackService"] = "StartStackService";
Operation["RestartStackService"] = "RestartStackService";
Operation["PauseStackService"] = "PauseStackService";
Operation["UnpauseStackService"] = "UnpauseStackService";
Operation["StopStackService"] = "StopStackService";
Operation["DestroyStackService"] = "DestroyStackService";
Operation["CreateDeployment"] = "CreateDeployment";
Operation["UpdateDeployment"] = "UpdateDeployment";
Operation["RenameDeployment"] = "RenameDeployment";
Operation["DeleteDeployment"] = "DeleteDeployment";
Operation["Deploy"] = "Deploy";
Operation["PullDeployment"] = "PullDeployment";
Operation["StartDeployment"] = "StartDeployment";
Operation["RestartDeployment"] = "RestartDeployment";
Operation["PauseDeployment"] = "PauseDeployment";

View File

@@ -340,6 +340,8 @@ export const ProviderSelector = ({
if (value === "Custom") {
onSelect("");
setCustomMode(true);
} else if (value === "None") {
onSelect("");
} else {
onSelect(value);
}
@@ -365,7 +367,8 @@ export const ProviderSelector = ({
!providers.includes(selected) && (
<SelectItem value={selected}>{selected}</SelectItem>
)}
{showCustom && <SelectItem value={"Custom"}>Custom</SelectItem>}
{showCustom && <SelectItem value="Custom">Custom</SelectItem>}
{!showCustom && <SelectItem value="None">None</SelectItem>}
</SelectContent>
</Select>
);
@@ -908,44 +911,45 @@ export const ImageRegistryConfig = ({
/>
</div>
</ConfigItem>
{organizations.length > 0 && (
<ConfigItem
label="Organization"
description="Push the build under an organization namespace, rather than the account namespace."
>
<OrganizationSelector
organizations={organizations}
selected={registry?.organization!}
set={(organization) =>
setRegistry({
...registry,
organization,
})
}
disabled={disabled}
/>
</ConfigItem>
)}
{registry && (
<ConfigItem
label="Account"
description="Select the account used to authenticate against the registry."
>
<AccountSelector
id={resource_id}
type="Builder"
account_type="docker"
provider={registry.domain!}
selected={registry.account}
onSelect={(account) =>
setRegistry({
...registry,
account,
})
}
disabled={disabled}
/>
</ConfigItem>
{registry?.domain && (
<>
<ConfigItem
label="Account"
description="Select the account used to authenticate against the registry."
>
<AccountSelector
id={resource_id}
type="Builder"
account_type="docker"
provider={registry.domain!}
selected={registry.account}
onSelect={(account) =>
setRegistry({
...registry,
account,
})
}
disabled={disabled}
/>
</ConfigItem>
<ConfigItem
label="Organization"
description="Push the build under an organization / project namespace, rather than the account namespace."
>
<OrganizationSelector
organizations={organizations}
selected={registry?.organization!}
set={(organization) =>
setRegistry({
...registry,
organization,
})
}
disabled={disabled}
/>
</ConfigItem>
</>
)}
</>
);
@@ -963,7 +967,7 @@ const OrganizationSelector = ({
disabled: boolean;
}) => {
const [customMode, setCustomMode] = useState(false);
if (customMode || organizations.length === 0) {
if (customMode) {
return (
<Input
placeholder="Input custom organization name"

View File

@@ -5,6 +5,7 @@ import { Link, Outlet, useNavigate } from "react-router-dom";
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
@@ -36,7 +37,7 @@ export const Layout = () => {
<div className="h-screen overflow-y-scroll">
<div className="container">
<Sidebar />
<div className="lg:ml-64 lg:pl-8 py-24">
<div className="lg:ml-[200px] lg:pl-8 py-[88px]">
<Outlet />
</div>
</div>
@@ -184,7 +185,7 @@ export const Section = ({
{(title || icon || titleRight || titleOther || actions) && (
<div
className={cn(
"flex flex-wrap gap-2 justify-between py-1",
"flex flex-wrap gap-2 justify-between",
itemsCenterTitleRow ? "items-center" : "items-start"
)}
>
@@ -235,6 +236,7 @@ export const NewLayout = ({
<DialogContent>
<DialogHeader>
<DialogTitle>New {entityType}</DialogTitle>
<DialogDescription>Enter a unique name for the new {entityType}.</DialogDescription>
</DialogHeader>
<div className="flex flex-col gap-4 py-8">{children}</div>
<DialogFooter>

View File

@@ -16,6 +16,7 @@ export type MonacoLanguage =
| "toml"
| "json"
| "key_value"
| "string_list"
| "shell"
| "dockerfile"
| "rust"
@@ -28,11 +29,15 @@ export const MonacoEditor = ({
onValueChange,
language,
readOnly,
minHeight,
className,
}: {
value: string | undefined;
onValueChange?: (value: string) => void;
language: MonacoLanguage;
readOnly?: boolean;
minHeight?: number;
className?: string;
}) => {
const [editor, setEditor] =
useState<monaco.editor.IStandaloneCodeEditor | null>(null);
@@ -97,12 +102,8 @@ export const MonacoEditor = ({
containerNode.style.height = `${Math.max(
Math.ceil(contentHeight),
MIN_EDITOR_HEIGHT
minHeight ?? MIN_EDITOR_HEIGHT
)}px`;
// containerNode.style.height = `${Math.max(
// Math.min(Math.ceil(contentHeight), MAX_EDITOR_HEIGHT),
// MIN_EDITOR_HEIGHT
// )}px`;
}, [editor, line_count]);
const { theme: _theme } = useTheme();
@@ -131,7 +132,7 @@ export const MonacoEditor = ({
};
return (
<div className="mx-2 my-1 w-full">
<div className={cn("mx-2 my-1 w-full", className)}>
<Editor
language={language}
value={value}
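
A hedged usage sketch of the new `minHeight` and `className` props (the surrounding state and the values are placeholder examples):

```tsx
// Sketch: the new optional props in use; `contents` / `setContents`
// are assumed component state, and the values are arbitrary.
<MonacoEditor
  value={contents}
  onValueChange={setContents}
  language="toml"
  minHeight={200}
  className="max-w-[600px]"
/>
```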

View File

@@ -1,4 +1,4 @@
import { useRead, useUser } from "@lib/hooks";
import { useAllResources, useUser } from "@lib/hooks";
import { Button } from "@ui/button";
import {
CommandDialog,
@@ -12,18 +12,9 @@ import {
import { Home, Search, User } from "lucide-react";
import { Fragment, ReactNode, useMemo, useState } from "react";
import { useNavigate } from "react-router-dom";
import { cn } from "@lib/utils";
import { DeploymentComponents } from "./resources/deployment";
import { BuildComponents } from "./resources/build";
import { ServerComponents } from "./resources/server";
import { ProcedureComponents } from "./resources/procedure";
import { RepoComponents } from "./resources/repo";
import { BuilderComponents } from "./resources/builder";
import { AlerterComponents } from "./resources/alerter";
import { ServerTemplateComponents } from "./resources/server-template";
import { cn, RESOURCE_TARGETS, usableResourcePath } from "@lib/utils";
import { Badge } from "@ui/badge";
import { ResourceSyncComponents } from "./resources/resource-sync";
import { StackComponents } from "./resources/stack";
import { ResourceComponents } from "./resources";
export const OmniSearch = ({
className,
@@ -116,16 +107,7 @@ const useOmniItems = (
search: string
): Record<string, OmniItem[]> => {
const user = useUser().data;
const servers = useRead("ListServers", {}).data;
const deployments = useRead("ListDeployments", {}).data;
const stacks = useRead("ListStacks", {}).data;
const builds = useRead("ListBuilds", {}).data;
const repos = useRead("ListRepos", {}).data;
const procedures = useRead("ListProcedures", {}).data;
const builders = useRead("ListBuilders", {}).data;
const alerters = useRead("ListAlerters", {}).data;
const templates = useRead("ListServerTemplates", {}).data;
const syncs = useRead("ListResourceSyncs", {}).data;
const resources = useAllResources();
const searchTerms = search
.toLowerCase()
.split(" ")
@@ -139,66 +121,21 @@ const useOmniItems = (
icon: <Home className="w-4 h-4" />,
onSelect: () => nav("/"),
},
{
key: "Servers",
label: "Servers",
icon: <ServerComponents.Icon />,
onSelect: () => nav("/servers"),
},
{
key: "Deployments",
label: "Deployments",
icon: <DeploymentComponents.Icon />,
onSelect: () => nav("/deployments"),
},
{
key: "Stacks",
label: "Stacks",
icon: <StackComponents.Icon />,
onSelect: () => nav("/stacks"),
},
{
key: "Builds",
label: "Builds",
icon: <BuildComponents.Icon />,
onSelect: () => nav("/builds"),
},
{
key: "Repos",
label: "Repos",
icon: <RepoComponents.Icon />,
onSelect: () => nav("/repos"),
},
{
key: "Procedures",
label: "Procedures",
icon: <ProcedureComponents.Icon />,
onSelect: () => nav("/procedures"),
},
{
key: "Builders",
label: "Builders",
icon: <BuilderComponents.Icon />,
onSelect: () => nav("/builders"),
},
{
key: "Alerters",
label: "Alerters",
icon: <AlerterComponents.Icon />,
onSelect: () => nav("/alerters"),
},
{
key: "Templates",
label: "Templates",
icon: <ServerTemplateComponents.Icon />,
onSelect: () => nav("/server-templates"),
},
{
key: "Syncs",
label: "Syncs",
icon: <ResourceSyncComponents.Icon />,
onSelect: () => nav("/resource-syncs"),
},
...RESOURCE_TARGETS.map((_type) => {
const type =
_type === "ResourceSync"
? "Sync"
: _type === "ServerTemplate"
? "Template"
: _type;
const Components = ResourceComponents[_type];
return {
key: type + "s",
label: type + "s",
icon: <Components.Icon />,
onSelect: () => nav(usableResourcePath(_type)),
};
}),
(user?.admin && {
key: "Users",
label: "Users",
@@ -214,200 +151,39 @@ const useOmniItems = (
searchTerms.every((term) => label.includes(term))
);
}),
Servers:
servers
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"server".includes(term)
...Object.fromEntries(
RESOURCE_TARGETS.map((_type) => {
const type =
_type === "ResourceSync"
? "Sync"
: _type === "ServerTemplate"
? "Template"
: _type;
const lower = type.toLowerCase();
const Components = ResourceComponents[_type];
return [
type + "s",
resources[_type]
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
lower.includes(term)
)
)
)
.map((server) => ({
key: "server-" + server.name,
label: server.name,
icon: <ServerComponents.Icon id={server.id} />,
onSelect: () => nav(`/servers/${server.id}`),
})) || [],
Deployments:
deployments
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"deployment".includes(term)
)
)
.map((deployment) => ({
key: "deployment-" + deployment.name,
label: deployment.name,
icon: <DeploymentComponents.Icon id={deployment.id} />,
onSelect: () => nav(`/deployments/${deployment.id}`),
})) || [],
Stacks:
stacks
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"stack".includes(term)
)
)
.map((stack) => ({
key: "stack-" + stack.name,
label: stack.name,
icon: <StackComponents.Icon id={stack.id} />,
onSelect: () => nav(`/stacks/${stack.id}`),
})) || [],
Build:
builds
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"build".includes(term)
)
)
.map((build) => ({
key: "build-" + build.name,
label: build.name,
icon: <BuildComponents.Icon id={build.id} />,
onSelect: () => nav(`/builds/${build.id}`),
})) || [],
Repos:
repos
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"repo".includes(term)
)
)
.map((repo) => ({
key: "repo-" + repo.name,
label: repo.name,
icon: <RepoComponents.Icon id={repo.id} />,
onSelect: () => nav(`/repos/${repo.id}`),
})) || [],
Procedures:
procedures
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"procedure".includes(term)
)
)
.map((procedure) => ({
key: "procedure-" + procedure.name,
label: procedure.name,
icon: <ProcedureComponents.Icon id={procedure.id} />,
onSelect: () => nav(`/procedures/${procedure.id}`),
})) || [],
Builders:
builders
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"builder".includes(term)
)
)
.map((builder) => ({
key: "builder-" + builder.name,
label: builder.name,
icon: <BuilderComponents.Icon id={builder.id} />,
onSelect: () => nav(`/builders/${builder.id}`),
})) || [],
Alerters:
alerters
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"alerter".includes(term)
)
)
.map((alerter) => ({
key: "alerter-" + alerter.name,
label: alerter.name,
icon: <AlerterComponents.Icon id={alerter.id} />,
onSelect: () => nav(`/alerters/${alerter.id}`),
})) || [],
Templates:
templates
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"template".includes(term)
)
)
.map((template) => ({
key: "template-" + template.name,
label: template.name,
icon: <ServerTemplateComponents.Icon id={template.id} />,
onSelect: () => nav(`/server-templates/${template.id}`),
})) || [],
Syncs:
syncs
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"sync".includes(term)
)
)
.map((sync) => ({
key: "sync-" + sync.name,
label: sync.name,
icon: <ResourceSyncComponents.Icon id={sync.id} />,
onSelect: () => nav(`/resource-syncs/${sync.id}`),
})) || [],
.map((server) => ({
key: type + "-" + server.name,
label: server.name,
icon: <Components.Icon id={server.id} />,
onSelect: () =>
nav(`/${usableResourcePath(_type)}/${server.id}`),
})) || [],
];
})
),
}),
[
user,
servers,
deployments,
stacks,
builds,
repos,
procedures,
alerters,
builders,
templates,
syncs,
search,
]
[user, resources, search]
);
};

View File

@@ -118,17 +118,17 @@ export const ActionConfig = ({ id }: { id: string }) => {
value={branch}
onChange={(e) => setBranch(e.target.value)}
className="w-[200px]"
disabled={branch === "__ALL__"}
disabled={branch === "__ANY__"}
/>
<div className="flex items-center gap-2">
<div className="text-muted-foreground text-sm">
All branches:
No branch check:
</div>
<Switch
checked={branch === "__ALL__"}
checked={branch === "__ANY__"}
onCheckedChange={(checked) => {
if (checked) {
setBranch("__ALL__");
setBranch("__ANY__");
} else {
setBranch("main");
}

View File

@@ -1,23 +1,26 @@
import { ConfigItem } from "@components/config/util";
import { Types } from "komodo_client";
import { Badge } from "@ui/badge";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
} from "@ui/select";
import { Select, SelectContent, SelectItem, SelectTrigger } from "@ui/select";
import { MinusCircle } from "lucide-react";
const ALERT_TYPES: Types.AlertData["type"][] = [
// Server
"ServerUnreachable",
"ServerCpu",
"ServerMem",
"ServerDisk",
// State change
"ContainerStateChange",
"StackStateChange",
// Updates
"DeploymentImageUpdateAvailable",
"StackImageUpdateAvailable",
// Misc
"AwsBuilderTerminationFailed",
"ResourceSyncPendingUpdates",
"BuildFailed",
"AwsBuilderTerminationFailed",
"RepoBuildFailed",
];
export const AlertTypeConfig = ({
@@ -33,7 +36,11 @@ export const AlertTypeConfig = ({
(alert_type) => !alert_types.includes(alert_type)
);
return (
<ConfigItem label="Alert Types" description="Only send alerts of certain types." boldLabel>
<ConfigItem
label="Alert Types"
description="Only send alerts of certain types."
boldLabel
>
<div className="flex items-center gap-4">
{at.length ? (
<Select

View File

@@ -32,8 +32,10 @@ export const ResourcesConfig = ({
const [open, setOpen] = useState(false);
const [search, setSearch] = useState("");
const servers = useRead("ListServers", {}).data ?? [];
const stacks = useRead("ListStacks", {}).data ?? [];
const deployments = useRead("ListDeployments", {}).data ?? [];
const builds = useRead("ListBuilds", {}).data ?? [];
const repos = useRead("ListRepos", {}).data ?? [];
const syncs = useRead("ListResourceSyncs", {}).data ?? [];
const all_resources = [
...servers.map((server) => {
@@ -48,6 +50,16 @@ export const ResourcesConfig = ({
: false,
};
}),
...stacks.map((stack) => {
return {
type: "Stack",
id: stack.id,
name: stack.name.toLowerCase(),
enabled: resources.find((r) => r.type === "Stack" && r.id === stack.id)
? true
: false,
};
}),
...deployments.map((deployment) => ({
type: "Deployment",
id: deployment.id,
@@ -66,6 +78,14 @@ export const ResourcesConfig = ({
? true
: false,
})),
...repos.map((repo) => ({
type: "Repo",
id: repo.id,
name: repo.name.toLowerCase(),
enabled: resources.find((r) => r.type === "Repo" && r.id === repo.id)
? true
: false,
})),
...syncs.map((sync) => ({
type: "ResourceSync",
id: sync.id,

View File

@@ -22,6 +22,7 @@ export const BuilderConfig = ({ id }: { id: string }) => {
const config = useRead("GetBuilder", { builder: id }).data?.config;
if (config?.type === "Aws") return <AwsBuilderConfig id={id} />;
if (config?.type === "Server") return <ServerBuilderConfig id={id} />;
if (config?.type === "Url") return <UrlBuilderConfig id={id} />;
};
const AwsBuilderConfig = ({ id }: { id: string }) => {
@@ -301,6 +302,51 @@ const ServerBuilderConfig = ({ id }: { id: string }) => {
);
};
const UrlBuilderConfig = ({ id }: { id: string }) => {
const perms = useRead("GetPermissionLevel", {
target: { type: "Builder", id },
}).data;
const config = useRead("GetBuilder", { builder: id }).data?.config;
const [update, set] = useLocalStorage<Partial<Types.UrlBuilderConfig>>(
`url-builder-${id}-update-v1`,
{}
);
const { mutateAsync } = useWrite("UpdateBuilder");
if (!config) return null;
const disabled = perms !== Types.PermissionLevel.Write;
return (
<Config
disabled={disabled}
config={config.params as Types.UrlBuilderConfig}
update={update}
set={set}
onSave={async () => {
await mutateAsync({ id, config: { type: "Url", params: update } });
}}
components={{
"": [
{
label: "General",
labelHidden: true,
components: {
address: {
description: "The address of the Periphery agent",
placeholder: "https://periphery:8120",
},
passkey: {
description: "Use a custom passkey to authenticate with Periphery",
placeholder: "Custom passkey",
},
},
},
],
}}
/>
);
};
const ProvidersConfig = (params: {
type: "git" | "docker";
providers: Types.GitProvider[] | Types.DockerRegistry[];

Some files were not shown because too many files have changed in this diff.