Compare commits

..

8 Commits

Author SHA1 Message Date
mbecker20
54ba31dca9 gen ts types 2024-10-30 02:18:57 -04:00
Maxwell Becker
17d7ecb419 1.16.6 (#163)
* remove instrument from validate_cancel_build

* use type safe AllResources map - Action not showing omnisearch

* Stack support replicated services

* server docker nested tables

* fix container networks which use network of another container

* bump version

* add 'address' to ServerListItemInfo

* secrets list on variables page wraps

* fix user data script

* update default template user data

* improve sidebar layout styling

* fix network names shown on containers

* improve stack service / container page

* deleted resource log records Toml backup for later reference

* align all the tables

* add Url Builder type
2024-10-29 23:17:10 -07:00
mbecker20
38f3448790 add Procedures and Actions page 2024-10-28 00:57:42 -04:00
Maxwell Becker
ec88a6fa5a 1.16.5 (#159)
* repo pull lock

* implement BatchRunAction

* other batch methods
2024-10-27 20:56:56 -07:00
mbecker20
3820cd0ca2 make Build Organization configurable with custom value 2024-10-27 14:42:06 -04:00
mbecker20
419aa87bbb update resource toml examples to latest standard 2024-10-26 16:22:12 -04:00
Maxwell Becker
7a9ad42203 1.16.4 (#151)
* rust client improvements and docs

* sync rust client

* version 1.16.4

* UI support YAML / TOML utils, typed Deno namespace

* add ResourcesToml to typeshare

* add YAML and TOML convenience

* make the types available globally

* preload container with @std/yaml and @std/toml, clean up genned files

* add deno setup to alpine dockerfile
2024-10-26 12:15:34 -07:00
mbecker20
3f1cfa9064 update docs. Add Variables / Secrets docs 2024-10-25 00:02:12 -04:00
97 changed files with 12532 additions and 1701 deletions

87
Cargo.lock generated
View File

@@ -41,7 +41,7 @@ dependencies = [
[[package]]
name = "alerter"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"axum",
@@ -120,9 +120,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.90"
version = "1.0.91"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95"
checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8"
[[package]]
name = "arc-swap"
@@ -201,9 +201,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "aws-config"
version = "1.5.8"
version = "1.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7198e6f03240fdceba36656d8be440297b6b82270325908c7381f37d826a74f6"
checksum = "2d6448cfb224dd6a9b9ac734f58622dd0d4751f3589f3b777345745f46b2eb14"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -268,9 +268,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ec2"
version = "1.79.0"
version = "1.83.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95559916ae9d6ef69b104546098a4b4c57082db5d11571917916e9a69234c6ea"
checksum = "59ef9cdd731373735434b79a33fd1049525d78674c49b1e278d56544e388fe01"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -292,9 +292,9 @@ dependencies = [
[[package]]
name = "aws-sdk-sso"
version = "1.46.0"
version = "1.47.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0dc2faec3205d496c7e57eff685dd944203df7ce16a4116d0281c44021788a7b"
checksum = "a8776850becacbd3a82a4737a9375ddb5c6832a51379f24443a98e61513f852c"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -314,9 +314,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ssooidc"
version = "1.47.0"
version = "1.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c93c241f52bc5e0476e259c953234dab7e2a35ee207ee202e86c0095ec4951dc"
checksum = "0007b5b8004547133319b6c4e87193eee2a0bcb3e4c18c75d09febe9dab7b383"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -336,9 +336,9 @@ dependencies = [
[[package]]
name = "aws-sdk-sts"
version = "1.46.0"
version = "1.47.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b259429be94a3459fa1b00c5684faee118d74f9577cc50aebadc36e507c63b5f"
checksum = "9fffaa356e7f1c725908b75136d53207fa714e348f365671df14e95a60530ad3"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -432,9 +432,9 @@ dependencies = [
[[package]]
name = "aws-smithy-runtime"
version = "1.7.2"
version = "1.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a065c0fe6fdbdf9f11817eb68582b2ab4aff9e9c39e986ae48f7ec576c6322db"
checksum = "be28bd063fa91fd871d131fc8b68d7cd4c5fa0869bea68daca50dcb1cbd76be2"
dependencies = [
"aws-smithy-async",
"aws-smithy-http",
@@ -476,9 +476,9 @@ dependencies = [
[[package]]
name = "aws-smithy-types"
version = "1.2.7"
version = "1.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147100a7bea70fa20ef224a6bad700358305f5dc0f84649c53769761395b355b"
checksum = "07c9cdc179e6afbf5d391ab08c85eac817b51c87e1892a5edb5f7bbdc64314b4"
dependencies = [
"base64-simd",
"bytes",
@@ -943,7 +943,7 @@ dependencies = [
[[package]]
name = "command"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"komodo_client",
"run_command",
@@ -1355,7 +1355,7 @@ dependencies = [
[[package]]
name = "environment_file"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"thiserror",
]
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "formatting"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"serror",
]
@@ -1571,7 +1571,7 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "git"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"command",
@@ -2191,7 +2191,7 @@ dependencies = [
[[package]]
name = "komodo_cli"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"clap",
@@ -2207,7 +2207,7 @@ dependencies = [
[[package]]
name = "komodo_client"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2238,7 +2238,7 @@ dependencies = [
[[package]]
name = "komodo_core"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2292,11 +2292,12 @@ dependencies = [
"typeshare",
"urlencoding",
"uuid",
"wildcard",
]
[[package]]
name = "komodo_periphery"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2383,7 +2384,7 @@ dependencies = [
[[package]]
name = "logger"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"komodo_client",
@@ -3089,7 +3090,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "periphery_client"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"komodo_client",
@@ -3307,9 +3308,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
@@ -3391,6 +3392,7 @@ dependencies = [
"base64 0.22.1",
"bytes",
"encoding_rs",
"futures-channel",
"futures-core",
"futures-util",
"h2 0.4.6",
@@ -3992,9 +3994,9 @@ dependencies = [
[[package]]
name = "serror"
version = "0.4.6"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8f432d878d404110352cfbaa031d8a6878a166cb7f50e00ab87d0508f8f68a0"
checksum = "715a997753611604c722411afbe11f83a89e00e39323dc9016db96a86cc04fc8"
dependencies = [
"anyhow",
"axum",
@@ -4326,18 +4328,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.64"
version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84"
checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.64"
version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3"
checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602"
dependencies = [
"proc-macro2",
"quote",
@@ -4790,9 +4792,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "typeshare"
version = "1.0.3"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04f17399b76c2e743d58eac0635d7686e9c00f48cd4776f00695d9882a7d3187"
checksum = "19be0f411120091e76e13e5a0186d8e2bcc3e7e244afdb70152197f1a8486ceb"
dependencies = [
"chrono",
"serde",
@@ -4863,7 +4865,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "update_logger"
version = "1.16.3"
version = "1.16.6"
dependencies = [
"anyhow",
"komodo_client",
@@ -5072,6 +5074,15 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311"
[[package]]
name = "wildcard"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36241ad0795516b55e3b60e55c7f979d4f324e4aaea4c70d56b548b9164ee4d2"
dependencies = [
"thiserror",
]
[[package]]
name = "winapi"
version = "0.3.9"

View File

@@ -9,7 +9,7 @@ members = [
]
[workspace.package]
version = "1.16.3"
version = "1.16.6"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -32,7 +32,7 @@ git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.4.6", default-features = false }
serror = { version = "0.4.7", default-features = false }
slack = { version = "0.2.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
@@ -69,8 +69,8 @@ serde_yaml = "0.9.34"
toml = "0.8.19"
# ERROR
anyhow = "1.0.90"
thiserror = "1.0.64"
anyhow = "1.0.91"
thiserror = "1.0.65"
# LOGGING
opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio"] }
@@ -104,14 +104,15 @@ bollard = "0.17.1"
sysinfo = "0.32.0"
# CLOUD
aws-config = "1.5.8"
aws-sdk-ec2 = "1.77.0"
aws-config = "1.5.9"
aws-sdk-ec2 = "1.83.0"
# MISC
derive_builder = "0.20.2"
typeshare = "1.0.3"
typeshare = "1.0.4"
octorust = "0.7.0"
dashmap = "6.1.0"
wildcard = "0.2.0"
colored = "2.1.0"
regex = "1.11.0"
regex = "1.11.1"
bson = "2.13.0"

View File

@@ -1,13 +1,21 @@
use std::time::Duration;
use colored::Colorize;
use komodo_client::api::execute::Execution;
use komodo_client::{
api::execute::{BatchExecutionResponse, Execution},
entities::update::Update,
};
use crate::{
helpers::wait_for_enter,
state::{cli_args, komodo_client},
};
/// Outcome of dispatching an [Execution] from the CLI.
/// Single executions resolve to one [Update];
/// the `Batch*` variants resolve to a [BatchExecutionResponse]
/// (presumably one result per matched resource — confirm in the API crate).
pub enum ExecutionResult {
  /// Result of a single-target execution (e.g. `RunBuild`).
  Single(Update),
  /// Result of a batch execution (e.g. `BatchRunBuild`).
  Batch(BatchExecutionResponse),
}
pub async fn run(execution: Execution) -> anyhow::Result<()> {
if matches!(execution, Execution::None(_)) {
println!("Got 'none' execution. Doing nothing...");
@@ -24,18 +32,30 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunAction(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchRunAction(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunProcedure(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchRunProcedure(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchRunBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Deploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDeploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -54,15 +74,27 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::DestroyDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDestroyDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchCloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchPullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchBuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelRepoBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -138,9 +170,15 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::DeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStackIfChanged(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDeployStackIfChanged(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -159,6 +197,9 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::DestroyStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchDestroyStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Sleep(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -171,144 +212,234 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
info!("Running Execution...");
let res = match execution {
Execution::RunAction(request) => {
komodo_client().execute(request).await
}
Execution::RunProcedure(request) => {
komodo_client().execute(request).await
}
Execution::RunBuild(request) => {
komodo_client().execute(request).await
}
Execution::CancelBuild(request) => {
komodo_client().execute(request).await
}
Execution::Deploy(request) => {
komodo_client().execute(request).await
}
Execution::StartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::RestartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::PauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::StopDeployment(request) => {
komodo_client().execute(request).await
}
Execution::DestroyDeployment(request) => {
komodo_client().execute(request).await
}
Execution::CloneRepo(request) => {
komodo_client().execute(request).await
}
Execution::PullRepo(request) => {
komodo_client().execute(request).await
}
Execution::BuildRepo(request) => {
komodo_client().execute(request).await
}
Execution::CancelRepoBuild(request) => {
komodo_client().execute(request).await
}
Execution::StartContainer(request) => {
komodo_client().execute(request).await
}
Execution::RestartContainer(request) => {
komodo_client().execute(request).await
}
Execution::PauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::StopContainer(request) => {
komodo_client().execute(request).await
}
Execution::DestroyContainer(request) => {
komodo_client().execute(request).await
}
Execution::StartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::RestartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::StopAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PruneContainers(request) => {
komodo_client().execute(request).await
}
Execution::DeleteNetwork(request) => {
komodo_client().execute(request).await
}
Execution::PruneNetworks(request) => {
komodo_client().execute(request).await
}
Execution::DeleteImage(request) => {
komodo_client().execute(request).await
}
Execution::PruneImages(request) => {
komodo_client().execute(request).await
}
Execution::DeleteVolume(request) => {
komodo_client().execute(request).await
}
Execution::PruneVolumes(request) => {
komodo_client().execute(request).await
}
Execution::PruneDockerBuilders(request) => {
komodo_client().execute(request).await
}
Execution::PruneBuildx(request) => {
komodo_client().execute(request).await
}
Execution::PruneSystem(request) => {
komodo_client().execute(request).await
}
Execution::RunSync(request) => {
komodo_client().execute(request).await
}
Execution::CommitSync(request) => {
komodo_client().write(request).await
}
Execution::DeployStack(request) => {
komodo_client().execute(request).await
}
Execution::DeployStackIfChanged(request) => {
komodo_client().execute(request).await
}
Execution::StartStack(request) => {
komodo_client().execute(request).await
}
Execution::RestartStack(request) => {
komodo_client().execute(request).await
}
Execution::PauseStack(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseStack(request) => {
komodo_client().execute(request).await
}
Execution::StopStack(request) => {
komodo_client().execute(request).await
}
Execution::DestroyStack(request) => {
komodo_client().execute(request).await
}
Execution::RunAction(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchRunAction(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::RunProcedure(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchRunProcedure(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::RunBuild(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchRunBuild(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::CancelBuild(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::Deploy(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchDeploy(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::StartDeployment(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::RestartDeployment(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PauseDeployment(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::UnpauseDeployment(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::StopDeployment(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::DestroyDeployment(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchDestroyDeployment(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::CloneRepo(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchCloneRepo(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::PullRepo(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchPullRepo(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::BuildRepo(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchBuildRepo(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::CancelRepoBuild(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::StartContainer(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::RestartContainer(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PauseContainer(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::UnpauseContainer(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::StopContainer(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::DestroyContainer(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::StartAllContainers(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::RestartAllContainers(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PauseAllContainers(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::UnpauseAllContainers(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::StopAllContainers(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PruneContainers(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::DeleteNetwork(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PruneNetworks(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::DeleteImage(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PruneImages(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::DeleteVolume(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PruneVolumes(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PruneDockerBuilders(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PruneBuildx(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PruneSystem(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::RunSync(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::CommitSync(request) => komodo_client()
.write(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::DeployStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchDeployStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::DeployStackIfChanged(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchDeployStackIfChanged(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::StartStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::RestartStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::PauseStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::UnpauseStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::StopStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::DestroyStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Single(u)),
Execution::BatchDestroyStack(request) => komodo_client()
.execute(request)
.await
.map(|u| ExecutionResult::Batch(u)),
Execution::Sleep(request) => {
let duration =
Duration::from_millis(request.duration_ms as u64);
@@ -320,7 +451,12 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
};
match res {
Ok(update) => println!("\n{}: {update:#?}", "SUCCESS".green()),
Ok(ExecutionResult::Single(update)) => {
println!("\n{}: {update:#?}", "SUCCESS".green())
}
Ok(ExecutionResult::Batch(update)) => {
println!("\n{}: {update:#?}", "SUCCESS".green())
}
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
}

View File

@@ -40,7 +40,9 @@ pub fn komodo_client() -> &'static KomodoClient {
creds
}
};
futures::executor::block_on(KomodoClient::new(url, key, secret))
.expect("failed to initialize Komodo client")
futures::executor::block_on(
KomodoClient::new(url, key, secret).with_healthcheck(),
)
.expect("failed to initialize Komodo client")
})
}

View File

@@ -48,13 +48,14 @@ serde_json.workspace = true
serde_yaml.workspace = true
typeshare.workspace = true
octorust.workspace = true
wildcard.workspace = true
dashmap.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
nom_pem.workspace = true
anyhow.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
bcrypt.workspace = true
base64.workspace = true
tokio.workspace = true

View File

@@ -34,6 +34,12 @@ COPY --from=core-builder /builder/target/release/core /app
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
# Set $DENO_DIR and preload external Deno deps
ENV DENO_DIR=/action-cache/deno
RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
# Hint at the port
EXPOSE 9120

View File

@@ -29,6 +29,12 @@ COPY --from=core-builder /builder/target/release/core /app
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
# Set $DENO_DIR and preload external Deno deps
ENV DENO_DIR=/action-cache/deno
RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
# Hint at the port
EXPOSE 9120

View File

@@ -1,10 +1,15 @@
use std::collections::HashSet;
use std::{
collections::HashSet,
path::{Path, PathBuf},
str::FromStr,
sync::OnceLock,
};
use anyhow::Context;
use command::run_komodo_command;
use komodo_client::{
api::{
execute::RunAction,
execute::{BatchExecutionResponse, BatchRunAction, RunAction},
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
},
entities::{
@@ -20,6 +25,7 @@ use resolver_api::Resolve;
use tokio::fs;
use crate::{
api::execute::ExecuteRequest,
config::core_config,
helpers::{
interpolate::{
@@ -34,7 +40,26 @@ use crate::{
state::{action_states, db_client, State},
};
/// Wires `BatchRunAction` into the generic batch-execution machinery:
/// the batch resolves against [Action] resources, and each matched
/// resource name is turned into a single `RunAction` request.
impl super::BatchExecute for BatchRunAction {
  type Resource = Action;
  // Build the per-resource request dispatched for each matched Action.
  fn single_request(action: String) -> ExecuteRequest {
    ExecuteRequest::RunAction(RunAction { action })
  }
}
/// Handler for the `BatchRunAction` execute request.
/// Delegates entirely to the shared `batch_execute` helper, which
/// expands `pattern` to matching Actions and runs one `RunAction`
/// per match (pattern semantics live in `batch_execute` — presumably
/// wildcard matching; confirm there).
impl Resolve<BatchRunAction, (User, Update)> for State {
  // The Update is unused here: each spawned RunAction produces its own.
  #[instrument(name = "BatchRunAction", skip(self, user), fields(user_id = user.id))]
  async fn resolve(
    &self,
    BatchRunAction { pattern }: BatchRunAction,
    (user, _): (User, Update),
  ) -> anyhow::Result<BatchExecutionResponse> {
    super::batch_execute::<BatchRunAction>(&pattern, &user).await
  }
}
impl Resolve<RunAction, (User, Update)> for State {
#[instrument(name = "RunAction", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunAction { action }: RunAction,
@@ -81,26 +106,22 @@ impl Resolve<RunAction, (User, Update)> for State {
.into_iter()
.collect::<Vec<_>>();
let path = core_config()
.action_directory
.join(format!("{}.ts", random_string(10)));
let file = format!("{}.ts", random_string(10));
let path = core_config().action_directory.join(&file);
if let Some(parent) = path.parent() {
let _ = fs::create_dir_all(parent).await;
}
fs::write(&path, contents).await.with_context(|| {
format!("Faild to write action file to {path:?}")
format!("Failed to write action file to {path:?}")
})?;
let mut res = run_komodo_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
format!(
"deno run --allow-read --allow-net --allow-import {}",
path.display()
),
format!("deno run --allow-all {}", path.display()),
false,
)
.await;
@@ -110,11 +131,7 @@ impl Resolve<RunAction, (User, Update)> for State {
res.stderr = svi::replace_in_string(&res.stderr, &replacers)
.replace(&secret, "<ACTION_API_SECRET>");
if let Err(e) = fs::remove_file(path).await {
warn!(
"Failed to delete action file after action execution | {e:#}"
);
}
cleanup_run(file + ".js", &path).await;
if let Err(e) = State
.resolve(DeleteApiKey { key }, action_user().to_owned())
@@ -187,6 +204,22 @@ fn full_contents(contents: &str, key: &str, secret: &str) -> String {
let base_url = format!("{protocol}://localhost:{port}");
format!(
"import {{ KomodoClient }} from '{base_url}/client/lib.js';
import * as __YAML__ from 'jsr:@std/yaml';
import * as __TOML__ from 'jsr:@std/toml';
const YAML = {{
stringify: __YAML__.stringify,
parse: __YAML__.parse,
parseAll: __YAML__.parseAll,
parseDockerCompose: __YAML__.parse,
}}
const TOML = {{
stringify: __TOML__.stringify,
parse: __TOML__.parse,
parseResourceToml: __TOML__.parse,
parseCargoToml: __TOML__.parse,
}}
const komodo = KomodoClient('{base_url}', {{
type: 'api-key',
@@ -207,3 +240,84 @@ main().catch(error => {{
}}).then(() => console.log('🦎 Action completed successfully 🦎'));"
)
}
/// Cleans up the action script at `path` after execution.
/// ALSO, when $DENO_DIR is set (i.e. inside the container), removes
/// the generated file matching `file` from the Deno cache — that
/// artifact lives under DENO_DIR/gen/file, NOT under `path`.
async fn cleanup_run(file: String, path: &Path) {
  // Best-effort removal of the source script itself.
  match fs::remove_file(path).await {
    Ok(()) => {}
    Err(e) => warn!(
      "Failed to delete action file after action execution | {e:#}"
    ),
  }
  // Without $DENO_DIR there is no cache artifact to chase down.
  if let Some(deno_dir) = deno_dir() {
    // Result intentionally ignored: missing artifact is not an error.
    delete_file(deno_dir.join("gen/file"), file).await;
  }
}
/// Returns the $DENO_DIR environment variable as a [Path],
/// read once and cached for the lifetime of the process.
/// `None` when $DENO_DIR is unset (also cached).
fn deno_dir() -> Option<&'static Path> {
  static DENO_DIR: OnceLock<Option<PathBuf>> = OnceLock::new();
  DENO_DIR
    .get_or_init(|| {
      // `FromStr for PathBuf` is infallible, so the previous
      // `PathBuf::from_str(..).ok()` round trip was noise —
      // the plain `From<String>` conversion cannot fail.
      std::env::var("DENO_DIR").ok().map(PathBuf::from)
    })
    .as_deref()
}
/// file is just the terminating file path,
/// it may be nested multiple folder under path,
/// this will find the nested file and delete it.
/// Assumes the file is only there once.
///
/// Returns `true` once an entry named `file` is found (even if the
/// actual removal fails — failure is only logged; the search still
/// stops). Returns `false` if `dir` is unreadable or no match exists.
///
/// NOTE: recursion in an `async fn` requires a boxed future, hence the
/// manual `Pin<Box<dyn Future>>` return type instead of `async fn`.
fn delete_file(
  dir: PathBuf,
  file: String,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = bool> + Send>>
{
  Box::pin(async move {
    // Unreadable / nonexistent directory: nothing to do.
    let Ok(mut dir) = fs::read_dir(dir).await else {
      return false;
    };
    // Collect the nested folders for recursing
    // only after checking all the files in directory.
    let mut folders = Vec::<PathBuf>::new();
    while let Ok(Some(entry)) = dir.next_entry().await {
      // Entries whose metadata can't be read are skipped silently.
      let Ok(meta) = entry.metadata().await else {
        continue;
      };
      if meta.is_file() {
        // Non-UTF8 file names can't match `file` (a String); skip them.
        let Ok(name) = entry.file_name().into_string() else {
          continue;
        };
        if name == file {
          if let Err(e) = fs::remove_file(entry.path()).await {
            warn!(
              "Failed to clean up generated file after action execution | {e:#}"
            );
          };
          // Found the (assumed unique) file — stop searching.
          return true;
        }
      } else {
        folders.push(entry.path());
      }
    }
    if folders.len() == 1 {
      // unwrap ok, folders definitely is not empty
      // Single subfolder: recurse by moving `file`, avoiding a clone.
      let folder = folders.pop().unwrap();
      delete_file(folder, file).await
    } else {
      // Check folders with file.clone
      // (clone needed because each recursive call consumes `file`).
      for folder in folders {
        if delete_file(folder, file.clone()).await {
          return true;
        }
      }
      false
    }
  })
}

View File

@@ -4,7 +4,10 @@ use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
api::execute::{CancelBuild, Deploy, RunBuild},
api::execute::{
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
RunBuild,
},
entities::{
alert::{Alert, AlertData, SeverityLevel},
all_logs_success,
@@ -51,6 +54,24 @@ use crate::{
use super::ExecuteRequest;
/// Wires `BatchRunBuild` into the generic batch-execution machinery:
/// the batch resolves against [Build] resources, and each matched
/// resource name becomes one `RunBuild` request.
impl super::BatchExecute for BatchRunBuild {
  type Resource = Build;
  // Build the per-resource request dispatched for each matched Build.
  fn single_request(build: String) -> ExecuteRequest {
    ExecuteRequest::RunBuild(RunBuild { build })
  }
}
impl Resolve<BatchRunBuild, (User, Update)> for State {
  // Expands `pattern` into the matching builds and triggers a
  // `RunBuild` for each match. The batch itself has no top-level
  // Update (the incoming Update is ignored); each spawned RunBuild
  // creates its own.
  #[instrument(name = "BatchRunBuild", skip(self, user), fields(user_id = user.id))]
  async fn resolve(
    &self,
    BatchRunBuild { pattern }: BatchRunBuild,
    (user, _): (User, Update),
  ) -> anyhow::Result<BatchExecutionResponse> {
    super::batch_execute::<BatchRunBuild>(&pattern, &user).await
  }
}
impl Resolve<RunBuild, (User, Update)> for State {
#[instrument(name = "RunBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -438,7 +459,6 @@ async fn handle_early_return(
Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {

View File

@@ -37,6 +37,30 @@ use crate::{
state::{action_states, State},
};
use super::ExecuteRequest;
impl super::BatchExecute for BatchDeploy {
type Resource = Deployment;
fn single_request(deployment: String) -> ExecuteRequest {
ExecuteRequest::Deploy(Deploy {
deployment,
stop_signal: None,
stop_time: None,
})
}
}
impl Resolve<BatchDeploy, (User, Update)> for State {
#[instrument(name = "BatchDeploy", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDeploy { pattern }: BatchDeploy,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDeploy>(&pattern, &user).await
}
}
async fn setup_deployment_execution(
deployment: &str,
user: &User,

View File

@@ -2,13 +2,16 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
update::{Log, Update},
user::User,
Operation,
},
};
use mungos::by_id::find_one_by_id;
@@ -21,6 +24,7 @@ use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
resource::{list_full_for_user_using_pattern, KomodoResource},
state::{db_client, State},
};
@@ -68,6 +72,7 @@ pub enum ExecuteRequest {
// ==== DEPLOYMENT ====
Deploy(Deploy),
BatchDeploy(BatchDeploy),
StartDeployment(StartDeployment),
RestartDeployment(RestartDeployment),
PauseDeployment(PauseDeployment),
@@ -77,29 +82,38 @@ pub enum ExecuteRequest {
// ==== STACK ====
DeployStack(DeployStack),
BatchDeployStack(BatchDeployStack),
DeployStackIfChanged(DeployStackIfChanged),
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
DestroyStack(DestroyStack),
BatchDestroyStack(BatchDestroyStack),
// ==== BUILD ====
RunBuild(RunBuild),
BatchRunBuild(BatchRunBuild),
CancelBuild(CancelBuild),
// ==== REPO ====
CloneRepo(CloneRepo),
BatchCloneRepo(BatchCloneRepo),
PullRepo(PullRepo),
BatchPullRepo(BatchPullRepo),
BuildRepo(BuildRepo),
BatchBuildRepo(BatchBuildRepo),
CancelRepoBuild(CancelRepoBuild),
// ==== PROCEDURE ====
RunProcedure(RunProcedure),
BatchRunProcedure(BatchRunProcedure),
// ==== ACTION ====
RunAction(RunAction),
BatchRunAction(BatchRunAction),
// ==== SERVER TEMPLATE ====
LaunchServer(LaunchServer),
@@ -117,7 +131,25 @@ pub fn router() -> Router {
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ExecuteRequest>,
) -> serror::Result<Json<Update>> {
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let res = match inner_handler(request, user).await? {
ExecutionResult::Single(update) => serde_json::to_string(&update)
.context("Failed to serialize Update")?,
ExecutionResult::Batch(res) => res,
};
Ok((TypedHeader(ContentType::json()), res))
}
/// Discriminates the two response shapes of the execute handler.
enum ExecutionResult {
  /// A single execution, tracked by its own Update.
  Single(Update),
  /// The batch contents will be pre serialized here
  /// (one item per matched resource, already JSON).
  Batch(String),
}
async fn inner_handler(
request: ExecuteRequest,
user: User,
) -> anyhow::Result<ExecutionResult> {
let req_id = Uuid::new_v4();
// need to validate no cancel is active before any update is created.
@@ -125,6 +157,17 @@ async fn handler(
let update = init_execution_update(&request, &user).await?;
// This will be the case for the Batch exections,
// they don't have their own updates.
// The batch calls also call "inner_handler" themselves,
// and in their case will spawn tasks, so that isn't necessary
// here either.
if update.operation == Operation::None {
return Ok(ExecutionResult::Batch(
task(req_id, request, user, update).await?,
));
}
let handle =
tokio::spawn(task(req_id, request, user, update.clone()));
@@ -160,7 +203,7 @@ async fn handler(
}
});
Ok(Json(update))
Ok(ExecutionResult::Single(update))
}
#[instrument(
@@ -200,3 +243,40 @@ async fn task(
res
}
/// Implemented by each `Batch*` execution request so `batch_execute`
/// can map every matched resource name to the corresponding
/// non-batch `ExecuteRequest` variant.
trait BatchExecute {
  /// The resource type the pattern is matched against.
  type Resource: KomodoResource;
  /// Builds the single (non-batch) request for one matched resource.
  fn single_request(name: String) -> ExecuteRequest;
}
/// Runs a batch execution: matches resources of type `E::Resource`
/// against `pattern` (wildcard syntax, or regex wrapped in `\`,
/// comma-separated lists allowed), then runs the corresponding
/// single execution for every match concurrently.
///
/// Returns one response item per matched resource. Per-resource
/// failures are collected as error items instead of aborting the
/// whole batch.
async fn batch_execute<E: BatchExecute>(
  pattern: &str,
  user: &User,
) -> anyhow::Result<BatchExecutionResponse> {
  let resources = list_full_for_user_using_pattern::<E::Resource>(
    // pattern / user are already references; the extra `&` was a
    // needless double borrow.
    pattern,
    Default::default(),
    user,
    &[],
  )
  .await?;
  let futures = resources.into_iter().map(|resource| {
    // Each future needs its own owned copy of the user.
    let user = user.clone();
    async move {
      inner_handler(E::single_request(resource.name.clone()), user)
        .await
        .map(|r| {
          // Non-batch requests always resolve to
          // `ExecutionResult::Single` — see `inner_handler`.
          let ExecutionResult::Single(update) = r else {
            unreachable!()
          };
          update
        })
        .map_err(|e| BatchExecutionResponseItemErr {
          name: resource.name,
          error: e.into(),
        })
        .into()
    }
  });
  Ok(join_all(futures).await)
}

View File

@@ -2,7 +2,9 @@ use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use komodo_client::{
api::execute::RunProcedure,
api::execute::{
BatchExecutionResponse, BatchRunProcedure, RunProcedure,
},
entities::{
permission::PermissionLevel, procedure::Procedure,
update::Update, user::User,
@@ -18,6 +20,26 @@ use crate::{
state::{action_states, db_client, State},
};
use super::ExecuteRequest;
impl super::BatchExecute for BatchRunProcedure {
type Resource = Procedure;
fn single_request(procedure: String) -> ExecuteRequest {
ExecuteRequest::RunProcedure(RunProcedure { procedure })
}
}
impl Resolve<BatchRunProcedure, (User, Update)> for State {
#[instrument(name = "BatchRunProcedure", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchRunProcedure { pattern }: BatchRunProcedure,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchRunProcedure>(&pattern, &user).await
}
}
impl Resolve<RunProcedure, (User, Update)> for State {
#[instrument(name = "RunProcedure", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(

View File

@@ -47,6 +47,24 @@ use crate::{
use super::ExecuteRequest;
impl super::BatchExecute for BatchCloneRepo {
type Resource = Repo;
fn single_request(repo: String) -> ExecuteRequest {
ExecuteRequest::CloneRepo(CloneRepo { repo })
}
}
impl Resolve<BatchCloneRepo, (User, Update)> for State {
#[instrument(name = "BatchCloneRepo", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchCloneRepo { pattern }: BatchCloneRepo,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchCloneRepo>(&pattern, &user).await
}
}
impl Resolve<CloneRepo, (User, Update)> for State {
#[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -138,6 +156,24 @@ impl Resolve<CloneRepo, (User, Update)> for State {
}
}
impl super::BatchExecute for BatchPullRepo {
type Resource = Repo;
fn single_request(repo: String) -> ExecuteRequest {
ExecuteRequest::CloneRepo(CloneRepo { repo })
}
}
impl Resolve<BatchPullRepo, (User, Update)> for State {
#[instrument(name = "BatchPullRepo", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchPullRepo { pattern }: BatchPullRepo,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchPullRepo>(&pattern, &user).await
}
}
impl Resolve<PullRepo, (User, Update)> for State {
#[instrument(name = "PullRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -271,6 +307,24 @@ async fn update_last_pulled_time(repo_name: &str) {
}
}
impl super::BatchExecute for BatchBuildRepo {
type Resource = Repo;
fn single_request(repo: String) -> ExecuteRequest {
ExecuteRequest::CloneRepo(CloneRepo { repo })
}
}
impl Resolve<BatchBuildRepo, (User, Update)> for State {
#[instrument(name = "BatchBuildRepo", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchBuildRepo { pattern }: BatchBuildRepo,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchBuildRepo>(&pattern, &user).await
}
}
impl Resolve<BuildRepo, (User, Update)> for State {
#[instrument(name = "BuildRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(

View File

@@ -36,6 +36,29 @@ use crate::{
state::{action_states, db_client, State},
};
use super::ExecuteRequest;
impl super::BatchExecute for BatchDeployStack {
type Resource = Stack;
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DeployStack(DeployStack {
stack,
stop_time: None,
})
}
}
impl Resolve<BatchDeployStack, (User, Update)> for State {
#[instrument(name = "BatchDeployStack", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDeployStack { pattern }: BatchDeployStack,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDeployStack>(&pattern, &user).await
}
}
impl Resolve<DeployStack, (User, Update)> for State {
#[instrument(name = "DeployStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -246,6 +269,28 @@ impl Resolve<DeployStack, (User, Update)> for State {
}
}
impl super::BatchExecute for BatchDeployStackIfChanged {
type Resource = Stack;
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DeployStackIfChanged(DeployStackIfChanged {
stack,
stop_time: None,
})
}
}
impl Resolve<BatchDeployStackIfChanged, (User, Update)> for State {
#[instrument(name = "BatchDeployStackIfChanged", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDeployStackIfChanged { pattern }: BatchDeployStackIfChanged,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDeployStackIfChanged>(&pattern, &user)
.await
}
}
impl Resolve<DeployStackIfChanged, (User, Update)> for State {
async fn resolve(
&self,
@@ -418,6 +463,28 @@ impl Resolve<StopStack, (User, Update)> for State {
}
}
impl super::BatchExecute for BatchDestroyStack {
type Resource = Stack;
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DestroyStack(DestroyStack {
stack,
remove_orphans: false,
stop_time: None,
})
}
}
impl Resolve<BatchDestroyStack, (User, Update)> for State {
#[instrument(name = "BatchDestroyStack", skip(self, user), fields(user_id = user.id))]
async fn resolve(
&self,
BatchDestroyStack { pattern }: BatchDestroyStack,
(user, _): (User, Update),
) -> anyhow::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDestroyStack>(&pattern, &user).await
}
}
impl Resolve<DestroyStack, (User, Update)> for State {
#[instrument(name = "DestroyStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(

View File

@@ -339,6 +339,7 @@ impl Resolve<ListSecrets, User> for State {
ResourceTarget::Server(id) => Some(id),
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Url(_) => None,
BuilderConfig::Server(config) => Some(config.server_id),
BuilderConfig::Aws(config) => {
secrets.extend(config.secrets);
@@ -387,6 +388,7 @@ impl Resolve<ListGitProvidersFromConfig, User> for State {
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Url(_) => {}
BuilderConfig::Server(config) => {
merge_git_providers_for_server(
&mut providers,
@@ -485,6 +487,7 @@ impl Resolve<ListDockerRegistriesFromConfig, User> for State {
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Url(_) => {}
BuilderConfig::Server(config) => {
merge_docker_registries_for_server(
&mut registries,

View File

@@ -42,9 +42,32 @@ pub async fn get_builder_periphery(
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
match builder.config {
BuilderConfig::Url(config) => {
if config.address.is_empty() {
return Err(anyhow!("Builder has not yet configured an address"));
}
let periphery = PeripheryClient::new(
config.address,
if config.passkey.is_empty() {
core_config().passkey.clone()
} else {
config.passkey
},
);
periphery
.health_check()
.await
.context("Url Builder failed health check")?;
Ok((
periphery,
BuildCleanupData::Server {
repo_name: resource_name,
},
))
}
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
return Err(anyhow!("builder has not configured a server"));
return Err(anyhow!("Builder has not configured a server"));
}
let server = resource::get::<Server>(&config.server_id).await?;
let periphery = periphery_client(&server)?;

View File

@@ -4,9 +4,14 @@ use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use komodo_client::{
api::execute::Execution,
api::execute::*,
entities::{
action::Action,
build::Build,
deployment::Deployment,
procedure::Procedure,
repo::Repo,
stack::Stack,
update::{Log, Update},
user::procedure_user,
},
@@ -17,6 +22,7 @@ use tokio::sync::Mutex;
use crate::{
api::execute::ExecuteRequest,
resource::{list_full_for_user_using_pattern, KomodoResource},
state::{db_client, State},
};
@@ -79,11 +85,94 @@ pub async fn execute_procedure(
#[allow(dependency_on_unit_never_type_fallback)]
#[instrument(skip(update))]
async fn execute_stage(
executions: Vec<Execution>,
_executions: Vec<Execution>,
parent_id: &str,
parent_name: &str,
update: &Mutex<Update>,
) -> anyhow::Result<()> {
let mut executions = Vec::with_capacity(_executions.capacity());
for execution in _executions {
match execution {
Execution::BatchRunAction(exec) => {
extend_batch_exection::<BatchRunAction>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchRunProcedure(exec) => {
extend_batch_exection::<BatchRunProcedure>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchRunBuild(exec) => {
extend_batch_exection::<BatchRunBuild>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchCloneRepo(exec) => {
extend_batch_exection::<BatchCloneRepo>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchPullRepo(exec) => {
extend_batch_exection::<BatchPullRepo>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchBuildRepo(exec) => {
extend_batch_exection::<BatchBuildRepo>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDeploy(exec) => {
extend_batch_exection::<BatchDeploy>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDestroyDeployment(exec) => {
extend_batch_exection::<BatchDestroyDeployment>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDeployStack(exec) => {
extend_batch_exection::<BatchDeployStack>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDeployStackIfChanged(exec) => {
extend_batch_exection::<BatchDeployStackIfChanged>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDestroyStack(exec) => {
extend_batch_exection::<BatchDestroyStack>(
&exec.pattern,
&mut executions,
)
.await?;
}
execution => executions.push(execution),
}
}
let futures = executions.into_iter().map(|execution| async move {
let now = Instant::now();
add_line_to_update(
@@ -146,6 +235,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchRunProcedure(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchRunProcedure not implemented correctly"
));
}
Execution::RunAction(req) => {
let req = ExecuteRequest::RunAction(req);
let update = init_execution_update(&req, &user).await?;
@@ -162,6 +257,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchRunAction(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchRunAction not implemented correctly"
));
}
Execution::RunBuild(req) => {
let req = ExecuteRequest::RunBuild(req);
let update = init_execution_update(&req, &user).await?;
@@ -178,6 +279,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchRunBuild(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchRunBuild not implemented correctly"
));
}
Execution::CancelBuild(req) => {
let req = ExecuteRequest::CancelBuild(req);
let update = init_execution_update(&req, &user).await?;
@@ -210,6 +317,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDeploy(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDeploy not implemented correctly"
));
}
Execution::StartDeployment(req) => {
let req = ExecuteRequest::StartDeployment(req);
let update = init_execution_update(&req, &user).await?;
@@ -306,6 +419,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDestroyDeployment(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDestroyDeployment not implemented correctly"
));
}
Execution::CloneRepo(req) => {
let req = ExecuteRequest::CloneRepo(req);
let update = init_execution_update(&req, &user).await?;
@@ -322,6 +441,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchCloneRepo(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchCloneRepo not implemented correctly"
));
}
Execution::PullRepo(req) => {
let req = ExecuteRequest::PullRepo(req);
let update = init_execution_update(&req, &user).await?;
@@ -338,6 +463,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchPullRepo(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchPullRepo not implemented correctly"
));
}
Execution::BuildRepo(req) => {
let req = ExecuteRequest::BuildRepo(req);
let update = init_execution_update(&req, &user).await?;
@@ -354,6 +485,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchBuildRepo(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchBuildRepo not implemented correctly"
));
}
Execution::CancelRepoBuild(req) => {
let req = ExecuteRequest::CancelRepoBuild(req);
let update = init_execution_update(&req, &user).await?;
@@ -743,6 +880,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDeployStack(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDeployStack not implemented correctly"
));
}
Execution::DeployStackIfChanged(req) => {
let req = ExecuteRequest::DeployStackIfChanged(req);
let update = init_execution_update(&req, &user).await?;
@@ -759,6 +902,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDeployStackIfChanged(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDeployStackIfChanged not implemented correctly"
));
}
Execution::StartStack(req) => {
let req = ExecuteRequest::StartStack(req);
let update = init_execution_update(&req, &user).await?;
@@ -855,6 +1004,12 @@ async fn execute_execution(
)
.await?
}
Execution::BatchDestroyStack(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchDestroyStack not implemented correctly"
));
}
Execution::Sleep(req) => {
let duration = Duration::from_millis(req.duration_ms as u64);
tokio::time::sleep(duration).await;
@@ -912,3 +1067,120 @@ async fn add_line_to_update(update: &Mutex<Update>, line: &str) {
error!("Failed to update an update during procedure | {e:#}");
};
}
/// Expands one batch execution into its per-resource single
/// executions and appends them to `executions`.
///
/// Resources of type `E::Resource` are matched against `pattern`
/// using the procedure user's permissions.
///
/// NOTE(review): "exection" in the name is a typo for "execution";
/// renaming would touch every call site in `execute_stage`, so it is
/// only flagged here.
async fn extend_batch_exection<E: ExtendBatch>(
  pattern: &str,
  executions: &mut Vec<Execution>,
) -> anyhow::Result<()> {
  let more = list_full_for_user_using_pattern::<E::Resource>(
    pattern,
    Default::default(),
    procedure_user(),
    &[],
  )
  .await?
  .into_iter()
  .map(|resource| E::single_execution(resource.name));
  executions.extend(more);
  Ok(())
}
/// Implemented by each `Batch*` execution variant so procedures can
/// expand it into the equivalent list of single executions
/// (see `extend_batch_exection`-style expansion in `execute_stage`).
trait ExtendBatch {
  /// The resource type the pattern is matched against.
  type Resource: KomodoResource;
  /// Builds the single (non-batch) execution for one matched resource.
  fn single_execution(name: String) -> Execution;
}
/// One `RunProcedure` per matched procedure.
impl ExtendBatch for BatchRunProcedure {
  type Resource = Procedure;
  fn single_execution(name: String) -> Execution {
    Execution::RunProcedure(RunProcedure { procedure: name })
  }
}
impl ExtendBatch for BatchRunAction {
type Resource = Action;
fn single_execution(action: String) -> Execution {
Execution::RunAction(RunAction { action })
}
}
impl ExtendBatch for BatchRunBuild {
type Resource = Build;
fn single_execution(build: String) -> Execution {
Execution::RunBuild(RunBuild { build })
}
}
impl ExtendBatch for BatchCloneRepo {
type Resource = Repo;
fn single_execution(repo: String) -> Execution {
Execution::CloneRepo(CloneRepo { repo })
}
}
impl ExtendBatch for BatchPullRepo {
type Resource = Repo;
fn single_execution(repo: String) -> Execution {
Execution::PullRepo(PullRepo { repo })
}
}
impl ExtendBatch for BatchBuildRepo {
type Resource = Repo;
fn single_execution(repo: String) -> Execution {
Execution::BuildRepo(BuildRepo { repo })
}
}
impl ExtendBatch for BatchDeploy {
type Resource = Deployment;
fn single_execution(deployment: String) -> Execution {
Execution::Deploy(Deploy {
deployment,
stop_signal: None,
stop_time: None,
})
}
}
impl ExtendBatch for BatchDestroyDeployment {
type Resource = Deployment;
fn single_execution(deployment: String) -> Execution {
Execution::DestroyDeployment(DestroyDeployment {
deployment,
signal: None,
time: None,
})
}
}
impl ExtendBatch for BatchDeployStack {
type Resource = Stack;
fn single_execution(stack: String) -> Execution {
Execution::DeployStack(DeployStack {
stack,
stop_time: None,
})
}
}
impl ExtendBatch for BatchDeployStackIfChanged {
type Resource = Stack;
fn single_execution(stack: String) -> Execution {
Execution::DeployStackIfChanged(DeployStackIfChanged {
stack,
stop_time: None,
})
}
}
/// One `DestroyStack` per matched stack, with default stop options.
impl ExtendBatch for BatchDestroyStack {
  type Resource = Stack;
  fn single_execution(name: String) -> Execution {
    let destroy = DestroyStack {
      stack: name,
      remove_orphans: false,
      stop_time: None,
    };
    Execution::DestroyStack(destroy)
  }
}

View File

@@ -118,7 +118,7 @@ pub fn get_stack_state_from_containers(
if containers.is_empty() {
return StackState::Down;
}
if services.len() != containers.len() {
if services.len() > containers.len() {
return StackState::Unhealthy;
}
let running = containers.iter().all(|container| {

View File

@@ -261,6 +261,9 @@ pub async fn init_execution_update(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::BatchDeploy(_data) => {
return Ok(Default::default())
}
ExecuteRequest::StartDeployment(data) => (
Operation::StartDeployment,
ResourceTarget::Deployment(
@@ -305,6 +308,9 @@ pub async fn init_execution_update(
resource::get::<Build>(&data.build).await?.id,
),
),
ExecuteRequest::BatchRunBuild(_data) => {
return Ok(Default::default())
}
ExecuteRequest::CancelBuild(data) => (
Operation::CancelBuild,
ResourceTarget::Build(
@@ -319,18 +325,27 @@ pub async fn init_execution_update(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::BatchCloneRepo(_data) => {
return Ok(Default::default())
}
ExecuteRequest::PullRepo(data) => (
Operation::PullRepo,
ResourceTarget::Repo(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::BatchPullRepo(_data) => {
return Ok(Default::default())
}
ExecuteRequest::BuildRepo(data) => (
Operation::BuildRepo,
ResourceTarget::Repo(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::BatchBuildRepo(_data) => {
return Ok(Default::default())
}
ExecuteRequest::CancelRepoBuild(data) => (
Operation::CancelRepoBuild,
ResourceTarget::Repo(
@@ -345,6 +360,9 @@ pub async fn init_execution_update(
resource::get::<Procedure>(&data.procedure).await?.id,
),
),
ExecuteRequest::BatchRunProcedure(_) => {
return Ok(Default::default())
}
// Action
ExecuteRequest::RunAction(data) => (
@@ -353,6 +371,9 @@ pub async fn init_execution_update(
resource::get::<Action>(&data.action).await?.id,
),
),
ExecuteRequest::BatchRunAction(_) => {
return Ok(Default::default())
}
// Server template
ExecuteRequest::LaunchServer(data) => (
@@ -379,12 +400,18 @@ pub async fn init_execution_update(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::BatchDeployStack(_data) => {
return Ok(Default::default())
}
ExecuteRequest::DeployStackIfChanged(data) => (
Operation::DeployStack,
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::BatchDeployStackIfChanged(_data) => {
return Ok(Default::default())
}
ExecuteRequest::StartStack(data) => (
if data.service.is_some() {
Operation::StartStackService
@@ -441,6 +468,9 @@ pub async fn init_execution_update(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::BatchDestroyStack(_data) => {
return Ok(Default::default())
}
};
let mut update = make_update(target, operation, user);
update.in_progress();

View File

@@ -40,6 +40,9 @@ impl super::KomodoResource for Builder {
builder: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let (builder_type, instance_type) = match builder.config {
BuilderConfig::Url(_) => {
(BuilderConfigVariant::Url.to_string(), None)
}
BuilderConfig::Server(config) => (
BuilderConfigVariant::Server.to_string(),
Some(config.server_id),

View File

@@ -7,7 +7,7 @@ use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::{future::join_all, FutureExt};
use komodo_client::{
api::write::CreateTag,
api::{read::ExportResourcesToToml, write::CreateTag},
entities::{
komodo_timestamp,
permission::PermissionLevel,
@@ -18,6 +18,7 @@ use komodo_client::{
user::{system_user, User},
Operation, ResourceTarget, ResourceTargetVariant,
},
parsers::parse_string_list,
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
@@ -242,6 +243,79 @@ pub async fn get_check_permissions<T: KomodoResource>(
}
}
/// Computes the effective permission level `user` has on the given
/// resource, taking the maximum across: transparent-mode default,
/// the resource's global base permission, the user's (and their
/// groups') base permission on the resource variant, and any
/// permissions granted specifically on this resource.
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource<T: KomodoResource>(
  user: &User,
  resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
  // Admins can do anything.
  if user.admin {
    return Ok(PermissionLevel::Write);
  }
  let resource_type = T::resource_type();
  // Starting point: Read in transparent mode, otherwise None.
  let mut highest = if core_config().transparent_mode {
    PermissionLevel::Read
  } else {
    PermissionLevel::None
  };
  // Overlay the resource level global base permission.
  let resource_base = get::<T>(resource_id).await?.base_permission;
  if resource_base > highest {
    highest = resource_base;
  }
  // Overlay the user's base permission on the resource variant.
  if let Some(level) = user.all.get(&resource_type).cloned() {
    if level > highest {
      highest = level;
    }
  }
  // Write is the maximum level — stop early.
  if highest == PermissionLevel::Write {
    return Ok(PermissionLevel::Write);
  }
  // Overlay the user groups' base permissions on the variant.
  let groups = get_user_user_groups(&user.id).await?;
  for group in &groups {
    if let Some(level) = group.all.get(&resource_type).cloned() {
      if level > highest {
        highest = level;
      }
    }
  }
  if highest == PermissionLevel::Write {
    return Ok(PermissionLevel::Write);
  }
  // Finally overlay any permissions granted specifically on this
  // resource, to the user directly or via any of their groups.
  let specific = find_collect(
    &db_client().permissions,
    doc! {
      "$or": user_target_query(&user.id, &groups)?,
      "resource_target.type": resource_type.as_ref(),
      "resource_target.id": resource_id
    },
    None,
  )
  .await
  .context("failed to query db for permissions")?;
  // Take the max permission between personal / any user groups.
  for permission in specific {
    if permission.level > highest {
      highest = permission.level;
    }
  }
  Ok(highest)
}
// ======
// LIST
// ======
@@ -325,79 +399,6 @@ pub async fn get_resource_ids_for_user<T: KomodoResource>(
Ok(Some(ids.into_iter().collect()))
}
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource<T: KomodoResource>(
user: &User,
resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
if user.admin {
return Ok(PermissionLevel::Write);
}
let resource_type = T::resource_type();
// Start with base of Read or None
let mut base = if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
};
// Add in the resource level global base permission
let resource_base = get::<T>(resource_id).await?.base_permission;
if resource_base > base {
base = resource_base;
}
// Overlay users base on resource variant
if let Some(level) = user.all.get(&resource_type).cloned() {
if level > base {
base = level;
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any user groups base on resource variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(level) = group.all.get(&resource_type).cloned() {
if level > base {
base = level;
}
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any specific permissions
let permission = find_collect(
&db_client().permissions,
doc! {
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"resource_target.id": resource_id
},
None,
)
.await
.context("failed to query db for permissions")?
.into_iter()
// get the max permission user has between personal / any user groups
.fold(base, |level, permission| {
if permission.level > level {
permission.level
} else {
level
}
});
Ok(permission)
}
#[instrument(level = "debug")]
pub async fn list_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
@@ -410,6 +411,23 @@ pub async fn list_for_user<T: KomodoResource>(
list_for_user_using_document::<T>(filters, user).await
}
/// Lists resources matching `pattern` (wildcard syntax, or regex
/// wrapped in `\` — see `list_full_for_user_using_pattern`) and
/// converts each match to its list item representation.
#[instrument(level = "debug")]
pub async fn list_for_user_using_pattern<T: KomodoResource>(
  pattern: &str,
  query: ResourceQuery<T::QuerySpecifics>,
  user: &User,
  all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
  let resources = list_full_for_user_using_pattern::<T>(
    pattern, query, user, all_tags,
  )
  .await?;
  let list_items = resources.into_iter().map(T::to_list_item);
  Ok(join_all(list_items).await)
}
#[instrument(level = "debug")]
pub async fn list_for_user_using_document<T: KomodoResource>(
filters: Document,
user: &User,
@@ -421,6 +439,55 @@ pub async fn list_for_user_using_document<T: KomodoResource>(
Ok(join_all(list).await)
}
/// Lists full resources matching wildcard syntax,
/// or regex if wrapped with "\\".
///
/// `pattern` may also be a comma-separated list of patterns; a
/// resource is included if it matches any of them.
///
/// ## Example
/// ```
/// let items = list_full_for_user_using_pattern::<Build>("foo-*", Default::default(), user, all_tags).await?;
/// let items = list_full_for_user_using_pattern::<Build>("\\^foo-.*$\\", Default::default(), user, all_tags).await?;
/// ```
#[instrument(level = "debug")]
pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
  pattern: &str,
  query: ResourceQuery<T::QuerySpecifics>,
  user: &User,
  all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
  let resources =
    list_full_for_user::<T>(query, user, all_tags).await?;
  // Names of resources matching at least one pattern.
  let mut names = HashSet::<String>::new();
  for pattern in parse_string_list(pattern) {
    // Require len > 1 so a lone "\" isn't taken as an (empty) regex
    // wrapper — the `[1..len - 1]` slice below would panic on it.
    if pattern.len() > 1
      && pattern.starts_with('\\')
      && pattern.ends_with('\\')
    {
      let regex = regex::Regex::new(&pattern[1..(pattern.len() - 1)])
        .context("Regex matching string invalid")?;
      for resource in &resources {
        if regex.is_match(&resource.name) {
          names.insert(resource.name.clone());
        }
      }
    } else {
      let wildcard = wildcard::Wildcard::new(pattern.as_bytes())
        .context("Wildcard matching string invalid")?;
      for resource in &resources {
        if wildcard.is_match(resource.name.as_bytes()) {
          names.insert(resource.name.clone());
        }
      }
    }
  }
  Ok(
    resources
      .into_iter()
      .filter(|resource| names.contains(resource.name.as_str()))
      .collect(),
  )
}
#[instrument(level = "debug")]
pub async fn list_full_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
@@ -831,6 +898,16 @@ pub async fn delete<T: KomodoResource>(
}
let target = resource_target::<T>(resource.id.clone());
let toml = State
.resolve(
ExportResourcesToToml {
targets: vec![target.clone()],
..Default::default()
},
user.clone(),
)
.await?
.toml;
let mut update =
make_update(target.clone(), T::delete_operation(), user);
@@ -843,13 +920,14 @@ pub async fn delete<T: KomodoResource>(
delete_one_by_id(T::coll(), &resource.id, None)
.await
.with_context(|| {
format!("failed to delete {} from database", T::resource_type())
format!("Failed to delete {} from database", T::resource_type())
})?;
update.push_simple_log(
&format!("delete {}", T::resource_type()),
format!("deleted {} {}", T::resource_type(), resource.name),
&format!("Delete {}", T::resource_type()),
format!("Deleted {} {}", T::resource_type(), resource.name),
);
update.push_simple_log("Deleted Toml", toml);
if let Err(e) = T::post_delete(&resource, &mut update).await {
update.push_error_log("post delete", format_serror(&e.into()));

View File

@@ -178,6 +178,13 @@ async fn validate_config(
}
params.procedure = procedure.id;
}
Execution::BatchRunProcedure(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::RunAction(params) => {
let action = super::get_check_permissions::<Action>(
&params.action,
@@ -187,6 +194,13 @@ async fn validate_config(
.await?;
params.action = action.id;
}
Execution::BatchRunAction(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::RunBuild(params) => {
let build = super::get_check_permissions::<Build>(
&params.build,
@@ -196,6 +210,13 @@ async fn validate_config(
.await?;
params.build = build.id;
}
Execution::BatchRunBuild(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::CancelBuild(params) => {
let build = super::get_check_permissions::<Build>(
&params.build,
@@ -215,6 +236,13 @@ async fn validate_config(
.await?;
params.deployment = deployment.id;
}
Execution::BatchDeploy(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::StartDeployment(params) => {
let deployment =
super::get_check_permissions::<Deployment>(
@@ -275,6 +303,13 @@ async fn validate_config(
.await?;
params.deployment = deployment.id;
}
Execution::BatchDestroyDeployment(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::CloneRepo(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -284,6 +319,13 @@ async fn validate_config(
.await?;
params.repo = repo.id;
}
Execution::BatchCloneRepo(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::PullRepo(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -293,6 +335,13 @@ async fn validate_config(
.await?;
params.repo = repo.id;
}
Execution::BatchPullRepo(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::BuildRepo(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -302,6 +351,13 @@ async fn validate_config(
.await?;
params.repo = repo.id;
}
Execution::BatchBuildRepo(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::CancelRepoBuild(params) => {
let repo = super::get_check_permissions::<Repo>(
&params.repo,
@@ -528,6 +584,13 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::BatchDeployStack(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::DeployStackIfChanged(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
@@ -537,6 +600,13 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::BatchDeployStackIfChanged(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::StartStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
@@ -591,6 +661,13 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::BatchDestroyStack(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::Sleep(_) => {}
}
}

View File

@@ -47,6 +47,7 @@ impl super::KomodoResource for Server {
info: ServerListItemInfo {
state: status.map(|s| s.state).unwrap_or_default(),
region: server.config.region,
address: server.config.address,
send_unreachable_alerts: server
.config
.send_unreachable_alerts,

View File

@@ -1,6 +1,9 @@
use anyhow::Context;
use komodo_client::entities::{
stack::{ComposeFile, ComposeService, Stack, StackServiceNames},
stack::{
ComposeFile, ComposeService, ComposeServiceDeploy, Stack,
StackServiceNames,
},
FileContents,
};
@@ -69,16 +72,40 @@ pub fn extract_services_into_res(
let compose = serde_yaml::from_str::<ComposeFile>(compose_contents)
.context("failed to parse service names from compose contents")?;
let services = compose.services.into_iter().map(
|(service_name, ComposeService { container_name, .. })| {
StackServiceNames {
container_name: container_name.unwrap_or_else(|| {
format!("{project_name}-{service_name}")
}),
service_name,
}
let mut services = Vec::with_capacity(compose.services.capacity());
for (
service_name,
ComposeService {
container_name,
deploy,
..
},
);
) in compose.services
{
match deploy {
Some(ComposeServiceDeploy {
replicas: Some(replicas),
}) if replicas > 1 => {
for i in 1..1 + replicas {
services.push(StackServiceNames {
container_name: format!(
"{project_name}-{service_name}-{i}"
),
service_name: format!("{service_name}-{i}"),
});
}
}
_ => {
services.push(StackServiceNames {
container_name: container_name.unwrap_or_else(|| {
format!("{project_name}-{service_name}")
}),
service_name,
});
}
}
}
res.extend(services);

View File

@@ -360,6 +360,7 @@ impl ResourceSyncTrait for Procedure {
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::BatchRunProcedure(_config) => {}
Execution::RunAction(config) => {
config.action = resources
.actions
@@ -367,6 +368,7 @@ impl ResourceSyncTrait for Procedure {
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::BatchRunAction(_config) => {}
Execution::RunBuild(config) => {
config.build = resources
.builds
@@ -374,6 +376,7 @@ impl ResourceSyncTrait for Procedure {
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::BatchRunBuild(_config) => {}
Execution::CancelBuild(config) => {
config.build = resources
.builds
@@ -388,6 +391,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchDeploy(_config) => {}
Execution::StartDeployment(config) => {
config.deployment = resources
.deployments
@@ -430,6 +434,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchDestroyDeployment(_config) => {}
Execution::CloneRepo(config) => {
config.repo = resources
.repos
@@ -437,6 +442,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchCloneRepo(_config) => {}
Execution::PullRepo(config) => {
config.repo = resources
.repos
@@ -444,6 +450,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchPullRepo(_config) => {}
Execution::BuildRepo(config) => {
config.repo = resources
.repos
@@ -451,6 +458,7 @@ impl ResourceSyncTrait for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BatchBuildRepo(_config) => {}
Execution::CancelRepoBuild(config) => {
config.repo = resources
.repos
@@ -626,6 +634,7 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::BatchDeployStack(_config) => {}
Execution::DeployStackIfChanged(config) => {
config.stack = resources
.stacks
@@ -633,6 +642,7 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::BatchDeployStackIfChanged(_config) => {}
Execution::StartStack(config) => {
config.stack = resources
.stacks
@@ -675,6 +685,7 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::BatchDestroyStack(_config) => {}
Execution::Sleep(_) => {}
}
}

View File

@@ -390,6 +390,7 @@ impl ToToml for Builder {
let empty_params = match resource.config {
PartialBuilderConfig::Aws(config) => config.is_none(),
PartialBuilderConfig::Server(config) => config.is_none(),
PartialBuilderConfig::Url(config) => config.is_none(),
};
if empty_params {
// toml_pretty will remove empty map
@@ -414,6 +415,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchRunProcedure(_exec) => {}
Execution::RunAction(exec) => exec.action.clone_from(
all
.actions
@@ -421,6 +423,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchRunAction(_exec) => {}
Execution::RunBuild(exec) => exec.build.clone_from(
all
.builds
@@ -428,6 +431,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchRunBuild(_exec) => {}
Execution::CancelBuild(exec) => exec.build.clone_from(
all
.builds
@@ -442,6 +446,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchDeploy(_exec) => {}
Execution::StartDeployment(exec) => {
exec.deployment.clone_from(
all
@@ -496,6 +501,7 @@ impl ToToml for Procedure {
.unwrap_or(&String::new()),
)
}
Execution::BatchDestroyDeployment(_exec) => {}
Execution::CloneRepo(exec) => exec.repo.clone_from(
all
.repos
@@ -503,6 +509,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchCloneRepo(_exec) => {}
Execution::PullRepo(exec) => exec.repo.clone_from(
all
.repos
@@ -510,6 +517,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchPullRepo(_exec) => {}
Execution::BuildRepo(exec) => exec.repo.clone_from(
all
.repos
@@ -517,6 +525,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchBuildRepo(_exec) => {}
Execution::CancelRepoBuild(exec) => exec.repo.clone_from(
all
.repos
@@ -710,6 +719,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchDeployStack(_exec) => {}
Execution::DeployStackIfChanged(exec) => {
exec.stack.clone_from(
all
@@ -719,6 +729,7 @@ impl ToToml for Procedure {
.unwrap_or(&String::new()),
)
}
Execution::BatchDeployStackIfChanged(_exec) => {}
Execution::StartStack(exec) => exec.stack.clone_from(
all
.stacks
@@ -761,6 +772,7 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::BatchDestroyStack(_exec) => {}
Execution::Sleep(_) | Execution::None(_) => {}
}
}

View File

@@ -40,7 +40,7 @@ impl DockerClient {
pub async fn list_containers(
&self,
) -> anyhow::Result<Vec<ContainerListItem>> {
self
let containers = self
.docker
.list_containers(Some(ListContainersOptions::<String> {
all: true,
@@ -48,8 +48,8 @@ impl DockerClient {
}))
.await?
.into_iter()
.map(|container| {
Ok(ContainerListItem {
.flat_map(|container| {
anyhow::Ok(ContainerListItem {
server_id: None,
name: container
.names
@@ -75,9 +75,12 @@ impl DockerClient {
networks: container
.network_settings
.and_then(|settings| {
settings
.networks
.map(|networks| networks.into_keys().collect())
settings.networks.map(|networks| {
let mut keys =
networks.into_keys().collect::<Vec<_>>();
keys.sort();
keys
})
})
.unwrap_or_default(),
volumes: container
@@ -92,7 +95,8 @@ impl DockerClient {
labels: container.labels.unwrap_or_default(),
})
})
.collect()
.collect::<Vec<_>>();
Ok(containers)
}
pub async fn inspect_container(
@@ -519,7 +523,7 @@ impl DockerClient {
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<NetworkListItem>> {
self
let networks = self
.docker
.list_networks::<String>(None)
.await?
@@ -545,7 +549,7 @@ impl DockerClient {
}),
None => false,
};
Ok(NetworkListItem {
NetworkListItem {
name: network.name,
id: network.id,
created: network.created,
@@ -559,9 +563,10 @@ impl DockerClient {
attachable: network.attachable,
ingress: network.ingress,
in_use,
})
}
})
.collect()
.collect();
Ok(networks)
}
pub async fn inspect_network(
@@ -628,7 +633,7 @@ impl DockerClient {
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<ImageListItem>> {
self
let images = self
.docker
.list_images::<String>(None)
.await?
@@ -641,7 +646,7 @@ impl DockerClient {
.map(|id| id == &image.id)
.unwrap_or_default()
});
Ok(ImageListItem {
ImageListItem {
name: image
.repo_tags
.into_iter()
@@ -652,9 +657,10 @@ impl DockerClient {
created: image.created,
size: image.size,
in_use,
})
}
})
.collect()
.collect();
Ok(images)
}
pub async fn inspect_image(
@@ -761,7 +767,7 @@ impl DockerClient {
&self,
containers: &[ContainerListItem],
) -> anyhow::Result<Vec<VolumeListItem>> {
self
let volumes = self
.docker
.list_volumes::<String>(None)
.await?
@@ -786,7 +792,7 @@ impl DockerClient {
let in_use = containers.iter().any(|container| {
container.volumes.iter().any(|name| &volume.name == name)
});
Ok(VolumeListItem {
VolumeListItem {
name: volume.name,
driver: volume.driver,
mountpoint: volume.mountpoint,
@@ -794,9 +800,10 @@ impl DockerClient {
size: volume.usage_data.map(|data| data.size),
scope,
in_use,
})
}
})
.collect()
.collect();
Ok(volumes)
}
pub async fn inspect_volume(

View File

@@ -11,7 +11,9 @@ repository.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
# default = ["blocking"] # use to dev client blocking mode
mongo = ["dep:mongo_indexed"]
blocking = ["reqwest/blocking"]
[dependencies]
# mogh

View File

@@ -1,4 +1,35 @@
# Komodo
*A system to build and deploy software across many servers*
Docs: [https://docs.rs/komodo_client/latest/komodo_client](https://docs.rs/komodo_client/latest/komodo_client)
Full Docs: [https://docs.rs/komodo_client/latest/komodo_client](https://docs.rs/komodo_client/latest/komodo_client).
This is a client library for the Komodo Core API.
It contains:
- Definitions for the application [api](https://docs.rs/komodo_client/latest/komodo_client/api/index.html)
and [entities](https://docs.rs/komodo_client/latest/komodo_client/entities/index.html).
- A [client](https://docs.rs/komodo_client/latest/komodo_client/struct.KomodoClient.html)
to interact with the Komodo Core API.
- Information on configuring Komodo
[Core](https://docs.rs/komodo_client/latest/komodo_client/entities/config/core/index.html) and
[Periphery](https://docs.rs/komodo_client/latest/komodo_client/entities/config/periphery/index.html).
## Client Configuration
The client includes a convenience method to parse the Komodo API url and credentials from the environment:
- `KOMODO_ADDRESS`
- `KOMODO_API_KEY`
- `KOMODO_API_SECRET`
## Client Example
```rust
dotenvy::dotenv().ok();
let client = KomodoClient::new_from_env()?;
// Get all the deployments
let deployments = client.read(ListDeployments::default()).await?;
println!("{deployments:#?}");
let update = client.execute(RunBuild { build: "test-build".to_string() }).await?;
```

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Runs the target Action. Response: [Update]
#[typeshare]
@@ -26,3 +26,31 @@ pub struct RunAction {
/// Id or name
pub action: String,
}
/// Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResponse]
#[typeshare]
#[derive(
  Debug,
  Clone,
  PartialEq,
  Serialize,
  Deserialize,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchRunAction {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* actions
  /// foo-*
  /// # add some more
  /// extra-action-1, extra-action-2
  /// ```
  pub pattern: String,
}

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
//
@@ -36,6 +36,36 @@ pub struct RunBuild {
//
/// Runs multiple builds in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Debug,
  Clone,
  PartialEq,
  Serialize,
  Deserialize,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchRunBuild {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* builds
  /// foo-*
  /// # add some more
  /// extra-build-1, extra-build-2
  /// ```
  pub pattern: String,
}
//
/// Cancels the target build.
/// Only does anything if the build is `building` when called.
/// Response: [Update]

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::{update::Update, TerminationSignal};
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Deploys the container for the target deployment. Response: [Update].
///
@@ -41,6 +41,36 @@ pub struct Deploy {
//
/// Deploys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Serialize,
  Deserialize,
  Debug,
  Clone,
  PartialEq,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDeploy {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* deployments
  /// foo-*
  /// # add some more
  /// extra-deployment-1, extra-deployment-2
  /// ```
  pub pattern: String,
}
//
/// Starts the container for the target deployment. Response: [Update]
///
/// 1. Runs `docker start ${container_name}`.
@@ -187,3 +217,33 @@ pub struct DestroyDeployment {
/// Override the default termination max time.
pub time: Option<i32>,
}
//
/// Destroys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Serialize,
  Deserialize,
  Debug,
  Clone,
  PartialEq,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDestroyDeployment {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* deployments
  /// foo-*
  /// # add some more
  /// extra-deployment-1, extra-deployment-2
  /// ```
  pub pattern: String,
}

View File

@@ -27,7 +27,7 @@ pub use sync::*;
use crate::{
api::write::CommitSync,
entities::{NoData, I64},
entities::{update::Update, NoData, _Serror, I64},
};
pub trait KomodoExecuteRequest: HasResponse {}
@@ -59,27 +59,35 @@ pub enum Execution {
// ACTION
RunAction(RunAction),
BatchRunAction(BatchRunAction),
// PROCEDURE
RunProcedure(RunProcedure),
BatchRunProcedure(BatchRunProcedure),
// BUILD
RunBuild(RunBuild),
BatchRunBuild(BatchRunBuild),
CancelBuild(CancelBuild),
// DEPLOYMENT
Deploy(Deploy),
BatchDeploy(BatchDeploy),
StartDeployment(StartDeployment),
RestartDeployment(RestartDeployment),
PauseDeployment(PauseDeployment),
UnpauseDeployment(UnpauseDeployment),
StopDeployment(StopDeployment),
DestroyDeployment(DestroyDeployment),
BatchDestroyDeployment(BatchDestroyDeployment),
// REPO
CloneRepo(CloneRepo),
BatchCloneRepo(BatchCloneRepo),
PullRepo(PullRepo),
BatchPullRepo(BatchPullRepo),
BuildRepo(BuildRepo),
BatchBuildRepo(BatchBuildRepo),
CancelRepoBuild(CancelRepoBuild),
// SERVER (Container)
@@ -113,13 +121,16 @@ pub enum Execution {
// STACK
DeployStack(DeployStack),
BatchDeployStack(BatchDeployStack),
DeployStackIfChanged(DeployStackIfChanged),
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
StartStack(StartStack),
RestartStack(RestartStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
StopStack(StopStack),
DestroyStack(DestroyStack),
BatchDestroyStack(BatchDestroyStack),
// SLEEP
Sleep(Sleep),
@@ -131,3 +142,34 @@ pub struct Sleep {
#[serde(default)]
pub duration_ms: I64,
}
/// The response to any `Batch*` execution call:
/// one item per resource matched by the pattern.
#[typeshare]
pub type BatchExecutionResponse = Vec<BatchExecutionResponseItem>;

/// The outcome of triggering a single execution within a batch.
/// Serializes tagged, eg `{ "status": "Ok", "data": ... }`.
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "status", content = "data")]
pub enum BatchExecutionResponseItem {
  /// The execution was triggered; holds the resulting [Update].
  Ok(Update),
  /// The execution failed for this resource; holds the error details.
  Err(BatchExecutionResponseItemErr),
}

impl From<Result<Update, BatchExecutionResponseItemErr>>
  for BatchExecutionResponseItem
{
  fn from(
    value: Result<Update, BatchExecutionResponseItemErr>,
  ) -> Self {
    match value {
      Ok(update) => Self::Ok(update),
      Err(e) => Self::Err(e),
    }
  }
}

/// Error information for a single failed execution in a batch.
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchExecutionResponseItemErr {
  // The name of the resource the failed execution targeted.
  pub name: String,
  // The serialized error.
  pub error: _Serror,
}

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Runs the target Procedure. Response: [Update]
#[typeshare]
@@ -26,3 +26,31 @@ pub struct RunProcedure {
/// Id or name
pub procedure: String,
}
/// Runs multiple Procedures in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Debug,
  Clone,
  PartialEq,
  Serialize,
  Deserialize,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchRunProcedure {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* procedures
  /// foo-*
  /// # add some more
  /// extra-procedure-1, extra-procedure-2
  /// ```
  pub pattern: String,
}

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
//
@@ -39,6 +39,36 @@ pub struct CloneRepo {
//
/// Clones multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Debug,
  Clone,
  PartialEq,
  Serialize,
  Deserialize,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchCloneRepo {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* repos
  /// foo-*
  /// # add some more
  /// extra-repo-1, extra-repo-2
  /// ```
  pub pattern: String,
}
//
/// Pulls the target repo. Response: [Update].
///
/// Note. Repo must have server attached at `server_id`.
@@ -65,6 +95,36 @@ pub struct PullRepo {
//
/// Pulls multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Debug,
  Clone,
  PartialEq,
  Serialize,
  Deserialize,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchPullRepo {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* repos
  /// foo-*
  /// # add some more
  /// extra-repo-1, extra-repo-2
  /// ```
  pub pattern: String,
}
//
/// Builds the target repo, using the attached builder. Response: [Update].
///
/// Note. Repo must have builder attached at `builder_id`.
@@ -95,6 +155,36 @@ pub struct BuildRepo {
//
/// Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Debug,
  Clone,
  PartialEq,
  Serialize,
  Deserialize,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchBuildRepo {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* repos
  /// foo-*
  /// # add some more
  /// extra-repo-1, extra-repo-2
  /// ```
  pub pattern: String,
}
//
/// Cancels the target repo build.
/// Only does anything if the repo build is `building` when called.
/// Response: [Update]

View File

@@ -6,7 +6,7 @@ use typeshare::typeshare;
use crate::entities::update::Update;
use super::KomodoExecuteRequest;
use super::{BatchExecutionResponse, KomodoExecuteRequest};
/// Deploys the target stack. `docker compose up`. Response: [Update]
#[typeshare]
@@ -30,6 +30,38 @@ pub struct DeployStack {
pub stop_time: Option<i32>,
}
//
/// Deploys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Serialize,
  Deserialize,
  Debug,
  Clone,
  PartialEq,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDeployStack {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* stacks
  /// foo-*
  /// # add some more
  /// extra-stack-1, extra-stack-2
  /// ```
  pub pattern: String,
}
//
/// Checks deployed contents vs latest contents,
/// and only if any changes found
/// will `docker compose up`. Response: [Update]
@@ -56,6 +88,36 @@ pub struct DeployStackIfChanged {
//
/// Deploys multiple Stacks if changed in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Serialize,
  Deserialize,
  Debug,
  Clone,
  PartialEq,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDeployStackIfChanged {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* stacks
  /// foo-*
  /// # add some more
  /// extra-stack-1, extra-stack-2
  /// ```
  pub pattern: String,
}
//
/// Starts the target stack. `docker compose start`. Response: [Update]
#[typeshare]
#[derive(
@@ -198,3 +260,33 @@ pub struct DestroyStack {
/// Override the default termination max time.
pub stop_time: Option<i32>,
}
//
/// Destroys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse].
#[typeshare]
#[derive(
  Serialize,
  Deserialize,
  Debug,
  Clone,
  PartialEq,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(BatchExecutionResponse)]
pub struct BatchDestroyStack {
  /// Id or name or wildcard pattern or regex.
  /// Supports multiline and comma delimited combinations of the above.
  ///
  /// Example:
  /// ```text
  /// # match all foo-* stacks
  /// foo-*
  /// # add some more
  /// extra-stack-1, extra-stack-2
  /// ```
  pub pattern: String,
}

View File

@@ -0,0 +1,213 @@
use serde::{de::Visitor, Deserializer};
/// Deserialize an `i64` from either a plain number or a string
/// containing an integer (e.g. `42` or `"42"`).
pub fn maybe_string_i64_deserializer<'de, D>(
  deserializer: D,
) -> Result<i64, D::Error>
where
  D: Deserializer<'de>,
{
  let visitor = MaybeStringI64Visitor;
  deserializer.deserialize_any(visitor)
}
/// Deserialize an `Option<i64>` from null, a plain number, or a
/// string containing an integer (e.g. `null`, `42`, or `"42"`).
pub fn option_maybe_string_i64_deserializer<'de, D>(
  deserializer: D,
) -> Result<Option<i64>, D::Error>
where
  D: Deserializer<'de>,
{
  let visitor = OptionMaybeStringI64Visitor;
  deserializer.deserialize_any(visitor)
}
/// Visitor producing an `i64` from any numeric input,
/// or from a string containing an integer.
struct MaybeStringI64Visitor;

impl<'de> Visitor<'de> for MaybeStringI64Visitor {
  type Value = i64;

  fn expecting(
    &self,
    formatter: &mut std::fmt::Formatter,
  ) -> std::fmt::Result {
    write!(formatter, "number or string number")
  }

  // Strings must parse as an integer, eg "42".
  fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    v.parse::<i64>().map_err(E::custom)
  }

  // Floats are truncated toward zero. Rust float -> int `as` casts
  // saturate at the i64 bounds, so these cannot wrap or panic.
  fn visit_f32<E>(self, v: f32) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  // Signed integers always fit losslessly in i64.
  fn visit_i8<E>(self, v: i8) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  fn visit_i16<E>(self, v: i16) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  fn visit_i32<E>(self, v: i32) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v)
  }

  // Unsigned integers up to u32 always fit losslessly in i64.
  fn visit_u8<E>(self, v: u8) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  fn visit_u16<E>(self, v: u16) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    Ok(v as i64)
  }

  fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
  where
    E: serde::de::Error,
  {
    // `v as i64` would silently wrap to negative for values above
    // i64::MAX. Surface a deserialization error instead.
    i64::try_from(v).map_err(E::custom)
  }
}
struct OptionMaybeStringI64Visitor;
impl<'de> Visitor<'de> for OptionMaybeStringI64Visitor {
type Value = Option<i64>;
fn expecting(
&self,
formatter: &mut std::fmt::Formatter,
) -> std::fmt::Result {
write!(formatter, "null or number or string number")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
MaybeStringI64Visitor.visit_str(v).map(Some)
}
fn visit_f32<E>(self, v: f32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i8<E>(self, v: i8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i16<E>(self, v: i16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i32<E>(self, v: i32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v))
}
fn visit_u8<E>(self, v: u8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_u16<E>(self, v: u16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Some(v as i64))
}
fn visit_none<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(None)
}
fn visit_unit<E>(self) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(None)
}
}

View File

@@ -6,6 +6,7 @@ mod file_contents;
mod labels;
mod string_list;
mod term_signal_labels;
mod maybe_string_i64;
pub use conversion::*;
pub use environment::*;
@@ -13,3 +14,4 @@ pub use file_contents::*;
pub use labels::*;
pub use string_list::*;
pub use term_signal_labels::*;
pub use maybe_string_i64::*;

View File

@@ -48,10 +48,13 @@ pub struct BuilderListItemInfo {
#[serde(tag = "type", content = "params")]
#[allow(clippy::large_enum_variant)]
pub enum BuilderConfig {
/// Use a connected server an image builder.
/// Use a Periphery address as a Builder.
Url(UrlBuilderConfig),
/// Use a connected server as a Builder.
Server(ServerBuilderConfig),
/// Use EC2 instances spawned on demand as an image builder.
/// Use EC2 instances spawned on demand as a Builder.
Aws(AwsBuilderConfig),
}
@@ -76,19 +79,21 @@ impl Default for BuilderConfig {
#[serde(tag = "type", content = "params")]
#[allow(clippy::large_enum_variant)]
pub enum PartialBuilderConfig {
Url(#[serde(default)] _PartialUrlBuilderConfig),
Server(#[serde(default)] _PartialServerBuilderConfig),
Aws(#[serde(default)] _PartialAwsBuilderConfig),
}
impl Default for PartialBuilderConfig {
fn default() -> Self {
Self::Aws(Default::default())
Self::Url(Default::default())
}
}
impl MaybeNone for PartialBuilderConfig {
fn is_none(&self) -> bool {
match self {
PartialBuilderConfig::Url(config) => config.is_none(),
PartialBuilderConfig::Server(config) => config.is_none(),
PartialBuilderConfig::Aws(config) => config.is_none(),
}
@@ -98,6 +103,7 @@ impl MaybeNone for PartialBuilderConfig {
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum BuilderConfigDiff {
Url(UrlBuilderConfigDiff),
Server(ServerBuilderConfigDiff),
Aws(AwsBuilderConfigDiff),
}
@@ -105,6 +111,9 @@ pub enum BuilderConfigDiff {
impl From<BuilderConfigDiff> for PartialBuilderConfig {
fn from(value: BuilderConfigDiff) -> Self {
match value {
BuilderConfigDiff::Url(diff) => {
PartialBuilderConfig::Url(diff.into())
}
BuilderConfigDiff::Server(diff) => {
PartialBuilderConfig::Server(diff.into())
}
@@ -120,6 +129,9 @@ impl Diff for BuilderConfigDiff {
&self,
) -> impl Iterator<Item = partial_derive2::FieldDiff> {
match self {
BuilderConfigDiff::Url(diff) => {
diff.iter_field_diffs().collect::<Vec<_>>().into_iter()
}
BuilderConfigDiff::Server(diff) => {
diff.iter_field_diffs().collect::<Vec<_>>().into_iter()
}
@@ -138,10 +150,27 @@ impl PartialDiff<PartialBuilderConfig, BuilderConfigDiff>
partial: PartialBuilderConfig,
) -> BuilderConfigDiff {
match self {
BuilderConfig::Url(original) => match partial {
PartialBuilderConfig::Url(partial) => {
BuilderConfigDiff::Url(original.partial_diff(partial))
}
PartialBuilderConfig::Server(partial) => {
let default = ServerBuilderConfig::default();
BuilderConfigDiff::Server(default.partial_diff(partial))
}
PartialBuilderConfig::Aws(partial) => {
let default = AwsBuilderConfig::default();
BuilderConfigDiff::Aws(default.partial_diff(partial))
}
},
BuilderConfig::Server(original) => match partial {
PartialBuilderConfig::Server(partial) => {
BuilderConfigDiff::Server(original.partial_diff(partial))
}
PartialBuilderConfig::Url(partial) => {
let default = UrlBuilderConfig::default();
BuilderConfigDiff::Url(default.partial_diff(partial))
}
PartialBuilderConfig::Aws(partial) => {
let default = AwsBuilderConfig::default();
BuilderConfigDiff::Aws(default.partial_diff(partial))
@@ -151,6 +180,10 @@ impl PartialDiff<PartialBuilderConfig, BuilderConfigDiff>
PartialBuilderConfig::Aws(partial) => {
BuilderConfigDiff::Aws(original.partial_diff(partial))
}
PartialBuilderConfig::Url(partial) => {
let default = UrlBuilderConfig::default();
BuilderConfigDiff::Url(default.partial_diff(partial))
}
PartialBuilderConfig::Server(partial) => {
let default = ServerBuilderConfig::default();
BuilderConfigDiff::Server(default.partial_diff(partial))
@@ -163,6 +196,7 @@ impl PartialDiff<PartialBuilderConfig, BuilderConfigDiff>
impl MaybeNone for BuilderConfigDiff {
fn is_none(&self) -> bool {
match self {
BuilderConfigDiff::Url(config) => config.is_none(),
BuilderConfigDiff::Server(config) => config.is_none(),
BuilderConfigDiff::Aws(config) => config.is_none(),
}
@@ -172,6 +206,9 @@ impl MaybeNone for BuilderConfigDiff {
impl From<PartialBuilderConfig> for BuilderConfig {
fn from(value: PartialBuilderConfig) -> BuilderConfig {
match value {
PartialBuilderConfig::Url(server) => {
BuilderConfig::Url(server.into())
}
PartialBuilderConfig::Server(server) => {
BuilderConfig::Server(server.into())
}
@@ -185,6 +222,9 @@ impl From<PartialBuilderConfig> for BuilderConfig {
impl From<BuilderConfig> for PartialBuilderConfig {
fn from(value: BuilderConfig) -> Self {
match value {
BuilderConfig::Url(config) => {
PartialBuilderConfig::Url(config.into())
}
BuilderConfig::Server(config) => {
PartialBuilderConfig::Server(config.into())
}
@@ -202,6 +242,16 @@ impl MergePartial for BuilderConfig {
partial: PartialBuilderConfig,
) -> BuilderConfig {
match partial {
PartialBuilderConfig::Url(partial) => match self {
BuilderConfig::Url(config) => {
let config = UrlBuilderConfig {
address: partial.address.unwrap_or(config.address),
passkey: partial.passkey.unwrap_or(config.passkey),
};
BuilderConfig::Url(config)
}
_ => BuilderConfig::Url(partial.into()),
},
PartialBuilderConfig::Server(partial) => match self {
BuilderConfig::Server(config) => {
let config = ServerBuilderConfig {
@@ -252,6 +302,42 @@ impl MergePartial for BuilderConfig {
}
}
#[typeshare(serialized_as = "Partial<UrlBuilderConfig>")]
pub type _PartialUrlBuilderConfig = PartialUrlBuilderConfig;
/// Configuration for a Komodo Url Builder.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Builder, Partial)]
#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)]
#[partial(skip_serializing_none, from, diff)]
pub struct UrlBuilderConfig {
/// The address of the Periphery agent
#[serde(default = "default_address")]
pub address: String,
/// A custom passkey to use. Otherwise, use the default passkey.
#[serde(default)]
pub passkey: String,
}
fn default_address() -> String {
String::from("https://periphery:8120")
}
impl Default for UrlBuilderConfig {
fn default() -> Self {
Self {
address: default_address(),
passkey: Default::default(),
}
}
}
impl UrlBuilderConfig {
pub fn builder() -> UrlBuilderConfigBuilder {
UrlBuilderConfigBuilder::default()
}
}
#[typeshare(serialized_as = "Partial<ServerBuilderConfig>")]
pub type _PartialServerBuilderConfig = PartialServerBuilderConfig;
@@ -264,11 +350,17 @@ pub type _PartialServerBuilderConfig = PartialServerBuilderConfig;
#[partial(skip_serializing_none, from, diff)]
pub struct ServerBuilderConfig {
/// The server id of the builder
#[serde(alias = "server")]
#[serde(default, alias = "server")]
#[partial_attr(serde(alias = "server"))]
pub server_id: String,
}
impl ServerBuilderConfig {
pub fn builder() -> ServerBuilderConfigBuilder {
ServerBuilderConfigBuilder::default()
}
}
#[typeshare(serialized_as = "Partial<AwsBuilderConfig>")]
pub type _PartialAwsBuilderConfig = PartialAwsBuilderConfig;

View File

@@ -168,7 +168,7 @@ pub fn get_image_name(
pub fn to_komodo_name(name: &str) -> String {
name
.to_lowercase()
.replace([' ', '.'], "_")
.replace([' ', '.', ',', '\n'], "_")
.trim()
.to_string()
}

View File

@@ -27,6 +27,8 @@ pub struct ServerListItemInfo {
pub state: ServerState,
/// Region of the server.
pub region: String,
/// Address of the server.
pub address: String,
/// Whether server is configured to send unreachable alerts.
pub send_unreachable_alerts: bool,
/// Whether server is configured to send cpu alerts.

View File

@@ -127,7 +127,7 @@ apt upgrade -y
curl -fsSL https://get.docker.com | sh
systemctl enable docker.service
systemctl enable containerd.service
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | HOME=/root python3
systemctl enable periphery.service")
}

View File

@@ -121,7 +121,7 @@ runcmd:
- curl -fsSL https://get.docker.com | sh
- systemctl enable docker.service
- systemctl enable containerd.service
- curl -sSL 'https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py' | python3
- curl -sSL 'https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py' | HOME=/root python3
- systemctl enable periphery.service")
}

View File

@@ -11,6 +11,7 @@ use typeshare::typeshare;
use crate::deserializers::{
env_vars_deserializer, file_contents_deserializer,
option_env_vars_deserializer, option_file_contents_deserializer,
option_maybe_string_i64_deserializer,
option_string_list_deserializer, string_list_deserializer,
};
@@ -563,8 +564,8 @@ impl super::resource::AddFilters for StackQuerySpecifics {
}
}
/// Keeping this minimal for now as its only needed to parse the service names / container names
#[typeshare]
/// Keeping this minimal for now as its only needed to parse the service names / container names,
/// and replica count. Not a typeshared type.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposeFile {
/// If not provided, will default to the parent folder holding the compose file.
@@ -573,9 +574,18 @@ pub struct ComposeFile {
pub services: HashMap<String, ComposeService>,
}
#[typeshare]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposeService {
pub image: Option<String>,
pub container_name: Option<String>,
pub deploy: Option<ComposeServiceDeploy>,
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ComposeServiceDeploy {
#[serde(
default,
deserialize_with = "option_maybe_string_i64_deserializer"
)]
pub replicas: Option<i64>,
}

View File

@@ -1,87 +1,89 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use super::{
action::PartialActionConfig, alerter::PartialAlerterConfig,
build::PartialBuildConfig, builder::PartialBuilderConfig,
deployment::PartialDeploymentConfig, permission::PermissionLevel,
procedure::PartialProcedureConfig, repo::PartialRepoConfig,
server::PartialServerConfig,
action::_PartialActionConfig, alerter::_PartialAlerterConfig,
build::_PartialBuildConfig, builder::_PartialBuilderConfig,
deployment::_PartialDeploymentConfig, permission::PermissionLevel,
procedure::_PartialProcedureConfig, repo::_PartialRepoConfig,
server::_PartialServerConfig,
server_template::PartialServerTemplateConfig,
stack::PartialStackConfig, sync::PartialResourceSyncConfig,
stack::_PartialStackConfig, sync::_PartialResourceSyncConfig,
variable::Variable, ResourceTarget, ResourceTargetVariant,
};
/// Specifies resources to sync on Komodo
#[typeshare]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ResourcesToml {
#[serde(
default,
rename = "server",
alias = "server",
skip_serializing_if = "Vec::is_empty"
)]
pub servers: Vec<ResourceToml<PartialServerConfig>>,
pub servers: Vec<ResourceToml<_PartialServerConfig>>,
#[serde(
default,
rename = "deployment",
alias = "deployment",
skip_serializing_if = "Vec::is_empty"
)]
pub deployments: Vec<ResourceToml<PartialDeploymentConfig>>,
pub deployments: Vec<ResourceToml<_PartialDeploymentConfig>>,
#[serde(
default,
rename = "stack",
alias = "stack",
skip_serializing_if = "Vec::is_empty"
)]
pub stacks: Vec<ResourceToml<PartialStackConfig>>,
pub stacks: Vec<ResourceToml<_PartialStackConfig>>,
#[serde(
default,
rename = "build",
alias = "build",
skip_serializing_if = "Vec::is_empty"
)]
pub builds: Vec<ResourceToml<PartialBuildConfig>>,
pub builds: Vec<ResourceToml<_PartialBuildConfig>>,
#[serde(
default,
rename = "repo",
alias = "repo",
skip_serializing_if = "Vec::is_empty"
)]
pub repos: Vec<ResourceToml<PartialRepoConfig>>,
pub repos: Vec<ResourceToml<_PartialRepoConfig>>,
#[serde(
default,
rename = "procedure",
alias = "procedure",
skip_serializing_if = "Vec::is_empty"
)]
pub procedures: Vec<ResourceToml<PartialProcedureConfig>>,
pub procedures: Vec<ResourceToml<_PartialProcedureConfig>>,
#[serde(
default,
rename = "action",
alias = "action",
skip_serializing_if = "Vec::is_empty"
)]
pub actions: Vec<ResourceToml<PartialActionConfig>>,
pub actions: Vec<ResourceToml<_PartialActionConfig>>,
#[serde(
default,
rename = "alerter",
alias = "alerter",
skip_serializing_if = "Vec::is_empty"
)]
pub alerters: Vec<ResourceToml<PartialAlerterConfig>>,
pub alerters: Vec<ResourceToml<_PartialAlerterConfig>>,
#[serde(
default,
rename = "builder",
alias = "builder",
skip_serializing_if = "Vec::is_empty"
)]
pub builders: Vec<ResourceToml<PartialBuilderConfig>>,
pub builders: Vec<ResourceToml<_PartialBuilderConfig>>,
#[serde(
default,
rename = "server_template",
alias = "server_template",
skip_serializing_if = "Vec::is_empty"
)]
pub server_templates:
@@ -89,26 +91,27 @@ pub struct ResourcesToml {
#[serde(
default,
rename = "resource_sync",
alias = "resource_sync",
skip_serializing_if = "Vec::is_empty"
)]
pub resource_syncs: Vec<ResourceToml<PartialResourceSyncConfig>>,
pub resource_syncs: Vec<ResourceToml<_PartialResourceSyncConfig>>,
#[serde(
default,
rename = "user_group",
alias = "user_group",
skip_serializing_if = "Vec::is_empty"
)]
pub user_groups: Vec<UserGroupToml>,
#[serde(
default,
rename = "variable",
alias = "variable",
skip_serializing_if = "Vec::is_empty"
)]
pub variables: Vec<Variable>,
}
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResourceToml<PartialConfig: Default> {
/// The resource name. Required
@@ -146,6 +149,7 @@ fn is_false(b: &bool) -> bool {
!b
}
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UserGroupToml {
/// User group name
@@ -164,6 +168,7 @@ pub struct UserGroupToml {
pub permissions: Vec<PermissionToml>,
}
#[typeshare]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct PermissionToml {
/// Id can be:

View File

@@ -5,14 +5,14 @@
//! It contains:
//! - Definitions for the application [api] and [entities].
//! - A [client][KomodoClient] to interact with the Komodo Core API.
//! - Information on configuring Komodo [core][entities::config::core] and [periphery][entities::config::periphery].
//! - Information on configuring Komodo [Core][entities::config::core] and [Periphery][entities::config::periphery].
//!
//! ## Client Configuration
//!
//! The client includes a convenenience method to parse the Komodo API url and credentials from the environment:
//! - KOMODO_ADDRESS
//! - KOMODO_API_KEY
//! - KOMODO_API_SECRET
//! - `KOMODO_ADDRESS`
//! - `KOMODO_API_KEY`
//! - `KOMODO_API_SECRET`
//!
//! ## Client Example
//! ```
@@ -28,6 +28,8 @@
//! let update = client.execute(RunBuild { build: "test-build".to_string() }).await?:
//! ```
use std::sync::OnceLock;
use anyhow::Context;
use api::read::GetVersion;
use serde::Deserialize;
@@ -41,51 +43,131 @@ pub mod ws;
mod request;
/// &'static KomodoClient initialized from environment.
pub fn komodo_client() -> &'static KomodoClient {
static KOMODO_CLIENT: OnceLock<KomodoClient> = OnceLock::new();
KOMODO_CLIENT.get_or_init(|| {
KomodoClient::new_from_env()
.context("Missing KOMODO_ADDRESS, KOMODO_API_KEY, KOMODO_API_SECRET from env")
.unwrap()
})
}
/// Default environment variables for the [KomodoClient].
#[derive(Deserialize)]
struct KomodoEnv {
/// KOMODO_ADDRESS
komodo_address: String,
/// KOMODO_API_KEY
komodo_api_key: String,
/// KOMODO_API_SECRET
komodo_api_secret: String,
}
/// Client to interface with [Komodo](https://komo.do/docs/api#rust-client)
#[derive(Clone)]
pub struct KomodoClient {
#[cfg(not(feature = "blocking"))]
reqwest: reqwest::Client,
#[cfg(feature = "blocking")]
reqwest: reqwest::blocking::Client,
address: String,
key: String,
secret: String,
}
impl KomodoClient {
#[tracing::instrument(skip_all)]
pub async fn new(
/// Initializes KomodoClient, including a health check.
pub fn new(
address: impl Into<String>,
key: impl Into<String>,
secret: impl Into<String>,
) -> anyhow::Result<KomodoClient> {
let client = KomodoClient {
) -> KomodoClient {
KomodoClient {
reqwest: Default::default(),
address: address.into(),
key: key.into(),
secret: secret.into(),
};
client.read(GetVersion {}).await?;
Ok(client)
}
}
#[tracing::instrument]
pub async fn new_from_env() -> anyhow::Result<KomodoClient> {
/// Initializes KomodoClient from environment: [KomodoEnv]
pub fn new_from_env() -> anyhow::Result<KomodoClient> {
let KomodoEnv {
komodo_address,
komodo_api_key,
komodo_api_secret,
} = envy::from_env()
.context("failed to parse environment for komodo client")?;
KomodoClient::new(
Ok(KomodoClient::new(
komodo_address,
komodo_api_key,
komodo_api_secret,
)
.await
))
}
/// Add a healthcheck in the initialization pipeline:
///
/// ```rust
/// let komodo = KomodoClient::new_from_env()?
/// .with_healthcheck().await?;
/// ```
#[cfg(not(feature = "blocking"))]
pub async fn with_healthcheck(self) -> anyhow::Result<Self> {
self.health_check().await?;
Ok(self)
}
/// Add a healthcheck in the initialization pipeline:
///
/// ```rust
/// let komodo = KomodoClient::new_from_env()?
/// .with_healthcheck().await?;
/// ```
#[cfg(feature = "blocking")]
pub fn with_healthcheck(self) -> anyhow::Result<Self> {
self.health_check()?;
Ok(self)
}
/// Get the Core version.
#[cfg(not(feature = "blocking"))]
pub async fn core_version(&self) -> anyhow::Result<String> {
self.read(GetVersion {}).await.map(|r| r.version)
}
/// Get the Core version.
#[cfg(feature = "blocking")]
pub fn core_version(&self) -> anyhow::Result<String> {
self.read(GetVersion {}).map(|r| r.version)
}
/// Send a health check.
#[cfg(not(feature = "blocking"))]
pub async fn health_check(&self) -> anyhow::Result<()> {
self.read(GetVersion {}).await.map(|_| ())
}
/// Send a health check.
#[cfg(feature = "blocking")]
pub fn health_check(&self) -> anyhow::Result<()> {
self.read(GetVersion {}).map(|_| ())
}
/// Use a custom reqwest client.
#[cfg(not(feature = "blocking"))]
pub fn set_reqwest(mut self, reqwest: reqwest::Client) -> Self {
self.reqwest = reqwest;
self
}
/// Use a custom reqwest client.
#[cfg(feature = "blocking")]
pub fn set_reqwest(
mut self,
reqwest: reqwest::blocking::Client,
) -> Self {
self.reqwest = reqwest;
self
}
}

View File

@@ -14,7 +14,7 @@ use crate::{
};
impl KomodoClient {
#[tracing::instrument(skip(self))]
#[cfg(not(feature = "blocking"))]
pub async fn auth<T: KomodoAuthRequest>(
&self,
request: T,
@@ -30,7 +30,21 @@ impl KomodoClient {
.await
}
#[tracing::instrument(skip(self))]
#[cfg(feature = "blocking")]
pub fn auth<T: KomodoAuthRequest>(
&self,
request: T,
) -> anyhow::Result<T::Response> {
self.post(
"/auth",
json!({
"type": T::req_type(),
"params": request
}),
)
}
#[cfg(not(feature = "blocking"))]
pub async fn user<T: KomodoUserRequest>(
&self,
request: T,
@@ -46,7 +60,21 @@ impl KomodoClient {
.await
}
#[tracing::instrument(skip(self))]
#[cfg(feature = "blocking")]
pub fn user<T: KomodoUserRequest>(
&self,
request: T,
) -> anyhow::Result<T::Response> {
self.post(
"/auth",
json!({
"type": T::req_type(),
"params": request
}),
)
}
#[cfg(not(feature = "blocking"))]
pub async fn read<T: KomodoReadRequest>(
&self,
request: T,
@@ -62,7 +90,21 @@ impl KomodoClient {
.await
}
#[tracing::instrument(skip(self))]
#[cfg(feature = "blocking")]
pub fn read<T: KomodoReadRequest>(
&self,
request: T,
) -> anyhow::Result<T::Response> {
self.post(
"/read",
json!({
"type": T::req_type(),
"params": request
}),
)
}
#[cfg(not(feature = "blocking"))]
pub async fn write<T: KomodoWriteRequest>(
&self,
request: T,
@@ -78,7 +120,21 @@ impl KomodoClient {
.await
}
#[tracing::instrument(skip(self))]
#[cfg(feature = "blocking")]
pub fn write<T: KomodoWriteRequest>(
&self,
request: T,
) -> anyhow::Result<T::Response> {
self.post(
"/write",
json!({
"type": T::req_type(),
"params": request
}),
)
}
#[cfg(not(feature = "blocking"))]
pub async fn execute<T: KomodoExecuteRequest>(
&self,
request: T,
@@ -94,7 +150,21 @@ impl KomodoClient {
.await
}
#[tracing::instrument(skip(self))]
#[cfg(feature = "blocking")]
pub fn execute<T: KomodoExecuteRequest>(
&self,
request: T,
) -> anyhow::Result<T::Response> {
self.post(
"/execute",
json!({
"type": T::req_type(),
"params": request
}),
)
}
#[cfg(not(feature = "blocking"))]
async fn post<
B: Serialize + std::fmt::Debug,
R: DeserializeOwned,
@@ -108,29 +178,48 @@ impl KomodoClient {
.post(format!("{}{endpoint}", self.address))
.header("x-api-key", &self.key)
.header("x-api-secret", &self.secret)
.header("Content-Type", "application/json")
.header("content-type", "application/json")
.json(&body);
let res =
req.send().await.context("failed to reach Komodo API")?;
tracing::debug!("got response");
let status = res.status();
if status == StatusCode::OK {
tracing::debug!("response is OK");
match res.json().await {
Ok(res) => Ok(res),
Err(e) => Err(anyhow!("{status} | {e:#?}")),
Err(e) => Err(anyhow!("{e:#?}").context(status)),
}
} else {
tracing::debug!("response is non-OK");
match res.text().await {
Ok(res) => Err(
deserialize_error(res)
.context(format!("request failed with status {status}")),
),
Err(e) => Err(
anyhow!("{e:?}")
.context(format!("request failed with status {status}")),
),
Ok(res) => Err(deserialize_error(res).context(status)),
Err(e) => Err(anyhow!("{e:?}").context(status)),
}
}
}
#[cfg(feature = "blocking")]
fn post<B: Serialize + std::fmt::Debug, R: DeserializeOwned>(
&self,
endpoint: &str,
body: B,
) -> anyhow::Result<R> {
let req = self
.reqwest
.post(format!("{}{endpoint}", self.address))
.header("x-api-key", &self.key)
.header("x-api-secret", &self.secret)
.header("content-type", "application/json")
.json(&body);
let res = req.send().context("failed to reach Komodo API")?;
let status = res.status();
if status == StatusCode::OK {
match res.json() {
Ok(res) => Ok(res),
Err(e) => Err(anyhow!("{e:#?}").context(status)),
}
} else {
match res.text() {
Ok(res) => Err(deserialize_error(res).context(status)),
Err(e) => Err(anyhow!("{e:?}").context(status)),
}
}
}

View File

@@ -8,7 +8,7 @@ use thiserror::Error;
use tokio::sync::broadcast;
use tokio_tungstenite::{connect_async, tungstenite::Message};
use tokio_util::sync::CancellationToken;
use tracing::{info, info_span, warn, Instrument};
use tracing::{debug, info, info_span, warn, Instrument};
use typeshare::typeshare;
use uuid::Uuid;
@@ -92,7 +92,7 @@ impl KomodoClient {
);
async {
info!("Entering inner (connection) loop | outer uuid {outer_uuid} | master uuid {master_uuid}");
debug!("Entering inner (connection) loop | outer uuid {outer_uuid} | master uuid {master_uuid}");
let mut retry = 0;
loop {
// INNER LOOP (SHORT RECONNECT)
@@ -112,7 +112,7 @@ impl KomodoClient {
);
async {
info!("Connecting to websocket | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}");
debug!("Connecting to websocket | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}");
let mut ws =
match connect_async(&address).await.with_context(|| {
@@ -131,7 +131,7 @@ impl KomodoClient {
}
};
info!("Connected to websocket | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}");
debug!("Connected to websocket | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}");
// ==================
// SEND LOGIN MSG
@@ -200,7 +200,7 @@ impl KomodoClient {
let _ = tx.send(UpdateWsMessage::Reconnected);
info!("logged into websocket | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}");
info!("Logged into websocket | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}");
// If we get to this point (connected / logged in) reset the short retry counter
retry = 0;
@@ -217,13 +217,13 @@ impl KomodoClient {
Ok(Some(Message::Text(msg))) => {
match serde_json::from_str::<UpdateListItem>(&msg) {
Ok(msg) => {
tracing::debug!(
debug!(
"got recognized message: {msg:?} | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}"
);
let _ = tx.send(UpdateWsMessage::Update(msg));
}
Err(_) => {
tracing::warn!(
warn!(
"got unrecognized message: {msg:?} | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}"
);
let _ = tx.send(UpdateWsMessage::Error(
@@ -235,7 +235,7 @@ impl KomodoClient {
Ok(Some(Message::Close(_))) => {
let _ = tx.send(UpdateWsMessage::Disconnected);
let _ = ws.close(None).await;
tracing::warn!(
warn!(
"breaking inner loop | got close message | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}"
);
break;
@@ -246,7 +246,7 @@ impl KomodoClient {
));
let _ = tx.send(UpdateWsMessage::Disconnected);
let _ = ws.close(None).await;
tracing::warn!(
warn!(
"breaking inner loop | got error message | {e:#} | inner uuid {inner_uuid} | outer uuid {outer_uuid} | master uuid {master_uuid}"
);
break;

View File

@@ -23,15 +23,11 @@ const komodo = KomodoClient("https://demo.komo.do", {
},
});
const stacks: Types.StackListItem[] = await komodo.read({
type: "ListStacks",
params: {},
});
// Inferred as Types.StackListItem[]
const stacks = await komodo.read("ListStacks", {});
const stack: Types.Stack = await komodo.read({
type: "GetStack",
params: {
stack: stacks[0].name,
}
// Inferred as Types.Stack
const stack = await komodo.read("GetStack", {
stack: stacks[0].name,
});
```

View File

@@ -1,6 +1,6 @@
{
"name": "komodo_client",
"version": "1.16.3",
"version": "1.16.6",
"description": "Komodo client package",
"homepage": "https://komo.do",
"main": "dist/lib.js",

View File

@@ -349,28 +349,36 @@ export type ExecuteResponses = {
// ==== DEPLOYMENT ====
Deploy: Types.Update;
BatchDeploy: Types.BatchExecutionResponse;
StartDeployment: Types.Update;
RestartDeployment: Types.Update;
PauseDeployment: Types.Update;
UnpauseDeployment: Types.Update;
StopDeployment: Types.Update;
DestroyDeployment: Types.Update;
BatchDestroyDeployment: Types.BatchExecutionResponse;
// ==== BUILD ====
RunBuild: Types.Update;
BatchRunBuild: Types.BatchExecutionResponse;
CancelBuild: Types.Update;
// ==== REPO ====
CloneRepo: Types.Update;
BatchCloneRepo: Types.BatchExecutionResponse;
PullRepo: Types.Update;
BatchPullRepo: Types.BatchExecutionResponse;
BuildRepo: Types.Update;
BatchBuildRepo: Types.BatchExecutionResponse;
CancelRepoBuild: Types.Update;
// ==== PROCEDURE ====
RunProcedure: Types.Update;
BatchRunProcedure: Types.BatchExecutionResponse;
// ==== ACTION ====
RunAction: Types.Update;
BatchRunAction: Types.BatchExecutionResponse;
// ==== SERVER TEMPLATE ====
LaunchServer: Types.Update;
@@ -380,13 +388,16 @@ export type ExecuteResponses = {
// ==== STACK ====
DeployStack: Types.Update;
BatchDeployStack: Types.BatchExecutionResponse;
DeployStackIfChanged: Types.Update;
BatchDeployStackIfChanged: Types.BatchExecutionResponse;
StartStack: Types.Update;
RestartStack: Types.Update;
StopStack: Types.Update;
PauseStack: Types.Update;
UnpauseStack: Types.Update;
DestroyStack: Types.Update;
BatchDestroyStack: Types.BatchExecutionResponse;
// ==== STACK Service ====
DeployStackService: Types.Update;

View File

@@ -198,6 +198,12 @@ export interface AlerterQuerySpecifics {
export type AlerterQuery = ResourceQuery<AlerterQuerySpecifics>;
export type BatchExecutionResponseItem =
| { status: "Ok", data: Update }
| { status: "Err", data: BatchExecutionResponseItemErr };
export type BatchExecutionResponse = BatchExecutionResponseItem[];
export interface Version {
major: number;
minor: number;
@@ -389,9 +395,11 @@ export interface BuildQuerySpecifics {
export type BuildQuery = ResourceQuery<BuildQuerySpecifics>;
export type BuilderConfig =
/** Use a connected server an image builder. */
/** Use a Periphery address as a Builder. */
| { type: "Url", params: UrlBuilderConfig }
/** Use a connected server as a Builder. */
| { type: "Server", params: ServerBuilderConfig }
/** Use EC2 instances spawned on demand as an image builder. */
/** Use EC2 instances spawned on demand as a Builder. */
| { type: "Aws", params: AwsBuilderConfig };
export type Builder = Resource<BuilderConfig, undefined>;
@@ -418,19 +426,27 @@ export type Execution =
/** The "null" execution. Does nothing. */
| { type: "None", params: NoData }
| { type: "RunAction", params: RunAction }
| { type: "BatchRunAction", params: BatchRunAction }
| { type: "RunProcedure", params: RunProcedure }
| { type: "BatchRunProcedure", params: BatchRunProcedure }
| { type: "RunBuild", params: RunBuild }
| { type: "BatchRunBuild", params: BatchRunBuild }
| { type: "CancelBuild", params: CancelBuild }
| { type: "Deploy", params: Deploy }
| { type: "BatchDeploy", params: BatchDeploy }
| { type: "StartDeployment", params: StartDeployment }
| { type: "RestartDeployment", params: RestartDeployment }
| { type: "PauseDeployment", params: PauseDeployment }
| { type: "UnpauseDeployment", params: UnpauseDeployment }
| { type: "StopDeployment", params: StopDeployment }
| { type: "DestroyDeployment", params: DestroyDeployment }
| { type: "BatchDestroyDeployment", params: BatchDestroyDeployment }
| { type: "CloneRepo", params: CloneRepo }
| { type: "BatchCloneRepo", params: BatchCloneRepo }
| { type: "PullRepo", params: PullRepo }
| { type: "BatchPullRepo", params: BatchPullRepo }
| { type: "BuildRepo", params: BuildRepo }
| { type: "BatchBuildRepo", params: BatchBuildRepo }
| { type: "CancelRepoBuild", params: CancelRepoBuild }
| { type: "StartContainer", params: StartContainer }
| { type: "RestartContainer", params: RestartContainer }
@@ -456,13 +472,16 @@ export type Execution =
| { type: "RunSync", params: RunSync }
| { type: "CommitSync", params: CommitSync }
| { type: "DeployStack", params: DeployStack }
| { type: "BatchDeployStack", params: BatchDeployStack }
| { type: "DeployStackIfChanged", params: DeployStackIfChanged }
| { type: "BatchDeployStackIfChanged", params: BatchDeployStackIfChanged }
| { type: "StartStack", params: StartStack }
| { type: "RestartStack", params: RestartStack }
| { type: "PauseStack", params: PauseStack }
| { type: "UnpauseStack", params: UnpauseStack }
| { type: "StopStack", params: StopStack }
| { type: "DestroyStack", params: DestroyStack }
| { type: "BatchDestroyStack", params: BatchDestroyStack }
| { type: "Sleep", params: Sleep };
/** Allows to enable / disabled procedures in the sequence / parallel vec on the fly */
@@ -3142,6 +3161,8 @@ export interface ServerListItemInfo {
state: ServerState;
/** Region of the server. */
region: string;
/** Address of the server. */
address: string;
/** Whether server is configured to send unreachable alerts. */
send_unreachable_alerts: boolean;
/** Whether server is configured to send cpu alerts. */
@@ -3387,6 +3408,8 @@ export type _PartialStackConfig = Partial<StackConfig>;
export type _PartialTag = Partial<Tag>;
export type _PartialUrlBuilderConfig = Partial<UrlBuilderConfig>;
export interface __Serror {
error: string;
trace: string[];
@@ -3512,6 +3535,198 @@ export interface AwsServerTemplateConfig {
user_data: string;
}
/** Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchBuildRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Clones multiple Repos in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchCloneRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Deploys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDeploy {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDeployStack {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks if changed in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDeployStackIfChanged {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Destroys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDestroyDeployment {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Destroys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDestroyStack {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
* d
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
export interface BatchExecutionResponseItemErr {
name: string;
error: _Serror;
}
/** Pulls multiple Repos in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchPullRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResult] */
export interface BatchRunAction {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* actions
* foo-*
* # add some more
* extra-action-1, extra-action-2
* ```
*/
pattern: string;
}
/** Runs multiple builds in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchRunBuild {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* builds
* foo-*
* # add some more
* extra-build-1, extra-build-2
* ```
*/
pattern: string;
}
/** Runs multiple Procedures in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchRunProcedure {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* procedures
* foo-*
* # add some more
* extra-procedure-1, extra-procedure-2
* ```
*/
pattern: string;
}
/**
* Builds the target repo, using the attached builder. Response: [Update].
*
@@ -3605,18 +3820,6 @@ export interface CommitSync {
sync: string;
}
/** Minimal representation of a single service within a [ComposeFile]. */
export interface ComposeService {
image?: string;
container_name?: string;
}
/** Keeping this minimal for now as it's only needed to parse the service names / container names */
export interface ComposeFile {
/** If not provided, will default to the parent folder holding the compose file. */
name?: string;
/** Map of service name to its (minimal) definition. */
services?: Record<string, ComposeService>;
}
export interface Conversion {
/** reference on the server. */
local: string;
@@ -3811,6 +4014,7 @@ export interface CreateBuildWebhook {
/** Partial representation of [BuilderConfig] */
export type PartialBuilderConfig =
| { type: "Url", params: _PartialUrlBuilderConfig }
| { type: "Server", params: _PartialServerBuilderConfig }
| { type: "Aws", params: _PartialAwsBuilderConfig };
@@ -5936,6 +6140,23 @@ export interface PauseStack {
service?: string;
}
/** A permission on a target resource, for granting within a [UserGroupToml]. */
export interface PermissionToml {
/**
* The target resource. Its `id` can be:
* - resource name. `id = "abcd-build"`
* - regex matching resource names. `id = "\^(.+)-build-([0-9]+)$\"`
*/
target: ResourceTarget;
/**
* The permission level:
* - None
* - Read
* - Execute
* - Write
*/
level: PermissionLevel;
}
export enum PortTypeEnum {
EMPTY = "",
TCP = "tcp",
@@ -6219,6 +6440,60 @@ export interface RenameUserGroup {
name: string;
}
/**
* TOML representation of a single Komodo resource,
* generic over the resource-specific partial config type.
*/
export interface ResourceToml<PartialConfig> {
/** The resource name. Required */
name: string;
/** The resource description. Optional. */
description?: string;
/** Tag ids or names. Optional */
tags?: string[];
/**
* Optional. Only relevant for deployments / stacks.
*
* Will ensure deployment / stack is running with the latest configuration.
* Deploy actions to achieve this will be included in the sync.
* Default is false.
*/
deploy?: boolean;
/**
* Optional. Only relevant for deployments / stacks using the 'deploy' sync feature.
*
* Specify other deployments / stacks by name as dependencies.
* The sync will ensure the deployment / stack will only be deployed 'after' its dependencies.
*/
after?: string[];
/** Resource specific configuration. */
config?: PartialConfig;
}
/** TOML definition of a User Group: its members and the permissions they inherit. */
export interface UserGroupToml {
/** User group name */
name: string;
/** Users in the group */
users?: string[];
/** Give the user group elevated permissions on all resources of a certain type */
all?: Record<ResourceTarget["type"], PermissionLevel>;
/** Permissions given to the group */
permissions?: PermissionToml[];
}
/**
* Specifies resources to sync on Komodo.
* Each field holds the TOML definitions for one resource type.
*/
export interface ResourcesToml {
servers?: ResourceToml<_PartialServerConfig>[];
deployments?: ResourceToml<_PartialDeploymentConfig>[];
stacks?: ResourceToml<_PartialStackConfig>[];
builds?: ResourceToml<_PartialBuildConfig>[];
repos?: ResourceToml<_PartialRepoConfig>[];
procedures?: ResourceToml<_PartialProcedureConfig>[];
actions?: ResourceToml<_PartialActionConfig>[];
alerters?: ResourceToml<_PartialAlerterConfig>[];
builders?: ResourceToml<_PartialBuilderConfig>[];
server_templates?: ResourceToml<PartialServerTemplateConfig>[];
resource_syncs?: ResourceToml<_PartialResourceSyncConfig>[];
user_groups?: UserGroupToml[];
variables?: Variable[];
}
/** Restarts all containers on the target server. Response: [Update] */
export interface RestartAllContainers {
/** Name or id */
@@ -6381,7 +6656,7 @@ export interface SearchStackServiceLog {
/** Configuration for a Komodo Server Builder. */
export interface ServerBuilderConfig {
/** The server id of the builder */
server_id: string;
server_id?: string;
}
/** The health of a part of the server. */
@@ -6906,6 +7181,14 @@ export interface UpdateVariableValue {
value: string;
}
/** Configuration for a Komodo Url Builder. */
export interface UrlBuilderConfig {
/** The address of the Periphery agent */
address: string;
/** A custom passkey to use. Otherwise, use the default passkey. */
passkey?: string;
}
/** Update file contents in Files on Server or Git Repo mode. Response: [Update]. */
export interface WriteStackFileContents {
/** The name or id of the target Stack. */
@@ -6964,6 +7247,7 @@ export type ExecuteRequest =
| { type: "PruneBuildx", params: PruneBuildx }
| { type: "PruneSystem", params: PruneSystem }
| { type: "Deploy", params: Deploy }
| { type: "BatchDeploy", params: BatchDeploy }
| { type: "StartDeployment", params: StartDeployment }
| { type: "RestartDeployment", params: RestartDeployment }
| { type: "PauseDeployment", params: PauseDeployment }
@@ -6971,21 +7255,30 @@ export type ExecuteRequest =
| { type: "StopDeployment", params: StopDeployment }
| { type: "DestroyDeployment", params: DestroyDeployment }
| { type: "DeployStack", params: DeployStack }
| { type: "BatchDeployStack", params: BatchDeployStack }
| { type: "DeployStackIfChanged", params: DeployStackIfChanged }
| { type: "BatchDeployStackIfChanged", params: BatchDeployStackIfChanged }
| { type: "StartStack", params: StartStack }
| { type: "RestartStack", params: RestartStack }
| { type: "StopStack", params: StopStack }
| { type: "PauseStack", params: PauseStack }
| { type: "UnpauseStack", params: UnpauseStack }
| { type: "DestroyStack", params: DestroyStack }
| { type: "BatchDestroyStack", params: BatchDestroyStack }
| { type: "RunBuild", params: RunBuild }
| { type: "BatchRunBuild", params: BatchRunBuild }
| { type: "CancelBuild", params: CancelBuild }
| { type: "CloneRepo", params: CloneRepo }
| { type: "BatchCloneRepo", params: BatchCloneRepo }
| { type: "PullRepo", params: PullRepo }
| { type: "BatchPullRepo", params: BatchPullRepo }
| { type: "BuildRepo", params: BuildRepo }
| { type: "BatchBuildRepo", params: BatchBuildRepo }
| { type: "CancelRepoBuild", params: CancelRepoBuild }
| { type: "RunProcedure", params: RunProcedure }
| { type: "BatchRunProcedure", params: BatchRunProcedure }
| { type: "RunAction", params: RunAction }
| { type: "BatchRunAction", params: BatchRunAction }
| { type: "LaunchServer", params: LaunchServer }
| { type: "RunSync", params: RunSync };

View File

@@ -1,6 +1,50 @@
# API
# API and Clients
Komodo Core exposes an http API to read data, write configuration, and execute actions. The API documentation is generated from the code and is [available here](https://docs.rs/komodo_client/latest/komodo_client/api/index.html).
Komodo Core exposes an RPC-like HTTP API to read data, write configuration, and execute actions.
There are typesafe clients available in
[**Rust**](/docs/api#rust-client) and [**Typescript**](/docs/api#typescript-client).
You can also install the [Komodo CLI](https://crates.io/crates/komodo_cli) to execute actions like RunBuild or DeployStack from the command line.
This can be coupled with scripts in Komodo Repos to achieve unlimited automation.
The full API documentation is [**available here**](https://docs.rs/komodo_client/latest/komodo_client/api/index.html).
## Rust Client
The Rust client is published to crates.io at [komodo_client](https://crates.io/crates/komodo_client).
```rust
let komodo = KomodoClient::new("https://demo.komo.do", "your_key", "your_secret")
.with_healthcheck()
.await?;
let stacks = komodo.read(ListStacks::default()).await?;
let update = komodo
.execute(DeployStack {
stack: stacks[0].name.clone(),
stop_time: None
})
.await?;
```
## Typescript Client
The Typescript client is published to NPM at [komodo_client](https://www.npmjs.com/package/komodo_client).
```ts
import { KomodoClient, Types } from "komodo_client";
const komodo = KomodoClient("https://demo.komo.do", {
type: "api-key",
params: {
api_key: "your_key",
secret: "your secret",
},
});
// Inferred as Types.StackListItem[]
const stacks = await komodo.read("ListStacks", {});
// Inferred as Types.Update
const update = await komodo.execute("DeployStack", {
stack: stacks[0].name,
});
```

View File

@@ -22,7 +22,7 @@ apt upgrade -y
curl -fsSL https://get.docker.com | sh
systemctl enable docker.service
systemctl enable containerd.service
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | python3
curl -sSL https://raw.githubusercontent.com/mbecker20/komodo/main/scripts/setup-periphery.py | HOME=/root python3
systemctl enable periphery.service
```

View File

@@ -1,29 +1,57 @@
# Permissioning Resources
# Permissioning
All Komodo resources (servers, builds, deployments) have independent permission tables to allow for users to have granular access to these resources. By default, users do not see any resources until they are given at least read permissions.
## Permission Levels
There are 4 levels of permissions a user can have on a resource:
1. **None**. This is the lowest permission level, and means the user will not have any access to this resource. They will not see it in the GUI, and it will not show up if the user queries the core API directly. All attempts to view or update the resource will be blocked.
2. **Read**. This is the first permission level that grants any access. It will enable the user to see the resource in the GUI, read the configuration, and see any logs. Any attempts to update configuration or trigger any action will be blocked.
3. **Execute**. This level will allow the user to execute actions on the resource, like send a build command or trigger a redeploy. The user will still be blocked from updating configuration on the resource.
4. **Write**. The user has full access to the resource, they can execute any actions, update the configuration, and delete the resource.
Komodo has a granular, layer-based permissioning system to provide non-admin users access only to intended Resources.
## User Groups
In addition to assigning permissions to users directly, admins can create User Groups and **assign permissions to them**, as if they were a user.
Users can then be **added to multiple User Groups** and they **inherit the group's permissions**.
While Komodo can assign permissions to specific users directly, it is recommended to instead **create User Groups and assign permissions to them**, as if they were a user.
Users can then be **added to multiple User Groups** and they **inherit the group's permissions**, similar to Linux permissions.
For permissioning at scale, users can define [**User Groups in Resource Syncs**](/docs/sync-resources#user-group).
## Permission Levels
There are 4 permission levels a user / group can be given on a Resource:
1. **None**. The user will not have any access to the resource. The user **will not see it in the GUI, and it will not show up if the user queries the Komodo API directly**. All attempts to view or update the resource will be blocked. This is the default for non-admins, unless using `KOMODO_TRANSPARENT_MODE=true`.
2. **Read**. This is the first permission level that grants any access. It will enable the user to **see the resource in the GUI, read the configuration, and see any logs**. Any attempts to update configuration or trigger any action **will be blocked**. Using `KOMODO_TRANSPARENT_MODE=true` will make this level the base level on all resources, for all users.
3. **Execute**. This level will allow the user to execute actions on the resource, **like send a build command** or **trigger a redeploy**. The user will still be blocked from updating configuration on the resource.
4. **Write**. The user has full access to the resource, **they can execute any actions, update the configuration, and delete the resource**.
## Global permissions
Users or User Groups can be given a base permission level on all Resources of a particular type, such as Stack.
In TOML form, this looks like:
```toml
[[user_group]]
name = "groupo"
users = ["mbecker20", "karamvirsingh98"]
all.Build = "Execute" # <- Group members can run all builds (but not update config),
all.Stack = "Read" # <- And see all Stacks / logs (not deploy / update).
```
A user / group can still be given a greater permission level on select resources:
```toml
permissions = [
{ target.type = "Stack", target.id = "my-stack", level = "Execute" },
# Use regex to match multiple resources, for example give john execute on all of their Stacks
{ target.type = "Stack", target.id = "\\^john-(.+)$\\", level = "Execute" },
]
```
## Administration
Users can be given admin priviledges by accessing the Komodo MongoDB and setting ```admin: true``` on the intended user document. These users have unrestricted access to all Komodo resources, like servers, builds, and deployments. Additionally, only these users can update other (non-admin) user's permissions on resources, an action not available to regular users even with **Update** level permissions.
Users can be given Admin privileges by a `Super Admin` (only the first user is given this status, set with `super_admin: true` on a User document in the database). Super admins will see the "Make Admin" button when on a User page `/users/${user_id}`.
Komodo admins are responsible for managing user accounts as well. When a user logs into Komodo for the first time, they will not immediately be granted access. An admin must first **enable** the user, which can be done from the 'manage users' page (found in the user dropdown menu in the topbar). Users can also be **disabled** by an admin at any time, which blocks all their access to the GUI and API.
These users have unrestricted access to all Komodo Resources. Additionally, these users can update other (non-admin) user's permissions on resources.
Komodo admins are responsible for managing user accounts as well. When a user logs into Komodo for the first time, they will not immediately be granted access (this can be changed with `KOMODO_ENABLE_NEW_USERS=true`). An admin must first **enable** the user, which can be done from the `Users` tab on the `Settings` page. Users can also be **disabled** by an admin at any time, which blocks all their access to the GUI and API.
Users also have some configurable global permissions, these are:

View File

@@ -0,0 +1,86 @@
# Procedures and Actions
For orchestrations involving multiple Resources, Komodo offers the `Procedure` and `Action` resource types.
## Procedures
`Procedures` are compositions of many executions, such as `RunBuild` and `DeployStack`.
The executions are grouped into a series of `Stages`, where each `Stage` contains one or more executions
to run **_all at once_**. The Procedure will wait until all of the executions in a `Stage` are complete before moving
on to the next stage. In short, the executions in a `Stage` are run **_in parallel_**, and the stages themselves are
executed **_sequentially_**.
### Batch Executions
Many executions have a `Batch` version you can select, for example [**BatchDeployStackIfChanged**](https://docs.rs/komodo_client/latest/komodo_client/api/execute/struct.BatchDeployStackIfChanged.html). With this, you can match multiple Stacks by name
using [**wildcard syntax**](https://docs.rs/wildcard/latest/wildcard) and [**regex**](https://docs.rs/regex/latest/regex).
### TOML Example
Like all Resources, `Procedures` have a TOML representation, and can be managed in `ResourceSyncs`.
```toml
[[procedure]]
name = "pull-deploy"
description = "Pulls stack-repo, deploys stacks"
[[procedure.config.stage]]
name = "Pull Repo"
executions = [
{ execution.type = "PullRepo", execution.params.pattern = "stack-repo" },
]
[[procedure.config.stage]]
name = "Deploy if changed"
executions = [
# Uses the Batch version, which matches many stacks by pattern
# This one matches all stacks prefixed with `foo-` (wildcard) and `bar-` (regex).
{ execution.type = "BatchDeployStackIfChanged", execution.params.pattern = "foo-* , \\^bar-.*$\\" },
]
```
## Actions
`Actions` give users the power of Typescript to write calls to the Komodo API.
For example, an `Action` script like this will align the versions and branches of many `Builds`.
```ts
const VERSION = "1.16.5";
const BRANCH = "dev/" + VERSION;
const APPS = ["core", "periphery"];
const ARCHS = ["x86", "aarch64"];
await komodo.write("UpdateVariableValue", {
name: "KOMODO_DEV_VERSION",
value: VERSION,
});
console.log("Updated KOMODO_DEV_VERSION to " + VERSION);
for (const app of APPS) {
for (const arch of ARCHS) {
const name = `komodo-${app}-${arch}-dev`;
await komodo.write("UpdateBuild", {
id: name,
config: {
version: VERSION as any,
branch: BRANCH,
},
});
console.log(
`Updated Build ${name} to version ${VERSION} and branch ${BRANCH}`,
);
}
}
for (const arch of ARCHS) {
const name = `periphery-bin-${arch}-dev`;
await komodo.write("UpdateRepo", {
id: name,
config: {
branch: BRANCH,
},
});
console.log(`Updated Repo ${name} to branch ${BRANCH}`);
}
```

View File

@@ -12,54 +12,62 @@ All resources which depend on git repos / docker registries are able to use thes
## Server
-- Configure the connection to periphery agents.<br></br>
-- Set alerting thresholds.<br></br>
-- Can be attached to **Deployments**, **Stacks**, **Repos**, and **Builders**.
- Configure the connection to periphery agents.
- Set alerting thresholds.
- Can be attached to by **Deployments**, **Stacks**, **Repos**, and **Builders**.
## Deployment
-- Deploy a docker container on the attached Server.<br></br>
-- Manage services at the container level, perform orchestration using **Procedures** and **ResourceSyncs**.
- Deploy a docker container on the attached Server.
- Manage services at the container level, perform orchestration using **Procedures** and **ResourceSyncs**.
## Stack
-- Deploy with docker compose.<br></br>
-- Provide the compose file in UI, or move the files to a git repo and use a webhook for auto redeploy on push.<br></br>
-- Supports composing multiple compose files using `docker compose -f ... -f ...`.<br></br>
-- Pass environment variables usable within the compose file. Interpolate in app-wide variables / secrets.
- Deploy with docker compose.
- Provide the compose file in UI, or move the files to a git repo and use a webhook for auto redeploy on push.
- Supports composing multiple compose files using `docker compose -f ... -f ...`.
- Pass environment variables usable within the compose file. Interpolate in app-wide variables / secrets.
## Repo
-- Put scripts in git repos, and run them on a Server, or using a Builder.<br></br>
-- Can build binaries, perform automation, really whatever you can think of.
- Put scripts in git repos, and run them on a Server, or using a Builder.
- Can build binaries, perform automation, really whatever you can think of.
## Build
-- Build application source into docker images, and push them to the configured registry.<br></br>
-- The source can be any git repo containing a Dockerfile.
- Build application source into docker images, and push them to the configured registry.
- The source can be any git repo containing a Dockerfile.
## Builder
-- Either points to a connected server, or holds configuration to launch a single-use AWS instance to build the image.<br></br>
-- Can be attached to **Builds** and **Repos**.
- Either points to a connected server, or holds configuration to launch a single-use AWS instance to build the image.
- Can be attached to **Builds** and **Repos**.
## Procedure
-- Compose many actions on other resource type, like `RunBuild` or `DeployStack`, and run it on button push (or with a webhook).<br></br>
-- Can run one or more actions in parallel "stages", and compose a series of parallel stages to run sequentially.
- Compose many actions on other resource type, like `RunBuild` or `DeployStack`, and run it on button push (or with a webhook).
- Can run one or more actions in parallel "stages", and compose a series of parallel stages to run sequentially.
## Action
- Write scripts calling the Komodo API in Typescript
- Use a pre-initialized Komodo client within the script, no api keys necessary.
- Type aware in UI editor. Get suggestions and see in depth docs as you type.
- The Typescript client is also [published on NPM](https://www.npmjs.com/package/komodo_client).
## ResourceSync
-- Orchestrate all your configuration declaratively by defining it in `toml` files, which are checked into a git repo.<br></br>
-- Can deploy **Deployments** and **Stacks** if changes are suggested.<br></br>
-- Specify deploy ordering with `after` array. (like docker compose `depends_on` but can span across servers.).
- Orchestrate all your configuration declaratively by defining it in `toml` files, which are checked into a git repo.
- Can deploy **Deployments** and **Stacks** if changes are suggested.
- Specify deploy ordering with `after` array. (like docker compose `depends_on` but can span across servers.).
## Alerter
-- Route alerts to various endpoints.<br></br>
-- Can configure rules on each Alerter, such as resource whitelist, blacklist, or alert type filter.
- Route alerts to various endpoints.
- Can configure rules on each Alerter, such as resource whitelist, blacklist, or alert type filter.
## ServerTemplate
-- Easily expand your cloud network by storing cloud server lauch templates on various providers.<br></br>
-- Auto connect the server to Komodo on launch, using `User Data` launch scripts.
- Easily expand your cloud network by storing cloud server launch templates on various providers.
- Auto connect the server to Komodo on launch, using `User Data` launch scripts.
- Currently supports **AWS EC2** and **Hetzner**

View File

@@ -23,9 +23,10 @@ automatically execute syncs upon pushes to the configured branch.
name = "server-prod"
description = "the prod server"
tags = ["prod"]
config.address = "http://localhost:8120"
config.region = "AshburnDc1"
config.enabled = true # default: false
[server.config]
address = "http://localhost:8120"
region = "AshburnDc1"
enabled = true # default: false
```
### Builder and build
@@ -38,14 +39,15 @@ config.enabled = true # default: false
name = "builder-01"
tags = []
config.type = "Aws"
config.params.region = "us-east-2"
config.params.ami_id = "ami-0e9bd154667944680"
[builder.config.params]
region = "us-east-2"
ami_id = "ami-0e9bd154667944680"
# These things come from your specific setup
config.params.subnet_id = "subnet-xxxxxxxxxxxxxxxxxx"
config.params.key_pair_name = "xxxxxxxx"
config.params.assign_public_ip = true
config.params.use_public_ip = true
config.params.security_group_ids = [
subnet_id = "subnet-xxxxxxxxxxxxxxxxxx"
key_pair_name = "xxxxxxxx"
assign_public_ip = true
use_public_ip = true
security_group_ids = [
"sg-xxxxxxxxxxxxxxxxxx",
"sg-xxxxxxxxxxxxxxxxxx"
]
@@ -56,19 +58,21 @@ config.params.security_group_ids = [
name = "test_logger"
description = "Logs randomly at INFO, WARN, ERROR levels to test logging setups"
tags = ["test"]
config.builder_id = "builder-01"
config.repo = "mbecker20/test_logger"
config.branch = "master"
config.git_account = "mbecker20"
config.image_registry.type = "Standard"
config.image_registry.params.domain = "github.com" # or your custom domain
config.image_registry.params.account = "your_username"
config.image_registry.params.organization = "your_organization" # optinoal
[build.config]
builder_id = "builder-01"
repo = "mbecker20/test_logger"
branch = "master"
git_account = "mbecker20"
image_registry.type = "Standard"
image_registry.params.domain = "github.com" # or your custom domain
image_registry.params.account = "your_username"
image_registry.params.organization = "your_organization" # optional
# Set docker labels
config.labels = """
labels = """
org.opencontainers.image.source = https://github.com/mbecker20/test_logger
org.opencontainers.image.description = Logs randomly at INFO, WARN, ERROR levels to test logging setups
org.opencontainers.image.licenses = GPL-3.0"""
org.opencontainers.image.licenses = GPL-3.0
"""
```
### Deployments
@@ -76,7 +80,8 @@ org.opencontainers.image.licenses = GPL-3.0"""
- [Deployment config schema](https://docs.rs/komodo_client/latest/komodo_client/entities/deployment/struct.DeploymentConfig.html)
```toml
[[variable]] # Declare variables
# Declare variables
[[variable]]
name = "OTLP_ENDPOINT"
value = "http://localhost:4317"
@@ -91,20 +96,26 @@ tags = ["test"]
# - has relevant config updates.
# - the attached build has new version.
deploy = true
config.server_id = "server-01"
config.image.type = "Build"
config.image.params.build = "test_logger"
[deployment.config]
server_id = "server-01"
image.type = "Build"
image.params.build = "test_logger"
# set the volumes / bind mounts
config.volumes = """
volumes = """
# Supports comments
/data/logs = /etc/logs
/data/config = /etc/config"""
# And other formats (eg yaml list)
- "/data/config:/etc/config"
"""
# Set the environment variables
config.environment = """
OTLP_ENDPOINT = [[OTLP_ENDPOINT]] # interpolate variables into the envs. (they also support comments using '#')
environment = """
# Comments supported
OTLP_ENDPOINT = [[OTLP_ENDPOINT]] # interpolate variables into the envs.
VARIABLE_1 = value_1
VARIABLE_2 = value_2"""
VARIABLE_2 = value_2
"""
# Set Docker labels
config.labels = "deployment.type = logger"
labels = "deployment.type = logger"
##
@@ -116,17 +127,19 @@ deploy = true
# Create a dependency on test-logger-01. This deployment will only be deployed after test-logger-01 is deployed.
# Additionally, any sync deploy of test-logger-01 will also trigger sync deploy of this deployment.
after = ["test-logger-01"]
config.server_id = "server-01"
config.image.type = "Build"
config.image.params.build = "test_logger"
config.volumes = """
[deployment.config]
server_id = "server-01"
image.type = "Build"
image.params.build = "test_logger"
volumes = """
/data/logs = /etc/logs
/data/config = /etc/config"""
config.environment = """
environment = """
VARIABLE_1 = value_1
VARIABLE_2 = value_2"""
VARIABLE_2 = value_2
"""
# Set Docker labels
config.labels = "deployment.type = logger"
labels = "deployment.type = logger"
```
### Stack
@@ -140,11 +153,12 @@ description = "stack test"
deploy = true
after = ["test-logger-01"] # Stacks can depend on deployments, and vice versa.
tags = ["test"]
config.server_id = "server-prod"
config.file_paths = ["mongo.yaml", "redis.yaml"]
config.git_provider = "git.mogh.tech"
config.git_account = "mbecker20" # clone private repo by specifying account
config.repo = "mbecker20/stack_test"
[stack.config]
server_id = "server-prod"
file_paths = ["mongo.yaml", "redis.yaml"]
git_provider = "git.mogh.tech"
git_account = "mbecker20" # clone private repo by specifying account
repo = "mbecker20/stack_test"
```
### Procedure
@@ -157,28 +171,28 @@ name = "test-procedure"
description = "Do some things in a specific order"
tags = ["test"]
# Each stage will be executed one after the other (in sequence)
[[procedure.config.stage]]
name = "Build stuff"
enabled = true
# The executions within a stage will be run in parallel. The stage completes when all executions finish.
executions = [
{ execution.type = "RunBuild", execution.params.build = "test_logger", enabled = true },
{ execution.type = "PullRepo", execution.params.repo = "komodo-periphery", enabled = true },
{ execution.type = "RunBuild", execution.params.build = "test_logger" },
# Uses the Batch version, which matches many builds by pattern
# This one matches all builds prefixed with `foo-` (wildcard) and `bar-` (regex).
{ execution.type = "BatchRunBuild", execution.params.pattern = "foo-* , \\^bar-.*$\\" },
{ execution.type = "PullRepo", execution.params.repo = "komodo-periphery" },
]
[[procedure.config.stage]]
name = "Deploy test logger 1"
enabled = true
executions = [
{ execution.type = "Deploy", execution.params.deployment = "test-logger-01", enabled = true }
{ execution.type = "Deploy", execution.params.deployment = "test-logger-01" },
{ execution.type = "Deploy", execution.params.deployment = "test-logger-03", enabled = false },
]
[[procedure.config.stage]]
name = "Deploy test logger 2"
enabled = true
enabled = false
executions = [
{ execution.type = "Deploy", execution.params.deployment = "test-logger-02", enabled = true }
{ execution.type = "Deploy", execution.params.deployment = "test-logger-02" }
]
```
@@ -191,15 +205,19 @@ executions = [
name = "komodo-periphery"
description = "Builds new versions of the periphery binary. Requires Rust installed on the host."
tags = ["komodo"]
config.server_id = "server-01"
config.git_provider = "git.mogh.tech" # use an alternate git provider (default is github.com)
config.git_account = "mbecker20"
config.repo = "mbecker20/komodo"
[repo.config]
server_id = "server-01"
git_provider = "git.mogh.tech" # use an alternate git provider (default is github.com)
git_account = "mbecker20"
repo = "mbecker20/komodo"
# Run an action after the repo is pulled
config.on_pull.path = "."
config.on_pull.command = """
/root/.cargo/bin/cargo build -p komodo_periphery --release && \
cp ./target/release/periphery /root/periphery"""
on_pull.path = "."
on_pull.command = """
# Supports comments
/root/.cargo/bin/cargo build -p komodo_periphery --release
# Multiple lines will be combined together using '&&'
cp ./target/release/periphery /root/periphery
"""
```
### User Group:

45
docsite/docs/variables.md Normal file
View File

@@ -0,0 +1,45 @@
# Variables and Secrets
A variable / secret in Komodo is just a key-value pair.
```
KEY_1 = "value_1"
```
You can interpolate the value into any Environment (and most other user configurable inputs, such as Repo `On Clone` and `On Pull`, or Stack `Extra Args`) using double brackets around the key to trigger interpolation:
```toml
# Before interpolation
SOME_ENV_VAR = [[KEY_1]] # <- wrap the key in double brackets '[[]]'
# After iterpolation:
SOME_ENV_VAR = value_1
```
## Defining Variables and Secrets
- **In the UI**, you can go to `Settings` page, `Variables` tab. Here, you can create some Variables to store in the Komodo database.
- There is a "secret" option you can check, this will **prevent the value from exposure in any updates / logs**, as well as prevent access to the value to any **non-admin** Komodo users.
- Variables can also be managed in ResourceSyncs (see [example](/docs/sync-resources#deployments)) but should only be done for non-secret variables, to avoid committing sensitive data. You should manage secrets using one of the following options.
- **Mount a config file to Core**: https://komo.do/docs/setup/advanced#mount-a-config-file
- In the Komodo Core config file, you can configure `secrets` using a block like:
```toml
# in core.config.toml
[secrets]
KEY_1 = "value_1"
KEY_2 = "value_2"
```
- `KEY_1` and `KEY_2` will be available for interpolation on all your resources, as if they were Variables set up in the UI.
- The keys are queryable and show up on the variable page (so you know they are available for use),
but **the values are not exposed by API for ANY user**.
- **Mount a config file to Periphery agent**:
- In the Komodo Periphery config file, you can also configure `secrets` using the same syntax as the Core config file.
- The variable **WILL NOT be available globally to all Komodo resources**, it will only be available to the resources on the associated Server resource on which that single Periphery agent is running.
- This effectively distributes your secret locations, can be good or bad depending on your security requirements. It does avoid the need to send the secret over network from Core to Periphery, Periphery based secrets are never exposed to the network.
- **Use a dedicated secret management tool** such as Hashicorp Vault, alongside Komodo
- Ultimately Komodo variable / secret features **may not fill enterprise level secret management requirements**; organizations at this level should still use a dedicated secret management solution. At this point Komodo is not intended as an enterprise level secret management solution.
- These solutions do require application level integrations, your applications should only receive credentials to access the secret management API. **Your applications will pull the actual secret values from the dedicated secret management tool, they stay out of Komodo entirely**.

View File

@@ -1,15 +1,37 @@
# Configuring Webhooks
Multiple Komodo resources can take advantage of webhooks from your git provider. Komodo supports incoming webhooks using the Github standard, which is also supported by other providers like Gitea.
Multiple Komodo resources can take advantage of webhooks from your git provider. Komodo supports incoming webhooks using either the Github or Gitlab webhook authentication type, which is also supported by other providers like Gitea.
:::note
On Gitea, the default "Gitea" webhook type works with the Github standard 👍
On Gitea, the default "Gitea" webhook type works with the Github authentication type 👍
:::
## Copy the Resource Payload URL
## Copy the Webhook URL
Find the resource in UI, like a `Build`, `Repo`, or `Stack`.
Scroll down to the bottom of Configuration area, and copy the webhook for the action you want.
Go to the `Config` section, find "Webhooks", and copy the webhook for the action you want.
The webhook URL is constructed as follows:
```shell
https://${HOST}/listener/${AUTH_TYPE}/${RESOURCE_TYPE}/${ID_OR_NAME}/${EXECUTION}
```
- **`HOST`**: Your Komodo endpoint to receive webhooks.
- If your Komodo sits in a private network,
you will need a public proxy setup to forward `/listener` requests to Komodo.
- **`AUTH_TYPE`**:
- options: `github` | `gitlab`
- `github`: Validates the signature attached with `X-Hub-Signature-256`. [reference](https://docs.github.com/en/webhooks/using-webhooks/validating-webhook-deliveries)
- `gitlab`: Checks that the secret attached to `X-Gitlab-Token` is valid. [reference](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#create-a-webhook)
- **`RESOURCE_TYPE`**:
- options: `build` | `repo` | `stack` | `sync` | `procedure` | `action`
- **`ID_OR_NAME`**:
- Reference the specific resource by id or name. If the name may change, it is better to use id.
- **`EXECUTION`**:
- Which executions are available depends on the `RESOURCE_TYPE`. Builds only have the `/build` action.
Repos can select between `/pull`, `/clone`, or `/build`. Stacks have `/deploy` and `/refresh`, and Resource Syncs have `/sync` and `/refresh`.
- For **Procedures and Actions**, this will be the **branch to listen to for pushes**, or `__ALL__` to trigger
on pushes to any branch.
## Create the webhook on the Git Provider
@@ -32,17 +54,4 @@ etc. only cares about a specific branch of the repo.
Because of this, the webhook will trigger the action **only on pushes to the branch configured on the resource**.
For example, if I make a build, I may point the build to the `release` branch of a particular repo. If I set up a webhook, and push to the `main` branch, the action will *not trigger*. It will only trigger when the push is to the `release` branch.
## Procedure webhooks
Not all actions support webhooks directly, however for those that don't, they can still be triggered via webhook by using a Procedure. Just create a Procedure and configure it to run the action you are looking for, and create a webhook pointing to the Procedure.
Since Procedures don't specify a particular branch to listen for pushes on, this information
must be put in the webhook payload url. Procedures use webhook payload urls of the form:
```
<KOMODO_HOST>/listener/github/procedure/<PROCEDURE_ID>/<LISTEN_BRANCH>
```
If the `<LISTEN_BRANCH>` is not provided, it will default to listening on the `main` branch.
For example, if I make a build, I may point the build to the `release` branch of a particular repo. If I set up a webhook, and push to the `main` branch, the action will *not trigger*. It will only trigger when the push is to the `release` branch.

View File

@@ -58,9 +58,11 @@ const sidebars: SidebarsConfig = {
],
},
"docker-compose",
"variables",
"procedures",
"permissioning",
"sync-resources",
"webhooks",
"permissioning",
"version-upgrades",
"api",
"development"

View File

@@ -17,7 +17,8 @@ async fn app() -> anyhow::Result<()> {
info!("v {}", env!("CARGO_PKG_VERSION"));
let komodo = KomodoClient::new_from_env().await?;
let komodo =
KomodoClient::new_from_env()?.with_healthcheck().await?;
let (mut rx, _) = komodo.subscribe_to_updates(1000, 5)?;

View File

@@ -262,30 +262,41 @@ export type ExecuteResponses = {
PruneBuildx: Types.Update;
PruneSystem: Types.Update;
Deploy: Types.Update;
BatchDeploy: Types.BatchExecutionResponse;
StartDeployment: Types.Update;
RestartDeployment: Types.Update;
PauseDeployment: Types.Update;
UnpauseDeployment: Types.Update;
StopDeployment: Types.Update;
DestroyDeployment: Types.Update;
BatchDestroyDeployment: Types.BatchExecutionResponse;
RunBuild: Types.Update;
BatchRunBuild: Types.BatchExecutionResponse;
CancelBuild: Types.Update;
CloneRepo: Types.Update;
BatchCloneRepo: Types.BatchExecutionResponse;
PullRepo: Types.Update;
BatchPullRepo: Types.BatchExecutionResponse;
BuildRepo: Types.Update;
BatchBuildRepo: Types.BatchExecutionResponse;
CancelRepoBuild: Types.Update;
RunProcedure: Types.Update;
BatchRunProcedure: Types.BatchExecutionResponse;
RunAction: Types.Update;
BatchRunAction: Types.BatchExecutionResponse;
LaunchServer: Types.Update;
RunSync: Types.Update;
DeployStack: Types.Update;
BatchDeployStack: Types.BatchExecutionResponse;
DeployStackIfChanged: Types.Update;
BatchDeployStackIfChanged: Types.BatchExecutionResponse;
StartStack: Types.Update;
RestartStack: Types.Update;
StopStack: Types.Update;
PauseStack: Types.Update;
UnpauseStack: Types.Update;
DestroyStack: Types.Update;
BatchDestroyStack: Types.BatchExecutionResponse;
DeployStackService: Types.Update;
StartStackService: Types.Update;
RestartStackService: Types.Update;

View File

@@ -203,6 +203,14 @@ export interface AlerterQuerySpecifics {
types: AlerterEndpoint["type"][];
}
export type AlerterQuery = ResourceQuery<AlerterQuerySpecifics>;
export type BatchExecutionResponseItem = {
status: "Ok";
data: Update;
} | {
status: "Err";
data: BatchExecutionResponseItemErr;
};
export type BatchExecutionResponse = BatchExecutionResponseItem[];
export interface Version {
major: number;
minor: number;
@@ -383,12 +391,17 @@ export interface BuildQuerySpecifics {
}
export type BuildQuery = ResourceQuery<BuildQuerySpecifics>;
export type BuilderConfig =
/** Use a connected server an image builder. */
/** Use a Periphery address as a Builder. */
{
type: "Url";
params: UrlBuilderConfig;
}
/** Use a connected server as a Builder. */
| {
type: "Server";
params: ServerBuilderConfig;
}
/** Use EC2 instances spawned on demand as an image builder. */
/** Use EC2 instances spawned on demand as a Builder. */
| {
type: "Aws";
params: AwsBuilderConfig;
@@ -416,18 +429,30 @@ export type Execution =
} | {
type: "RunAction";
params: RunAction;
} | {
type: "BatchRunAction";
params: BatchRunAction;
} | {
type: "RunProcedure";
params: RunProcedure;
} | {
type: "BatchRunProcedure";
params: BatchRunProcedure;
} | {
type: "RunBuild";
params: RunBuild;
} | {
type: "BatchRunBuild";
params: BatchRunBuild;
} | {
type: "CancelBuild";
params: CancelBuild;
} | {
type: "Deploy";
params: Deploy;
} | {
type: "BatchDeploy";
params: BatchDeploy;
} | {
type: "StartDeployment";
params: StartDeployment;
@@ -446,15 +471,27 @@ export type Execution =
} | {
type: "DestroyDeployment";
params: DestroyDeployment;
} | {
type: "BatchDestroyDeployment";
params: BatchDestroyDeployment;
} | {
type: "CloneRepo";
params: CloneRepo;
} | {
type: "BatchCloneRepo";
params: BatchCloneRepo;
} | {
type: "PullRepo";
params: PullRepo;
} | {
type: "BatchPullRepo";
params: BatchPullRepo;
} | {
type: "BuildRepo";
params: BuildRepo;
} | {
type: "BatchBuildRepo";
params: BatchBuildRepo;
} | {
type: "CancelRepoBuild";
params: CancelRepoBuild;
@@ -530,9 +567,15 @@ export type Execution =
} | {
type: "DeployStack";
params: DeployStack;
} | {
type: "BatchDeployStack";
params: BatchDeployStack;
} | {
type: "DeployStackIfChanged";
params: DeployStackIfChanged;
} | {
type: "BatchDeployStackIfChanged";
params: BatchDeployStackIfChanged;
} | {
type: "StartStack";
params: StartStack;
@@ -551,6 +594,9 @@ export type Execution =
} | {
type: "DestroyStack";
params: DestroyStack;
} | {
type: "BatchDestroyStack";
params: BatchDestroyStack;
} | {
type: "Sleep";
params: Sleep;
@@ -3049,6 +3095,8 @@ export interface ServerListItemInfo {
state: ServerState;
/** Region of the server. */
region: string;
/** Address of the server. */
address: string;
/** Whether server is configured to send unreachable alerts. */
send_unreachable_alerts: boolean;
/** Whether server is configured to send cpu alerts. */
@@ -3227,6 +3275,7 @@ export type _PartialServerBuilderConfig = Partial<ServerBuilderConfig>;
export type _PartialServerConfig = Partial<ServerConfig>;
export type _PartialStackConfig = Partial<StackConfig>;
export type _PartialTag = Partial<Tag>;
export type _PartialUrlBuilderConfig = Partial<UrlBuilderConfig>;
export interface __Serror {
error: string;
trace: string[];
@@ -3345,6 +3394,186 @@ export interface AwsServerTemplateConfig {
/** The user data to deploy the instance with. */
user_data: string;
}
/** Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchBuildRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Clones multiple Repos in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchCloneRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Deploys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDeploy {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDeployStack {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Deploys multiple Stacks if changed in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDeployStackIfChanged {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
/** Destroys multiple Deployments in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDestroyDeployment {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* deployments
* foo-*
* # add some more
* extra-deployment-1, extra-deployment-2
* ```
*/
pattern: string;
}
/** Destroys multiple Stacks in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchDestroyStack {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* stacks
* foo-*
* # add some more
* extra-stack-1, extra-stack-2
* ```
*/
pattern: string;
}
export interface BatchExecutionResponseItemErr {
name: string;
error: _Serror;
}
/** Pulls multiple Repos in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchPullRepo {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* repos
* foo-*
* # add some more
* extra-repo-1, extra-repo-2
* ```
*/
pattern: string;
}
/** Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchRunAction {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* actions
* foo-*
* # add some more
* extra-action-1, extra-action-2
* ```
*/
pattern: string;
}
/** Runs multiple builds in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchRunBuild {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* builds
* foo-*
* # add some more
* extra-build-1, extra-build-2
* ```
*/
pattern: string;
}
/** Runs multiple Procedures in parallel that match pattern. Response: [BatchExecutionResult]. */
export interface BatchRunProcedure {
/**
* Id or name or wildcard pattern or regex.
* Supports multiline and comma delineated combinations of the above.
*
* Example:
* ```
* # match all foo-* procedures
* foo-*
* # add some more
* extra-procedure-1, extra-procedure-2
* ```
*/
pattern: string;
}
/**
* Builds the target repo, using the attached builder. Response: [Update].
*
@@ -3431,16 +3660,6 @@ export interface CommitSync {
/** Id or name */
sync: string;
}
export interface ComposeService {
image?: string;
container_name?: string;
}
/** Keeping this minimal for now as its only needed to parse the service names / container names */
export interface ComposeFile {
/** If not provided, will default to the parent folder holding the compose file. */
name?: string;
services?: Record<string, ComposeService>;
}
export interface Conversion {
/** reference on the server. */
local: string;
@@ -3617,6 +3836,9 @@ export interface CreateBuildWebhook {
}
/** Partial representation of [BuilderConfig] */
export type PartialBuilderConfig = {
type: "Url";
params: _PartialUrlBuilderConfig;
} | {
type: "Server";
params: _PartialServerBuilderConfig;
} | {
@@ -5528,6 +5750,22 @@ export interface PauseStack {
/** Optionally specify a specific service to pause */
service?: string;
}
export interface PermissionToml {
/**
* Id can be:
* - resource name. `id = "abcd-build"`
* - regex matching resource names. `id = "\^(.+)-build-([0-9]+)$\"`
*/
target: ResourceTarget;
/**
* The permission level:
* - None
* - Read
* - Execute
* - Write
*/
level: PermissionLevel;
}
export declare enum PortTypeEnum {
EMPTY = "",
TCP = "tcp",
@@ -5782,6 +6020,57 @@ export interface RenameUserGroup {
/** The new name for the UserGroup */
name: string;
}
export interface ResourceToml<PartialConfig> {
/** The resource name. Required */
name: string;
/** The resource description. Optional. */
description?: string;
/** Tag ids or names. Optional */
tags?: string[];
/**
* Optional. Only relevant for deployments / stacks.
*
* Will ensure deployment / stack is running with the latest configuration.
* Deploy actions to achieve this will be included in the sync.
* Default is false.
*/
deploy?: boolean;
/**
* Optional. Only relevant for deployments / stacks using the 'deploy' sync feature.
*
* Specify other deployments / stacks by name as dependencies.
* The sync will ensure the deployment / stack will only be deployed 'after' its dependencies.
*/
after?: string[];
/** Resource specific configuration. */
config?: PartialConfig;
}
export interface UserGroupToml {
/** User group name */
name: string;
/** Users in the group */
users?: string[];
/** Give the user group elevated permissions on all resources of a certain type */
all?: Record<ResourceTarget["type"], PermissionLevel>;
/** Permissions given to the group */
permissions?: PermissionToml[];
}
/** Specifies resources to sync on Komodo */
export interface ResourcesToml {
servers?: ResourceToml<_PartialServerConfig>[];
deployments?: ResourceToml<_PartialDeploymentConfig>[];
stacks?: ResourceToml<_PartialStackConfig>[];
builds?: ResourceToml<_PartialBuildConfig>[];
repos?: ResourceToml<_PartialRepoConfig>[];
procedures?: ResourceToml<_PartialProcedureConfig>[];
actions?: ResourceToml<_PartialActionConfig>[];
alerters?: ResourceToml<_PartialAlerterConfig>[];
builders?: ResourceToml<_PartialBuilderConfig>[];
server_templates?: ResourceToml<PartialServerTemplateConfig>[];
resource_syncs?: ResourceToml<_PartialResourceSyncConfig>[];
user_groups?: UserGroupToml[];
variables?: Variable[];
}
/** Restarts all containers on the target server. Response: [Update] */
export interface RestartAllContainers {
/** Name or id */
@@ -5932,7 +6221,7 @@ export interface SearchStackServiceLog {
/** Configuration for a Komodo Server Builder. */
export interface ServerBuilderConfig {
/** The server id of the builder */
server_id: string;
server_id?: string;
}
/** The health of a part of the server. */
export interface ServerHealthState {
@@ -6411,6 +6700,13 @@ export interface UpdateVariableValue {
/** The value to set. */
value: string;
}
/** Configuration for a Komodo Url Builder. */
export interface UrlBuilderConfig {
/** The address of the Periphery agent */
address: string;
/** A custom passkey to use. Otherwise, use the default passkey. */
passkey?: string;
}
/** Update file contents in Files on Server or Git Repo mode. Response: [Update]. */
export interface WriteStackFileContents {
/** The name or id of the target Stack. */
@@ -6519,6 +6815,9 @@ export type ExecuteRequest = {
} | {
type: "Deploy";
params: Deploy;
} | {
type: "BatchDeploy";
params: BatchDeploy;
} | {
type: "StartDeployment";
params: StartDeployment;
@@ -6540,9 +6839,15 @@ export type ExecuteRequest = {
} | {
type: "DeployStack";
params: DeployStack;
} | {
type: "BatchDeployStack";
params: BatchDeployStack;
} | {
type: "DeployStackIfChanged";
params: DeployStackIfChanged;
} | {
type: "BatchDeployStackIfChanged";
params: BatchDeployStackIfChanged;
} | {
type: "StartStack";
params: StartStack;
@@ -6561,30 +6866,51 @@ export type ExecuteRequest = {
} | {
type: "DestroyStack";
params: DestroyStack;
} | {
type: "BatchDestroyStack";
params: BatchDestroyStack;
} | {
type: "RunBuild";
params: RunBuild;
} | {
type: "BatchRunBuild";
params: BatchRunBuild;
} | {
type: "CancelBuild";
params: CancelBuild;
} | {
type: "CloneRepo";
params: CloneRepo;
} | {
type: "BatchCloneRepo";
params: BatchCloneRepo;
} | {
type: "PullRepo";
params: PullRepo;
} | {
type: "BatchPullRepo";
params: BatchPullRepo;
} | {
type: "BuildRepo";
params: BuildRepo;
} | {
type: "BatchBuildRepo";
params: BatchBuildRepo;
} | {
type: "CancelRepoBuild";
params: CancelRepoBuild;
} | {
type: "RunProcedure";
params: RunProcedure;
} | {
type: "BatchRunProcedure";
params: BatchRunProcedure;
} | {
type: "RunAction";
params: RunAction;
} | {
type: "BatchRunAction";
params: BatchRunAction;
} | {
type: "LaunchServer";
params: LaunchServer;

6653
frontend/public/deno.d.ts vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,923 @@
import { KomodoClient, Types as KomodoTypes } from "./client/lib.js";
import "./deno.d.ts";
declare global {
// =================
// 🔴 Docker Compose
// =================
/**
* Docker Compose configuration interface
*/
export interface DockerCompose {
/** Version of the Compose file format */
version?: string;
/** Defines services within the Docker Compose file */
services: Record<string, DockerComposeService>;
/** Defines volumes in the Docker Compose file */
volumes?: Record<string, DockerComposeVolume>;
/** Defines networks in the Docker Compose file */
networks?: Record<string, DockerComposeNetwork>;
}
/**
* Describes a service within Docker Compose
*/
export interface DockerComposeService {
/** Docker image to use */
image?: string;
/** Build configuration for the service */
build?: DockerComposeServiceBuild;
/** Ports to map, supporting single strings or mappings */
ports?: (string | DockerComposeServicePortMapping)[];
/** Environment variables to set within the container */
environment?: Record<string, string>;
/** Volumes to mount */
volumes?: (string | DockerComposeServiceVolumeMount)[];
/** Networks to attach the service to */
networks?: string[];
/** Dependencies of the service */
depends_on?: string[];
/** Command to override the default CMD */
command?: string | string[];
/** Entrypoint to override the default ENTRYPOINT */
entrypoint?: string | string[];
/** Container name */
container_name?: string;
/** Healthcheck configuration for the service */
healthcheck?: DockerComposeServiceHealthcheck;
/** Logging options for the service */
logging?: DockerComposeServiceLogging;
/** Deployment settings for the service */
deploy?: DockerComposeServiceDeploy;
/** Restart policy */
restart?: string;
/** Security options */
security_opt?: string[];
/** Ulimits configuration */
ulimits?: Record<string, DockerComposeServiceUlimit>;
/** Secrets to be used by the service */
secrets?: string[];
/** Configuration items */
configs?: string[];
/** Labels to apply to the service */
labels?: Record<string, string>;
/** Number of CPU units assigned */
cpus?: string | number;
/** Memory limit */
mem_limit?: string;
/** CPU shares for container allocation */
cpu_shares?: number;
/** Extra hosts for the service */
extra_hosts?: string[];
[key: string]: unknown;
}
/**
* Configuration for Docker build
*/
export interface DockerComposeServiceBuild {
/** Build context path */
context: string;
/** Dockerfile path within the context */
dockerfile?: string;
/** Build arguments to pass */
args?: Record<string, string>;
/** Sources for cache imports */
cache_from?: string[];
/** Labels for the build */
labels?: Record<string, string>;
/** Network mode for build process */
network?: string;
/** Target build stage */
target?: string;
/** Shared memory size */
shm_size?: string;
/** Secrets for the build process */
secrets?: string[];
/** Extra hosts for build process */
extra_hosts?: string[];
}
/**
* Port mapping configuration
*/
export interface DockerComposeServicePortMapping {
/** Target port inside the container */
target: number;
/** Published port on the host */
published?: number;
/** Protocol used for the port (tcp/udp) */
protocol?: "tcp" | "udp";
/** Mode for port publishing */
mode?: "host" | "ingress";
}
/**
* Volume mount configuration
*/
export interface DockerComposeServiceVolumeMount {
/** Type of volume mount */
type: "volume" | "bind" | "tmpfs";
/** Source path or name */
source: string;
/** Target path within the container */
target: string;
/** Whether the volume is read-only */
read_only?: boolean;
}
/**
* Healthcheck configuration for a service
*/
export interface DockerComposeServiceHealthcheck {
/** Command to check health */
test: string | string[];
/** Interval between checks */
interval?: string;
/** Timeout for each check */
timeout?: string;
/** Maximum number of retries */
retries?: number;
/** Initial delay before checks start */
start_period?: string;
}
/**
* Logging configuration for a service
*/
export interface DockerComposeServiceLogging {
/** Logging driver */
driver: string;
/** Options for the logging driver */
options?: Record<string, string>;
}
/**
 * Deployment configuration for a service
 */
export interface DockerComposeServiceDeploy {
  /** Number of replicas */
  replicas?: number;
  /** Update configuration */
  update_config?: DockerComposeServiceDeployUpdateConfig;
  /** Restart policy */
  restart_policy?: DockerComposeServiceDeployRestartPolicy;
}
/**
 * Update configuration during deployment
 */
export interface DockerComposeServiceDeployUpdateConfig {
  /** Number of containers updated in parallel */
  parallelism?: number;
  /** Delay between updates */
  delay?: string;
  /** Action on failure */
  failure_action?: string;
  /** Order of updates */
  order?: string;
}
/**
* Restart policy configuration
*/
export interface DockerComposeServiceDeployRestartPolicy {
/** Condition for restart */
condition: "none" | "on-failure" | "any";
/** Delay before restarting */
delay?: string;
/** Maximum number of restart attempts */
max_attempts?: number;
/** Time window for restart attempts */
window?: string;
}
/**
* Ulimit configuration
*/
export interface DockerComposeServiceUlimit {
/** Soft limit */
soft: number;
/** Hard limit */
hard: number;
}
/**
* Volume configuration in Docker Compose
*/
export interface DockerComposeVolume {
/** Volume driver to use */
driver?: string;
/** Driver options */
driver_opts?: Record<string, string>;
/** External volume identifier */
external?: boolean | string;
}
/**
* Network configuration in Docker Compose
*/
export interface DockerComposeNetwork {
/** Network driver */
driver?: string;
/** Indicates if network is external */
external?: boolean;
}
// =====================
// 🔴 YAML De/serializer
// =====================
// https://jsr.io/@std/yaml
export type YamlSchemaType =
| "failsafe"
| "json"
| "core"
| "default"
| "extended";
export type YamlStyleVariant =
| "lowercase"
| "uppercase"
| "camelcase"
| "decimal"
| "binary"
| "octal"
| "hexadecimal";
/** Options for `YAML.stringify` */
export type YamlStringifyOptions = {
/**
* Indentation width to use (in spaces).
*
* @default {2}
*/
indent?: number;
/**
* When true, adds an indentation level to array elements.
*
* @default {true}
*/
arrayIndent?: boolean;
/**
* Do not throw on invalid types (like function in the safe schema) and skip
* pairs and single values with such types.
*
* @default {false}
*/
skipInvalid?: boolean;
/**
* Specifies level of nesting, when to switch from block to flow style for
* collections. `-1` means block style everywhere.
*
* @default {-1}
*/
flowLevel?: number;
/** Each tag may have own set of styles. - "tag" => "style" map. */
styles?: Record<string, YamlStyleVariant>;
/**
* Name of the schema to use.
*
* @default {"default"}
*/
schema?: YamlSchemaType;
/**
* If true, sort keys when dumping YAML in ascending, ASCII character order.
* If a function, use the function to sort the keys.
* If a function is specified, the function must return a negative value
* if first argument is less than second argument, zero if they're equal
* and a positive value otherwise.
*
* @default {false}
*/
sortKeys?: boolean | ((a: string, b: string) => number);
/**
* Set max line width.
*
* @default {80}
*/
lineWidth?: number;
/**
* If false, don't convert duplicate objects into references.
*
* @default {true}
*/
useAnchors?: boolean;
/**
* If false don't try to be compatible with older yaml versions.
* Currently: don't quote "yes", "no" and so on,
* as required for YAML 1.1.
*
* @default {true}
*/
compatMode?: boolean;
/**
* If true flow sequences will be condensed, omitting the
* space between `key: value` or `a, b`. Eg. `'[a,b]'` or `{a:{b:c}}`.
* Can be useful when using yaml for pretty URL query params
* as spaces are %-encoded.
*
* @default {false}
*/
condenseFlow?: boolean;
};
/** Options for `YAML.parse` */
export interface YamlParseOptions {
/**
* Name of the schema to use.
*
* @default {"default"}
*/
schema?: YamlSchemaType;
/**
* If `true`, duplicate keys will overwrite previous values. Otherwise,
* duplicate keys will throw a {@linkcode SyntaxError}.
*
* @default {false}
*/
allowDuplicateKeys?: boolean;
/**
* If defined, a function to call on warning messages taking an
* {@linkcode Error} as its only argument.
*/
onWarning?(error: Error): void;
}
// ===============
// 🔴 Cargo TOML 🦀
// ===============
/**
* Represents the structure of a Cargo.toml manifest file.
*/
export interface CargoToml {
/**
* Information about the main package in the Cargo project.
*/
package?: CargoTomlPackage;
/**
* Dependencies required by the project, organized into normal, development, and build dependencies.
*/
dependencies?: CargoTomlDependencies;
/**
* Development dependencies required by the project.
*/
devDependencies?: CargoTomlDependencies;
/**
* Build dependencies required by the project.
*/
buildDependencies?: CargoTomlDependencies;
/**
* Features available in the package, each as an array of dependency names or other features.
*/
features?: Record<string, string[]>;
/**
* Build profiles available in the package, allowing for profile-specific configurations.
*/
profile?: CargoTomlProfiles;
/**
* Path to the custom build script for the package, if applicable.
*/
build?: string;
/**
* Workspace configuration for multi-package Cargo projects.
*/
workspace?: CargoTomlWorkspace;
/**
* Additional metadata ignored by Cargo but potentially used by other tools.
*/
[key: string]: any;
}
/**
* Metadata for the main package in the Cargo project.
*/
export interface CargoTomlPackage {
/**
* The name of the package, used by Cargo and for crate publishing.
*/
name: string;
/**
* The version of the package, following Semantic Versioning.
*/
version: string;
/**
* List of author names or emails.
*/
authors?: string[];
/**
* The Rust edition for this package.
*/
edition?: "2015" | "2018" | "2021";
/**
* Short description of the package.
*/
description?: string;
/**
* The license for the package, specified as a SPDX identifier.
*/
license?: string;
/**
* Path to a custom license file for the package.
*/
licenseFile?: string;
/**
* URL to the package documentation.
*/
documentation?: string;
/**
* URL to the package homepage.
*/
homepage?: string;
/**
* URL to the package repository.
*/
repository?: string;
/**
* Path to the README file for the package.
*/
readme?: string;
/**
* List of keywords for the package, used for search optimization.
*/
keywords?: string[];
/**
* List of categories that the package belongs to.
*/
categories?: string[];
/**
* Workspace that this package belongs to, if any.
*/
workspace?: string;
/**
* Path to a build script for the package.
*/
build?: string;
/**
* Name of a native library to link with, if applicable.
*/
links?: string;
/**
* List of paths to exclude from the package.
*/
exclude?: string[];
/**
* List of paths to include in the package.
*/
include?: string[];
/**
* Indicates whether the package should be published to crates.io.
*/
publish?: boolean;
/**
* Arbitrary metadata that is ignored by Cargo but can be used by other tools.
*/
metadata?: Record<string, any>;
/**
* Auto-enable binaries for the package.
*/
autobins?: boolean;
/**
* Auto-enable examples for the package.
*/
autoexamples?: boolean;
/**
* Auto-enable tests for the package.
*/
autotests?: boolean;
/**
* Auto-enable benchmarks for the package.
*/
autobenches?: boolean;
/**
* Specifies the version of dependency resolution to use.
*/
resolver?: "1" | "2";
}
/**
* A map of dependencies in the Cargo manifest, with each dependency represented by its name.
*/
export type CargoTomlDependencies = Record<string, CargoTomlDependency>;
/**
* Information about a specific dependency in the Cargo manifest.
*/
export type CargoTomlDependency =
| string
| {
/**
* Version requirement for the dependency.
*/
version?: string;
/**
* Path to a local dependency.
*/
path?: string;
/**
* Name of the registry to use for this dependency.
*/
registry?: string;
/**
* URL to a Git repository for this dependency.
*/
git?: string;
/**
* Branch to use for a Git dependency.
*/
branch?: string;
/**
* Tag to use for a Git dependency.
*/
tag?: string;
/**
* Specific revision to use for a Git dependency.
*/
rev?: string;
/**
* Marks this dependency as optional.
*/
optional?: boolean;
/**
* Enables default features for this dependency.
*/
defaultFeatures?: boolean;
/**
* List of features to enable for this dependency.
*/
features?: string[];
/**
* Renames the dependency package name.
*/
package?: string;
};
/**
* Defines available profiles for building the package.
*/
export interface CargoTomlProfiles {
/**
* Development profile configuration.
*/
dev?: CargoTomlProfile;
/**
* Release profile configuration.
*/
release?: CargoTomlProfile;
/**
* Test profile configuration.
*/
test?: CargoTomlProfile;
/**
* Benchmark profile configuration.
*/
bench?: CargoTomlProfile;
/**
* Documentation profile configuration.
*/
doc?: CargoTomlProfile;
/**
* Additional custom profiles.
*/
[profileName: string]: CargoTomlProfile | undefined;
}
/**
 * Configuration for an individual build profile.
 *
 * NOTE(review): field names here are camelCase (e.g. `optLevel`) while
 * Cargo.toml uses kebab-case (`opt-level`) — confirm the TOML parser
 * performs this key conversion.
 */
export interface CargoTomlProfile {
  /**
   * Profile that this profile inherits from.
   */
  inherits?: string;
  /**
   * Optimization level for the profile.
   */
  optLevel?: "0" | "1" | "2" | "3" | "s" | "z";
  /**
   * Enables debug information, either as a boolean or a level.
   */
  debug?: boolean | number;
  /**
   * Controls how debug information is split.
   */
  splitDebugInfo?: "unpacked" | "packed" | "off";
  /**
   * Enables or disables debug assertions.
   */
  debugAssertions?: boolean;
  /**
   * Enables or disables overflow checks.
   */
  overflowChecks?: boolean;
  /**
   * Enables or disables unit testing for the profile.
   */
  test?: boolean;
  /**
   * Link-time optimization settings for the profile.
   */
  lto?: boolean | "thin" | "fat";
  /**
   * Panic strategy for the profile.
   */
  panic?: "unwind" | "abort";
  /**
   * Enables or disables incremental compilation.
   */
  incremental?: boolean;
  /**
   * Number of code generation units for parallelism.
   */
  codegenUnits?: number;
  /**
   * Enables or disables the use of runtime paths.
   */
  rpath?: boolean;
  /**
   * Specifies stripping options for the binary.
   */
  strip?: boolean | "debuginfo" | "symbols";
  /**
   * Additional custom profile fields.
   */
  [key: string]: any;
}
/**
 * Defines workspace-specific settings for a Cargo project
 * (the `[workspace]` table).
 */
export interface CargoTomlWorkspace {
  /**
   * Members of the workspace.
   */
  members?: string[];
  /**
   * Paths to exclude from the workspace.
   */
  exclude?: string[];
  /**
   * Members to include by default when building the workspace.
   */
  defaultMembers?: string[];
  /**
   * Common information shared by the packages in the Cargo workspace.
   */
  package?: CargoTomlPackage;
  /**
   * Additional custom workspace fields.
   */
  [key: string]: any;
}
// =====================
// 🔴 TOML De/serializer
// =====================
// https://jsr.io/@std/toml
/** Options accepted by `TOML.stringify`. */
export interface TomlStringifyOptions {
  /**
   * Define whether the keys should be aligned or not.
   *
   * @default {false}
   */
  keyAlignment?: boolean;
}
/** Pre-initialized Komodo client, available globally in the scripting environment. */
var komodo: ReturnType<typeof KomodoClient>;
/** YAML parsing utilities, available globally in the scripting environment. */
var YAML: {
  /**
   * Converts a JavaScript object or value to a YAML document string.
   *
   * @example Usage
   * ```ts
   * const data = { id: 1, name: "Alice" };
   *
   * const yaml = YAML.stringify(data);
   *
   * assertEquals(yaml, "id: 1\nname: Alice\n");
   * ```
   *
   * @throws {TypeError} If `data` contains invalid types.
   * @param data The data to serialize.
   * @param options The options for serialization.
   * @returns A YAML string.
   */
  stringify: (data: unknown, options?: YamlStringifyOptions) => string;
  /**
   * Parse and return a YAML string as a parsed YAML document object.
   *
   * Note: This does not support functions. Untrusted data is safe to parse.
   *
   * @example Usage
   * ```ts
   * const data = YAML.parse(`
   * id: 1
   * name: Alice
   * `);
   *
   * assertEquals(data, { id: 1, name: "Alice" });
   * ```
   *
   * @throws {SyntaxError} Throws error on invalid YAML.
   * @param content YAML string to parse.
   * @param options Parsing options.
   * @returns Parsed document.
   */
  parse: (content: string, options?: YamlParseOptions) => unknown;
  /**
   * Same as `YAML.parse`, but understands multi-document YAML sources
   * (documents separated by `---`), and returns multiple parsed YAML
   * document objects.
   *
   * @example Usage
   * ```ts
   * const data = YAML.parseAll(`
   * ---
   * id: 1
   * name: Alice
   * ---
   * id: 2
   * name: Bob
   * ---
   * id: 3
   * name: Eve
   * `);
   *
   * assertEquals(data, [ { id: 1, name: "Alice" }, { id: 2, name: "Bob" }, { id: 3, name: "Eve" }]);
   * ```
   *
   * @param content YAML string to parse.
   * @param options Parsing options.
   * @returns Array of parsed documents.
   */
  parseAll: (content: string, options?: YamlParseOptions) => unknown;
  /**
   * Parse and return a YAML string as a typed Docker Compose file.
   *
   * @example Usage
   * ```ts
   * const stack = await komodo.read("GetStack", { stack: "test-stack" });
   * const contents = stack?.config?.file_contents;
   *
   * const parsed: DockerCompose = YAML.parseDockerCompose(contents)
   * ```
   *
   * @throws {SyntaxError} Throws error on invalid YAML.
   * @param content Docker compose file string.
   * @param options Parsing options.
   * @returns Parsed document.
   */
  parseDockerCompose: (
    content: string,
    options?: YamlParseOptions
  ) => DockerCompose;
};
/** TOML parsing utilities, available globally in the scripting environment. */
var TOML: {
  /**
   * Converts an object to a [TOML string](https://toml.io).
   *
   * @example Usage
   * ```ts
   * const obj = {
   *   title: "TOML Example",
   *   owner: {
   *     name: "Bob",
   *     bio: "Bob is a cool guy",
   *   }
   * };
   *
   * const tomlString = TOML.stringify(obj);
   *
   * assertEquals(tomlString, `title = "TOML Example"\n\n[owner]\nname = "Bob"\nbio = "Bob is a cool guy"\n`);
   * ```
   * @param obj Source object
   * @param options Options for stringifying.
   * @returns TOML string
   */
  stringify: (
    obj: Record<string, unknown>,
    options?: TomlStringifyOptions
  ) => string;
  /**
   * Parses a [TOML string](https://toml.io) into an object.
   *
   * @example Usage
   * ```ts
   * const tomlString = `title = "TOML Example"
   * [owner]
   * name = "Alice"
   * bio = "Alice is a programmer."`;
   *
   * const obj = TOML.parse(tomlString);
   *
   * assertEquals(obj, { title: "TOML Example", owner: { name: "Alice", bio: "Alice is a programmer." } });
   * ```
   * @param tomlString TOML string to be parsed.
   * @returns The parsed JS object.
   */
  parse: (tomlString: string) => Record<string, unknown>;
  /**
   * Parses Komodo resource.toml contents to a typed object
   * for easier handling.
   *
   * @example Usage
   * ```ts
   * const sync = await komodo.read("GetResourceSync", { sync: "test-sync" })
   * const contents = sync?.config?.file_contents;
   *
   * const resources: Types.ResourcesToml = TOML.parseResourceToml(contents);
   * ```
   *
   * @param resourceToml The resource file contents.
   * @returns Komodo resource.toml contents as JSON
   */
  parseResourceToml: (resourceToml: string) => Types.ResourcesToml;
  /**
   * Parses Cargo.toml contents to a typed object
   * for easier handling.
   *
   * @example Usage
   * ```ts
   * const contents = await Deno.readTextFile("/path/to/Cargo.toml");
   * const cargoToml: CargoToml = TOML.parseCargoToml(contents);
   * ```
   *
   * @param cargoToml The Cargo.toml contents.
   * @returns Cargo.toml contents as JSON
   */
  parseCargoToml: (cargoToml: string) => CargoToml;
};
/** All Komodo Types, re-exported under the `Types` namespace for convenience. */
export import Types = KomodoTypes;
}
export {}

View File

@@ -199,7 +199,7 @@ export const Config = <T,>({
key={section}
className="relative pb-12 border-b last:pb-0 last:border-b-0 "
>
<div className="xl:hidden sticky top-16 h-24 flex items-center justify-between bg-background z-10">
<div className="xl:hidden sticky top-16 h-16 flex items-center justify-between bg-background z-10">
{section && <p className="uppercase text-2xl">{section}</p>}
<Select
onValueChange={(value) => (window.location.hash = value)}

View File

@@ -340,6 +340,8 @@ export const ProviderSelector = ({
if (value === "Custom") {
onSelect("");
setCustomMode(true);
} else if (value === "None") {
onSelect("");
} else {
onSelect(value);
}
@@ -365,7 +367,8 @@ export const ProviderSelector = ({
!providers.includes(selected) && (
<SelectItem value={selected}>{selected}</SelectItem>
)}
{showCustom && <SelectItem value={"Custom"}>Custom</SelectItem>}
{showCustom && <SelectItem value="Custom">Custom</SelectItem>}
{!showCustom && <SelectItem value="None">None</SelectItem>}
</SelectContent>
</Select>
);
@@ -908,44 +911,45 @@ export const ImageRegistryConfig = ({
/>
</div>
</ConfigItem>
{organizations.length > 0 && (
<ConfigItem
label="Organization"
description="Push the build under an organization namespace, rather than the account namespace."
>
<OrganizationSelector
organizations={organizations}
selected={registry?.organization!}
set={(organization) =>
setRegistry({
...registry,
organization,
})
}
disabled={disabled}
/>
</ConfigItem>
)}
{registry && (
<ConfigItem
label="Account"
description="Select the account used to authenticate against the registry."
>
<AccountSelector
id={resource_id}
type="Builder"
account_type="docker"
provider={registry.domain!}
selected={registry.account}
onSelect={(account) =>
setRegistry({
...registry,
account,
})
}
disabled={disabled}
/>
</ConfigItem>
{registry?.domain && (
<>
<ConfigItem
label="Account"
description="Select the account used to authenticate against the registry."
>
<AccountSelector
id={resource_id}
type="Builder"
account_type="docker"
provider={registry.domain!}
selected={registry.account}
onSelect={(account) =>
setRegistry({
...registry,
account,
})
}
disabled={disabled}
/>
</ConfigItem>
<ConfigItem
label="Organization"
description="Push the build under an organization / project namespace, rather than the account namespace."
>
<OrganizationSelector
organizations={organizations}
selected={registry?.organization!}
set={(organization) =>
setRegistry({
...registry,
organization,
})
}
disabled={disabled}
/>
</ConfigItem>
</>
)}
</>
);
@@ -963,7 +967,7 @@ const OrganizationSelector = ({
disabled: boolean;
}) => {
const [customMode, setCustomMode] = useState(false);
if (customMode || organizations.length === 0) {
if (customMode) {
return (
<Input
placeholder="Input custom organization name"

View File

@@ -36,7 +36,7 @@ export const Layout = () => {
<div className="h-screen overflow-y-scroll">
<div className="container">
<Sidebar />
<div className="lg:ml-64 lg:pl-8 py-24">
<div className="lg:ml-[200px] lg:pl-8 py-[88px]">
<Outlet />
</div>
</div>
@@ -184,7 +184,7 @@ export const Section = ({
{(title || icon || titleRight || titleOther || actions) && (
<div
className={cn(
"flex flex-wrap gap-2 justify-between py-1",
"flex flex-wrap gap-2 justify-between",
itemsCenterTitleRow ? "items-center" : "items-start"
)}
>

View File

@@ -16,6 +16,7 @@ export type MonacoLanguage =
| "toml"
| "json"
| "key_value"
| "string_list"
| "shell"
| "dockerfile"
| "rust"
@@ -28,11 +29,15 @@ export const MonacoEditor = ({
onValueChange,
language,
readOnly,
minHeight,
className,
}: {
value: string | undefined;
onValueChange?: (value: string) => void;
language: MonacoLanguage;
readOnly?: boolean;
minHeight?: number;
className?: string;
}) => {
const [editor, setEditor] =
useState<monaco.editor.IStandaloneCodeEditor | null>(null);
@@ -97,12 +102,8 @@ export const MonacoEditor = ({
containerNode.style.height = `${Math.max(
Math.ceil(contentHeight),
MIN_EDITOR_HEIGHT
minHeight ?? MIN_EDITOR_HEIGHT
)}px`;
// containerNode.style.height = `${Math.max(
// Math.min(Math.ceil(contentHeight), MAX_EDITOR_HEIGHT),
// MIN_EDITOR_HEIGHT
// )}px`;
}, [editor, line_count]);
const { theme: _theme } = useTheme();
@@ -131,7 +132,7 @@ export const MonacoEditor = ({
};
return (
<div className="mx-2 my-1 w-full">
<div className={cn("mx-2 my-1 w-full", className)}>
<Editor
language={language}
value={value}

View File

@@ -1,4 +1,4 @@
import { useRead, useUser } from "@lib/hooks";
import { useAllResources, useUser } from "@lib/hooks";
import { Button } from "@ui/button";
import {
CommandDialog,
@@ -12,18 +12,9 @@ import {
import { Home, Search, User } from "lucide-react";
import { Fragment, ReactNode, useMemo, useState } from "react";
import { useNavigate } from "react-router-dom";
import { cn } from "@lib/utils";
import { DeploymentComponents } from "./resources/deployment";
import { BuildComponents } from "./resources/build";
import { ServerComponents } from "./resources/server";
import { ProcedureComponents } from "./resources/procedure";
import { RepoComponents } from "./resources/repo";
import { BuilderComponents } from "./resources/builder";
import { AlerterComponents } from "./resources/alerter";
import { ServerTemplateComponents } from "./resources/server-template";
import { cn, RESOURCE_TARGETS, usableResourcePath } from "@lib/utils";
import { Badge } from "@ui/badge";
import { ResourceSyncComponents } from "./resources/resource-sync";
import { StackComponents } from "./resources/stack";
import { ResourceComponents } from "./resources";
export const OmniSearch = ({
className,
@@ -116,16 +107,7 @@ const useOmniItems = (
search: string
): Record<string, OmniItem[]> => {
const user = useUser().data;
const servers = useRead("ListServers", {}).data;
const deployments = useRead("ListDeployments", {}).data;
const stacks = useRead("ListStacks", {}).data;
const builds = useRead("ListBuilds", {}).data;
const repos = useRead("ListRepos", {}).data;
const procedures = useRead("ListProcedures", {}).data;
const builders = useRead("ListBuilders", {}).data;
const alerters = useRead("ListAlerters", {}).data;
const templates = useRead("ListServerTemplates", {}).data;
const syncs = useRead("ListResourceSyncs", {}).data;
const resources = useAllResources();
const searchTerms = search
.toLowerCase()
.split(" ")
@@ -139,66 +121,21 @@ const useOmniItems = (
icon: <Home className="w-4 h-4" />,
onSelect: () => nav("/"),
},
{
key: "Servers",
label: "Servers",
icon: <ServerComponents.Icon />,
onSelect: () => nav("/servers"),
},
{
key: "Deployments",
label: "Deployments",
icon: <DeploymentComponents.Icon />,
onSelect: () => nav("/deployments"),
},
{
key: "Stacks",
label: "Stacks",
icon: <StackComponents.Icon />,
onSelect: () => nav("/stacks"),
},
{
key: "Builds",
label: "Builds",
icon: <BuildComponents.Icon />,
onSelect: () => nav("/builds"),
},
{
key: "Repos",
label: "Repos",
icon: <RepoComponents.Icon />,
onSelect: () => nav("/repos"),
},
{
key: "Procedures",
label: "Procedures",
icon: <ProcedureComponents.Icon />,
onSelect: () => nav("/procedures"),
},
{
key: "Builders",
label: "Builders",
icon: <BuilderComponents.Icon />,
onSelect: () => nav("/builders"),
},
{
key: "Alerters",
label: "Alerters",
icon: <AlerterComponents.Icon />,
onSelect: () => nav("/alerters"),
},
{
key: "Templates",
label: "Templates",
icon: <ServerTemplateComponents.Icon />,
onSelect: () => nav("/server-templates"),
},
{
key: "Syncs",
label: "Syncs",
icon: <ResourceSyncComponents.Icon />,
onSelect: () => nav("/resource-syncs"),
},
...RESOURCE_TARGETS.map((_type) => {
const type =
_type === "ResourceSync"
? "Sync"
: _type === "ServerTemplate"
? "Template"
: _type;
const Components = ResourceComponents[_type];
return {
key: type + "s",
label: type + "s",
icon: <Components.Icon />,
onSelect: () => nav(usableResourcePath(_type)),
};
}),
(user?.admin && {
key: "Users",
label: "Users",
@@ -214,200 +151,39 @@ const useOmniItems = (
searchTerms.every((term) => label.includes(term))
);
}),
Servers:
servers
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"server".includes(term)
...Object.fromEntries(
RESOURCE_TARGETS.map((_type) => {
const type =
_type === "ResourceSync"
? "Sync"
: _type === "ServerTemplate"
? "Template"
: _type;
const lower = type.toLowerCase();
const Components = ResourceComponents[_type];
return [
type + "s",
resources[_type]
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
lower.includes(term)
)
)
)
.map((server) => ({
key: "server-" + server.name,
label: server.name,
icon: <ServerComponents.Icon id={server.id} />,
onSelect: () => nav(`/servers/${server.id}`),
})) || [],
Deployments:
deployments
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"deployment".includes(term)
)
)
.map((deployment) => ({
key: "deployment-" + deployment.name,
label: deployment.name,
icon: <DeploymentComponents.Icon id={deployment.id} />,
onSelect: () => nav(`/deployments/${deployment.id}`),
})) || [],
Stacks:
stacks
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"stack".includes(term)
)
)
.map((stack) => ({
key: "stack-" + stack.name,
label: stack.name,
icon: <StackComponents.Icon id={stack.id} />,
onSelect: () => nav(`/stacks/${stack.id}`),
})) || [],
Build:
builds
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"build".includes(term)
)
)
.map((build) => ({
key: "build-" + build.name,
label: build.name,
icon: <BuildComponents.Icon id={build.id} />,
onSelect: () => nav(`/builds/${build.id}`),
})) || [],
Repos:
repos
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"repo".includes(term)
)
)
.map((repo) => ({
key: "repo-" + repo.name,
label: repo.name,
icon: <RepoComponents.Icon id={repo.id} />,
onSelect: () => nav(`/repos/${repo.id}`),
})) || [],
Procedures:
procedures
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"procedure".includes(term)
)
)
.map((procedure) => ({
key: "procedure-" + procedure.name,
label: procedure.name,
icon: <ProcedureComponents.Icon id={procedure.id} />,
onSelect: () => nav(`/procedures/${procedure.id}`),
})) || [],
Builders:
builders
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"builder".includes(term)
)
)
.map((builder) => ({
key: "builder-" + builder.name,
label: builder.name,
icon: <BuilderComponents.Icon id={builder.id} />,
onSelect: () => nav(`/builders/${builder.id}`),
})) || [],
Alerters:
alerters
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"alerter".includes(term)
)
)
.map((alerter) => ({
key: "alerter-" + alerter.name,
label: alerter.name,
icon: <AlerterComponents.Icon id={alerter.id} />,
onSelect: () => nav(`/alerters/${alerter.id}`),
})) || [],
Templates:
templates
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"template".includes(term)
)
)
.map((template) => ({
key: "template-" + template.name,
label: template.name,
icon: <ServerTemplateComponents.Icon id={template.id} />,
onSelect: () => nav(`/server-templates/${template.id}`),
})) || [],
Syncs:
syncs
?.filter(
(item) =>
searchTerms.length === 0 ||
searchTerms.every(
(term) =>
item.name.toLowerCase().includes(term) ||
"sync".includes(term)
)
)
.map((sync) => ({
key: "sync-" + sync.name,
label: sync.name,
icon: <ResourceSyncComponents.Icon id={sync.id} />,
onSelect: () => nav(`/resource-syncs/${sync.id}`),
})) || [],
.map((server) => ({
key: type + "-" + server.name,
label: server.name,
icon: <Components.Icon id={server.id} />,
onSelect: () =>
nav(`/${usableResourcePath(_type)}/${server.id}`),
})) || [],
];
})
),
}),
[
user,
servers,
deployments,
stacks,
builds,
repos,
procedures,
alerters,
builders,
templates,
syncs,
search,
]
[user, resources, search]
);
};

View File

@@ -22,6 +22,7 @@ export const BuilderConfig = ({ id }: { id: string }) => {
const config = useRead("GetBuilder", { builder: id }).data?.config;
if (config?.type === "Aws") return <AwsBuilderConfig id={id} />;
if (config?.type === "Server") return <ServerBuilderConfig id={id} />;
if (config?.type === "Url") return <UrlBuilderConfig id={id} />;
};
const AwsBuilderConfig = ({ id }: { id: string }) => {
@@ -301,6 +302,51 @@ const ServerBuilderConfig = ({ id }: { id: string }) => {
);
};
const UrlBuilderConfig = ({ id }: { id: string }) => {
const perms = useRead("GetPermissionLevel", {
target: { type: "Builder", id },
}).data;
const config = useRead("GetBuilder", { builder: id }).data?.config;
const [update, set] = useLocalStorage<Partial<Types.UrlBuilderConfig>>(
`url-builder-${id}-update-v1`,
{}
);
const { mutateAsync } = useWrite("UpdateBuilder");
if (!config) return null;
const disabled = perms !== Types.PermissionLevel.Write;
return (
<Config
disabled={disabled}
config={config.params as Types.UrlBuilderConfig}
update={update}
set={set}
onSave={async () => {
await mutateAsync({ id, config: { type: "Url", params: update } });
}}
components={{
"": [
{
label: "General",
labelHidden: true,
components: {
address: {
description: "The address of the Periphery agent",
placeholder: "https://periphery:8120",
},
passkey: {
description: "Use a custom passkey to authenticate with Periphery",
placeholder: "Custom passkey",
},
},
},
],
}}
/>
);
};
const ProvidersConfig = (params: {
type: "git" | "docker";
providers: Types.GitProvider[] | Types.DockerRegistry[];

View File

@@ -104,6 +104,7 @@ export const BuilderComponents: RequiredResourceComponents = {
<SelectGroup>
<SelectItem value="Aws">Aws</SelectItem>
<SelectItem value="Server">Server</SelectItem>
<SelectItem value="Url">Url</SelectItem>
</SelectGroup>
</SelectContent>
</Select>

View File

@@ -2,7 +2,7 @@ import {
ActionWithDialog,
ConfirmButton,
CopyButton,
TextUpdateMenu2,
TextUpdateMenuSimple,
} from "@components/util";
import {
useInvalidate,
@@ -70,7 +70,7 @@ export const ResourceDescription = ({
});
return (
<TextUpdateMenu2
<TextUpdateMenuSimple
title="Update Description"
placeholder="Set Description"
value={resource?.description}

View File

@@ -13,7 +13,13 @@ import {
useWrite,
} from "@lib/hooks";
import { Types } from "komodo_client";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@ui/card";
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from "@ui/card";
import { Input } from "@ui/input";
import { useEffect, useState } from "react";
import { CopyWebhook, ResourceSelector } from "../common";
@@ -54,6 +60,7 @@ import { DotsHorizontalIcon } from "@radix-ui/react-icons";
import { filterBySplit } from "@lib/utils";
import { useToast } from "@ui/use-toast";
import { fmt_upper_camelcase } from "@lib/formatting";
import { TextUpdateMenuMonaco } from "@components/util";
export const ProcedureConfig = ({ id }: { id: string }) => {
const procedure = useRead("GetProcedure", { procedure: id }).data;
@@ -404,6 +411,7 @@ const Stage = ({
columns={[
{
header: "Execution",
size: 250,
cell: ({ row: { original, index } }) => (
<ExecutionTypeSelector
disabled={disabled}
@@ -432,6 +440,7 @@ const Stage = ({
},
{
header: "Target",
size: 250,
cell: ({
row: {
original: {
@@ -464,6 +473,7 @@ const Stage = ({
},
{
header: "Add / Remove",
size: 150,
cell: ({ row: { index } }) => (
<div className="flex items-center gap-2">
<Button
@@ -501,6 +511,7 @@ const Stage = ({
},
{
header: "Enabled",
size: 100,
cell: ({
row: {
original: { enabled },
@@ -652,6 +663,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchRunProcedure: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match procedures"
value={
params.pattern ||
"# Match procedures by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
// Action
RunAction: {
params: { action: "" },
@@ -664,6 +691,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchRunAction: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match actions"
value={
params.pattern ||
"# Match actions by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
// Build
RunBuild: {
params: { build: "" },
@@ -676,6 +719,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchRunBuild: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match builds"
value={
params.pattern ||
"# Match builds by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
CancelBuild: {
params: { build: "" },
Component: ({ params, setParams, disabled }) => (
@@ -701,6 +760,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
);
},
},
BatchDeploy: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match deployments"
value={
params.pattern ||
"# Match deployments by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
StartDeployment: {
params: { deployment: "" },
Component: ({ params, setParams, disabled }) => (
@@ -767,6 +842,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchDestroyDeployment: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match deployments"
value={
params.pattern ||
"# Match deployments by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
// Stack
DeployStack: {
params: { stack: "" },
@@ -779,6 +870,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchDeployStack: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match stacks"
value={
params.pattern ||
"# Match stacks by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
DeployStackIfChanged: {
params: { stack: "" },
Component: ({ params, setParams, disabled }) => (
@@ -790,6 +897,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchDeployStackIfChanged: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match stacks"
value={
params.pattern ||
"# Match stacks by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
StartStack: {
params: { stack: "" },
Component: ({ params, setParams, disabled }) => (
@@ -856,6 +979,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchDestroyStack: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match stacks"
value={
params.pattern ||
"# Match stacks by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
// Repo
CloneRepo: {
params: { repo: "" },
@@ -868,6 +1007,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchCloneRepo: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match repos"
value={
params.pattern ||
"# Match repos by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
PullRepo: {
params: { repo: "" },
Component: ({ params, setParams, disabled }) => (
@@ -879,6 +1034,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchPullRepo: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match repos"
value={
params.pattern ||
"# Match repos by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
BuildRepo: {
params: { repo: "" },
Component: ({ params, setParams, disabled }) => (
@@ -890,6 +1061,22 @@ const TARGET_COMPONENTS: ExecutionConfigs = {
/>
),
},
BatchBuildRepo: {
params: { pattern: "" },
Component: ({ params, setParams, disabled }) => (
<TextUpdateMenuMonaco
title="Match repos"
value={
params.pattern ||
"# Match repos by name, id, wildcard, or \\regex\\.\n"
}
onUpdate={(pattern) => setParams({ pattern })}
disabled={disabled}
language="string_list"
fullWidth
/>
),
},
CancelRepoBuild: {
params: { repo: "" },
Component: ({ params, setParams, disabled }) => (

View File

@@ -1,14 +1,13 @@
import { DockerContainersSection } from "@components/util";
import { useRead } from "@lib/hooks";
import { ReactNode } from "react";
export const Containers = ({
id,
show,
setShow,
titleOther
}: {
id: string;
show: boolean;
setShow: (show: boolean) => void;
titleOther: ReactNode
}) => {
const containers =
useRead("ListDockerContainers", { server: id }, { refetchInterval: 10_000 })
@@ -17,8 +16,7 @@ export const Containers = ({
<DockerContainersSection
server_id={id}
containers={containers}
show={show}
setShow={setShow}
titleOther={titleOther}
pruneButton
/>
);

View File

@@ -1,20 +1,18 @@
import { Section } from "@components/layouts";
import { DockerResourceLink, ShowHideButton } from "@components/util";
import { DockerResourceLink } from "@components/util";
import { format_size_bytes } from "@lib/formatting";
import { useRead } from "@lib/hooks";
import { Badge } from "@ui/badge";
import { DataTable, SortableHeader } from "@ui/data-table";
import { HardDrive } from "lucide-react";
import { ReactNode } from "react";
import { Prune } from "../actions";
export const Images = ({
id,
show,
setShow,
titleOther,
}: {
id: string;
show: boolean;
setShow: (show: boolean) => void;
titleOther: ReactNode;
}) => {
const images =
useRead("ListDockerImages", { server: id }, { refetchInterval: 10_000 })
@@ -23,62 +21,52 @@ export const Images = ({
const allInUse = images.every((image) => image.in_use);
return (
<div className={show ? "mb-8" : undefined}>
<Section
title="Images"
icon={<HardDrive className="w-4 h-4" />}
actions={
<div className="flex items-center gap-2">
{!allInUse && <Prune server_id={id} type="Images" />}
<ShowHideButton show={show} setShow={setShow} />
</div>
}
>
{show && (
<DataTable
tableKey="server-images"
data={images}
columns={[
{
accessorKey: "name",
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
cell: ({ row }) => (
<DockerResourceLink
type="image"
server_id={id}
name={row.original.name}
id={row.original.id}
extra={
!row.original.in_use && (
<Badge variant="destructive">Unused</Badge>
)
}
/>
),
size: 200,
},
{
accessorKey: "id",
header: ({ column }) => (
<SortableHeader column={column} title="Id" />
),
},
{
accessorKey: "size",
header: ({ column }) => (
<SortableHeader column={column} title="Size" />
),
cell: ({ row }) =>
row.original.size
? format_size_bytes(row.original.size)
: "Unknown",
},
]}
/>
)}
</Section>
</div>
<Section
titleOther={titleOther}
actions={!allInUse && <Prune server_id={id} type="Images" />}
>
<DataTable
tableKey="server-images"
data={images}
columns={[
{
accessorKey: "name",
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
cell: ({ row }) => (
<DockerResourceLink
type="image"
server_id={id}
name={row.original.name}
id={row.original.id}
extra={
!row.original.in_use && (
<Badge variant="destructive">Unused</Badge>
)
}
/>
),
size: 200,
},
{
accessorKey: "id",
header: ({ column }) => (
<SortableHeader column={column} title="Id" />
),
},
{
accessorKey: "size",
header: ({ column }) => (
<SortableHeader column={column} title="Size" />
),
cell: ({ row }) =>
row.original.size
? format_size_bytes(row.original.size)
: "Unknown",
},
]}
/>
</Section>
);
};

View File

@@ -7,7 +7,7 @@ import { useLocalStorage } from "@lib/hooks";
import { Images } from "./images";
import { Containers } from "./containers";
import { Volumes } from "./volumes";
import { Button } from "@ui/button";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@ui/tabs";
export const ServerInfo = ({
id,
@@ -17,17 +17,9 @@ export const ServerInfo = ({
titleOther: ReactNode;
}) => {
const state = useServer(id)?.info.state ?? Types.ServerState.NotOk;
const [show, setShow] = useLocalStorage<{
containers: boolean;
networks: boolean;
images: boolean;
volumes: boolean;
}>("server-info-show-config", {
containers: true,
networks: true,
images: true,
volumes: true,
});
const [show2, setShow2] = useLocalStorage<
"Containers" | "Networks" | "Volumes" | "Images"
>("server-info-show-config-v2", "Containers");
if ([Types.ServerState.NotOk, Types.ServerState.Disabled].includes(state)) {
return (
@@ -39,50 +31,39 @@ export const ServerInfo = ({
);
}
const anyOpen = !Object.values(show).every((val) => !val);
const tabsList = (
<TabsList className="justify-start w-fit">
<TabsTrigger value="Containers" className="w-[110px]">
Containers
</TabsTrigger>
<TabsTrigger value="Networks" className="w-[110px]">
Networks
</TabsTrigger>
<TabsTrigger value="Volumes" className="w-[110px]">
Volumes
</TabsTrigger>
<TabsTrigger value="Images" className="w-[110px]">
Images
</TabsTrigger>
</TabsList>
);
return (
<Section
titleOther={titleOther}
actions={
<Button
size="sm"
variant="outline"
onClick={() =>
setShow({
containers: !anyOpen,
networks: !anyOpen,
images: !anyOpen,
volumes: !anyOpen,
})
}
>
{anyOpen ? "Hide All" : "Show All"}
</Button>
}
>
<div className="flex flex-col gap-4">
<Containers
id={id}
show={show.containers}
setShow={(containers) => setShow({ ...show, containers })}
/>
<Networks
id={id}
show={show.networks}
setShow={(networks) => setShow({ ...show, networks })}
/>
<Volumes
id={id}
show={show.volumes}
setShow={(volumes) => setShow({ ...show, volumes })}
/>
<Images
id={id}
show={show.images}
setShow={(images) => setShow({ ...show, images })}
/>
</div>
<Section titleOther={titleOther}>
<Tabs value={show2} onValueChange={setShow2 as any}>
<TabsContent value="Containers">
<Containers id={id} titleOther={tabsList} />
</TabsContent>
<TabsContent value="Networks">
<Networks id={id} titleOther={tabsList} />
</TabsContent>
<TabsContent value="Volumes">
<Volumes id={id} titleOther={tabsList} />
</TabsContent>
<TabsContent value="Images">
<Images id={id} titleOther={tabsList} />
</TabsContent>
</Tabs>
</Section>
);
};

View File

@@ -1,19 +1,17 @@
import { Section } from "@components/layouts";
import { DockerResourceLink, ShowHideButton } from "@components/util";
import { DockerResourceLink } from "@components/util";
import { useRead } from "@lib/hooks";
import { Badge } from "@ui/badge";
import { DataTable, SortableHeader } from "@ui/data-table";
import { Network } from "lucide-react";
import { ReactNode } from "react";
import { Prune } from "../actions";
export const Networks = ({
id,
show,
setShow,
titleOther,
}: {
id: string;
show: boolean;
setShow: (show: boolean) => void;
titleOther: ReactNode;
}) => {
const networks =
useRead("ListDockerNetworks", { server: id }, { refetchInterval: 10_000 })
@@ -24,82 +22,72 @@ export const Networks = ({
!network.name
? true
: ["none", "host", "bridge"].includes(network.name)
? true
: network.in_use
? true
: network.in_use
);
return (
<div className={show ? "mb-8" : undefined}>
<Section
title="Networks"
icon={<Network className="w-4 h-4" />}
actions={
<div className="flex items-center gap-2">
{!allInUse && <Prune server_id={id} type="Networks" />}
<ShowHideButton show={show} setShow={setShow} />
</div>
}
>
{show && (
<DataTable
tableKey="server-networks"
data={networks}
columns={[
{
accessorKey: "name",
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
cell: ({ row }) => (
<div className="flex items-center gap-2">
<DockerResourceLink
type="network"
server_id={id}
name={row.original.name}
extra={
["none", "host", "bridge"].includes(
row.original.name ?? ""
) ? (
<Badge variant="outline">System</Badge>
) : (
!row.original.in_use && (
<Badge variant="destructive">Unused</Badge>
)
)
}
/>
</div>
),
size: 300,
},
{
accessorKey: "driver",
header: ({ column }) => (
<SortableHeader column={column} title="Driver" />
),
},
{
accessorKey: "scope",
header: ({ column }) => (
<SortableHeader column={column} title="Scope" />
),
},
{
accessorKey: "attachable",
header: ({ column }) => (
<SortableHeader column={column} title="Attachable" />
),
},
{
accessorKey: "ipam_driver",
header: ({ column }) => (
<SortableHeader column={column} title="IPAM Driver" />
),
},
]}
/>
)}
</Section>
</div>
<Section
titleOther={titleOther}
actions={!allInUse && <Prune server_id={id} type="Networks" />}
>
<DataTable
tableKey="server-networks"
data={networks}
columns={[
{
accessorKey: "name",
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
cell: ({ row }) => (
<div className="flex items-center gap-2">
<DockerResourceLink
type="network"
server_id={id}
name={row.original.name}
extra={
["none", "host", "bridge"].includes(
row.original.name ?? ""
) ? (
<Badge variant="outline">System</Badge>
) : (
!row.original.in_use && (
<Badge variant="destructive">Unused</Badge>
)
)
}
/>
</div>
),
size: 300,
},
{
accessorKey: "driver",
header: ({ column }) => (
<SortableHeader column={column} title="Driver" />
),
},
{
accessorKey: "scope",
header: ({ column }) => (
<SortableHeader column={column} title="Scope" />
),
},
{
accessorKey: "attachable",
header: ({ column }) => (
<SortableHeader column={column} title="Attachable" />
),
},
{
accessorKey: "ipam_driver",
header: ({ column }) => (
<SortableHeader column={column} title="IPAM Driver" />
),
},
]}
/>
</Section>
);
};

View File

@@ -1,19 +1,17 @@
import { Section } from "@components/layouts";
import { DockerResourceLink, ShowHideButton } from "@components/util";
import { DockerResourceLink } from "@components/util";
import { useRead } from "@lib/hooks";
import { Badge } from "@ui/badge";
import { DataTable, SortableHeader } from "@ui/data-table";
import { Database } from "lucide-react";
import { ReactNode } from "react";
import { Prune } from "../actions";
export const Volumes = ({
id,
show,
setShow,
titleOther,
}: {
id: string;
show: boolean;
setShow: (show: boolean) => void;
titleOther: ReactNode;
}) => {
const volumes =
useRead("ListDockerVolumes", { server: id }, { refetchInterval: 10_000 })
@@ -22,57 +20,47 @@ export const Volumes = ({
const allInUse = volumes.every((volume) => volume.in_use);
return (
<div className={show ? "mb-8" : undefined}>
<Section
title="Volumes"
icon={<Database className="w-4 h-4" />}
actions={
<div className="flex items-center gap-2">
{!allInUse && <Prune server_id={id} type="Volumes" />}
<ShowHideButton show={show} setShow={setShow} />
</div>
}
>
{show && (
<DataTable
tableKey="server-volumes"
data={volumes}
columns={[
{
accessorKey: "name",
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
cell: ({ row }) => (
<DockerResourceLink
type="volume"
server_id={id}
name={row.original.name}
extra={
!row.original.in_use && (
<Badge variant="destructive">Unused</Badge>
)
}
/>
),
size: 200,
},
{
accessorKey: "driver",
header: ({ column }) => (
<SortableHeader column={column} title="Driver" />
),
},
{
accessorKey: "scope",
header: ({ column }) => (
<SortableHeader column={column} title="Scope" />
),
},
]}
/>
)}
</Section>
</div>
<Section
titleOther={titleOther}
actions={!allInUse && <Prune server_id={id} type="Volumes" />}
>
<DataTable
tableKey="server-volumes"
data={volumes}
columns={[
{
accessorKey: "name",
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
cell: ({ row }) => (
<DockerResourceLink
type="volume"
server_id={id}
name={row.original.name}
extra={
!row.original.in_use && (
<Badge variant="destructive">Unused</Badge>
)
}
/>
),
size: 200,
},
{
accessorKey: "driver",
header: ({ column }) => (
<SortableHeader column={column} title="Driver" />
),
},
{
accessorKey: "scope",
header: ({ column }) => (
<SortableHeader column={column} title="Scope" />
),
},
]}
/>
</Section>
);
};

View File

@@ -8,11 +8,11 @@ import { cn } from "@lib/utils";
import { DataTable, SortableHeader } from "@ui/data-table";
import { useStack } from ".";
import { Types } from "komodo_client";
import { ReactNode } from "react";
import { Fragment, ReactNode } from "react";
import { Link } from "react-router-dom";
import { Button } from "@ui/button";
import { Layers2 } from "lucide-react";
import { StatusBadge } from "@components/util";
import { DockerResourceLink, StatusBadge } from "@components/util";
export const StackServices = ({
id,
@@ -21,7 +21,9 @@ export const StackServices = ({
id: string;
titleOther: ReactNode;
}) => {
const state = useStack(id)?.info.state ?? Types.StackState.Unknown;
const info = useStack(id)?.info;
const server_id = info?.server_id;
const state = info?.state ?? Types.StackState.Unknown;
const services = useRead(
"ListStackServices",
{ stack: id },
@@ -43,6 +45,7 @@ export const StackServices = ({
columns={[
{
accessorKey: "service",
size: 200,
header: ({ column }) => (
<SortableHeader column={column} title="Service" />
),
@@ -66,18 +69,10 @@ export const StackServices = ({
</Link>
);
},
// size: 200,
},
{
accessorKey: "container.image",
header: ({ column }) => (
<SortableHeader column={column} title="Image" />
),
cell: ({ row }) => <>{row.original.container?.image}</>,
// size: 200,
},
{
accessorKey: "container.state",
size: 160,
header: ({ column }) => (
<SortableHeader column={column} title="State" />
),
@@ -90,7 +85,47 @@ export const StackServices = ({
/>
);
},
// size: 120,
},
{
accessorKey: "container.image",
size: 300,
header: ({ column }) => (
<SortableHeader column={column} title="Image" />
),
cell: ({ row }) =>
server_id && (
<DockerResourceLink
type="image"
server_id={server_id}
name={row.original.container?.image}
id={row.original.container?.image_id}
/>
),
// size: 200,
},
{
accessorKey: "container.networks.0",
size: 300,
header: ({ column }) => (
<SortableHeader column={column} title="Networks" />
),
cell: ({ row }) => (
<div className="flex items-center gap-2 flex-wrap">
{server_id &&
row.original.container?.networks.map((network, i) => (
<Fragment key={network}>
<DockerResourceLink
type="network"
server_id={server_id}
name={network}
/>
{i !== row.original.container!.networks.length - 1 && (
<div className="text-muted-foreground">|</div>
)}
</Fragment>
))}
</div>
),
},
]}
/>

View File

@@ -18,7 +18,7 @@ import { homeViewAtom } from "@main";
export const Sidebar = () => {
const [view, setView] = useAtom(homeViewAtom);
return (
<div className="fixed top-0 pt-24 w-64 border-r hidden lg:block pr-8 pb-8 h-screen overflow-y-auto">
<div className="fixed top-0 pt-[84px] w-[200px] border-r hidden lg:block pr-8 pb-8 h-screen overflow-y-auto">
<div className="flex flex-col gap-1">
<SidebarLink
label="Dashboard"

View File

@@ -1,5 +1,6 @@
import {
FocusEventHandler,
Fragment,
MouseEventHandler,
ReactNode,
forwardRef,
@@ -54,7 +55,7 @@ import { Section } from "./layouts";
import { DataTable, SortableHeader } from "@ui/data-table";
import { useRead, useUser } from "@lib/hooks";
import { Prune } from "./resources/server/actions";
import { MonacoEditor } from "./monaco";
import { MonacoEditor, MonacoLanguage } from "./monaco";
import { UsableResource } from "@types";
import { ResourceComponents } from "./resources";
@@ -351,7 +352,7 @@ export const CopyButton = ({
);
};
export const TextUpdateMenu = ({
export const TextUpdateMenuMonaco = ({
title,
titleRight,
value = "",
@@ -378,7 +379,7 @@ export const TextUpdateMenu = ({
open?: boolean;
setOpen?: (open: boolean) => void;
triggerHidden?: boolean;
language?: "toml" | "yaml" | "json" | "key_value";
language?: MonacoLanguage;
}) => {
const [_open, _setOpen] = useState(false);
const [__open, __setOpen] = [open ?? _open, setOpen ?? _setOpen];
@@ -406,7 +407,7 @@ export const TextUpdateMenu = ({
triggerClassName
)}
>
{value || placeholder}
{value.split("\n")[0] || placeholder}
</div>
</Card>
</DialogTrigger>
@@ -601,10 +602,10 @@ export const DOCKER_LINK_ICONS: {
!name
? "Warning"
: no_containers
? ["none", "host", "bridge"].includes(name)
? "None"
: "Critical"
: "Good"
? ["none", "host", "bridge"].includes(name)
? "None"
: "Critical"
: "Good"
)
)}
/>
@@ -677,7 +678,7 @@ export const DockerResourceLink = ({
<Icon server_id={server_id} name={type === "image" ? id : name} />
<div
title={name}
className="max-w-[200px] lg:max-w-[300px] overflow-hidden overflow-ellipsis"
className="max-w-[200px] lg:max-w-[250px] overflow-hidden overflow-ellipsis"
>
{name}
</div>
@@ -705,12 +706,14 @@ export const DockerContainersSection = ({
show = true,
setShow,
pruneButton,
titleOther,
}: {
server_id: string;
containers: Types.ListDockerContainersResponse;
show?: boolean;
setShow?: (show: boolean) => void;
pruneButton?: boolean;
titleOther?: ReactNode;
}) => {
const allRunning = useRead("ListDockerContainers", {
server: server_id,
@@ -720,8 +723,9 @@ export const DockerContainersSection = ({
return (
<div className={cn(setShow && show && "mb-8")}>
<Section
title="Containers"
icon={<Box className="w-4 h-4" />}
titleOther={titleOther}
title={!titleOther ? "Containers" : undefined}
icon={!titleOther ? <Box className="w-4 h-4" /> : undefined}
actions={
<div className="flex items-center gap-2">
{pruneButton && !allRunning && (
@@ -738,6 +742,7 @@ export const DockerContainersSection = ({
columns={[
{
accessorKey: "name",
size: 260,
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
@@ -748,10 +753,26 @@ export const DockerContainersSection = ({
name={row.original.name}
/>
),
size: 200,
},
{
accessorKey: "state",
size: 160,
header: ({ column }) => (
<SortableHeader column={column} title="State" />
),
cell: ({ row }) => {
const state = row.original?.state;
return (
<StatusBadge
text={state}
intent={container_state_intention(state)}
/>
);
},
},
{
accessorKey: "image",
size: 300,
header: ({ column }) => (
<SortableHeader column={column} title="Image" />
),
@@ -765,33 +786,28 @@ export const DockerContainersSection = ({
),
},
{
accessorKey: "network_mode",
accessorKey: "networks.0",
size: 300,
header: ({ column }) => (
<SortableHeader column={column} title="Network" />
<SortableHeader column={column} title="Networks" />
),
cell: ({ row }) => (
<DockerResourceLink
type="network"
server_id={server_id}
name={row.original.network_mode}
/>
<div className="flex items-center gap-x-2 flex-wrap">
{row.original.networks.map((network, i) => (
<Fragment key={network}>
<DockerResourceLink
type="network"
server_id={server_id}
name={network}
/>
{i !== row.original.networks.length - 1 && (
<div className="text-muted-foreground">|</div>
)}
</Fragment>
))}
</div>
),
},
{
accessorKey: "state",
header: ({ column }) => (
<SortableHeader column={column} title="State" />
),
cell: ({ row }) => {
const state = row.original?.state;
return (
<StatusBadge
text={state}
intent={container_state_intention(state)}
/>
);
},
},
]}
/>
)}
@@ -833,7 +849,7 @@ export const ResourcePageHeader = ({
);
};
export const TextUpdateMenu2 = ({
export const TextUpdateMenuSimple = ({
title,
titleRight,
value = "",
@@ -875,7 +891,7 @@ export const TextUpdateMenu2 = ({
triggerClassName
)}
>
{value || placeholder}
{value.split("\n")[0] || placeholder}
</div>
</DialogTrigger>
<DialogContent className="min-w-[50vw]">

View File

@@ -19,6 +19,7 @@ import { useToast } from "@ui/use-toast";
import { atom, useAtom } from "jotai";
import { useEffect, useState } from "react";
import { useParams } from "react-router-dom";
import { RESOURCE_TARGETS } from "./utils";
// ============== RESOLVER ==============
@@ -235,6 +236,68 @@ export const useResourceParamType = () => {
return (type[0].toUpperCase() + type.slice(1, -1)) as UsableResource;
};
type ResourceMap = {
[Resource in UsableResource]: Types.ResourceListItem<unknown>[] | undefined;
};
export const useAllResources = (): ResourceMap => {
return {
Server: useRead("ListServers", {}).data,
Stack: useRead("ListStacks", {}).data,
Deployment: useRead("ListDeployments", {}).data,
Build: useRead("ListBuilds", {}).data,
Repo: useRead("ListRepos", {}).data,
Procedure: useRead("ListProcedures", {}).data,
Action: useRead("ListActions", {}).data,
Builder: useRead("ListBuilders", {}).data,
Alerter: useRead("ListAlerters", {}).data,
ServerTemplate: useRead("ListServerTemplates", {}).data,
ResourceSync: useRead("ListResourceSyncs", {}).data,
};
};
// Returns true if Komodo has no resources.
export const useNoResources = () => {
const resources = useAllResources();
for (const target of RESOURCE_TARGETS) {
if (resources[target] && resources[target].length) {
return false;
}
}
return true;
};
/** returns function that takes a resource target and checks if it exists */
export const useCheckResourceExists = () => {
const resources = useAllResources();
return (target: Types.ResourceTarget) => {
return (
resources[target.type as UsableResource]?.some(
(resource) => resource.id === target.id
) || false
);
};
};
export const useFilterResources = <Info>(
resources?: Types.ResourceListItem<Info>[],
search?: string
) => {
const tags = useTagsFilter();
const searchSplit = search?.toLowerCase()?.split(" ") || [];
return (
resources?.filter(
(resource) =>
tags.every((tag: string) => resource.tags.includes(tag)) &&
(searchSplit.length > 0
? searchSplit.every((search) =>
resource.name.toLowerCase().includes(search)
)
: true)
) ?? []
);
};
export const usePushRecentlyViewed = ({ type, id }: Types.ResourceTarget) => {
const userInvalidate = useUserInvalidate();
@@ -285,60 +348,6 @@ export const useTagsFilter = () => {
return tags;
};
/** returns function that takes a resource target and checks if it exists */
export const useCheckResourceExists = () => {
const servers = useRead("ListServers", {}).data;
const deployments = useRead("ListDeployments", {}).data;
const builds = useRead("ListBuilds", {}).data;
const repos = useRead("ListRepos", {}).data;
const procedures = useRead("ListProcedures", {}).data;
const builders = useRead("ListBuilders", {}).data;
const alerters = useRead("ListAlerters", {}).data;
return (target: Types.ResourceTarget) => {
switch (target.type) {
case "Server":
return servers?.some((resource) => resource.id === target.id) || false;
case "Deployment":
return (
deployments?.some((resource) => resource.id === target.id) || false
);
case "Build":
return builds?.some((resource) => resource.id === target.id) || false;
case "Repo":
return repos?.some((resource) => resource.id === target.id) || false;
case "Procedure":
return (
procedures?.some((resource) => resource.id === target.id) || false
);
case "Builder":
return builders?.some((resource) => resource.id === target.id) || false;
case "Alerter":
return alerters?.some((resource) => resource.id === target.id) || false;
default:
return false;
}
};
};
export const useFilterResources = <Info>(
resources?: Types.ResourceListItem<Info>[],
search?: string
) => {
const tags = useTagsFilter();
const searchSplit = search?.toLowerCase()?.split(" ") || [];
return (
resources?.filter(
(resource) =>
tags.every((tag: string) => resource.tags.includes(tag)) &&
(searchSplit.length > 0
? searchSplit.every((search) =>
resource.name.toLowerCase().includes(search)
)
: true)
) ?? []
);
};
export type LocalStorageSetter<T> = (state: T) => T;
export const useLocalStorage = <T>(
@@ -409,44 +418,6 @@ export const useCtrlKeyListener = (listenKey: string, onPress: () => void) => {
});
};
// Returns true if Komodo has no resources.
export const useNoResources = () => {
const servers =
useRead("ListServers", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const deployments =
useRead("ListDeployments", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const stacks =
useRead("ListStacks", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const builds =
useRead("ListBuilds", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const repos =
useRead("ListRepos", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const procedures =
useRead("ListProcedures", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const builders =
useRead("ListBuilders", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const alerters =
useRead("ListAlerters", {}, { refetchInterval: 5000 }).data?.length ?? 0;
const templates =
useRead("ListServerTemplates", {}, { refetchInterval: 5000 }).data
?.length ?? 0;
const syncs =
useRead("ListResourceSyncs", {}, { refetchInterval: 5000 }).data?.length ??
0;
return (
servers === 0 &&
deployments === 0 &&
stacks === 0 &&
builds === 0 &&
repos === 0 &&
procedures === 0 &&
builders === 0 &&
alerters === 0 &&
templates === 0 &&
syncs === 0
);
};
export type WebhookIntegration = "Github" | "Gitlab";
export type WebhookIntegrations = {
[key: string]: WebhookIntegration;

View File

@@ -32,6 +32,7 @@ import "./theme";
import "./yaml";
import "./toml";
import "./key_value";
import "./string_list";
import "./shell";
import "./dockerfile";
import "./rust";

View File

@@ -15,16 +15,22 @@ export async function init_monaco() {
)
)
);
await Promise.all(promises);
fetch(`/index.d.ts`)
.then((res) => res.text())
.then((dts) =>
monaco.languages.typescript.typescriptDefaults.addExtraLib(
dts,
`file:///index.d.ts`
promises.push(
Promise.all(
["index.d.ts", "deno.d.ts"].map((file) =>
fetch(`/${file}`)
.then((res) => res.text())
.then((dts) =>
monaco.languages.typescript.typescriptDefaults.addExtraLib(
dts,
`file:///${file}`
)
)
)
);
)
);
await Promise.all(promises);
monaco.languages.typescript.typescriptDefaults.setCompilerOptions({
module: monaco.languages.typescript.ModuleKind.ESNext,

View File

@@ -0,0 +1,55 @@
import * as monaco from "monaco-editor";
const string_list_conf: monaco.languages.LanguageConfiguration = {
comments: {
lineComment: "#",
},
autoClosingPairs: [
{ open: '"', close: '"' },
{ open: "'", close: "'" },
],
surroundingPairs: [
{ open: '"', close: '"' },
{ open: "'", close: "'" },
],
};
const string_list_language = <monaco.languages.IMonarchLanguage>{
defaultToken: "",
tokenPostfix: ".string_list",
tokenizer: {
root: [
// Comments
[/#.*$/, "comment"],
// Comma as a delimiter
[/,/, "comment"],
[/\*/, "keyword"],
[/\?/, "keyword"],
// Special syntax: text surrounded by \
// [/\\[^\\]*\\/, "keyword"],
[/\\/, { token: "keyword", next: "@regex" }],
// Main strings separated by spaces or newlines
[/[^\*\?,#\\\s]+/, ""],
// Whitespace
[/[ \t\r\n]+/, ""],
],
regex: [
// Regex tokens
[/\[[^\]]*\]/, ""], // Character classes like [abc]
[/[*+?\.]+/, "keyword"], // Quantifiers like *, +, ?
[/\\./, "string.regexp constant.character.escape"], // Escape sequences like \d, \w
[/[^\\]/, "string"], // Any other regex content
[/\\/, { token: "keyword", next: "@pop" }], // Closing backslash returns to root
],
},
};
// Register the custom language and configuration with Monaco
monaco.languages.register({ id: "string_list" });
monaco.languages.setLanguageConfiguration("string_list", string_list_conf);
monaco.languages.setMonarchTokensProvider("string_list", string_list_language);

View File

@@ -6,7 +6,7 @@ import { useRead } from "@lib/hooks";
import { DataTable, SortableHeader } from "@ui/data-table";
import { Input } from "@ui/input";
import { Box, Search } from "lucide-react";
import { useCallback, useMemo, useState } from "react";
import { Fragment, useCallback, useMemo, useState } from "react";
export const ContainersPage = () => {
const [search, setSearch] = useState("");
@@ -54,6 +54,7 @@ export const ContainersPage = () => {
columns={[
{
accessorKey: "name",
size: 260,
header: ({ column }) => (
<SortableHeader column={column} title="Name" />
),
@@ -64,10 +65,26 @@ export const ContainersPage = () => {
name={row.original.name}
/>
),
size: 200,
},
{
accessorKey: "state",
size: 160,
header: ({ column }) => (
<SortableHeader column={column} title="State" />
),
cell: ({ row }) => {
const state = row.original?.state;
return (
<StatusBadge
text={state}
intent={container_state_intention(state)}
/>
);
},
},
{
accessorKey: "server_id",
size: 200,
sortingFn: (a, b) => {
const sa = serverName(a.original.server_id!);
const sb = serverName(b.original.server_id!);
@@ -89,6 +106,7 @@ export const ContainersPage = () => {
},
{
accessorKey: "image",
size: 300,
header: ({ column }) => (
<SortableHeader column={column} title="Image" />
),
@@ -102,33 +120,51 @@ export const ContainersPage = () => {
),
},
{
accessorKey: "network_mode",
accessorKey: "networks.0",
size: 300,
header: ({ column }) => (
<SortableHeader column={column} title="Network" />
<SortableHeader column={column} title="Networks" />
),
cell: ({ row }) => (
<DockerResourceLink
type="network"
server_id={row.original.server_id!}
name={row.original.network_mode}
/>
<div className="flex items-center gap-x-2 flex-wrap">
{row.original.networks.map((network, i) => (
<Fragment key={network}>
<DockerResourceLink
type="network"
server_id={row.original.server_id!}
name={network}
/>
{i !== row.original.networks.length - 1 && (
<div className="text-muted-foreground">|</div>
)}
</Fragment>
))}
</div>
),
},
{
accessorKey: "state",
header: ({ column }) => (
<SortableHeader column={column} title="State" />
),
cell: ({ row }) => {
const state = row.original?.state;
return (
<StatusBadge
text={state}
intent={container_state_intention(state)}
/>
);
},
},
// {
// accessorKey: "volumes.0",
// minSize: 300,
// header: ({ column }) => (
// <SortableHeader column={column} title="Volumes" />
// ),
// cell: ({ row }) => (
// <div className="flex items-center gap-x-2 flex-wrap">
// {row.original.volumes.map((volume, i) => (
// <Fragment key={volume}>
// <DockerResourceLink
// type="volume"
// server_id={row.original.server_id!}
// name={volume}
// />
// {i !== row.original.volumes.length - 1 && (
// <div className="text-muted-foreground">|</div>
// )}
// </Fragment>
// ))}
// </div>
// ),
// },
]}
/>
</div>

View File

@@ -25,7 +25,6 @@ import { Link } from "react-router-dom";
export const Dashboard = () => {
const noResources = useNoResources();
const user = useUser().data!;
return (
<>
<ActiveResources />

View File

@@ -4,21 +4,31 @@ import { useServer } from "@components/resources/server";
import {
DOCKER_LINK_ICONS,
DockerLabelsSection,
DockerResourcePageName,
StatusBadge,
DockerResourceLink,
ResourcePageHeader,
ShowHideButton,
} from "@components/util";
import { useRead, useSetTitle } from "@lib/hooks";
import { Button } from "@ui/button";
import { DataTable } from "@ui/data-table";
import { ChevronLeft, Clapperboard, Info, Loader2 } from "lucide-react";
import { useNavigate, useParams } from "react-router-dom";
import {
ChevronLeft,
Clapperboard,
Info,
Loader2,
SearchCode,
} from "lucide-react";
import { Link, useParams } from "react-router-dom";
import { ContainerLogs } from "./log";
import { Actions } from "./actions";
import { has_minimum_permissions } from "@lib/utils";
import { Types } from "komodo_client";
import { ResourceUpdates } from "@components/updates/resource";
import { container_state_intention } from "@lib/color";
import { UsableResource } from "@types";
import { Fragment } from "react/jsx-runtime";
import { useEditPermissions } from "@pages/resource";
import { ResourceNotifications } from "@pages/resource-notifications";
import { MonacoEditor } from "@components/monaco";
import { useState } from "react";
export const ContainerPage = () => {
const { type, id, container } = useParams() as {
@@ -41,12 +51,10 @@ const ContainerPageInner = ({
id: string;
container: string;
}) => {
const [showInspect, setShowInspect] = useState(false);
const server = useServer(id);
useSetTitle(`${server?.name} | container | ${container_name}`);
const nav = useNavigate();
const perms = useRead("GetPermissionLevel", {
target: { type: "Server", id },
}).data;
const { canExecute } = useEditPermissions({ type: "Server", id });
const {
data: container,
isPending,
@@ -81,96 +89,231 @@ const ContainerPageInner = ({
);
}
const canExecute = has_minimum_permissions(
perms,
Types.PermissionLevel.Execute
);
const state = list_container?.state ?? Types.ContainerStateStatusEnum.Empty;
const status = list_container?.status;
const intention = container_state_intention(state);
return (
<div className="flex flex-col gap-16 mb-24">
{/* HEADER */}
<div className="flex flex-col gap-4">
{/* BACK */}
<div className="flex items-center justify-between mb-4">
<Button
className="gap-2"
variant="secondary"
onClick={() => nav("/servers/" + id)}
>
<ChevronLeft className="w-4" /> Back
<div>
<div className="w-full flex items-center justify-between mb-12">
<Link to={"/servers/" + id}>
<Button className="gap-2" variant="secondary">
<ChevronLeft className="w-4" />
Back
</Button>
<NewDeployment id={id} container={container_name} />
</div>
{/* TITLE */}
<div className="flex items-center gap-4">
<div className="mt-1">
<DOCKER_LINK_ICONS.container
server_id={id}
</Link>
<NewDeployment id={id} container={container_name} />
</div>
<div className="flex flex-col xl:flex-row gap-4">
{/** HEADER */}
<div className="w-full flex flex-col gap-4">
<div className="flex flex-col gap-2 border rounded-md">
{/* <Components.ResourcePageHeader id={id} /> */}
<ResourcePageHeader
intent={intention}
icon={
<DOCKER_LINK_ICONS.container
server_id={id}
name={container_name}
size={8}
/>
}
name={container_name}
size={8}
state={state}
status={list_container?.status}
/>
<div className="flex flex-col pb-2 px-4">
<div className="flex items-center gap-x-4 gap-y-1 flex-wrap text-muted-foreground">
<ResourceLink type="Server" id={id} />
<AttachedResource id={id} container={container_name} />
{list_container?.image && (
<>
|
<DockerResourceLink
type="image"
server_id={id}
name={list_container.image}
id={list_container.image_id}
muted
/>
</>
)}
{list_container?.networks.map((network) => (
<Fragment key={network}>
|
<DockerResourceLink
type="network"
server_id={id}
name={network}
muted
/>
</Fragment>
))}
{list_container?.volumes.map((volume) => (
<Fragment key={volume}>
|
<DockerResourceLink
type="volume"
server_id={id}
name={volume}
muted
/>
</Fragment>
))}
</div>
</div>
</div>
<DockerResourcePageName name={container_name} />
<div className="flex items-center gap-4 flex-wrap">
<StatusBadge
text={state}
intent={container_state_intention(state)}
/>
{status && (
<p className="text-sm text-muted-foreground">{status}</p>
)}
</div>
</div>
{/* INFO */}
<div className="flex flex-wrap gap-4 items-center text-muted-foreground">
<ResourceLink type="Server" id={id} />
<AttachedResource id={id} container={container_name} />
{/* <ResourceDescription type="Server" id={id} disabled={!canWrite} /> */}
</div>
{/** NOTIFICATIONS */}
<ResourceNotifications type="Server" id={id} />
</div>
{/* Actions */}
{canExecute && (
<Section title="Actions" icon={<Clapperboard className="w-4 h-4" />}>
<div className="flex gap-4 items-center flex-wrap">
{Object.entries(Actions).map(([key, Action]) => (
<Action key={key} id={id} container={container_name} />
))}
</div>
<div className="mt-8 flex flex-col gap-12">
{/* Actions */}
{canExecute && (
<Section title="Actions" icon={<Clapperboard className="w-4 h-4" />}>
<div className="flex gap-4 items-center flex-wrap">
{Object.entries(Actions).map(([key, Action]) => (
<Action key={key} id={id} container={container_name} />
))}
</div>
</Section>
)}
{/* Logs */}
<ContainerLogs id={id} container_name={container_name} />
{/* TOP LEVEL CONTAINER INFO */}
<Section title="Details" icon={<Info className="w-4 h-4" />}>
<DataTable
tableKey="container-info"
data={[container]}
columns={[
{
accessorKey: "Id",
header: "Id",
},
{
accessorKey: "Image",
header: "Image",
},
{
accessorKey: "Driver",
header: "Driver",
},
]}
/>
</Section>
)}
{/* Updates */}
<ResourceUpdates type="Server" id={id} />
<DockerLabelsSection labels={container.Config?.Labels} />
<ContainerLogs id={id} container_name={container_name} />
{/* TOP LEVEL CONTAINER INFO */}
<Section title="Details" icon={<Info className="w-4 h-4" />}>
<DataTable
tableKey="container-info"
data={[container]}
columns={[
{
accessorKey: "Image",
header: "Image",
},
{
accessorKey: "Driver",
header: "Driver",
},
]}
/>
</Section>
<DockerLabelsSection labels={container.Config?.Labels} />
<Section
title="Inspect"
icon={<SearchCode className="w-4 h-4" />}
titleRight={
<div className="pl-2">
<ShowHideButton show={showInspect} setShow={setShowInspect} />
</div>
}
>
{showInspect && (
<MonacoEditor
value={JSON.stringify(container, null, 2)}
language="json"
readOnly
/>
)}
</Section>
</div>
</div>
);
// return (
// <div className="flex flex-col gap-16 mb-24">
// {/* HEADER */}
// <div className="flex flex-col gap-4">
// {/* BACK */}
// <div className="flex items-center justify-between mb-4">
// <Button
// className="gap-2"
// variant="secondary"
// onClick={() => nav("/servers/" + id)}
// >
// <ChevronLeft className="w-4" /> Back
// </Button>
// <NewDeployment id={id} container={container_name} />
// </div>
// {/* TITLE */}
// <div className="flex items-center gap-4">
// <div className="mt-1">
// <DOCKER_LINK_ICONS.container
// server_id={id}
// name={container_name}
// size={8}
// />
// </div>
// <DockerResourcePageName name={container_name} />
// <div className="flex items-center gap-4 flex-wrap">
// <StatusBadge
// text={state}
// intent={container_state_intention(state)}
// />
// {status && (
// <p className="text-sm text-muted-foreground">{status}</p>
// )}
// </div>
// </div>
// {/* INFO */}
// <div className="flex flex-wrap gap-4 items-center text-muted-foreground">
// <ResourceLink type="Server" id={id} />
// <AttachedResource id={id} container={container_name} />
// </div>
// </div>
// {/* Actions */}
// {canExecute && (
// <Section title="Actions" icon={<Clapperboard className="w-4 h-4" />}>
// <div className="flex gap-4 items-center flex-wrap">
// {Object.entries(Actions).map(([key, Action]) => (
// <Action key={key} id={id} container={container_name} />
// ))}
// </div>
// </Section>
// )}
// {/* Updates */}
// <ResourceUpdates type="Server" id={id} />
// <ContainerLogs id={id} container_name={container_name} />
// {/* TOP LEVEL CONTAINER INFO */}
// <Section title="Details" icon={<Info className="w-4 h-4" />}>
// <DataTable
// tableKey="container-info"
// data={[container]}
// columns={[
// {
// accessorKey: "Id",
// header: "Id",
// },
// {
// accessorKey: "Image",
// header: "Image",
// },
// {
// accessorKey: "Driver",
// header: "Driver",
// },
// ]}
// />
// </Section>
// <DockerLabelsSection labels={container.Config?.Labels} />
// </div>
// );
};
const AttachedResource = ({
@@ -197,13 +340,10 @@ const AttachedResource = ({
return (
<>
|
<div className="flex gap-2">
<div>{attached.resource.type}:</div>
<ResourceLink
type={attached.resource.type as UsableResource}
id={attached.resource.id}
/>
</div>
<ResourceLink
type={attached.resource.type as UsableResource}
id={attached.resource.id}
/>
</>
);
};

View File

@@ -7,6 +7,7 @@ import {
DockerContainersSection,
DockerLabelsSection,
DockerResourcePageName,
ShowHideButton,
} from "@components/util";
import { fmt_date_with_minutes, format_size_bytes } from "@lib/formatting";
import { useExecute, useRead, useSetTitle } from "@lib/hooks";
@@ -15,8 +16,17 @@ import { Types } from "komodo_client";
import { Badge } from "@ui/badge";
import { Button } from "@ui/button";
import { DataTable } from "@ui/data-table";
import { ChevronLeft, HistoryIcon, Info, Loader2, Trash } from "lucide-react";
import {
ChevronLeft,
HistoryIcon,
Info,
Loader2,
SearchCode,
Trash,
} from "lucide-react";
import { useNavigate, useParams } from "react-router-dom";
import { useState } from "react";
import { MonacoEditor } from "@components/monaco";
export const ImagePage = () => {
const { type, id, image } = useParams() as {
@@ -37,6 +47,7 @@ const ImagePageInner = ({
id: string;
image: string;
}) => {
const [showInspect, setShowInspect] = useState(false);
const server = useServer(id);
useSetTitle(`${server?.name} | image | ${image_name}`);
const nav = useNavigate();
@@ -212,6 +223,24 @@ const ImagePageInner = ({
)}
<DockerLabelsSection labels={image?.Config?.Labels} />
<Section
title="Inspect"
icon={<SearchCode className="w-4 h-4" />}
titleRight={
<div className="pl-2">
<ShowHideButton show={showInspect} setShow={setShowInspect} />
</div>
}
>
{showInspect && (
<MonacoEditor
value={JSON.stringify(image, null, 2)}
language="json"
readOnly
/>
)}
</Section>
</div>
);
};

View File

@@ -8,6 +8,7 @@ import {
DockerOptions,
DockerResourceLink,
DockerResourcePageName,
ShowHideButton,
} from "@components/util";
import { useExecute, useRead, useSetTitle } from "@lib/hooks";
import { has_minimum_permissions } from "@lib/utils";
@@ -20,10 +21,13 @@ import {
ChevronLeft,
Info,
Loader2,
SearchCode,
Trash,
Waypoints,
} from "lucide-react";
import { useNavigate, useParams } from "react-router-dom";
import { useState } from "react";
import { MonacoEditor } from "@components/monaco";
export const NetworkPage = () => {
const { type, id, network } = useParams() as {
@@ -44,6 +48,7 @@ const NetworkPageInner = ({
id: string;
network: string;
}) => {
const [showInspect, setShowInspect] = useState(false);
const server = useServer(id);
useSetTitle(`${server?.name} | network | ${network_name}`);
const nav = useNavigate();
@@ -277,6 +282,24 @@ const NetworkPageInner = ({
)}
<DockerLabelsSection labels={network.Labels} />
<Section
title="Inspect"
icon={<SearchCode className="w-4 h-4" />}
titleRight={
<div className="pl-2">
<ShowHideButton show={showInspect} setShow={setShowInspect} />
</div>
}
>
{showInspect && (
<MonacoEditor
value={JSON.stringify(network, null, 2)}
language="json"
readOnly
/>
)}
</Section>
</div>
);
};

View File

@@ -8,6 +8,7 @@ import {
DockerLabelsSection,
DockerOptions,
DockerResourcePageName,
ShowHideButton,
} from "@components/util";
import { useExecute, useRead, useSetTitle } from "@lib/hooks";
import { has_minimum_permissions } from "@lib/utils";
@@ -15,8 +16,10 @@ import { Types } from "komodo_client";
import { Badge } from "@ui/badge";
import { Button } from "@ui/button";
import { DataTable } from "@ui/data-table";
import { ChevronLeft, Info, Loader2, Trash } from "lucide-react";
import { ChevronLeft, Info, Loader2, SearchCode, Trash } from "lucide-react";
import { useNavigate, useParams } from "react-router-dom";
import { useState } from "react";
import { MonacoEditor } from "@components/monaco";
export const VolumePage = () => {
const { type, id, volume } = useParams() as {
@@ -37,6 +40,7 @@ const VolumePageInner = ({
id: string;
volume: string;
}) => {
const [showInspect, setShowInspect] = useState(false);
const server = useServer(id);
useSetTitle(`${server?.name} | volume | ${volume_name}`);
const nav = useNavigate();
@@ -114,7 +118,7 @@ const VolumePageInner = ({
{/* TITLE */}
<div className="flex items-center gap-4">
<div className="mt-1">
<DOCKER_LINK_ICONS.container
<DOCKER_LINK_ICONS.volume
server_id={id}
name={volume_name}
size={8}
@@ -175,6 +179,24 @@ const VolumePageInner = ({
</Section>
<DockerLabelsSection labels={volume.Labels} />
<Section
title="Inspect"
icon={<SearchCode className="w-4 h-4" />}
titleRight={
<div className="pl-2">
<ShowHideButton show={showInspect} setShow={setShowInspect} />
</div>
}
>
{showInspect && (
<MonacoEditor
value={JSON.stringify(volume, null, 2)}
language="json"
readOnly
/>
)}
</Section>
</div>
);
};

View File

@@ -1,4 +1,4 @@
import { ConfirmButton, CopyButton, TextUpdateMenu } from "@components/util";
import { ConfirmButton, CopyButton, TextUpdateMenuMonaco } from "@components/util";
import {
useInvalidate,
useRead,
@@ -233,7 +233,7 @@ const Providers = ({ type }: { type: "GitProvider" | "DockerRegistry" }) => {
]}
/>
{updateMenuData && (
<TextUpdateMenu
<TextUpdateMenuMonaco
title={updateMenuData.title}
titleRight={updateMenuData.titleRight}
placeholder={updateMenuData.placeholder}

View File

@@ -1,4 +1,8 @@
import { ConfirmButton, CopyButton, TextUpdateMenu } from "@components/util";
import {
ConfirmButton,
CopyButton,
TextUpdateMenuMonaco,
} from "@components/util";
import {
useInvalidate,
useRead,
@@ -84,7 +88,7 @@ export const Variables = () => {
</div>
{updateMenuData && (
<TextUpdateMenu
<TextUpdateMenuMonaco
title={updateMenuData.title}
placeholder={updateMenuData.placeholder}
value={updateMenuData.value}
@@ -209,7 +213,7 @@ export const Variables = () => {
{/** SECRETS */}
{secrets.length ? (
<div className="flex items-center gap-2 text-muted-foreground">
<div className="flex items-center gap-2 flex-wrap text-muted-foreground">
<div>Core Secrets:</div>
{secrets.map((secret) => (
<Badge variant="secondary">{secret}</Badge>

View File

@@ -14,16 +14,17 @@ import {
stroke_color_class_by_intention,
} from "@lib/color";
import { useRead, useSetTitle } from "@lib/hooks";
import { cn, has_minimum_permissions } from "@lib/utils";
import { cn } from "@lib/utils";
import { Types } from "komodo_client";
import { ChevronLeft, Clapperboard, Layers2 } from "lucide-react";
import { useNavigate, useParams } from "react-router-dom";
import { Link, useParams } from "react-router-dom";
import { StackServiceLogs } from "./log";
import { ResourceUpdates } from "@components/updates/resource";
import { Button } from "@ui/button";
import { ExportButton } from "@components/export";
import { AddTags, ResourceTags } from "@components/tags";
import { DockerResourceLink, StatusBadge } from "@components/util";
import { DockerResourceLink, ResourcePageHeader } from "@components/util";
import { useEditPermissions } from "@pages/resource";
import { ResourceNotifications } from "@pages/resource-notifications";
import { Fragment } from "react/jsx-runtime";
type IdServiceComponent = React.FC<{ id: string; service?: string }>;
@@ -54,15 +55,10 @@ const StackServicePageInner = ({
}) => {
const stack = useStack(stack_id);
useSetTitle(`${stack?.name} | ${service}`);
const nav = useNavigate();
const perms = useRead("GetPermissionLevel", {
target: { type: "Stack", id: stack_id },
}).data;
const canExecute = has_minimum_permissions(
perms,
Types.PermissionLevel.Execute
);
const canWrite = has_minimum_permissions(perms, Types.PermissionLevel.Write);
const { canExecute, canWrite } = useEditPermissions({
type: "Stack",
id: stack_id,
});
const services = useRead("ListStackServices", { stack: stack_id }).data;
const container = services?.find((s) => s.service === service)?.container;
const state = container?.state ?? Types.ContainerStateStatusEnum.Empty;
@@ -70,98 +66,119 @@ const StackServicePageInner = ({
const stroke_color = stroke_color_class_by_intention(intention);
return (
<div className="flex flex-col gap-16">
<div className="flex flex-col gap-4">
<div className="flex items-center justify-between mb-4">
<Button
className="gap-2"
variant="secondary"
onClick={() => nav("/stacks/" + stack_id)}
>
<ChevronLeft className="w-4" /> Back
<div>
<div className="w-full flex items-center justify-between mb-12">
<Link to={"/stacks/" + stack_id}>
<Button className="gap-2" variant="secondary">
<ChevronLeft className="w-4" />
Back
</Button>
</Link>
<div className="flex items-center gap-4">
<ExportButton targets={[{ type: "Stack", id: stack_id }]} />
</div>
<div className="flex flex-col gap-4">
<div className="flex gap-4 justify-between flex-wrap">
<div className="flex items-center gap-4">
<div className="mt-1">
<Layers2 className={cn("w-8 h-8", stroke_color)} />
</div>
<h1 className="text-3xl">{service}</h1>
<div className="flex flex-wrap gap-4 items-center">
<StatusBadge text={state} intent={intention} />
{container?.status && <div>{container?.status}</div>}
</div>
<div className="flex flex-col xl:flex-row gap-4">
{/** HEADER */}
<div className="w-full flex flex-col gap-4">
<div className="flex flex-col gap-2 border rounded-md">
{/* <Components.ResourcePageHeader id={id} /> */}
<ResourcePageHeader
intent={intention}
icon={<Layers2 className={cn("w-8 h-8", stroke_color)} />}
name={service}
state={state}
status={container?.status}
/>
<div className="flex flex-col pb-2 px-4">
<div className="flex items-center gap-x-4 gap-y-0 flex-wrap text-muted-foreground">
<ResourceLink type="Stack" id={stack_id} />
{stack?.info.server_id && (
<>
|
<ResourceLink type="Server" id={stack.info.server_id} />
</>
)}
{stack?.info.server_id && container?.name && (
<>
|
<DockerResourceLink
type="container"
server_id={stack.info.server_id}
name={container.name}
muted
/>
</>
)}
{stack?.info.server_id && container?.image && (
<>
|
<DockerResourceLink
type="image"
server_id={stack.info.server_id}
name={container.image}
id={container.image_id}
muted
/>
</>
)}
{stack?.info.server_id &&
container?.networks.map((network) => (
<Fragment key={network}>
|
<DockerResourceLink
type="network"
server_id={stack.info.server_id}
name={network}
muted
/>
</Fragment>
))}
{stack?.info.server_id &&
container &&
container.volumes.map((volume) => (
<Fragment key={volume}>
|
<DockerResourceLink
type="volume"
server_id={stack.info.server_id}
name={volume}
muted
/>
</Fragment>
))}
</div>
</div>
<div className="flex items-center gap-2">
<p className="text-sm text-muted-foreground">Description: </p>
<ResourceDescription
type="Stack"
id={stack_id}
disabled={!canWrite}
/>
</div>
</div>
<div className="flex gap-4 justify-between flex-wrap">
<div className="flex flex-wrap gap-4 items-center text-muted-foreground">
<ResourceLink type="Stack" id={stack_id} />
{stack?.info.server_id && (
<>
|
<ResourceLink type="Server" id={stack.info.server_id} />
</>
)}
{stack?.info.server_id && container && container.name && (
<>
|
<DockerResourceLink
type="container"
server_id={stack?.info.server_id}
name={container.name}
muted
/>
</>
)}
</div>
<div className="flex items-center gap-2 h-7 lg:justify-self-end">
<p className="text-sm text-muted-foreground">Tags:</p>
<ResourceTags
target={{ id: stack_id, type: "Stack" }}
className="text-sm"
disabled={!canWrite}
click_to_delete
/>
{canWrite && <AddTags target={{ id: stack_id, type: "Stack" }} />}
</div>
</div>
<ResourceDescription
type="Stack"
id={stack_id}
disabled={!canWrite}
/>
</div>
{/** NOTIFICATIONS */}
<ResourceNotifications type="Stack" id={stack_id} />
</div>
{/* Actions */}
{canExecute && (
<Section
title="Actions (Service)"
icon={<Clapperboard className="w-4 h-4" />}
>
<div className="flex gap-4 items-center flex-wrap">
{Object.entries(Actions).map(([key, Action]) => (
<Action key={key} id={stack_id} service={service} />
))}
</div>
</Section>
)}
<div className="mt-8 flex flex-col gap-12">
{/* Actions */}
{canExecute && (
<Section
title="Actions (Service)"
icon={<Clapperboard className="w-4 h-4" />}
>
<div className="flex gap-4 items-center flex-wrap">
{Object.entries(Actions).map(([key, Action]) => (
<Action key={key} id={stack_id} service={service} />
))}
</div>
</Section>
)}
{/* Updates */}
<ResourceUpdates type="Stack" id={stack_id} />
{/* Logs */}
<div className="pt-4">
<StackServiceLogs id={stack_id} service={service} />
{/* Logs */}
<div className="pt-4">
<StackServiceLogs id={stack_id} service={service} />
</div>
</div>
</div>
);

View File

@@ -1,14 +1,22 @@
use std::{collections::HashMap, path::Path};
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::{Arc, OnceLock},
};
use anyhow::Context;
use command::run_komodo_command;
use formatting::format_serror;
use komodo_client::entities::{
update::Log, CloneArgs, EnvironmentVar,
komodo_timestamp, update::Log, CloneArgs, EnvironmentVar,
};
use tokio::sync::Mutex;
use crate::{get_commit_hash_log, GitRes};
/// Wait this long after a pull to allow another pull through
const PULL_TIMEOUT: i64 = 5_000;
/// This will pull in a way that handles edge cases
/// from possible state of the repo. For example, the user
/// can change branch after clone, or even the remote.
@@ -38,187 +46,266 @@ where
{
let args: CloneArgs = clone_args.into();
let path = args.path(repo_dir);
let repo_url = args.remote_url(access_token.as_deref())?;
// Set remote url
let mut set_remote = run_komodo_command(
"set git remote",
path.as_ref(),
format!("git remote set-url origin {repo_url}"),
false,
)
.await;
// Acquire the path lock
let lock = pull_cache().get_lock(path.clone()).await;
if !set_remote.success {
if let Some(token) = access_token {
set_remote.command =
set_remote.command.replace(&token, "<TOKEN>");
set_remote.stdout =
set_remote.stdout.replace(&token, "<TOKEN>");
set_remote.stderr =
set_remote.stderr.replace(&token, "<TOKEN>");
}
return Ok(GitRes {
logs: vec![set_remote],
hash: None,
message: None,
env_file_path: None,
});
// Lock the path lock, prevents simultaneous pulls by
// ensuring simultaneous pulls will wait for first to finish
// and checking cached results.
let mut locked = lock.lock().await;
// Early return from cache if lasted pulled with PULL_TIMEOUT
if locked.last_pulled + PULL_TIMEOUT > komodo_timestamp() {
return clone_entry_res(&locked.res);
}
let checkout = run_komodo_command(
"checkout branch",
path.as_ref(),
format!("git checkout -f {}", args.branch),
false,
)
.await;
let res = async {
let repo_url = args.remote_url(access_token.as_deref())?;
if !checkout.success {
return Ok(GitRes {
logs: vec![checkout],
hash: None,
message: None,
env_file_path: None,
});
}
let pull_log = run_komodo_command(
"git pull",
path.as_ref(),
format!("git pull --rebase --force origin {}", args.branch),
false,
)
.await;
let mut logs = vec![pull_log];
if !logs[0].success {
return Ok(GitRes {
logs,
hash: None,
message: None,
env_file_path: None,
});
}
if let Some(commit) = args.commit {
let reset_log = run_komodo_command(
"set commit",
// Set remote url
let mut set_remote = run_komodo_command(
"set git remote",
path.as_ref(),
format!("git reset --hard {commit}"),
format!("git remote set-url origin {repo_url}"),
false,
)
.await;
logs.push(reset_log);
}
let (hash, message) = match get_commit_hash_log(&path).await {
Ok((log, hash, message)) => {
logs.push(log);
(Some(hash), Some(message))
if !set_remote.success {
if let Some(token) = access_token {
set_remote.command =
set_remote.command.replace(&token, "<TOKEN>");
set_remote.stdout =
set_remote.stdout.replace(&token, "<TOKEN>");
set_remote.stderr =
set_remote.stderr.replace(&token, "<TOKEN>");
}
return Ok(GitRes {
logs: vec![set_remote],
hash: None,
message: None,
env_file_path: None,
});
}
Err(e) => {
logs.push(Log::simple(
"latest commit",
format_serror(
&e.context("failed to get latest commit").into(),
),
));
(None, None)
}
};
let Ok(env_file_path) = crate::environment::write_file(
environment,
env_file_path,
secrets,
&path,
&mut logs,
)
.await
else {
return Ok(GitRes {
let checkout = run_komodo_command(
"checkout branch",
path.as_ref(),
format!("git checkout -f {}", args.branch),
false,
)
.await;
if !checkout.success {
return Ok(GitRes {
logs: vec![checkout],
hash: None,
message: None,
env_file_path: None,
});
}
let pull_log = run_komodo_command(
"git pull",
path.as_ref(),
format!("git pull --rebase --force origin {}", args.branch),
false,
)
.await;
let mut logs = vec![pull_log];
if !logs[0].success {
return Ok(GitRes {
logs,
hash: None,
message: None,
env_file_path: None,
});
}
if let Some(commit) = args.commit {
let reset_log = run_komodo_command(
"set commit",
path.as_ref(),
format!("git reset --hard {commit}"),
false,
)
.await;
logs.push(reset_log);
}
let (hash, message) = match get_commit_hash_log(&path).await {
Ok((log, hash, message)) => {
logs.push(log);
(Some(hash), Some(message))
}
Err(e) => {
logs.push(Log::simple(
"latest commit",
format_serror(
&e.context("failed to get latest commit").into(),
),
));
(None, None)
}
};
let Ok(env_file_path) = crate::environment::write_file(
environment,
env_file_path,
secrets,
&path,
&mut logs,
)
.await
else {
return Ok(GitRes {
logs,
hash,
message,
env_file_path: None,
});
};
if let Some(command) = args.on_pull {
if !command.command.is_empty() {
let on_pull_path = path.join(&command.path);
if let Some(secrets) = secrets {
let (full_command, mut replacers) =
match svi::interpolate_variables(
&command.command,
secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into on_pull command",
) {
Ok(res) => res,
Err(e) => {
logs.push(Log::error(
"interpolate secrets - on_pull",
format_serror(&e.into()),
));
return Ok(GitRes {
logs,
hash,
message,
env_file_path: None,
});
}
};
replacers.extend(core_replacers.to_owned());
let mut on_pull_log = run_komodo_command(
"on pull",
on_pull_path.as_ref(),
&full_command,
true,
)
.await;
on_pull_log.command =
svi::replace_in_string(&on_pull_log.command, &replacers);
on_pull_log.stdout =
svi::replace_in_string(&on_pull_log.stdout, &replacers);
on_pull_log.stderr =
svi::replace_in_string(&on_pull_log.stderr, &replacers);
tracing::debug!(
"run repo on_pull command | command: {} | cwd: {:?}",
on_pull_log.command,
on_pull_path
);
logs.push(on_pull_log);
} else {
let on_pull_log = run_komodo_command(
"on pull",
on_pull_path.as_ref(),
&command.command,
true,
)
.await;
tracing::debug!(
"run repo on_pull command | command: {} | cwd: {:?}",
command.command,
on_pull_path
);
logs.push(on_pull_log);
}
}
}
anyhow::Ok(GitRes {
logs,
hash,
message,
env_file_path: None,
});
};
env_file_path,
})
}
.await;
if let Some(command) = args.on_pull {
if !command.command.is_empty() {
let on_pull_path = path.join(&command.path);
if let Some(secrets) = secrets {
let (full_command, mut replacers) =
match svi::interpolate_variables(
&command.command,
secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into on_pull command",
) {
Ok(res) => res,
Err(e) => {
logs.push(Log::error(
"interpolate secrets - on_pull",
format_serror(&e.into()),
));
return Ok(GitRes {
logs,
hash,
message,
env_file_path: None,
});
}
};
replacers.extend(core_replacers.to_owned());
let mut on_pull_log = run_komodo_command(
"on pull",
on_pull_path.as_ref(),
&full_command,
true,
)
.await;
// Set the cache with results
locked.last_pulled = komodo_timestamp();
locked.res = clone_entry_res(&res);
on_pull_log.command =
svi::replace_in_string(&on_pull_log.command, &replacers);
on_pull_log.stdout =
svi::replace_in_string(&on_pull_log.stdout, &replacers);
on_pull_log.stderr =
svi::replace_in_string(&on_pull_log.stderr, &replacers);
res
}
tracing::debug!(
"run repo on_pull command | command: {} | cwd: {:?}",
on_pull_log.command,
on_pull_path
);
fn pull_cache() -> &'static PullCache {
static LAST_PULLED_MAP: OnceLock<PullCache> = OnceLock::new();
LAST_PULLED_MAP.get_or_init(|| Default::default())
}
logs.push(on_pull_log);
} else {
let on_pull_log = run_komodo_command(
"on pull",
on_pull_path.as_ref(),
&command.command,
true,
)
.await;
tracing::debug!(
"run repo on_pull command | command: {} | cwd: {:?}",
command.command,
on_pull_path
);
logs.push(on_pull_log);
}
struct PullCacheEntry {
last_pulled: i64,
res: anyhow::Result<GitRes>,
}
fn clone_entry_res(
res: &anyhow::Result<GitRes>,
) -> anyhow::Result<GitRes> {
match res {
Ok(res) => Ok(res.clone()),
Err(e) => Err(clone_anyhow_error(e)),
}
}
impl Default for PullCacheEntry {
fn default() -> Self {
PullCacheEntry {
last_pulled: 0,
res: Ok(GitRes::default()),
}
}
Ok(GitRes {
logs,
hash,
message,
env_file_path,
})
}
/// Prevents simulataneous pulls on the same repo,
/// as well as prevents redudant pulls within [PULL_TIMEOUT] milliseconds.
#[derive(Default)]
struct PullCache(Mutex<HashMap<PathBuf, Arc<Mutex<PullCacheEntry>>>>);
impl PullCache {
async fn get_lock(
&self,
path: PathBuf,
) -> Arc<Mutex<PullCacheEntry>> {
let mut lock = self.0.lock().await;
lock.entry(path).or_default().clone()
}
}
fn clone_anyhow_error(e: &anyhow::Error) -> anyhow::Error {
let mut reasons =
e.chain().map(|e| e.to_string()).collect::<Vec<_>>();
// Always guaranteed to be at least one reason
// Need to start the chain with the last reason
let mut e = anyhow::Error::msg(reasons.pop().unwrap());
// Need to reverse reason application from lowest context to highest context.
for reason in reasons.into_iter().rev() {
e = e.context(reason)
}
e
}