Mirror of https://github.com/moghtech/komodo.git (synced 2025-12-05 19:17:36 -06:00)
1.18.2 (#591)
* feat: add maintenance window management to suppress alerts during planned activities (#550)
  * feat: add scheduled maintenance windows to server configuration
    - Add maintenance window configuration to server entities
    - Implement maintenance window UI components with data table layout
    - Add maintenance tab to server interface
    - Suppress alerts during maintenance windows
  * chore: enhance maintenance windows with types and permission improvements
    - Add chrono dependency to Rust client core for time handling
    - Add comprehensive TypeScript types for maintenance windows (MaintenanceWindow, MaintenanceScheduleType, MaintenanceTime, DayOfWeek)
    - Improve maintenance config component to use the usePermissions hook for better permission handling
    - Update package dependencies
  * feat: restore alert buffer system to prevent noise
  * fix yarn fe
  * fix the merge with new alerting changes
  * move alert buffer handling out of the loop
  * nit
  * fix server version changes
  * remove unneeded buffer clear

  ---------

  Co-authored-by: mbecker20 <becker.maxh@gmail.com>

* set version 1.18.2
* failed OIDC provider init doesn't cause a panic, just an error log
* OIDC: use the userinfo endpoint to get the preferred username for the user
* add profile to scopes and account for username already taken
* search through server docker lists
* move maintenance stuff
* refactor maintenance schedules to have a more TOML-compatible structure
* daily schedule type uses a struct
* add timezone to core info response
* frontend can build with new maintenance types
* Action monaco: expose KomodoClient to init another client
* flatten out the nested enum
* update maintenance schedule types
* dev-3
* implement maintenance windows on alerters
* dev-4
* add IanaTimezone enum
* typeshare timezone enum
* maintenance modes almost done on servers AND alerters
* maintenance schedules working
* remove mention of migrator
* Procedure / Action schedule timezone selector
* improve timezone selector to display configured core TZ
* dev-5
* refetch core version
* add version to server list item info
* add periphery version in server table
* dev-6
* capitalize Unknown server status in cache
* handle unknown version case
* set server table sizes
* default resource_poll_interval 1-hr
* ensure parent folder exists before cloning
* document Build Attach permission
* git actions return absolute path
* stack linked repos
* resource toml: replace linked_repo id with name
* validate incoming linked repo
* add linked repo to stack list item info
* stack list item info: resolved linked repo information
* configure linked repo stack
* to repo links
* dev-7
* sync: replace linked repo with name for execute compare
* obscure provider tokens in table view
* clean up stack write w/ refactor
* Resource Sync / Build start support Repo attach
* add stack clone path config
* Builds + syncs can link to repos
* dev-9
* update ts
* fix linked repo not included in resource sync list item info
* add linked repo UI for builds / syncs
* fix commit linked repo sync
* include linked repo syncs
* correct Sync / Build config mode
* dev-12: fix resource sync inclusion w/ linked_repo
* remove unneeded sync commit todo!()
* fix other config.repo.is_empty issues
* replace ids in all to-toml exports
* Ensure git pull before commit for linear history, add to update logs
* fix fe for linked repo cases
* consolidate linked repo config component
* fix resource sync commit behavior
* dev-17
* Build uses Pull or Clone api to set up build source
* capitalize Clone Repo stage
* mount PullOrCloneRepo
* dev-19
* Expand supported container names and avoid unnecessary name formatting
* dev-20
* add periphery /terminal/execute/container api
* periphery client execute_container_exec method
* implement execute container, deployment, stack exec
* gen types
* execute container exec method
* clean up client / fix fe
* enumerate exec ts methods for each resource type
* fix and gen ts client
* fix FE use connect_exec
* add url log when terminal ws fails to connect
* ts client server: allow terminal.js
* FE preload terminal.js / .d.ts
* dev-23: fix stack terminal failing to connect when not explicitly setting container name
* update docs on attach perms
* 1.18.2

---------

Co-authored-by: Samuel Cardoso <R3D2@users.noreply.github.com>
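The headline feature here is maintenance-window alert suppression: when an alert fires while one of an alerter's configured maintenance windows is active, Komodo drops it instead of dispatching (see the is_in_maintenance guard added to send_alert_to_alerter in the alerter diff below). A minimal self-contained sketch of that idea follows. It uses a simplified, hypothetical WindowDaily schedule type and an in_maintenance helper rather than Komodo's actual MaintenanceWindow / MaintenanceScheduleType schema, and only covers the daily-UTC case.

  // Hypothetical, simplified stand-in for Komodo's MaintenanceWindow:
  // a daily window defined in minutes after midnight UTC.
  struct WindowDaily {
    start_minute_utc: u32,
    duration_minutes: u32,
  }

  // Returns true when the unix-millisecond timestamp falls inside any window.
  fn in_maintenance(windows: &[WindowDaily], unix_ms: i64) -> bool {
    // The unix epoch was midnight UTC, so this is the minute of the current UTC day.
    let minute_of_day = ((unix_ms / 60_000) % (24 * 60)) as u32;
    windows.iter().any(|w| {
      let end = w.start_minute_utc + w.duration_minutes;
      if end <= 24 * 60 {
        (w.start_minute_utc..end).contains(&minute_of_day)
      } else {
        // Window wraps past midnight.
        minute_of_day >= w.start_minute_utc || minute_of_day < end % (24 * 60)
      }
    })
  }

  fn main() {
    // A 02:00-03:00 UTC window suppresses an alert firing at 02:30 UTC.
    let windows = [WindowDaily { start_minute_utc: 120, duration_minutes: 60 }];
    let at_02_30_utc_ms: i64 = (2 * 60 + 30) * 60_000;
    assert!(in_maintenance(&windows, at_02_30_utc_ms));
    println!("alert suppressed during maintenance window");
  }

Per the commit list above, the real implementation also supports weekly schedules (DayOfWeek), an IanaTimezone enum, and maintenance modes on both servers and alerters; none of that is modeled in this sketch.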
Cargo.lock (generated, 198 changed lines)

@@ -572,7 +572,7 @@ dependencies = [
"sha1",
"sync_wrapper",
"tokio",
-"tokio-tungstenite",
+"tokio-tungstenite 0.26.2",
"tower 0.5.2",
"tower-layer",
"tower-service",

@@ -890,7 +890,7 @@ dependencies = [

[[package]]
name = "cache"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"tokio",

@@ -1057,7 +1057,7 @@ dependencies = [

[[package]]
name = "command"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"formatting",

@@ -1541,7 +1541,7 @@ dependencies = [

[[package]]
name = "environment_file"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"thiserror 2.0.12",
]

@@ -1621,7 +1621,7 @@ dependencies = [

[[package]]
name = "formatting"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"serror",
]

@@ -1783,7 +1783,7 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"

[[package]]
name = "git"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"cache",

@@ -2181,22 +2181,28 @@ dependencies = [

[[package]]
name = "hyper-util"
-version = "0.1.11"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2"
+checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb"
dependencies = [
"base64 0.22.1",
"bytes",
"futures-channel",
"futures-core",
"futures-util",
"http 1.3.1",
"http-body 1.0.1",
"hyper 1.6.0",
"ipnet",
"libc",
"percent-encoding",
"pin-project-lite",
"socket2",
"system-configuration",
"tokio",
"tower-service",
"tracing",
"windows-registry",
]

[[package]]

@@ -2444,6 +2450,16 @@ version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"

+[[package]]
+name = "iri-string"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2"
+dependencies = [
+"memchr",
+"serde",
+]
+
[[package]]
name = "is_terminal_polyfill"
version = "1.70.1"

@@ -2520,7 +2536,7 @@ dependencies = [

[[package]]
name = "komodo_cli"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"clap",

@@ -2536,7 +2552,7 @@ dependencies = [

[[package]]
name = "komodo_client"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"async_timing_util",

@@ -2559,7 +2575,7 @@ dependencies = [
"strum 0.27.1",
"thiserror 2.0.12",
"tokio",
-"tokio-tungstenite",
+"tokio-tungstenite 0.27.0",
"tokio-util",
"tracing",
"typeshare",

@@ -2568,7 +2584,7 @@ dependencies = [

[[package]]
name = "komodo_core"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"arc-swap",

@@ -2623,7 +2639,7 @@ dependencies = [
"slack_client_rs",
"svi",
"tokio",
-"tokio-tungstenite",
+"tokio-tungstenite 0.27.0",
"tokio-util",
"toml",
"toml_pretty",

@@ -2637,7 +2653,7 @@ dependencies = [

[[package]]
name = "komodo_periphery"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"async_timing_util",

@@ -2681,7 +2697,7 @@ dependencies = [

[[package]]
name = "komodo_util"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"dotenvy",

@@ -2770,7 +2786,7 @@ dependencies = [

[[package]]
name = "logger"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"komodo_client",

@@ -3525,7 +3541,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

[[package]]
name = "periphery_client"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"komodo_client",

@@ -3537,7 +3553,7 @@ dependencies = [
"serde_qs",
"serror",
"tokio",
-"tokio-tungstenite",
+"tokio-tungstenite 0.27.0",
"tracing",
]

@@ -3908,9 +3924,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"

[[package]]
name = "reqwest"
-version = "0.12.15"
+version = "0.12.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb"
+checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813"
dependencies = [
"base64 0.22.1",
"bytes",

@@ -3925,36 +3941,32 @@ dependencies = [
"hyper 1.6.0",
"hyper-rustls 0.27.5",
"hyper-util",
"ipnet",
"js-sys",
"log",
"mime",
"mime_guess",
"once_cell",
"percent-encoding",
"pin-project-lite",
"quinn",
"rustls 0.23.27",
"rustls-native-certs 0.8.1",
"rustls-pemfile 2.2.0",
"rustls-pki-types",
"serde",
"serde_json",
"serde_urlencoded",
"sync_wrapper",
"system-configuration",
"tokio",
"tokio-rustls 0.26.2",
"tokio-util",
"tower 0.5.2",
"tower-http",
"tower-service",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",
"wasm-streams",
"web-sys",
-"webpki-roots 0.26.8",
-"windows-registry",
+"webpki-roots 1.0.0",
]

[[package]]

@@ -4053,7 +4065,7 @@ dependencies = [

[[package]]
name = "response"
-version = "1.18.1"
+version = "1.18.2"
dependencies = [
"anyhow",
"axum",

@@ -5093,6 +5105,18 @@ name = "tokio-tungstenite"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084"
+dependencies = [
+"futures-util",
+"log",
+"tokio",
+"tungstenite 0.26.2",
+]
+
+[[package]]
+name = "tokio-tungstenite"
version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "489a59b6730eda1b0171fcfda8b121f4bee2b35cba8645ca35c5f7ba3eb736c1"
dependencies = [
"futures-util",
"log",

@@ -5101,7 +5125,7 @@ dependencies = [
"rustls-pki-types",
"tokio",
"tokio-rustls 0.26.2",
-"tungstenite",
+"tungstenite 0.27.0",
]

[[package]]

@@ -5238,24 +5262,27 @@ dependencies = [

[[package]]
name = "tower-http"
-version = "0.6.4"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e"
+checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
dependencies = [
"bitflags 2.9.0",
"bytes",
"futures-core",
"futures-util",
"http 1.3.1",
"http-body 1.0.1",
"http-body-util",
"http-range-header",
"httpdate",
"iri-string",
"mime",
"mime_guess",
"percent-encoding",
"pin-project-lite",
"tokio",
"tokio-util",
"tower 0.5.2",
"tower-layer",
"tower-service",
"tracing",

@@ -5373,6 +5400,23 @@ name = "tungstenite"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13"
+dependencies = [
+"bytes",
+"data-encoding",
+"http 1.3.1",
+"httparse",
+"log",
+"rand 0.9.1",
+"sha1",
+"thiserror 2.0.12",
+"utf-8",
+]
+
+[[package]]
+name = "tungstenite"
version = "0.27.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eadc29d668c91fcc564941132e17b28a7ceb2f3ebf0b9dae3e03fd7a6748eb0d"
dependencies = [
"bytes",
"data-encoding",

@@ -5708,6 +5752,15 @@ dependencies = [
"rustls-pki-types",
]

+[[package]]
+name = "webpki-roots"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb"
+dependencies = [
+"rustls-pki-types",
+]
+
[[package]]
name = "which"
version = "4.4.2"

@@ -5789,7 +5842,7 @@ dependencies = [
"windows-interface",
"windows-link",
"windows-result",
-"windows-strings 0.4.0",
+"windows-strings",
]

[[package]]

@@ -5842,13 +5895,13 @@ dependencies = [

[[package]]
name = "windows-registry"
-version = "0.4.0"
+version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3"
+checksum = "ad1da3e436dc7653dfdf3da67332e22bff09bb0e28b0239e1624499c7830842e"
dependencies = [
"windows-link",
"windows-result",
-"windows-strings 0.3.1",
-"windows-targets 0.53.0",
+"windows-strings",
]

[[package]]

@@ -5860,15 +5913,6 @@ dependencies = [
"windows-link",
]

-[[package]]
-name = "windows-strings"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319"
-dependencies = [
-"windows-link",
-]
-
[[package]]
name = "windows-strings"
version = "0.4.0"

@@ -5929,29 +5973,13 @@ dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
-"windows_i686_gnullvm 0.52.6",
+"windows_i686_gnullvm",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
]

-[[package]]
-name = "windows-targets"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b"
-dependencies = [
-"windows_aarch64_gnullvm 0.53.0",
-"windows_aarch64_msvc 0.53.0",
-"windows_i686_gnu 0.53.0",
-"windows_i686_gnullvm 0.53.0",
-"windows_i686_msvc 0.53.0",
-"windows_x86_64_gnu 0.53.0",
-"windows_x86_64_gnullvm 0.53.0",
-"windows_x86_64_msvc 0.53.0",
-]
-
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"

@@ -5964,12 +5992,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"

-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
-
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"

@@ -5982,12 +6004,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"

-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
-
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"

@@ -6000,24 +6016,12 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"

-[[package]]
-name = "windows_i686_gnu"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
-
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"

-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
-
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"

@@ -6030,12 +6034,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"

-[[package]]
-name = "windows_i686_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
-
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"

@@ -6048,12 +6046,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"

-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
-
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"

@@ -6066,12 +6058,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"

-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
-
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"

@@ -6084,12 +6070,6 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
-
[[package]]
name = "winnow"
version = "0.7.7"
Cargo.toml

@@ -8,7 +8,7 @@ members = [
]

[workspace.package]
-version = "1.18.1"
+version = "1.18.2"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"

@@ -44,7 +44,7 @@ mungos = "3.2.0"
svi = "1.0.1"

# ASYNC
-reqwest = { version = "0.12.15", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
+reqwest = { version = "0.12.20", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
tokio = { version = "1.45.1", features = ["full"] }
tokio-util = { version = "0.7.15", features = ["io", "codec"] }
tokio-stream = { version = "0.1.17", features = ["sync"] }

@@ -54,7 +54,7 @@ futures-util = "0.3.31"
arc-swap = "1.7.1"

# SERVER
-tokio-tungstenite = { version = "0.26.2", features = ["rustls-tls-native-roots"] }
+tokio-tungstenite = { version = "0.27.0", features = ["rustls-tls-native-roots"] }
axum-extra = { version = "0.10.1", features = ["typed-header"] }
tower-http = { version = "0.6.4", features = ["fs", "cors"] }
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
@@ -7,14 +7,18 @@ use komodo_client::entities::{
  alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
  alerter::*,
  deployment::DeploymentState,
+ komodo_timestamp,
  stack::StackState,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use std::collections::HashSet;
use tracing::Instrument;

-use crate::helpers::interpolate::interpolate_variables_secrets_into_string;
use crate::helpers::query::get_variables_and_secrets;
+use crate::helpers::{
+  interpolate::interpolate_variables_secrets_into_string,
+  maintenance::is_in_maintenance,
+};
use crate::{config::core_config, state::db_client};

mod discord;

@@ -80,6 +84,13 @@ pub async fn send_alert_to_alerter(
    return Ok(());
  }

+  if is_in_maintenance(
+    &alerter.config.maintenance_windows,
+    komodo_timestamp(),
+  ) {
+    return Ok(());
+  }
+
  let alert_type = alert.data.extract_variant();

  // In the test case, we don't want the filters inside this
@@ -16,7 +16,7 @@ use crate::{
    get_user_id_from_headers,
    github::{self, client::github_oauth_client},
    google::{self, client::google_oauth_client},
-   oidc,
+   oidc::{self, client::oidc_client},
  },
  config::core_config,
  helpers::query::get_user,

@@ -114,15 +114,9 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
    let config = core_config();
    GetLoginOptionsResponse {
      local: config.local_auth,
-     github: config.github_oauth.enabled
-       && !config.github_oauth.id.is_empty()
-       && !config.github_oauth.secret.is_empty(),
-     google: config.google_oauth.enabled
-       && !config.google_oauth.id.is_empty()
-       && !config.google_oauth.secret.is_empty(),
-     oidc: config.oidc_enabled
-       && !config.oidc_provider.is_empty()
-       && !config.oidc_client_id.is_empty(),
+     github: github_oauth_client().is_some(),
+     google: google_oauth_client().is_some(),
+     oidc: oidc_client().load().is_some(),
      registration_disabled: config.disable_user_registration,
    }
  })
@@ -16,6 +16,7 @@ use komodo_client::{
    deployment::DeploymentState,
    komodo_timestamp,
    permission::PermissionLevel,
+   repo::Repo,
    update::{Log, Update},
    user::auto_redeploy_user,
  },

@@ -35,9 +36,9 @@ use tokio_util::sync::CancellationToken;
use crate::{
  alert::send_alerts,
  helpers::{
+   build_git_token,
    builder::{cleanup_builder_instance, get_builder_periphery},
    channel::build_cancel_channel,
-   git_token,
    interpolate::{
      add_interp_update_log,
      interpolate_variables_secrets_into_extra_args,

@@ -88,6 +89,16 @@ impl Resolve<ExecuteArgs> for RunBuild {
    )
    .await?;

+   let mut repo = if !build.config.files_on_host
+     && !build.config.linked_repo.is_empty()
+   {
+     crate::resource::get::<Repo>(&build.config.linked_repo)
+       .await?
+       .into()
+   } else {
+     None
+   };
+
    let mut vars_and_secrets = get_variables_and_secrets().await?;
    // Add the $VERSION to variables. Use with [[$VERSION]]
    vars_and_secrets.variables.insert(

@@ -117,15 +128,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
    update.version = build.config.version;
    update_update(update.clone()).await?;

-   let git_token = git_token(
-     &build.config.git_provider,
-     &build.config.git_account,
-     |https| build.config.git_https = https,
-   )
-   .await
-   .with_context(
-     || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", build.config.git_provider, build.config.git_account),
-   )?;
+   let git_token =
+     build_git_token(&mut build, repo.as_mut()).await?;

    let registry_token =
      validate_account_extract_registry_token(&build).await?;

@@ -253,13 +257,14 @@ impl Resolve<ExecuteArgs> for RunBuild {
    };

    let commit_message = if !build.config.files_on_host
-     && !build.config.repo.is_empty()
+     && (!build.config.repo.is_empty()
+       || !build.config.linked_repo.is_empty())
    {
-     // CLONE REPO
+     // PULL OR CLONE REPO
      let res = tokio::select! {
        res = periphery
-         .request(api::git::CloneRepo {
-           args: (&build).into(),
+         .request(api::git::PullOrCloneRepo {
+           args: repo.as_ref().map(Into::into).unwrap_or((&build).into()),
            git_token,
            environment: Default::default(),
            env_file_path: Default::default(),

@@ -285,10 +290,10 @@ impl Resolve<ExecuteArgs> for RunBuild {
          res.commit_message.unwrap_or_default()
        }
        Err(e) => {
-         warn!("failed build at clone repo | {e:#}");
+         warn!("Failed build at clone repo | {e:#}");
          update.push_error_log(
-           "clone repo",
-           format_serror(&e.context("failed to clone repo").into()),
+           "Clone Repo",
+           format_serror(&e.context("Failed to clone repo").into()),
          );
          Default::default()
        }

@@ -307,6 +312,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
      res = periphery
        .request(api::build::Build {
          build: build.clone(),
+         repo,
          registry_token,
          replacers: secret_replacers.into_iter().collect(),
          // Push a commit hash tagged image
@@ -131,8 +131,8 @@ impl Resolve<ExecuteArgs> for CloneRepo {
    Ok(res) => res.logs,
    Err(e) => {
      vec![Log::error(
-       "clone repo",
-       format_serror(&e.context("failed to clone repo").into()),
+       "Clone Repo",
+       format_serror(&e.context("Failed to clone repo").into()),
      )]
    }
  };

@@ -479,8 +479,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
    }
    Err(e) => {
      update.push_error_log(
-       "clone repo",
-       format_serror(&e.context("failed to clone repo").into()),
+       "Clone Repo",
+       format_serror(&e.context("Failed to clone repo").into()),
      );
      Default::default()
    }
@@ -6,6 +6,7 @@ use komodo_client::{
  api::{execute::*, write::RefreshStackCache},
  entities::{
    permission::PermissionLevel,
+   repo::Repo,
    server::Server,
    stack::{Stack, StackInfo},
    update::{Log, Update},

@@ -26,6 +27,7 @@ use crate::{
    },
    periphery_client,
    query::get_variables_and_secrets,
+   stack_git_token,
    update::{add_update_without_send, update_update},
  },
  monitor::update_cache_for_server,

@@ -75,6 +77,16 @@ impl Resolve<ExecuteArgs> for DeployStack {
    )
    .await?;

+   let mut repo = if !stack.config.files_on_host
+     && !stack.config.linked_repo.is_empty()
+   {
+     crate::resource::get::<Repo>(&stack.config.linked_repo)
+       .await?
+       .into()
+   } else {
+     None
+   };
+
    // get the action state for the stack (or insert default).
    let action_state =
      action_states().stack.get_or_insert_default(&stack.id).await;

@@ -98,13 +110,8 @@ impl Resolve<ExecuteArgs> for DeployStack {
      ))
    }

-   let git_token = crate::helpers::git_token(
-     &stack.config.git_provider,
-     &stack.config.git_account,
-     |https| stack.config.git_https = https,
-   ).await.with_context(
-     || format!("Failed to get git token in call to db. Stopping run. | {} | {}", stack.config.git_provider, stack.config.git_account),
-   )?;
+   let git_token =
+     stack_git_token(&mut stack, repo.as_mut()).await?;

    let registry_token = crate::helpers::registry_token(
      &stack.config.registry_provider,

@@ -188,6 +195,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
      .request(ComposeUp {
        stack: stack.clone(),
        services: self.services,
+       repo,
        git_token,
        registry_token,
        replacers: secret_replacers.into_iter().collect(),

@@ -413,6 +421,7 @@ pub async fn pull_stack_inner(
  mut stack: Stack,
  services: Vec<String>,
  server: &Server,
+ mut repo: Option<Repo>,
  mut update: Option<&mut Update>,
) -> anyhow::Result<ComposePullResponse> {
  if let Some(update) = update.as_mut() {

@@ -427,13 +436,7 @@ pub async fn pull_stack_inner(
    }
  }

- let git_token = crate::helpers::git_token(
-   &stack.config.git_provider,
-   &stack.config.git_account,
-   |https| stack.config.git_https = https,
- ).await.with_context(
-   || format!("Failed to get git token in call to db. Stopping run. | {} | {}", stack.config.git_provider, stack.config.git_account),
- )?;
+ let git_token = stack_git_token(&mut stack, repo.as_mut()).await?;

  let registry_token = crate::helpers::registry_token(
    &stack.config.registry_provider,

@@ -476,6 +479,7 @@ pub async fn pull_stack_inner(
    .request(ComposePull {
      stack,
      services,
+     repo,
      git_token,
      registry_token,
    })

@@ -501,6 +505,16 @@ impl Resolve<ExecuteArgs> for PullStack {
    )
    .await?;

+   let repo = if !stack.config.files_on_host
+     && !stack.config.linked_repo.is_empty()
+   {
+     crate::resource::get::<Repo>(&stack.config.linked_repo)
+       .await?
+       .into()
+   } else {
+     None
+   };
+
    // get the action state for the stack (or insert default).
    let action_state =
      action_states().stack.get_or_insert_default(&stack.id).await;

@@ -517,6 +531,7 @@ impl Resolve<ExecuteArgs> for PullStack {
      stack,
      self.services,
      &server,
+     repo,
      Some(&mut update),
    )
    .await?;
@@ -28,11 +28,14 @@ use resolver_api::Resolve;

use crate::{
  api::write::WriteArgs,
- helpers::{query::get_id_to_tags, update::update_update},
+ helpers::{
+   all_resources::AllResourcesById, query::get_id_to_tags,
+   update::update_update,
+ },
  permission::get_check_permissions,
  state::{action_states, db_client},
  sync::{
-   AllResourcesById, ResourceSyncTrait,
+   ResourceSyncTrait,
    deploy::{
      SyncDeployParams, build_deploy_cache, deploy_from_cache,
    },

@@ -61,6 +64,16 @@ impl Resolve<ExecuteArgs> for RunSync {
    )
    .await?;

+   let repo = if !sync.config.files_on_host
+     && !sync.config.linked_repo.is_empty()
+   {
+     crate::resource::get::<Repo>(&sync.config.linked_repo)
+       .await?
+       .into()
+   } else {
+     None
+   };
+
    // get the action state for the sync (or insert default).
    let action_state = action_states()
      .resource_sync

@@ -84,9 +97,10 @@ impl Resolve<ExecuteArgs> for RunSync {
      message,
      file_errors,
      ..
-   } = crate::sync::remote::get_remote_resources(&sync)
-     .await
-     .context("failed to get remote resources")?;
+   } =
+     crate::sync::remote::get_remote_resources(&sync, repo.as_ref())
+       .await
+       .context("failed to get remote resources")?;

    update.logs.extend(logs);
    update_update(update.clone()).await?;

@@ -197,7 +211,6 @@ impl Resolve<ExecuteArgs> for RunSync {
      deployment_map: &deployments_by_name,
      stacks: &resources.stacks,
      stack_map: &stacks_by_name,
-     all_resources: &all_resources,
    })
    .await?;

@@ -207,7 +220,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Server>(
      resources.servers,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -221,7 +233,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Stack>(
      resources.stacks,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -235,7 +246,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Deployment>(
      resources.deployments,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -249,7 +259,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Build>(
      resources.builds,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -263,7 +272,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Repo>(
      resources.repos,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -277,7 +285,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Procedure>(
      resources.procedures,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -291,7 +298,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Action>(
      resources.actions,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -305,7 +311,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Builder>(
      resources.builders,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -319,7 +324,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<Alerter>(
      resources.alerters,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -333,7 +337,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    get_updates_for_execution::<entities::sync::ResourceSync>(
      resources.resource_syncs,
      delete,
-     &all_resources,
      match_resource_type,
      match_resources.as_deref(),
      &id_to_tags,

@@ -371,7 +374,6 @@ impl Resolve<ExecuteArgs> for RunSync {
    crate::sync::user_groups::get_updates_for_execution(
      resources.user_groups,
      delete,
-     &all_resources,
    )
    .await?
  } else {
@@ -296,6 +296,7 @@ fn core_info() -> &'static GetCoreInfoResponse {
        .iter()
        .map(|i| i.namespace.to_string())
        .collect(),
+     timezone: config.timezone.clone(),
    }
  })
}
@@ -24,7 +24,6 @@ use crate::{
  resource,
  state::db_client,
  sync::{
-   AllResourcesById,
    toml::{ToToml, convert_resource},
    user_groups::{convert_user_groups, user_group_to_toml},
    variables::variable_to_toml,

@@ -44,7 +43,7 @@ async fn get_all_targets(
    get_all_tags(None).await?
  };
  targets.extend(
-   resource::list_for_user::<Alerter>(
+   resource::list_full_for_user::<Alerter>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -55,7 +54,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Alerter(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Builder>(
+   resource::list_full_for_user::<Builder>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -66,7 +65,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Builder(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Server>(
+   resource::list_full_for_user::<Server>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -77,7 +76,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Server(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Stack>(
+   resource::list_full_for_user::<Stack>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -88,7 +87,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Stack(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Deployment>(
+   resource::list_full_for_user::<Deployment>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -99,7 +98,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Deployment(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Build>(
+   resource::list_full_for_user::<Build>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -110,7 +109,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Build(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Repo>(
+   resource::list_full_for_user::<Repo>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -121,7 +120,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Repo(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Procedure>(
+   resource::list_full_for_user::<Procedure>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -132,7 +131,7 @@ async fn get_all_targets(
    .map(|resource| ResourceTarget::Procedure(resource.id)),
  );
  targets.extend(
-   resource::list_for_user::<Action>(
+   resource::list_full_for_user::<Action>(
      ResourceQuery::builder().tags(tags).build(),
      user,
      PermissionLevel::Read.into(),

@@ -204,18 +203,18 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
    include_variables,
  } = self;
  let mut res = ResourcesToml::default();
- let all = AllResourcesById::load().await?;
  let id_to_tags = get_id_to_tags(None).await?;
  let ReadArgs { user } = args;
  for target in targets {
    match target {
      ResourceTarget::Alerter(id) => {
-       let alerter = get_check_permissions::<Alerter>(
+       let mut alerter = get_check_permissions::<Alerter>(
          &id,
          user,
          PermissionLevel::Read.into(),
        )
        .await?;
+       Alerter::replace_ids(&mut alerter);
        res.alerters.push(convert_resource::<Alerter>(
          alerter,
          false,

@@ -224,7 +223,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
        ))
      }
      ResourceTarget::ResourceSync(id) => {
-       let sync = get_check_permissions::<ResourceSync>(
+       let mut sync = get_check_permissions::<ResourceSync>(
          &id,
          user,
          PermissionLevel::Read.into(),

@@ -232,8 +231,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
        .await?;
        if sync.config.file_contents.is_empty()
          && (sync.config.files_on_host
-           || !sync.config.repo.is_empty())
+           || !sync.config.repo.is_empty()
+           || !sync.config.linked_repo.is_empty())
        {
+         ResourceSync::replace_ids(&mut sync);
          res.resource_syncs.push(convert_resource::<ResourceSync>(
            sync,
            false,

@@ -243,12 +244,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
        }
      }
      ResourceTarget::Server(id) => {
-       let server = get_check_permissions::<Server>(
+       let mut server = get_check_permissions::<Server>(
          &id,
          user,
          PermissionLevel::Read.into(),
        )
        .await?;
+       Server::replace_ids(&mut server);
        res.servers.push(convert_resource::<Server>(
          server,
          false,

@@ -263,7 +265,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
          PermissionLevel::Read.into(),
        )
        .await?;
-       Builder::replace_ids(&mut builder, &all);
+       Builder::replace_ids(&mut builder);
        res.builders.push(convert_resource::<Builder>(
          builder,
          false,

@@ -278,7 +280,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
          PermissionLevel::Read.into(),
        )
        .await?;
-       Build::replace_ids(&mut build, &all);
+       Build::replace_ids(&mut build);
        res.builds.push(convert_resource::<Build>(
          build,
          false,

@@ -293,7 +295,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
          PermissionLevel::Read.into(),
        )
        .await?;
-       Deployment::replace_ids(&mut deployment, &all);
+       Deployment::replace_ids(&mut deployment);
        res.deployments.push(convert_resource::<Deployment>(
          deployment,
          false,

@@ -308,7 +310,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
          PermissionLevel::Read.into(),
        )
        .await?;
-       Repo::replace_ids(&mut repo, &all);
+       Repo::replace_ids(&mut repo);
        res.repos.push(convert_resource::<Repo>(
          repo,
          false,

@@ -323,7 +325,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
          PermissionLevel::Read.into(),
        )
        .await?;
-       Stack::replace_ids(&mut stack, &all);
+       Stack::replace_ids(&mut stack);
        res.stacks.push(convert_resource::<Stack>(
          stack,
          false,

@@ -338,7 +340,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
          PermissionLevel::Read.into(),
        )
        .await?;
-       Procedure::replace_ids(&mut procedure, &all);
+       Procedure::replace_ids(&mut procedure);
        res.procedures.push(convert_resource::<Procedure>(
          procedure,
          false,

@@ -353,7 +355,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
          PermissionLevel::Read.into(),
        )
        .await?;
-       Action::replace_ids(&mut action, &all);
+       Action::replace_ids(&mut action);
        res.actions.push(convert_resource::<Action>(
          action,
          false,

@@ -365,7 +367,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
    };
  }

- add_user_groups(user_groups, &mut res, &all, args)
+ add_user_groups(user_groups, &mut res, args)
    .await
    .context("failed to add user groups")?;

@@ -394,7 +396,6 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
async fn add_user_groups(
  user_groups: Vec<String>,
  res: &mut ResourcesToml,
- all: &AllResourcesById,
  args: &ReadArgs,
) -> anyhow::Result<()> {
  let user_groups = ListUserGroups {}

@@ -406,7 +407,7 @@ async fn add_user_groups(
    user_groups.contains(&ug.name) || user_groups.contains(&ug.id)
  });
  let mut ug = Vec::with_capacity(user_groups.size_hint().0);
- convert_user_groups(user_groups, all, &mut ug).await?;
+ convert_user_groups(user_groups, &mut ug).await?;
  res.user_groups = ug.into_iter().map(|ug| ug.1).collect();

  Ok(())
@@ -1,9 +1,10 @@
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{
- api::terminal::ExecuteTerminalBody,
+ api::terminal::*,
  entities::{
-   permission::PermissionLevel, server::Server, user::User,
+   deployment::Deployment, permission::PermissionLevel,
+   server::Server, stack::Stack, user::User,
  },
};
use serror::Json;

@@ -11,20 +12,28 @@ use uuid::Uuid;

use crate::{
  auth::auth_request, helpers::periphery_client,
- permission::get_check_permissions,
+ permission::get_check_permissions, resource::get,
+ state::stack_status_cache,
};

pub fn router() -> Router {
  Router::new()
-   .route("/execute", post(execute))
+   .route("/execute", post(execute_terminal))
+   .route("/execute/container", post(execute_container_exec))
+   .route("/execute/deployment", post(execute_deployment_exec))
+   .route("/execute/stack", post(execute_stack_exec))
    .layer(middleware::from_fn(auth_request))
}

-async fn execute(
+// =================
+// ExecuteTerminal
+// =================
+
+async fn execute_terminal(
  Extension(user): Extension<User>,
  Json(request): Json<ExecuteTerminalBody>,
) -> serror::Result<axum::body::Body> {
- execute_inner(Uuid::new_v4(), request, user).await
+ execute_terminal_inner(Uuid::new_v4(), request, user).await
}

#[instrument(

@@ -34,7 +43,7 @@ async fn execute(
    user_id = user.id,
  )
)]
-async fn execute_inner(
+async fn execute_terminal_inner(
  req_id: Uuid,
  ExecuteTerminalBody {
    server,

@@ -43,7 +52,7 @@ async fn execute_inner(
  }: ExecuteTerminalBody,
  user: User,
) -> serror::Result<axum::body::Body> {
- info!("/terminal request | user: {}", user.username);
+ info!("/terminal/execute request | user: {}", user.username);

  let res = async {
    let server = get_check_permissions::<Server>(

@@ -67,7 +76,221 @@ async fn execute_inner(
  let stream = match res {
    Ok(stream) => stream,
    Err(e) => {
-     warn!("/terminal request {req_id} error: {e:#}");
+     warn!("/terminal/execute request {req_id} error: {e:#}");
      return Err(e.into());
    }
  };

  Ok(axum::body::Body::from_stream(stream.into_line_stream()))
}

+// ======================
+// ExecuteContainerExec
+// ======================
+
+async fn execute_container_exec(
+  Extension(user): Extension<User>,
+  Json(request): Json<ExecuteContainerExecBody>,
+) -> serror::Result<axum::body::Body> {
+  execute_container_exec_inner(Uuid::new_v4(), request, user).await
+}
+
+#[instrument(
+  name = "ExecuteContainerExec",
+  skip(user),
+  fields(
+    user_id = user.id,
+  )
+)]
+async fn execute_container_exec_inner(
+  req_id: Uuid,
+  ExecuteContainerExecBody {
+    server,
+    container,
+    shell,
+    command,
+  }: ExecuteContainerExecBody,
+  user: User,
+) -> serror::Result<axum::body::Body> {
+  info!(
+    "/terminal/execute/container request | user: {}",
+    user.username
+  );
+
+  let res = async {
+    let server = get_check_permissions::<Server>(
+      &server,
+      &user,
+      PermissionLevel::Read.terminal(),
+    )
+    .await?;
+
+    let periphery = periphery_client(&server)?;
+
+    let stream = periphery
+      .execute_container_exec(container, shell, command)
+      .await
+      .context(
+        "Failed to execute container exec command on periphery",
+      )?;
+
+    anyhow::Ok(stream)
+  }
+  .await;
+
+  let stream = match res {
+    Ok(stream) => stream,
+    Err(e) => {
+      warn!(
+        "/terminal/execute/container request {req_id} error: {e:#}"
+      );
+      return Err(e.into());
+    }
+  };
+
+  Ok(axum::body::Body::from_stream(stream.into_line_stream()))
+}
+
+// =======================
+// ExecuteDeploymentExec
+// =======================
+
+async fn execute_deployment_exec(
+  Extension(user): Extension<User>,
+  Json(request): Json<ExecuteDeploymentExecBody>,
+) -> serror::Result<axum::body::Body> {
+  execute_deployment_exec_inner(Uuid::new_v4(), request, user).await
+}
+
+#[instrument(
+  name = "ExecuteDeploymentExec",
+  skip(user),
+  fields(
+    user_id = user.id,
+  )
+)]
+async fn execute_deployment_exec_inner(
+  req_id: Uuid,
+  ExecuteDeploymentExecBody {
+    deployment,
+    shell,
+    command,
+  }: ExecuteDeploymentExecBody,
+  user: User,
+) -> serror::Result<axum::body::Body> {
+  info!(
+    "/terminal/execute/deployment request | user: {}",
+    user.username
+  );
+
+  let res = async {
+    let deployment = get_check_permissions::<Deployment>(
+      &deployment,
+      &user,
+      PermissionLevel::Read.terminal(),
+    )
+    .await?;
+
+    let server = get::<Server>(&deployment.config.server_id).await?;
+
+    let periphery = periphery_client(&server)?;
+
+    let stream = periphery
+      .execute_container_exec(deployment.name, shell, command)
+      .await
+      .context(
+        "Failed to execute container exec command on periphery",
+      )?;
+
+    anyhow::Ok(stream)
+  }
+  .await;
+
+  let stream = match res {
+    Ok(stream) => stream,
+    Err(e) => {
+      warn!(
+        "/terminal/execute/deployment request {req_id} error: {e:#}"
+      );
+      return Err(e.into());
+    }
+  };
+
+  Ok(axum::body::Body::from_stream(stream.into_line_stream()))
+}
+
+// ==================
+// ExecuteStackExec
+// ==================
+
+async fn execute_stack_exec(
+  Extension(user): Extension<User>,
+  Json(request): Json<ExecuteStackExecBody>,
+) -> serror::Result<axum::body::Body> {
+  execute_stack_exec_inner(Uuid::new_v4(), request, user).await
+}
+
+#[instrument(
+  name = "ExecuteStackExec",
+  skip(user),
+  fields(
+    user_id = user.id,
+  )
+)]
+async fn execute_stack_exec_inner(
+  req_id: Uuid,
+  ExecuteStackExecBody {
+    stack,
+    service,
+    shell,
+    command,
+  }: ExecuteStackExecBody,
+  user: User,
+) -> serror::Result<axum::body::Body> {
+  info!("/terminal/execute/stack request | user: {}", user.username);
+
+  let res = async {
+    let stack = get_check_permissions::<Stack>(
+      &stack,
+      &user,
+      PermissionLevel::Read.terminal(),
+    )
+    .await?;
+
+    let server = get::<Server>(&stack.config.server_id).await?;
+
+    let container = stack_status_cache()
+      .get(&stack.id)
+      .await
+      .context("could not get stack status")?
+      .curr
+      .services
+      .iter()
+      .find(|s| s.service == service)
+      .context("could not find service")?
+      .container
+      .as_ref()
+      .context("could not find service container")?
+      .name
+      .clone();
+
+    let periphery = periphery_client(&server)?;
+
+    let stream = periphery
+      .execute_container_exec(container, shell, command)
+      .await
+      .context(
+        "Failed to execute container exec command on periphery",
+      )?;
+
+    anyhow::Ok(stream)
+  }
+  .await;
+
+  let stream = match res {
+    Ok(stream) => stream,
+    Err(e) => {
+      warn!("/terminal/execute/stack request {req_id} error: {e:#}");
+      return Err(e.into());
+    }
+  };
@@ -11,6 +11,7 @@ use komodo_client::{
|
||||
builder::{Builder, BuilderConfig},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::ServerState,
|
||||
update::Update,
|
||||
},
|
||||
@@ -114,7 +115,10 @@ impl Resolve<WriteArgs> for WriteBuildFileContents {
)
.await?;

if !build.config.files_on_host && build.config.repo.is_empty() {
if !build.config.files_on_host
&& build.config.repo.is_empty()
&& build.config.linked_repo.is_empty()
{
return Err(anyhow!(
"Build is not configured to use Files on Host or Git Repo, can't write dockerfile contents"
).into());
@@ -182,8 +186,16 @@ async fn write_dockerfile_contents_git(
) -> serror::Result<Update> {
let WriteBuildFileContents { build: _, contents } = req;

let mut clone_args: CloneArgs = (&build).into();
let mut clone_args: CloneArgs = if !build.config.files_on_host
&& !build.config.linked_repo.is_empty()
{
(&crate::resource::get::<Repo>(&build.config.linked_repo).await?)
.into()
} else {
(&build).into()
};
let root = clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(root.display().to_string());

let build_path = build
.config
@@ -206,19 +218,19 @@ async fn write_dockerfile_contents_git(
})?;
}

let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
} else {
None
};

// Ensure the folder is initialized as git repo.
// This allows a new file to be committed on a branch that may not exist.
if !root.join(".git").exists() {
let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
} else {
None
};

git::init_folder_as_repo(
&root,
&clone_args,
@@ -235,6 +247,34 @@ async fn write_dockerfile_contents_git(
}
}

// Pull latest changes to repo to ensure linear commit history
match git::pull_or_clone(
clone_args,
&core_config().repo_directory,
access_token,
Default::default(),
Default::default(),
Default::default(),
Default::default(),
)
.await
.context("Failed to pull latest changes before commit")
{
Ok(res) => update.logs.extend(res.logs),
Err(e) => {
update.push_error_log("Pull Repo", format_serror(&e.into()));
update.finalize();
return Ok(update);
}
};

if !all_logs_success(&update.logs) {
update.finalize();
update.id = add_update(update.clone()).await?;

return Ok(update);
}

if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
format!("Failed to write dockerfile contents to {full_path:?}")
@@ -301,6 +341,16 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
)
.await?;

let repo = if !build.config.files_on_host
&& !build.config.linked_repo.is_empty()
{
crate::resource::get::<Repo>(&build.config.linked_repo)
.await?
.into()
} else {
None
};

let (
remote_path,
remote_contents,
@@ -319,71 +369,20 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
(None, None, Some(format_serror(&e.into())), None, None)
}
}
} else if !build.config.repo.is_empty() {
// ================
// REPO BASED BUILD
// ================
if build.config.git_provider.is_empty() {
} else if let Some(repo) = &repo {
let Some(res) = get_git_remote(&build, repo.into()).await?
else {
// Nothing to do here
return Ok(NoData {});
}
let config = core_config();

let mut clone_args: CloneArgs = (&build).into();
let repo_path =
clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(repo_path.display().to_string());
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;

let access_token = if let Some(username) = &clone_args.account {
git_token(&clone_args.provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
)?
} else {
None
};

let GitRes { hash, message, .. } = git::pull_or_clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.context("failed to clone build repo")?;

let relative_path = PathBuf::from_str(&build.config.build_path)
.context("Invalid build path")?
.join(&build.config.dockerfile_path);

let full_path = repo_path.join(&relative_path);
let (contents, error) = match fs::read_to_string(&full_path)
.await
.with_context(|| {
format!(
"Failed to read dockerfile contents at {full_path:?}"
)
}) {
Ok(contents) => (Some(contents), None),
Err(e) => (None, Some(format_serror(&e.into()))),
res
} else if !build.config.repo.is_empty() {
let Some(res) = get_git_remote(&build, (&build).into()).await?
else {
// Nothing to do here
return Ok(NoData {});
};

(
Some(relative_path.display().to_string()),
contents,
error,
hash,
message,
)
res
} else {
// =============
// UI BASED FILE
@@ -476,6 +475,74 @@ async fn get_on_host_dockerfile(
.await
}

async fn get_git_remote(
build: &Build,
mut clone_args: CloneArgs,
) -> anyhow::Result<
Option<(
Option<String>,
Option<String>,
Option<String>,
Option<String>,
Option<String>,
)>,
> {
if clone_args.provider.is_empty() {
// Nothing to do here
return Ok(None);
}
let config = core_config();
let repo_path = clone_args.unique_path(&config.repo_directory)?;
clone_args.destination = Some(repo_path.display().to_string());
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;

let access_token = if let Some(username) = &clone_args.account {
git_token(&clone_args.provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
)?
} else {
None
};

let GitRes { hash, message, .. } = git::pull_or_clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.context("failed to clone build repo")?;

let relative_path = PathBuf::from_str(&build.config.build_path)
.context("Invalid build path")?
.join(&build.config.dockerfile_path);

let full_path = repo_path.join(&relative_path);
let (contents, error) =
match fs::read_to_string(&full_path).await.with_context(|| {
format!("Failed to read dockerfile contents at {full_path:?}")
}) {
Ok(contents) => (Some(contents), None),
Err(e) => (None, Some(format_serror(&e.into()))),
};
Ok(Some((
Some(relative_path.display().to_string()),
contents,
error,
hash,
message,
)))
}

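Note: get_git_remote above deduplicates the inline-repo and linked-repo paths of RefreshBuildCache. A minimal sketch of how a caller might consume its Option-wrapped five-field tuple (variable names here are illustrative, not from the diff):

  // None means the build has no git provider configured, so there is
  // nothing to fetch; otherwise contents and error are mutually exclusive.
  if let Some((remote_path, remote_contents, remote_error, hash, message)) =
    get_git_remote(&build, (&build).into()).await?
  {
    // cache remote_path / remote_contents / remote_error / hash / message
  }
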
impl Resolve<WriteArgs> for CreateBuildWebhook {
#[instrument(name = "CreateBuildWebhook", skip(args))]
async fn resolve(

@@ -11,7 +11,7 @@ use komodo_client::{
komodo_timestamp,
permission::PermissionLevel,
server::{Server, ServerState},
to_docker_compatible_name,
to_container_compatible_name,
update::Update,
},
};
@@ -207,9 +207,10 @@ impl Resolve<WriteArgs> for RenameDeployment {
let _action_guard =
action_state.update(|state| state.renaming = true)?;

let name = to_docker_compatible_name(&self.name);
let name = to_container_compatible_name(&self.name);

let container_state = get_deployment_state(&deployment.id).await?;
let container_state =
get_deployment_state(&deployment.id).await?;

if container_state == DeploymentState::Unknown {
return Err(

@@ -6,6 +6,7 @@ use komodo_client::{
FileContents, NoData, Operation,
config::core::CoreConfig,
permission::PermissionLevel,
repo::Repo,
server::ServerState,
stack::{PartialStackConfig, Stack, StackInfo},
update::Update,
@@ -26,8 +27,9 @@ use crate::{
api::execute::pull_stack_inner,
config::core_config,
helpers::{
git_token, periphery_client,
periphery_client,
query::get_server_with_state,
stack_git_token,
update::{add_update, make_update},
},
permission::get_check_permissions,
@@ -120,9 +122,22 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
)
.await?;

if !stack.config.files_on_host && stack.config.repo.is_empty() {
let mut repo = if !stack.config.files_on_host
&& !stack.config.linked_repo.is_empty()
{
crate::resource::get::<Repo>(&stack.config.linked_repo)
.await?
.into()
} else {
None
};

if !stack.config.files_on_host
&& stack.config.repo.is_empty()
&& stack.config.linked_repo.is_empty()
{
return Err(anyhow!(
"Stack is not configured to use Files on Host or Git Repo, can't write file contents"
"Stack is not configured to use Files on Host, Git Repo, or Linked Repo, can't write file contents"
).into());
}

@@ -155,25 +170,12 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
}
};
} else {
let git_token = if !stack.config.git_account.is_empty() {
git_token(
&stack.config.git_provider,
&stack.config.git_account,
|https| stack.config.git_https = https,
)
.await
.with_context(|| {
format!(
"Failed to get git token. | {} | {}",
stack.config.git_account, stack.config.git_provider
)
})?
} else {
None
};
let git_token =
stack_git_token(&mut stack, repo.as_mut()).await?;
match periphery_client(&server)?
.request(WriteCommitComposeContents {
stack,
repo,
username: Some(user.username.clone()),
file_path,
contents,
@@ -236,8 +238,19 @@ impl Resolve<WriteArgs> for RefreshStackCache {
)
.await?;

let repo = if !stack.config.files_on_host
&& !stack.config.linked_repo.is_empty()
{
crate::resource::get::<Repo>(&stack.config.linked_repo)
.await?
.into()
} else {
None
};

let file_contents_empty = stack.config.file_contents.is_empty();
let repo_empty = stack.config.repo.is_empty();
let repo_empty =
stack.config.repo.is_empty() && repo.as_ref().is_none();

if !stack.config.files_on_host
&& file_contents_empty
@@ -320,8 +333,12 @@ impl Resolve<WriteArgs> for RefreshStackCache {
hash: latest_hash,
message: latest_message,
..
} = get_repo_compose_contents(&stack, Some(&mut missing_files))
.await?;
} = get_repo_compose_contents(
&stack,
repo.as_ref(),
Some(&mut missing_files),
)
.await?;

let project_name = stack.project_name(true);

@@ -402,7 +419,8 @@ impl Resolve<WriteArgs> for RefreshStackCache {
if state == ServerState::Ok {
let name = stack.name.clone();
if let Err(e) =
pull_stack_inner(stack, Vec::new(), &server, None).await
pull_stack_inner(stack, Vec::new(), &server, repo, None)
.await
{
warn!(
"Failed to pull latest images for Stack {name} | {e:#}",

@@ -1,4 +1,7 @@
use std::{collections::HashMap, path::PathBuf};
use std::{
collections::HashMap,
path::{Path, PathBuf},
};

use anyhow::{Context, anyhow};
use formatting::format_serror;
@@ -44,6 +47,7 @@ use crate::{
api::read::ReadArgs,
config::core_config,
helpers::{
all_resources::AllResourcesById,
git_token,
query::get_id_to_tags,
update::{add_update, make_update, update_update},
@@ -52,8 +56,8 @@ use crate::{
resource,
state::{db_client, github_client},
sync::{
AllResourcesById, deploy::SyncDeployParams,
remote::RemoteResources, view::push_updates_for_view,
deploy::SyncDeployParams, remote::RemoteResources,
view::push_updates_for_view,
},
};

@@ -142,7 +146,20 @@ impl Resolve<WriteArgs> for WriteSyncFileContents {
)
.await?;

if !sync.config.files_on_host && sync.config.repo.is_empty() {
let repo = if !sync.config.files_on_host
&& !sync.config.linked_repo.is_empty()
{
crate::resource::get::<Repo>(&sync.config.linked_repo)
.await?
.into()
} else {
None
};

if !sync.config.files_on_host
&& sync.config.repo.is_empty()
&& sync.config.linked_repo.is_empty()
{
return Err(
anyhow!(
"This method is only for 'files on host' or 'repo' based syncs."
@@ -159,7 +176,8 @@ impl Resolve<WriteArgs> for WriteSyncFileContents {
if sync.config.files_on_host {
write_sync_file_contents_on_host(self, args, sync, update).await
} else {
write_sync_file_contents_git(self, args, sync, update).await
write_sync_file_contents_git(self, args, sync, repo, update)
.await
}
}
}
@@ -237,6 +255,7 @@ async fn write_sync_file_contents_git(
req: WriteSyncFileContents,
args: &WriteArgs,
sync: ResourceSync,
repo: Option<Repo>,
mut update: Update,
) -> serror::Result<Update> {
let WriteSyncFileContents {
@@ -246,15 +265,34 @@ async fn write_sync_file_contents_git(
contents,
} = req;

let mut clone_args: CloneArgs = (&sync).into();
let mut clone_args: CloneArgs = if let Some(repo) = &repo {
repo.into()
} else {
(&sync).into()
};
let root = clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(root.display().to_string());

let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
} else {
None
};

let file_path =
file_path.parse::<PathBuf>().context("Invalid file path")?;
let resource_path = resource_path
.parse::<PathBuf>()
.context("Invalid resource path")?;
let full_path = root.join(&resource_path).join(&file_path);
let full_path = root
.join(&resource_path)
.join(&file_path)
.components()
.collect::<PathBuf>();

if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent).await.with_context(|| {
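
Note: full_path is rebuilt through .components().collect::<PathBuf>() so the user-supplied file_path and resource_path segments are normalized before the parent directory is created. A small sketch of the effect, relying only on documented std behavior:

  use std::path::PathBuf;

  // Collecting Components drops interior `.` segments and doubled
  // separators, so both spellings below join to the same path.
  // Note that `..` segments are preserved, not resolved.
  fn normalize(p: &str) -> PathBuf {
    PathBuf::from(p).components().collect()
  }

  // normalize("stacks//./komodo.toml") == PathBuf::from("stacks/komodo.toml")
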
@@ -267,16 +305,6 @@ async fn write_sync_file_contents_git(
// Ensure the folder is initialized as git repo.
// This allows a new file to be committed on a branch that may not exist.
if !root.join(".git").exists() {
let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
} else {
None
};

git::init_folder_as_repo(
&root,
&clone_args,
@@ -288,11 +316,37 @@ async fn write_sync_file_contents_git(
if !all_logs_success(&update.logs) {
update.finalize();
update.id = add_update(update.clone()).await?;

return Ok(update);
}
}

// Pull latest changes to repo to ensure linear commit history
match git::pull_or_clone(
clone_args,
&core_config().repo_directory,
access_token,
Default::default(),
Default::default(),
Default::default(),
Default::default(),
)
.await
.context("Failed to pull latest changes before commit")
{
Ok(res) => update.logs.extend(res.logs),
Err(e) => {
update.push_error_log("Pull Repo", format_serror(&e.into()));
update.finalize();
return Ok(update);
}
};

if !all_logs_success(&update.logs) {
update.finalize();
update.id = add_update(update.clone()).await?;
return Ok(update);
}

if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
format!(
@@ -353,10 +407,21 @@ impl Resolve<WriteArgs> for CommitSync {
)
.await?;

let repo = if !sync.config.files_on_host
&& !sync.config.linked_repo.is_empty()
{
crate::resource::get::<Repo>(&sync.config.linked_repo)
.await?
.into()
} else {
None
};

let file_contents_empty = sync.config.file_contents_empty();

let fresh_sync = !sync.config.files_on_host
&& sync.config.repo.is_empty()
&& repo.is_none()
&& file_contents_empty;

if !sync.config.managed && !fresh_sync {
@@ -367,29 +432,31 @@ impl Resolve<WriteArgs> for CommitSync {
}

// Get this here so it can fail before update created.
let resource_path =
if sync.config.files_on_host || !sync.config.repo.is_empty() {
let resource_path = sync
.config
.resource_path
.first()
.context("Sync does not have resource path configured.")?
.parse::<PathBuf>()
.context("Invalid resource path")?;
let resource_path = if sync.config.files_on_host
|| !sync.config.repo.is_empty()
|| repo.is_some()
{
let resource_path = sync
.config
.resource_path
.first()
.context("Sync does not have resource path configured.")?
.parse::<PathBuf>()
.context("Invalid resource path")?;

if resource_path
.extension()
.context("Resource path missing '.toml' extension")?
!= "toml"
{
return Err(
anyhow!("Resource path missing '.toml' extension").into(),
);
}
Some(resource_path)
} else {
None
};
if resource_path
.extension()
.context("Resource path missing '.toml' extension")?
!= "toml"
{
return Err(
anyhow!("Resource path missing '.toml' extension").into(),
);
}
Some(resource_path)
} else {
None
};

let res = ExportAllResourcesToToml {
include_resources: sync.config.include_resources,
@@ -440,34 +507,43 @@ impl Resolve<WriteArgs> for CommitSync {
format!("File contents written to {file_path:?}"),
);
}
} else if let Some(repo) = &repo {
let Some(resource_path) = resource_path else {
// Resource path checked above for repo mode.
unreachable!()
};
let args: CloneArgs = repo.into();
if let Err(e) =
commit_git_sync(args, &resource_path, &res.toml, &mut update)
.await
{
update.push_error_log(
"Write resource file",
format_serror(&e.into()),
);
update.finalize();
add_update(update.clone()).await?;
return Ok(update);
}
} else if !sync.config.repo.is_empty() {
let Some(resource_path) = resource_path else {
// Resource path checked above for repo mode.
unreachable!()
};
// GIT REPO
let args: CloneArgs = (&sync).into();
let root = args.unique_path(&core_config().repo_directory)?;
match git::write_commit_file(
"Commit Sync",
&root,
&resource_path,
&res.toml,
&sync.config.branch,
)
.await
if let Err(e) =
commit_git_sync(args, &resource_path, &res.toml, &mut update)
.await
{
Ok(res) => update.logs.extend(res.logs),
Err(e) => {
update.push_error_log(
"Write resource file",
format_serror(&e.into()),
);
update.finalize();
add_update(update.clone()).await?;
return Ok(update);
}
update.push_error_log(
"Write resource file",
format_serror(&e.into()),
);
update.finalize();
add_update(update.clone()).await?;
return Ok(update);
}

// ===========
// UI DEFINED
} else if let Err(e) = db_client()
@@ -505,6 +581,54 @@ impl Resolve<WriteArgs> for CommitSync {
}
}

async fn commit_git_sync(
mut args: CloneArgs,
resource_path: &Path,
toml: &str,
update: &mut Update,
) -> anyhow::Result<()> {
let root = args.unique_path(&core_config().repo_directory)?;
args.destination = Some(root.display().to_string());

let access_token = if let Some(account) = &args.account {
git_token(&args.provider, account, |https| args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", args.provider),
)?
} else {
None
};

let pull = git::pull_or_clone(
args.clone(),
&core_config().repo_directory,
access_token,
Default::default(),
Default::default(),
Default::default(),
Default::default(),
)
.await?;
update.logs.extend(pull.logs);

if !all_logs_success(&update.logs) {
return Ok(());
}

let res = git::write_commit_file(
"Commit Sync",
&root,
resource_path,
toml,
&args.branch,
)
.await?;
update.logs.extend(res.logs);

Ok(())
}

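Note: commit_git_sync pulls before writing the commit so the local clone sits at the remote tip and the resulting history stays linear. Both CommitSync arms reduce to the same call shape; a sketch of a call site, mirroring the arms above:

  // `repo.into()` for a linked repo, `(&sync).into()` for inline repo config.
  let args: CloneArgs = repo.into();
  if let Err(e) =
    commit_git_sync(args, &resource_path, &res.toml, &mut update).await
  {
    update.push_error_log("Write resource file", format_serror(&e.into()));
  }
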
impl Resolve<WriteArgs> for RefreshResourceSyncPending {
#[instrument(
name = "RefreshResourceSyncPending",
@@ -525,10 +649,21 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
)
.await?;

let repo = if !sync.config.files_on_host
&& !sync.config.linked_repo.is_empty()
{
crate::resource::get::<Repo>(&sync.config.linked_repo)
.await?
.into()
} else {
None
};

if !sync.config.managed
&& !sync.config.files_on_host
&& sync.config.file_contents.is_empty()
&& sync.config.repo.is_empty()
&& sync.config.linked_repo.is_empty()
{
// Sync not configured, nothing to refresh
return Ok(sync);
@@ -542,9 +677,12 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
hash,
message,
..
} = crate::sync::remote::get_remote_resources(&sync)
.await
.context("failed to get remote resources")?;
} = crate::sync::remote::get_remote_resources(
&sync,
repo.as_ref(),
)
.await
.context("failed to get remote resources")?;

sync.info.remote_contents = files;
sync.info.remote_errors = file_errors;
@@ -585,7 +723,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
deployment_map: &deployments_by_name,
stacks: &resources.stacks,
stack_map: &stacks_by_name,
all_resources: &all_resources,
},
)
.await;
@@ -595,7 +732,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Server>(
resources.servers,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -606,7 +742,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Stack>(
resources.stacks,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -617,7 +752,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Deployment>(
resources.deployments,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -628,7 +762,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Build>(
resources.builds,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -639,7 +772,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Repo>(
resources.repos,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -650,7 +782,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Procedure>(
resources.procedures,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -661,7 +792,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Action>(
resources.actions,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -672,7 +802,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Builder>(
resources.builders,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -683,7 +812,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<Alerter>(
resources.alerters,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -694,7 +822,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
push_updates_for_view::<ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
None,
None,
&id_to_tags,
@@ -722,7 +849,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
crate::sync::user_groups::get_updates_for_view(
resources.user_groups,
delete,
&all_resources,
)
.await?
} else {

@@ -13,8 +13,7 @@ use serde::Deserialize;
use serror::AddStatusCode;

use crate::{
config::core_config,
state::{db_client, jwt_client},
config::core_config, helpers::random_string, state::{db_client, jwt_client}
};

use self::client::github_oauth_client;
@@ -82,9 +81,23 @@ async fn callback(
if !no_users_exist && core_config.disable_user_registration {
return Err(anyhow!("User registration is disabled"));
}

let mut username = github_user.login;
// Modify username if it already exists
if db_client
.users
.find_one(doc! { "username": &username })
.await
.context("Failed to query users collection")?
.is_some()
{
username += "-";
username += &random_string(5);
};

let user = User {
id: Default::default(),
username: github_user.login,
username,
enabled: no_users_exist || core_config.enable_new_users,
admin: no_users_exist,
super_admin: no_users_exist,

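Note: all three auth callbacks (GitHub here, Google and OIDC below) now apply the same collision rule: if the preferred username is already taken, append a dash plus five random characters. A sketch of the resulting behavior, with `taken` standing in for the find_one lookup above:

  let mut username = String::from("alice");
  let taken = true; // stand-in for the db_client users query
  if taken {
    // "alice" becomes e.g. "alice-x7q3z"
    username += "-";
    username += &random_string(5);
  }
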
@@ -12,6 +12,7 @@ use serror::AddStatusCode;

use crate::{
config::core_config,
helpers::random_string,
state::{db_client, jwt_client},
};

@@ -91,15 +92,28 @@ async fn callback(
if !no_users_exist && core_config.disable_user_registration {
return Err(anyhow!("User registration is disabled"));
}
let mut username = google_user
.email
.split('@')
.collect::<Vec<&str>>()
.first()
.unwrap()
.to_string();
// Modify username if it already exists
if db_client
.users
.find_one(doc! { "username": &username })
.await
.context("Failed to query users collection")?
.is_some()
{
username += "-";
username += &random_string(5);
};

let user = User {
id: Default::default(),
username: google_user
.email
.split('@')
.collect::<Vec<&str>>()
.first()
.unwrap()
.to_string(),
username,
enabled: no_users_exist || core_config.enable_new_users,
admin: no_users_exist,
super_admin: no_users_exist,

@@ -48,10 +48,9 @@ pub async fn spawn_oidc_client_management() {
{
return;
}
reset_oidc_client()
.await
.context("Failed to initialize OIDC client.")
.unwrap();
if let Err(e) = reset_oidc_client().await {
error!("Failed to initialize OIDC client | {e:#}");
}
tokio::spawn(async move {
loop {
tokio::time::sleep(Duration::from_secs(60)).await;

@@ -12,9 +12,10 @@ use komodo_client::entities::{
};
use mungos::mongodb::bson::{Document, doc};
use openidconnect::{
AccessTokenHash, AuthorizationCode, CsrfToken, Nonce,
OAuth2TokenResponse, PkceCodeChallenge, PkceCodeVerifier, Scope,
TokenResponse, core::CoreAuthenticationFlow,
AccessTokenHash, AuthorizationCode, CsrfToken,
EmptyAdditionalClaims, Nonce, OAuth2TokenResponse,
PkceCodeChallenge, PkceCodeVerifier, Scope, TokenResponse,
core::{CoreAuthenticationFlow, CoreGenderClaim},
};
use reqwest::StatusCode;
use serde::Deserialize;
@@ -22,6 +23,7 @@ use serror::AddStatusCode;

use crate::{
config::core_config,
helpers::random_string,
state::{db_client, jwt_client},
};

@@ -89,6 +91,7 @@ async fn login(
)
.set_pkce_challenge(pkce_challenge)
.add_scope(Scope::new("openid".to_string()))
.add_scope(Scope::new("profile".to_string()))
.add_scope(Scope::new("email".to_string()))
.url();

@@ -137,7 +140,7 @@ async fn callback(
) -> anyhow::Result<Redirect> {
let client = oidc_client().load();
let client =
client.as_ref().context("OIDC Client not configured")?;
client.as_ref().context("OIDC Client not initialized successfully. Is the provider properly configured?")?;

if let Some(e) = query.error {
return Err(anyhow!("Provider returned error: {e}"));
@@ -159,11 +162,12 @@ async fn callback(
));
}

let reqwest_client = reqwest_client();
let token_response = client
.exchange_code(AuthorizationCode::new(code))
.context("Failed to get Oauth token at exchange code")?
.set_pkce_verifier(pkce_verifier)
.request_async(reqwest_client())
.request_async(reqwest_client)
.await
.context("Failed to get Oauth token")?;

@@ -226,12 +230,26 @@ async fn callback(
if !no_users_exist && core_config.disable_user_registration {
return Err(anyhow!("User registration is disabled"));
}

// Fetch user info
let user_info = client
.user_info(
token_response.access_token().clone(),
claims.subject().clone().into(),
)
.context("Invalid user info request")?
.request_async::<EmptyAdditionalClaims, _, CoreGenderClaim>(
reqwest_client,
)
.await
.context("Failed to fetch user info for new user")?;

// Will use preferred_username, then email, then user_id if it isn't available.
let username = claims
let mut username = user_info
.preferred_username()
.map(|username| username.to_string())
.unwrap_or_else(|| {
let email = claims
let email = user_info
.email()
.map(|email| email.as_str())
.unwrap_or(user_id);
@@ -245,6 +263,19 @@ async fn callback(
}
.to_string()
});

// Modify username if it already exists
if db_client
.users
.find_one(doc! { "username": &username })
.await
.context("Failed to query users collection")?
.is_some()
{
username += "-";
username += &random_string(5);
};

let user = User {
id: Default::default(),
username,
@@ -262,6 +293,7 @@ async fn callback(
user_id: user_id.to_string(),
},
};

let user_id = db_client
.users
.insert_one(user)
@@ -271,6 +303,7 @@ async fn callback(
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();

jwt_client()
.encode(user_id)
.context("failed to generate jwt")?

@@ -135,6 +135,7 @@ pub fn core_config() -> &'static CoreConfig {
host: env.komodo_host.unwrap_or(config.host),
port: env.komodo_port.unwrap_or(config.port),
bind_ip: env.komodo_bind_ip.unwrap_or(config.bind_ip),
timezone: env.komodo_timezone.unwrap_or(config.timezone),
first_server: env.komodo_first_server.unwrap_or(config.first_server),
frontend_path: env.komodo_frontend_path.unwrap_or(config.frontend_path),
jwt_ttl: env

bin/core/src/helpers/all_resources.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
use std::collections::HashMap;

use komodo_client::entities::{
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, stack::Stack, sync::ResourceSync,
};

#[derive(Debug, Default)]
pub struct AllResourcesById {
pub servers: HashMap<String, Server>,
pub deployments: HashMap<String, Deployment>,
pub stacks: HashMap<String, Stack>,
pub builds: HashMap<String, Build>,
pub repos: HashMap<String, Repo>,
pub procedures: HashMap<String, Procedure>,
pub actions: HashMap<String, Action>,
pub builders: HashMap<String, Builder>,
pub alerters: HashMap<String, Alerter>,
pub syncs: HashMap<String, ResourceSync>,
}

impl AllResourcesById {
/// Use `match_tags` to filter resources by tag.
pub async fn load() -> anyhow::Result<Self> {
let map = HashMap::new();
let id_to_tags = &map;
let match_tags = &[];
Ok(Self {
servers: crate::resource::get_id_to_resource_map::<Server>(
id_to_tags, match_tags,
)
.await?,
deployments: crate::resource::get_id_to_resource_map::<
Deployment,
>(id_to_tags, match_tags)
.await?,
builds: crate::resource::get_id_to_resource_map::<Build>(
id_to_tags, match_tags,
)
.await?,
repos: crate::resource::get_id_to_resource_map::<Repo>(
id_to_tags, match_tags,
)
.await?,
procedures:
crate::resource::get_id_to_resource_map::<Procedure>(
id_to_tags, match_tags,
)
.await?,
actions: crate::resource::get_id_to_resource_map::<Action>(
id_to_tags, match_tags,
)
.await?,
builders: crate::resource::get_id_to_resource_map::<Builder>(
id_to_tags, match_tags,
)
.await?,
alerters: crate::resource::get_id_to_resource_map::<Alerter>(
id_to_tags, match_tags,
)
.await?,
syncs: crate::resource::get_id_to_resource_map::<ResourceSync>(
id_to_tags, match_tags,
)
.await?,
stacks: crate::resource::get_id_to_resource_map::<Stack>(
id_to_tags, match_tags,
)
.await?,
})
}
}

bin/core/src/helpers/maintenance.rs (new file, 114 lines)
@@ -0,0 +1,114 @@
use std::str::FromStr;

use anyhow::Context;
use chrono::{Datelike, Local};
use komodo_client::entities::{
DayOfWeek, MaintenanceScheduleType, MaintenanceWindow,
};

use crate::config::core_config;

/// Check if a timestamp is currently in a maintenance window, given a list of windows.
pub fn is_in_maintenance(
windows: &[MaintenanceWindow],
timestamp: i64,
) -> bool {
windows
.iter()
.any(|window| is_maintenance_window_active(window, timestamp))
}

/// Check if the current timestamp falls within this maintenance window
pub fn is_maintenance_window_active(
window: &MaintenanceWindow,
timestamp: i64,
) -> bool {
if !window.enabled {
return false;
}

let dt = chrono::DateTime::from_timestamp(timestamp / 1000, 0)
.unwrap_or_else(chrono::Utc::now);

let (local_time, local_weekday, local_date) =
match (window.timezone.as_str(), core_config().timezone.as_str())
{
("", "") => {
let local_dt = dt.with_timezone(&Local);
(local_dt.time(), local_dt.weekday(), local_dt.date_naive())
}
("", timezone) | (timezone, _) => {
let tz: chrono_tz::Tz = match timezone
.parse()
.context("Failed to parse timezone")
{
Ok(tz) => tz,
Err(e) => {
warn!(
"Failed to parse maintenance window timezone: {e:#}"
);
return false;
}
};
let local_dt = dt.with_timezone(&tz);
(local_dt.time(), local_dt.weekday(), local_dt.date_naive())
}
};

match window.schedule_type {
MaintenanceScheduleType::Daily => {
is_time_in_window(window, local_time)
}
MaintenanceScheduleType::Weekly => {
let day_of_week =
DayOfWeek::from_str(&window.day_of_week).unwrap_or_default();
convert_day_of_week(local_weekday) == day_of_week
&& is_time_in_window(window, local_time)
}
MaintenanceScheduleType::OneTime => {
// Parse the date string and check if it matches current date
if let Ok(maintenance_date) =
chrono::NaiveDate::parse_from_str(&window.date, "%Y-%m-%d")
{
local_date == maintenance_date
&& is_time_in_window(window, local_time)
} else {
false
}
}
}
}

fn is_time_in_window(
window: &MaintenanceWindow,
current_time: chrono::NaiveTime,
) -> bool {
let start_time = chrono::NaiveTime::from_hms_opt(
window.hour as u32,
window.minute as u32,
0,
)
.unwrap_or(chrono::NaiveTime::from_hms_opt(0, 0, 0).unwrap());

let end_time = start_time
+ chrono::Duration::minutes(window.duration_minutes as i64);

// Handle case where maintenance window crosses midnight
if end_time < start_time {
current_time >= start_time || current_time <= end_time
} else {
current_time >= start_time && current_time <= end_time
}
}

fn convert_day_of_week(value: chrono::Weekday) -> DayOfWeek {
match value {
chrono::Weekday::Mon => DayOfWeek::Monday,
chrono::Weekday::Tue => DayOfWeek::Tuesday,
chrono::Weekday::Wed => DayOfWeek::Wednesday,
chrono::Weekday::Thu => DayOfWeek::Thursday,
chrono::Weekday::Fri => DayOfWeek::Friday,
chrono::Weekday::Sat => DayOfWeek::Saturday,
chrono::Weekday::Sun => DayOfWeek::Sunday,
}
}
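
Note on the new maintenance helper: is_time_in_window compares wall-clock times, and the end < start branch handles windows that cross midnight. A hedged sketch (assumes MaintenanceWindow derives Default as suggested by the struct above; the timestamp name is illustrative):

  // A Daily window from 23:30 for 60 minutes wraps past midnight,
  // so 00:15 local time on the following day still matches.
  let window = MaintenanceWindow {
    enabled: true,
    schedule_type: MaintenanceScheduleType::Daily,
    hour: 23,
    minute: 30,
    duration_minutes: 60,
    ..Default::default()
  };
  // ts_at_0015_local_ms: a millisecond timestamp that is 00:15 in the
  // configured timezone (illustrative value).
  assert!(is_maintenance_window_active(&window, ts_at_0015_local_ms));
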
@@ -4,10 +4,13 @@ use anyhow::{Context, anyhow};
use indexmap::IndexSet;
use komodo_client::entities::{
ResourceTarget,
build::Build,
permission::{
Permission, PermissionLevel, SpecificPermission, UserTarget,
},
repo::Repo,
server::Server,
stack::Stack,
user::User,
};
use mongo_indexed::Document;
@@ -18,10 +21,12 @@ use rand::Rng;
use crate::{config::core_config, state::db_client};

pub mod action_state;
pub mod all_resources;
pub mod builder;
pub mod cache;
pub mod channel;
pub mod interpolate;
pub mod maintenance;
pub mod matcher;
pub mod procedure;
pub mod prune;
@@ -95,6 +100,70 @@ pub async fn git_token(
)
}

pub async fn stack_git_token(
stack: &mut Stack,
repo: Option<&mut Repo>,
) -> anyhow::Result<Option<String>> {
if let Some(repo) = repo {
return git_token(
&repo.config.git_provider,
&repo.config.git_account,
|https| repo.config.git_https = https,
)
.await
.with_context(|| {
format!(
"Failed to get git token. Stopping run. | {} | {}",
repo.config.git_provider, repo.config.git_account
)
});
}
git_token(
&stack.config.git_provider,
&stack.config.git_account,
|https| stack.config.git_https = https,
)
.await
.with_context(|| {
format!(
"Failed to get git token. Stopping run. | {} | {}",
stack.config.git_provider, stack.config.git_account
)
})
}

pub async fn build_git_token(
build: &mut Build,
repo: Option<&mut Repo>,
) -> anyhow::Result<Option<String>> {
if let Some(repo) = repo {
return git_token(
&repo.config.git_provider,
&repo.config.git_account,
|https| repo.config.git_https = https,
)
.await
.with_context(|| {
format!(
"Failed to get git token. Stopping run. | {} | {}",
repo.config.git_provider, repo.config.git_account
)
});
}
git_token(
&build.config.git_provider,
&build.config.git_account,
|https| build.config.git_https = https,
)
.await
.with_context(|| {
format!(
"Failed to get git token. Stopping run. | {} | {}",
build.config.git_provider, build.config.git_account
)
})
}

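Note: stack_git_token and build_git_token give an attached Repo's git_provider/git_account precedence over the resource's own config, so a linked repo fully determines authentication. Usage matches the WriteStackFileContents hunk earlier:

  // repo: Option<Repo>, resolved from config.linked_repo beforehand.
  let git_token = stack_git_token(&mut stack, repo.as_mut()).await?;
  // None simply means no git account is configured on either source.
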
/// First checks db for token, then checks core config.
/// Only errors if db call errors.
pub async fn registry_token(

@@ -65,6 +65,7 @@ async fn app() -> anyhow::Result<()> {
// Spawn background tasks
monitor::spawn_monitor_loop();
resource::spawn_resource_refresh_loop();
resource::spawn_all_resources_refresh_loop();
resource::spawn_build_state_refresh_loop();
resource::spawn_repo_state_refresh_loop();
resource::spawn_procedure_state_refresh_loop();

@@ -22,6 +22,7 @@ use mungos::{

use crate::{
alert::send_alerts,
helpers::maintenance::is_in_maintenance,
state::{db_client, server_status_cache},
};

@@ -30,6 +31,48 @@ type OpenAlertMap<T = AlertDataVariant> =
HashMap<ResourceTarget, HashMap<T, Alert>>;
type OpenDiskAlertMap = OpenAlertMap<PathBuf>;

/// Alert buffer to prevent immediate alerts on transient issues
struct AlertBuffer {
buffer: Mutex<HashMap<(String, AlertDataVariant), bool>>,
}

impl AlertBuffer {
fn new() -> Self {
Self {
buffer: Mutex::new(HashMap::new()),
}
}

/// Check if alert should be opened. Requires two consecutive calls to return true.
fn ready_to_open(
&self,
server_id: String,
variant: AlertDataVariant,
) -> bool {
let mut lock = self.buffer.lock().unwrap();
let ready = lock.entry((server_id, variant)).or_default();
if *ready {
*ready = false;
true
} else {
*ready = true;
false
}
}

/// Reset buffer state for a specific server/alert combination
fn reset(&self, server_id: String, variant: AlertDataVariant) {
let mut lock = self.buffer.lock().unwrap();
lock.remove(&(server_id, variant));
}
}

/// Global alert buffer instance
fn alert_buffer() -> &'static AlertBuffer {
static BUFFER: OnceLock<AlertBuffer> = OnceLock::new();
BUFFER.get_or_init(AlertBuffer::new)
}

#[instrument(level = "debug")]
pub async fn alert_servers(
ts: i64,
@@ -59,6 +102,10 @@ pub async fn alert_servers(
let server_alerts = open_alerts
.get(&ResourceTarget::Server(server_status.id.clone()));

// Check if server is in maintenance mode
let in_maintenance =
is_in_maintenance(&server.config.maintenance_windows, ts);

// ===================
// SERVER HEALTH
// ===================
@@ -67,11 +114,13 @@ pub async fn alert_servers(
});
match (server_status.state, health_alert) {
(ServerState::NotOk, None) => {
if buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerUnreachable,
) {
// open unreachable alert
// Only open unreachable alert if not in maintenance and buffer is ready
if !in_maintenance
&& buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerUnreachable,
)
{
let alert = Alert {
id: Default::default(),
ts,
@@ -143,11 +192,13 @@ pub async fn alert_servers(
match (health.cpu.level, cpu_alert, health.cpu.should_close_alert)
{
(SeverityLevel::Warning | SeverityLevel::Critical, None, _) => {
// open alert
if buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerCpu,
) {
// Only open CPU alert if not in maintenance and buffer is ready
if !in_maintenance
&& buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerCpu,
)
{
let alert = Alert {
id: Default::default(),
ts,
@@ -174,8 +225,8 @@ pub async fn alert_servers(
Some(mut alert),
_,
) => {
// modify alert level only if it has increased
if alert.level < health.cpu.level {
// modify alert level only if it has increased and not in maintenance
if !in_maintenance && alert.level < health.cpu.level {
alert.level = health.cpu.level;
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
@@ -206,8 +257,7 @@ pub async fn alert_servers(
alert_ids_to_close
.push((alert, server.config.send_cpu_alerts))
}
(SeverityLevel::Ok, Some(_), false) => {}
(SeverityLevel::Ok, None, _) => buffer
(SeverityLevel::Ok, _, _) => buffer
.reset(server_status.id.clone(), AlertDataVariant::ServerCpu),
}

@@ -221,11 +271,13 @@ pub async fn alert_servers(
match (health.mem.level, mem_alert, health.mem.should_close_alert)
{
(SeverityLevel::Warning | SeverityLevel::Critical, None, _) => {
// open alert
if buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerMem,
) {
// Only open memory alert if not in maintenance and buffer is ready
if !in_maintenance
&& buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerMem,
)
{
let alert = Alert {
id: Default::default(),
ts,
@@ -257,8 +309,8 @@ pub async fn alert_servers(
Some(mut alert),
_,
) => {
// modify alert level only if it has increased
if alert.level < health.mem.level {
// modify alert level only if it has increased and not in maintenance
if !in_maintenance && alert.level < health.mem.level {
alert.level = health.mem.level;
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
@@ -299,8 +351,7 @@ pub async fn alert_servers(
alert_ids_to_close
.push((alert, server.config.send_mem_alerts))
}
(SeverityLevel::Ok, Some(_), false) => {}
(SeverityLevel::Ok, None, _) => buffer
(SeverityLevel::Ok, _, _) => buffer
.reset(server_status.id.clone(), AlertDataVariant::ServerMem),
}

@@ -322,11 +373,13 @@ pub async fn alert_servers(
None,
_,
) => {
// open alert
if buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerDisk,
) {
// Only open disk alert if not in maintenance and buffer is ready
if !in_maintenance
&& buffer.ready_to_open(
server_status.id.clone(),
AlertDataVariant::ServerDisk,
)
{
let disk =
server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
@@ -360,8 +413,8 @@ pub async fn alert_servers(
Some(mut alert),
_,
) => {
// modify alert level only if it has increased
if health.level < alert.level {
// modify alert level only if it has increased and not in maintenance
if !in_maintenance && health.level < alert.level {
let disk =
server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
@@ -396,8 +449,7 @@ pub async fn alert_servers(
alert_ids_to_close
.push((alert, server.config.send_disk_alerts))
}
(SeverityLevel::Ok, Some(_), false) => {}
(SeverityLevel::Ok, None, _) => buffer.reset(
(SeverityLevel::Ok, _, _) => buffer.reset(
server_status.id.clone(),
AlertDataVariant::ServerDisk,
),
@@ -606,40 +658,3 @@ async fn get_open_alerts()

Ok((map, disk_map))
}

/// Alerts should only be opened after
/// 2 *consecutive* alerting conditions.
/// This reduces alerting noise.
#[derive(Default)]
struct AlertBuffer {
/// (ServerId, AlertType) -> should_open.
buffer: Mutex<HashMap<(String, AlertDataVariant), bool>>,
}

impl AlertBuffer {
fn reset(&self, server_id: String, variant: AlertDataVariant) {
let mut lock = self.buffer.lock().unwrap();
lock.remove(&(server_id, variant));
}

fn ready_to_open(
&self,
server_id: String,
variant: AlertDataVariant,
) -> bool {
let mut lock = self.buffer.lock().unwrap();
let ready = lock.entry((server_id, variant)).or_default();
if *ready {
*ready = false;
true
} else {
*ready = true;
false
}
}
}

fn alert_buffer() -> &'static AlertBuffer {
static ALERT_BUFFER: OnceLock<AlertBuffer> = OnceLock::new();
ALERT_BUFFER.get_or_init(Default::default)
}

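Note: the restored AlertBuffer requires two consecutive failing checks before an alert opens, and any healthy check clears the armed state. A sketch of the state machine, following the ready_to_open/reset definitions above:

  let buffer = alert_buffer();
  let id = String::from("server-1");
  // first failing check only arms the buffer
  assert!(!buffer.ready_to_open(id.clone(), AlertDataVariant::ServerCpu));
  // second consecutive failing check opens the alert
  assert!(buffer.ready_to_open(id.clone(), AlertDataVariant::ServerCpu));
  // a healthy interval resets it back to the initial state
  buffer.reset(id.clone(), AlertDataVariant::ServerCpu);
  assert!(!buffer.ready_to_open(id, AlertDataVariant::ServerCpu));
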
@@ -145,8 +145,8 @@ pub async fn update_cache_for_server(server: &Server) {
// Handle server disabled
if !server.config.enabled {
insert_deployments_status_unknown(deployments).await;
insert_repos_status_unknown(repos).await;
insert_stacks_status_unknown(stacks).await;
insert_repos_status_unknown(repos).await;
insert_server_status(
server,
ServerState::Disabled,
@@ -170,12 +170,12 @@ pub async fn update_cache_for_server(server: &Server) {
Ok(version) => version.version,
Err(e) => {
insert_deployments_status_unknown(deployments).await;
insert_repos_status_unknown(repos).await;
insert_stacks_status_unknown(stacks).await;
insert_repos_status_unknown(repos).await;
insert_server_status(
server,
ServerState::NotOk,
String::from("unknown"),
String::from("Unknown"),
None,
(None, None, None, None, None),
Serror::from(&e),
@@ -190,8 +190,8 @@ pub async fn update_cache_for_server(server: &Server) {
Ok(stats) => Some(filter_volumes(server, stats)),
Err(e) => {
insert_deployments_status_unknown(deployments).await;
insert_repos_status_unknown(repos).await;
insert_stacks_status_unknown(stacks).await;
insert_repos_status_unknown(repos).await;
insert_server_status(
server,
ServerState::NotOk,

@@ -14,6 +14,7 @@ use komodo_client::{
builder::Builder,
environment_vars_from_str, optional_string,
permission::PermissionLevel,
repo::Repo,
resource::Resource,
to_docker_compatible_name,
update::Update,
@@ -32,7 +33,10 @@ use crate::{
helpers::{
empty_or_only_spaces, query::get_latest_update, repo_link,
},
state::{action_states, build_state_cache, db_client},
permission::get_check_permissions,
state::{
action_states, all_resources_cache, build_state_cache, db_client,
},
};

impl super::KomodoResource for Build {
@@ -64,6 +68,32 @@ impl super::KomodoResource for Build {
build: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let state = get_build_state(&build.id).await;

let default_git = (
build.config.git_provider,
build.config.repo,
build.config.branch,
build.config.git_https,
);
let (git_provider, repo, branch, git_https) =
if build.config.linked_repo.is_empty() {
default_git
} else {
all_resources_cache()
.load()
.repos
.get(&build.config.linked_repo)
.map(|r| {
(
r.config.git_provider.clone(),
r.config.repo.clone(),
r.config.branch.clone(),
r.config.git_https,
)
})
.unwrap_or(default_git)
};

BuildListItem {
name: build.name,
id: build.id,
@@ -74,15 +104,17 @@ impl super::KomodoResource for Build {
version: build.config.version,
builder_id: build.config.builder_id,
files_on_host: build.config.files_on_host,
dockerfile_contents: !build.config.dockerfile.is_empty(),
linked_repo: build.config.linked_repo,
repo_link: repo_link(
&build.config.git_provider,
&build.config.repo,
&build.config.branch,
build.config.git_https,
&git_provider,
&repo,
&branch,
git_https,
),
git_provider: build.config.git_provider,
repo: build.config.repo,
branch: build.config.branch,
git_provider,
repo,
branch,
image_registry_domain: optional_string(
build.config.image_registry.domain,
),
@@ -234,6 +266,19 @@ async fn validate_config(
config.builder_id = Some(builder.id)
}
}
if let Some(linked_repo) = &config.linked_repo {
if !linked_repo.is_empty() {
let repo = get_check_permissions::<Repo>(
linked_repo,
user,
PermissionLevel::Read.attach(),
)
.await
.context("Cannot attach Repo to this Build")?;
// in case it comes in as name
config.linked_repo = Some(repo.id);
}
}
if let Some(build_args) = &config.build_args {
environment_vars_from_str(build_args)
.context("Invalid build_args")?;

@@ -14,7 +14,7 @@ use komodo_client::entities::{
permission::{PermissionLevel, SpecificPermission},
resource::Resource,
server::Server,
to_docker_compatible_name,
to_container_compatible_name,
update::Update,
user::User,
};
@@ -50,7 +50,7 @@ impl super::KomodoResource for Deployment {
}

fn validated_name(name: &str) -> String {
to_docker_compatible_name(name)
to_container_compatible_name(name)
}

fn creator_specific_permissions() -> IndexSet<SpecificPermission> {

@@ -69,7 +69,10 @@ pub use build::{
pub use procedure::{
refresh_procedure_state_cache, spawn_procedure_state_refresh_loop,
};
pub use refresh::spawn_resource_refresh_loop;
pub use refresh::{
refresh_all_resources_cache, spawn_all_resources_refresh_loop,
spawn_resource_refresh_loop,
};
pub use repo::{
refresh_repo_state_cache, spawn_repo_state_refresh_loop,
};
@@ -537,6 +540,8 @@ pub async fn create<T: KomodoResource>(

T::post_create(&resource, &mut update).await?;

refresh_all_resources_cache().await;

update.finalize();
add_update(update).await?;

@@ -632,8 +637,9 @@ pub async fn update<T: KomodoResource>(

T::post_update(&updated, &mut update).await?;

update.finalize();
refresh_all_resources_cache().await;

update.finalize();
add_update(update).await?;

Ok(updated)
@@ -706,6 +712,7 @@ pub async fn update_tags<T: KomodoResource>(
doc! { "$set": { "tags": tags } },
)
.await?;
refresh_all_resources_cache().await;
Ok(())
}

@@ -769,8 +776,11 @@ pub async fn rename<T: KomodoResource>(
),
);

refresh_all_resources_cache().await;

update.finalize();
update.id = add_update(update.clone()).await?;

Ok(update)
}

@@ -829,6 +839,8 @@ pub async fn delete<T: KomodoResource>(
update.push_error_log("post delete", format_serror(&e.into()));
}

refresh_all_resources_cache().await;

update.finalize();
add_update(update).await?;

@@ -14,9 +14,31 @@ use resolver_api::Resolve;
use crate::{
api::{execute::pull_deployment_inner, write::WriteArgs},
config::core_config,
state::db_client,
helpers::all_resources::AllResourcesById,
state::{all_resources_cache, db_client},
};

pub fn spawn_all_resources_refresh_loop() {
tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(15));
loop {
interval.tick().await;
refresh_all_resources_cache().await;
}
});
}

pub async fn refresh_all_resources_cache() {
let all = match AllResourcesById::load().await {
Ok(all) => all,
Err(e) => {
error!("Failed to load all resources by id cache | {e:#}");
return;
}
};
all_resources_cache().store(all.into());
}

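Note: the all-resources cache is refreshed every 15 seconds by the loop above, plus eagerly after every resource create/update/rename/delete (earlier hunks). Readers take lock-free snapshots via load(), as the Build and Stack list items do when resolving a linked repo:

  // Sketch: resolve linked repo details from the shared cache snapshot.
  let cache = all_resources_cache().load();
  if let Some(repo) = cache.repos.get(&build.config.linked_repo) {
    // use repo.config.{git_provider, repo, branch, git_https}
  }
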
pub fn spawn_resource_refresh_loop() {
let interval: Timelength = core_config()
.resource_poll_interval
@@ -167,9 +189,6 @@ async fn refresh_syncs() {
return;
};
for sync in syncs {
if sync.config.repo.is_empty() {
continue;
}
RefreshResourceSyncPending { sync: sync.id }
.resolve(
&WriteArgs { user: sync_user().clone() },

@@ -68,7 +68,10 @@ impl super::KomodoResource for Server {
tags: server.tags,
resource_type: ResourceTargetVariant::Server,
info: ServerListItemInfo {
state: status.map(|s| s.state).unwrap_or_default(),
state: status.as_ref().map(|s| s.state).unwrap_or_default(),
version: status
.map(|s| s.version.clone())
.unwrap_or(String::from("Unknown")),
region: server.config.region,
address: server.config.address,
send_unreachable_alerts: server

@@ -6,6 +6,7 @@ use komodo_client::{
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
permission::{PermissionLevel, SpecificPermission},
repo::Repo,
resource::Resource,
server::Server,
stack::{
@@ -28,7 +29,8 @@ use crate::{
helpers::{periphery_client, query::get_stack_state, repo_link},
monitor::update_cache_for_server,
state::{
action_states, db_client, server_status_cache, stack_status_cache,
action_states, all_resources_cache, db_client,
server_status_cache, stack_status_cache,
},
};

@@ -109,6 +111,31 @@ impl super::KomodoResource for Stack {
})
.unwrap_or_default();

let default_git = (
stack.config.git_provider,
stack.config.repo,
stack.config.branch,
stack.config.git_https,
);
let (git_provider, repo, branch, git_https) =
if stack.config.linked_repo.is_empty() {
default_git
} else {
all_resources_cache()
.load()
.repos
.get(&stack.config.linked_repo)
.map(|r| {
(
r.config.git_provider.clone(),
r.config.repo.clone(),
r.config.branch.clone(),
r.config.git_https,
)
})
.unwrap_or(default_git)
};

// This is only true if it is KNOWN to be true, so other cases are false.
let (project_missing, status) =
if stack.config.server_id.is_empty()
@@ -149,17 +176,18 @@ impl super::KomodoResource for Stack {
project_missing,
file_contents: !stack.config.file_contents.is_empty(),
server_id: stack.config.server_id,
linked_repo: stack.config.linked_repo,
missing_files: stack.info.missing_files,
files_on_host: stack.config.files_on_host,
repo_link: repo_link(
&stack.config.git_provider,
&stack.config.repo,
&stack.config.branch,
stack.config.git_https,
&git_provider,
&repo,
&branch,
git_https,
),
git_provider: stack.config.git_provider,
repo: stack.config.repo,
branch: stack.config.branch,
git_provider,
repo,
branch,
latest_hash: stack.info.latest_hash,
deployed_hash: stack.info.deployed_hash,
},
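// A minimal sketch of the fallback introduced above: if a Stack has a
// linked Repo, that Repo's git settings win; otherwise the Stack's
// inline git config is used. The types here are hypothetical:

#[derive(Clone)]
struct GitSettings {
  provider: String,
  repo: String,
  branch: String,
  https: bool,
}

fn effective_git(
  inline: GitSettings,
  linked: Option<&GitSettings>,
) -> GitSettings {
  // A missing / unknown linked repo falls back to the inline config,
  // mirroring the .unwrap_or(default_git) above.
  linked.cloned().unwrap_or(inline)
}
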
@@ -357,110 +385,23 @@ async fn validate_config(
PermissionLevel::Read.attach(),
)
.await
.context("Cannot attach stack to this Server")?;
.context("Cannot attach Stack to this Server")?;
// in case it comes in as name
config.server_id = Some(server.id);
}
}
if let Some(linked_repo) = &config.linked_repo {
if !linked_repo.is_empty() {
let repo = get_check_permissions::<Repo>(
linked_repo,
user,
PermissionLevel::Read.attach(),
)
.await
.context("Cannot attach Repo to this Stack")?;
// in case it comes in as name
config.linked_repo = Some(repo.id);
}
}
Ok(())
}

// pub fn spawn_resource_sync_state_refresh_loop() {
// tokio::spawn(async move {
// loop {
// refresh_resource_sync_state_cache().await;
// tokio::time::sleep(Duration::from_secs(60)).await;
// }
// });
// }

// pub async fn refresh_resource_sync_state_cache() {
// let _ = async {
// let resource_syncs =
// find_collect(&db_client().resource_syncs, None, None)
// .await
// .context("failed to get resource_syncs from db")?;
// let cache = resource_sync_state_cache();
// for resource_sync in resource_syncs {
// let state =
// get_resource_sync_state_from_db(&resource_sync.id).await;
// cache.insert(resource_sync.id, state).await;
// }
// anyhow::Ok(())
// }
// .await
// .inspect_err(|e| {
// error!("failed to refresh resource_sync state cache | {e:#}")
// });
// }

// async fn get_resource_sync_state(
// id: &String,
// data: &PendingSyncUpdatesData,
// ) -> StackState {
// if let Some(state) = action_states()
// .resource_sync
// .get(id)
// .await
// .and_then(|s| {
// s.get()
// .map(|s| {
// if s.syncing {
// Some(StackState::Syncing)
// } else {
// None
// }
// })
// .ok()
// })
// .flatten()
// {
// return state;
// }
// let data = match data {
// PendingSyncUpdatesData::Err(_) => return StackState::Failed,
// PendingSyncUpdatesData::Ok(data) => data,
// };
// if !data.no_updates() {
// return StackState::Pending;
// }
// resource_sync_state_cache()
// .get(id)
// .await
// .unwrap_or_default()
// }

// async fn get_resource_sync_state_from_db(id: &str) -> StackState {
// async {
// let state = db_client()
// .await
// .updates
// .find_one(doc! {
// "target.type": "Stack",
// "target.id": id,
// "operation": "RunSync"
// })
// .with_options(
// FindOneOptions::builder()
// .sort(doc! { "start_ts": -1 })
// .build(),
// )
// .await?
// .map(|u| {
// if u.success {
// StackState::Ok
// } else {
// StackState::Failed
// }
// })
// .unwrap_or(StackState::Ok);
// anyhow::Ok(state)
// }
// .await
// .inspect_err(|e| {
// warn!(
// "failed to get resource sync state from db for {id} | {e:#}"
// )
// })
// .unwrap_or(StackState::Unknown)
// }

@@ -5,6 +5,8 @@ use komodo_client::{
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
komodo_timestamp,
permission::PermissionLevel,
repo::Repo,
resource::Resource,
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncConfig,
@@ -23,7 +25,8 @@ use resolver_api::Resolve;
use crate::{
api::write::WriteArgs,
helpers::repo_link,
state::{action_states, db_client},
permission::get_check_permissions,
state::{action_states, all_resources_cache, db_client},
};

impl super::KomodoResource for ResourceSync {
@@ -53,6 +56,32 @@ impl super::KomodoResource for ResourceSync {
let state =
get_resource_sync_state(&resource_sync.id, &resource_sync.info)
.await;

let default_git = (
resource_sync.config.git_provider,
resource_sync.config.repo,
resource_sync.config.branch,
resource_sync.config.git_https,
);
let (git_provider, repo, branch, git_https) =
if resource_sync.config.linked_repo.is_empty() {
default_git
} else {
all_resources_cache()
.load()
.repos
.get(&resource_sync.config.linked_repo)
.map(|r| {
(
r.config.git_provider.clone(),
r.config.repo.clone(),
r.config.branch.clone(),
r.config.git_https,
)
})
.unwrap_or(default_git)
};

ResourceSyncListItem {
id: resource_sync.id,
name: resource_sync.name,
@@ -62,15 +91,16 @@ impl super::KomodoResource for ResourceSync {
file_contents: !resource_sync.config.file_contents.is_empty(),
files_on_host: resource_sync.config.files_on_host,
managed: resource_sync.config.managed,
linked_repo: resource_sync.config.linked_repo,
repo_link: repo_link(
&resource_sync.config.git_provider,
&resource_sync.config.repo,
&resource_sync.config.branch,
resource_sync.config.git_https,
&git_provider,
&repo,
&branch,
git_https,
),
git_provider: resource_sync.config.git_provider,
repo: resource_sync.config.repo,
branch: resource_sync.config.branch,
git_provider,
repo,
branch,
last_sync_ts: resource_sync.info.last_sync_ts,
last_sync_hash: resource_sync.info.last_sync_hash,
last_sync_message: resource_sync.info.last_sync_message,
@@ -100,10 +130,10 @@ impl super::KomodoResource for ResourceSync {
}

async fn validate_create_config(
_config: &mut Self::PartialConfig,
_user: &User,
config: &mut Self::PartialConfig,
user: &User,
) -> anyhow::Result<()> {
Ok(())
validate_config(config, user).await
}

async fn post_create(
@@ -134,10 +164,10 @@ impl super::KomodoResource for ResourceSync {

async fn validate_update_config(
_id: &str,
_config: &mut Self::PartialConfig,
_user: &User,
config: &mut Self::PartialConfig,
user: &User,
) -> anyhow::Result<()> {
Ok(())
validate_config(config, user).await
}

async fn post_update(
@@ -185,6 +215,27 @@ impl super::KomodoResource for ResourceSync {
}
}

#[instrument(skip(user))]
async fn validate_config(
config: &mut PartialResourceSyncConfig,
user: &User,
) -> anyhow::Result<()> {
if let Some(linked_repo) = &config.linked_repo {
if !linked_repo.is_empty() {
let repo = get_check_permissions::<Repo>(
linked_repo,
user,
PermissionLevel::Read.attach(),
)
.await
.context("Cannot attach Repo to this Resource Sync")?;
// in case it comes in as name
config.linked_repo = Some(repo.id);
}
}
Ok(())
}

async fn get_resource_sync_state(
id: &String,
data: &ResourceSyncInfo,

@@ -24,6 +24,7 @@ use resolver_api::Resolve;
use crate::{
alert::send_alerts,
api::execute::{ExecuteArgs, ExecuteRequest},
config::core_config,
helpers::update::init_execution_update,
state::db_client,
};
@@ -313,23 +314,26 @@ fn find_next_occurrence(
})?
}
};
let next = if schedule.timezone().is_empty() {
let tz_time = chrono::Local::now().with_timezone(&Local);
cron
.find_next_occurrence(&tz_time, false)
.context("Failed to find next run time")?
.timestamp_millis()
} else {
let tz: chrono_tz::Tz = schedule
.timezone()
.parse()
.context("Failed to parse schedule timezone")?;
let tz_time = chrono::Local::now().with_timezone(&tz);
cron
.find_next_occurrence(&tz_time, false)
.context("Failed to find next run time")?
.timestamp_millis()
};
let next =
match (schedule.timezone(), core_config().timezone.as_str()) {
("", "") => {
let tz_time = chrono::Local::now().with_timezone(&Local);
cron
.find_next_occurrence(&tz_time, false)
.context("Failed to find next run time")?
.timestamp_millis()
}
("", timezone) | (timezone, _) => {
let tz: chrono_tz::Tz = timezone
.parse()
.context("Failed to parse timezone")?;
let tz_time = chrono::Local::now().with_timezone(&tz);
cron
.find_next_occurrence(&tz_time, false)
.context("Failed to find next run time")?
.timestamp_millis()
}
};
Ok(next)
}


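// A minimal sketch of the timezone precedence introduced above: the
// schedule's own timezone wins, the core-level timezone is the
// fallback, and both empty means server-local time. The helper name is
// hypothetical; the chrono-tz parsing is real:

fn resolve_timezone(
  schedule_tz: &str,
  core_tz: &str,
) -> anyhow::Result<Option<chrono_tz::Tz>> {
  match (schedule_tz, core_tz) {
    ("", "") => Ok(None),
    ("", tz) | (tz, _) => {
      let tz = tz
        .parse::<chrono_tz::Tz>()
        .map_err(|e| anyhow::anyhow!("Failed to parse timezone | {e}"))?;
      Ok(Some(tz))
    }
  }
}
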
@@ -3,7 +3,7 @@ use std::{fs, path::PathBuf};
use anyhow::Context;
use formatting::format_serror;
use komodo_client::entities::{
CloneArgs, FileContents, stack::Stack, update::Log,
CloneArgs, FileContents, repo::Repo, stack::Stack, update::Log,
};

use crate::{config::core_config, helpers::git_token};
@@ -19,10 +19,12 @@ pub struct RemoteComposeContents {
/// Returns Result<(read paths, error paths, logs, short hash, commit message)>
pub async fn get_repo_compose_contents(
stack: &Stack,
repo: Option<&Repo>,
// Collect any files which are missing in the repo.
mut missing_files: Option<&mut Vec<String>>,
) -> anyhow::Result<RemoteComposeContents> {
let clone_args: CloneArgs = stack.into();
let clone_args: CloneArgs =
repo.map(Into::into).unwrap_or(stack.into());
let (repo_path, _logs, hash, message) =
ensure_remote_repo(clone_args)
.await

@@ -4,6 +4,7 @@ use std::{
};

use anyhow::Context;
use arc_swap::ArcSwap;
use komodo_client::entities::{
action::ActionState,
build::BuildState,
@@ -21,7 +22,10 @@ use crate::{
auth::jwt::JwtClient,
config::core_config,
db::DbClient,
helpers::{action_state::ActionStates, cache::Cache},
helpers::{
action_state::ActionStates, all_resources::AllResourcesById,
cache::Cache,
},
monitor::{
CachedDeploymentStatus, CachedRepoStatus, CachedServerStatus,
CachedStackStatus, History,
@@ -196,3 +200,9 @@ pub fn action_state_cache() -> &'static ActionStateCache {
OnceLock::new();
ACTION_STATE_CACHE.get_or_init(Default::default)
}

pub fn all_resources_cache() -> &'static ArcSwap<AllResourcesById> {
static ALL_RESOURCES: OnceLock<ArcSwap<AllResourcesById>> =
OnceLock::new();
ALL_RESOURCES.get_or_init(Default::default)
}

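// A minimal sketch of the lock-free global cache pattern added above:
// OnceLock gives lazy one-time init, ArcSwap lets readers take cheap
// snapshots while a writer atomically swaps in a whole new value:

use std::sync::{Arc, OnceLock};
use arc_swap::ArcSwap;

#[derive(Default)]
struct ExampleCache; // stand-in for AllResourcesById

fn example_cache() -> &'static ArcSwap<ExampleCache> {
  static CACHE: OnceLock<ArcSwap<ExampleCache>> = OnceLock::new();
  CACHE.get_or_init(Default::default)
}

fn usage() {
  // Readers: load() returns a guard that derefs to the cached value.
  let _snapshot = example_cache().load();
  // Writers: replace the entire cached value atomically.
  example_cache().store(Arc::new(ExampleCache));
}
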
@@ -32,7 +32,7 @@ use crate::{
state::{deployment_status_cache, stack_status_cache},
};

use super::{AllResourcesById, ResourceSyncTrait};
use super::ResourceSyncTrait;

/// All entries in here are due to be deployed,
/// after the given dependencies,
@@ -48,7 +48,6 @@ pub struct SyncDeployParams<'a> {
pub stacks: &'a [ResourceToml<PartialStackConfig>],
// Names to stacks
pub stack_map: &'a HashMap<String, Stack>,
pub all_resources: &'a AllResourcesById,
}

pub async fn deploy_from_cache(
@@ -307,7 +306,6 @@ fn build_cache_for_deployment<'a>(
deployment_map,
stacks,
stack_map,
all_resources,
}: SyncDeployParams<'a>,
cache: &'a mut ToDeployCacheInner,
build_version_cache: &'a mut BuildVersionCache,
@@ -367,11 +365,8 @@ fn build_cache_for_deployment<'a>(

Deployment::validate_partial_config(&mut config);

let mut diff = Deployment::get_diff(
original.config.clone(),
config,
all_resources,
)?;
let mut diff =
Deployment::get_diff(original.config.clone(), config)?;

Deployment::validate_diff(&mut diff);
// Needs to only check config fields that affect docker run
@@ -486,7 +481,6 @@ fn build_cache_for_deployment<'a>(
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,
@@ -502,7 +496,6 @@ fn build_cache_for_stack<'a>(
deployment_map,
stacks,
stack_map,
all_resources,
}: SyncDeployParams<'a>,
cache: &'a mut ToDeployCacheInner,
build_version_cache: &'a mut BuildVersionCache,
@@ -600,11 +593,8 @@ fn build_cache_for_stack<'a>(

Stack::validate_partial_config(&mut config);

let mut diff = Stack::get_diff(
original.config.clone(),
config,
all_resources,
)?;
let mut diff =
Stack::get_diff(original.config.clone(), config)?;

Stack::validate_diff(&mut diff);
// Needs to only check config fields that affect docker compose command
@@ -650,7 +640,6 @@ fn build_cache_for_stack<'a>(
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,
@@ -667,7 +656,6 @@ async fn insert_target_using_after_list<'a>(
deployment_map,
stacks,
stack_map,
all_resources,
}: SyncDeployParams<'a>,
cache: &'a mut ToDeployCacheInner,
build_version_cache: &'a mut BuildVersionCache,
@@ -709,7 +697,6 @@ async fn insert_target_using_after_list<'a>(
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,
@@ -756,7 +743,6 @@ async fn insert_target_using_after_list<'a>(
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,

@@ -15,9 +15,7 @@ use resolver_api::Resolve;

use crate::api::write::WriteArgs;

use super::{
AllResourcesById, ResourceSyncTrait, SyncDeltas, ToUpdateItem,
};
use super::{ResourceSyncTrait, SyncDeltas, ToUpdateItem};

/// Gets all the resources to update. For use in sync execution.
pub async fn get_updates_for_execution<
@@ -25,7 +23,6 @@ pub async fn get_updates_for_execution<
>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
id_to_tags: &HashMap<String, Tag>,
@@ -86,7 +83,6 @@ pub async fn get_updates_for_execution<
let mut diff = Resource::get_diff(
original.config.clone(),
resource.config,
all_resources,
)?;

Resource::validate_diff(&mut diff);

@@ -3,16 +3,6 @@ use std::{collections::HashMap, str::FromStr};
use anyhow::anyhow;
use komodo_client::entities::{
ResourceTargetVariant,
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
procedure::Procedure,
repo::Repo,
server::Server,
stack::Stack,
sync::ResourceSync,
tag::Tag,
toml::{ResourceToml, ResourcesToml},
};
@@ -105,7 +95,6 @@ pub trait ResourceSyncTrait: ToToml + Sized {
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff>;

/// Apply any changes to computed config diff
@@ -155,71 +144,6 @@ pub fn include_resource_by_resource_type_and_name<
}
}

pub struct AllResourcesById {
pub servers: HashMap<String, Server>,
pub deployments: HashMap<String, Deployment>,
pub stacks: HashMap<String, Stack>,
pub builds: HashMap<String, Build>,
pub repos: HashMap<String, Repo>,
pub procedures: HashMap<String, Procedure>,
pub actions: HashMap<String, Action>,
pub builders: HashMap<String, Builder>,
pub alerters: HashMap<String, Alerter>,
pub syncs: HashMap<String, ResourceSync>,
}

impl AllResourcesById {
/// Use `match_tags` to filter resources by tag.
pub async fn load() -> anyhow::Result<Self> {
let map = HashMap::new();
let id_to_tags = &map;
let match_tags = &[];
Ok(Self {
servers: crate::resource::get_id_to_resource_map::<Server>(
id_to_tags, match_tags,
)
.await?,
deployments: crate::resource::get_id_to_resource_map::<
Deployment,
>(id_to_tags, match_tags)
.await?,
builds: crate::resource::get_id_to_resource_map::<Build>(
id_to_tags, match_tags,
)
.await?,
repos: crate::resource::get_id_to_resource_map::<Repo>(
id_to_tags, match_tags,
)
.await?,
procedures:
crate::resource::get_id_to_resource_map::<Procedure>(
id_to_tags, match_tags,
)
.await?,
actions: crate::resource::get_id_to_resource_map::<Action>(
id_to_tags, match_tags,
)
.await?,
builders: crate::resource::get_id_to_resource_map::<Builder>(
id_to_tags, match_tags,
)
.await?,
alerters: crate::resource::get_id_to_resource_map::<Alerter>(
id_to_tags, match_tags,
)
.await?,
syncs: crate::resource::get_id_to_resource_map::<ResourceSync>(
id_to_tags, match_tags,
)
.await?,
stacks: crate::resource::get_id_to_resource_map::<Stack>(
id_to_tags, match_tags,
)
.await?,
})
}
}

fn deserialize_resources_toml(
toml_str: &str,
) -> anyhow::Result<ResourcesToml> {

@@ -1,7 +1,8 @@
use anyhow::{Context, anyhow};
use anyhow::Context;
use git::GitRes;
use komodo_client::entities::{
CloneArgs,
repo::Repo,
sync::{ResourceSync, SyncFileContents},
to_path_compatible_name,
toml::ResourcesToml,
@@ -24,79 +25,49 @@ pub struct RemoteResources {
/// Use `match_tags` to filter resources by tag.
pub async fn get_remote_resources(
sync: &ResourceSync,
repo: Option<&Repo>,
) -> anyhow::Result<RemoteResources> {
if sync.config.files_on_host {
// =============
// FILES ON HOST
// =============
let root_path = core_config()
.sync_directory
.join(to_path_compatible_name(&sync.name));
let (mut logs, mut files, mut file_errors) =
(Vec::new(), Vec::new(), Vec::new());
let resources = super::file::read_resources(
&root_path,
&sync.config.resource_path,
&sync.config.match_tags,
&mut logs,
&mut files,
&mut file_errors,
);
return Ok(RemoteResources {
resources,
files,
file_errors,
logs,
hash: None,
message: None,
});
} else if sync.config.repo.is_empty() {
// ==========
// UI DEFINED
// ==========
let mut resources = ResourcesToml::default();
let resources = if !sync.config.file_contents.is_empty() {
super::deserialize_resources_toml(&sync.config.file_contents)
.context("failed to parse resource file contents")
.map(|more| {
extend_resources(
&mut resources,
more,
&sync.config.match_tags,
);
resources
})
} else {
Ok(resources)
};

return Ok(RemoteResources {
resources,
files: vec![SyncFileContents {
resource_path: String::new(),
path: "database file".to_string(),
contents: sync.config.file_contents.clone(),
}],
file_errors: vec![],
logs: vec![Log::simple(
"Read from database",
"Resources added from database file".to_string(),
)],
hash: None,
message: None,
});
get_files_on_host(sync).await
} else if let Some(repo) = repo {
get_repo(sync, repo.into()).await
} else if !sync.config.repo.is_empty() {
get_repo(sync, sync.into()).await
} else {
get_ui_defined(sync).await
}
}

// ===============
// REPO BASED SYNC
// ===============

if sync.config.repo.is_empty() {
return Err(anyhow!("No sync files configured"));
}

let mut clone_args: CloneArgs = sync.into();
async fn get_files_on_host(
sync: &ResourceSync,
) -> anyhow::Result<RemoteResources> {
let root_path = core_config()
.sync_directory
.join(to_path_compatible_name(&sync.name));
let (mut logs, mut files, mut file_errors) =
(Vec::new(), Vec::new(), Vec::new());
let resources = super::file::read_resources(
&root_path,
&sync.config.resource_path,
&sync.config.match_tags,
&mut logs,
&mut files,
&mut file_errors,
);
Ok(RemoteResources {
resources,
files,
file_errors,
logs,
hash: None,
message: None,
})
}

async fn get_repo(
sync: &ResourceSync,
mut clone_args: CloneArgs,
) -> anyhow::Result<RemoteResources> {
let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
.await
@@ -156,3 +127,36 @@ pub async fn get_remote_resources(
message,
})
}

async fn get_ui_defined(
sync: &ResourceSync,
) -> anyhow::Result<RemoteResources> {
let mut resources = ResourcesToml::default();
let resources =
super::deserialize_resources_toml(&sync.config.file_contents)
.context("failed to parse resource file contents")
.map(|more| {
extend_resources(
&mut resources,
more,
&sync.config.match_tags,
);
resources
});

Ok(RemoteResources {
resources,
files: vec![SyncFileContents {
resource_path: String::new(),
path: "database file".to_string(),
contents: sync.config.file_contents.clone(),
}],
file_errors: vec![],
logs: vec![Log::simple(
"Read from database",
"Resources added from database file".to_string(),
)],
hash: None,
message: None,
})
}

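// The refactor above reduces get_remote_resources to a dispatch over
// the sync's source, in precedence order. A minimal sketch of that
// shape (the enum and function are hypothetical):

enum SyncSource<'a> {
  FilesOnHost,
  LinkedRepo(&'a str),
  InlineRepo,
  UiDefined,
}

fn classify<'a>(
  files_on_host: bool,
  linked_repo: Option<&'a str>,
  inline_repo: &str,
) -> SyncSource<'a> {
  if files_on_host {
    SyncSource::FilesOnHost
  } else if let Some(repo) = linked_repo {
    SyncSource::LinkedRepo(repo)
  } else if !inline_repo.is_empty() {
    SyncSource::InlineRepo
  } else {
    SyncSource::UiDefined
  }
}
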
@@ -25,6 +25,7 @@ use partial_derive2::{MaybeNone, PartialDiff};
use crate::{
api::write::WriteArgs,
resource::KomodoResource,
state::all_resources_cache,
sync::{
ToUpdateItem,
execute::{run_update_description, run_update_tags},
@@ -32,8 +33,7 @@ use crate::{
};

use super::{
AllResourcesById, ResourceSyncTrait, SyncDeltas,
execute::ExecuteResourceSync,
ResourceSyncTrait, SyncDeltas, execute::ExecuteResourceSync,
include_resource_by_resource_type_and_name,
include_resource_by_tags,
};
@@ -42,7 +42,6 @@ impl ResourceSyncTrait for Server {
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
@@ -54,8 +53,8 @@ impl ResourceSyncTrait for Deployment {
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
let resources = all_resources_cache().load();
// need to replace the server id with name
original.server_id = resources
.servers
@@ -87,14 +86,20 @@ impl ResourceSyncTrait for Stack {
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
let resources = all_resources_cache().load();
// Need to replace server id with name
original.server_id = resources
.servers
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
// Replace linked repo with name
original.linked_repo = resources
.repos
.get(&original.linked_repo)
.map(|r| r.name.clone())
.unwrap_or_default();

Ok(original.partial_diff(update))
}
@@ -106,13 +111,18 @@ impl ResourceSyncTrait for Build {
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
let resources = all_resources_cache().load();
original.builder_id = resources
.builders
.get(&original.builder_id)
.map(|b| b.name.clone())
.unwrap_or_default();
original.linked_repo = resources
.repos
.get(&original.linked_repo)
.map(|r| r.name.clone())
.unwrap_or_default();

Ok(original.partial_diff(update))
}
@@ -135,8 +145,8 @@ impl ResourceSyncTrait for Repo {
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
let resources = all_resources_cache().load();
// Need to replace server id with name
original.server_id = resources
.servers
@@ -161,7 +171,6 @@ impl ResourceSyncTrait for Alerter {
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
@@ -173,10 +182,10 @@ impl ResourceSyncTrait for Builder {
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace server builder id with name
if let BuilderConfig::Server(config) = &mut original {
let resources = all_resources_cache().load();
config.server_id = resources
.servers
.get(&config.server_id)
@@ -194,7 +203,6 @@ impl ResourceSyncTrait for Action {
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
@@ -228,13 +236,14 @@ impl ResourceSyncTrait for ResourceSync {
if contents_empty
&& !config.files_on_host
&& config.repo.is_empty()
&& config.linked_repo.is_empty()
{
return false;
}
// The file contents MUST be empty
contents_empty &&
// The sync must be files on host mode OR git repo mode
(config.files_on_host || !config.repo.is_empty())
(config.files_on_host || !config.repo.is_empty() || !config.linked_repo.is_empty())
}

fn include_resource_partial(
@@ -267,20 +276,31 @@ impl ResourceSyncTrait for ResourceSync {
if contents_empty
&& !files_on_host
&& config.repo.as_ref().map(String::is_empty).unwrap_or(true)
&& config
.linked_repo
.as_ref()
.map(String::is_empty)
.unwrap_or(true)
{
return false;
}
// The file contents MUST be empty
contents_empty &&
// The sync must be files on host mode OR git repo mode
(files_on_host || !config.repo.as_deref().unwrap_or_default().is_empty())
(files_on_host || !config.repo.as_deref().unwrap_or_default().is_empty() || !config.linked_repo.as_deref().unwrap_or_default().is_empty())
}

fn get_diff(
original: Self::Config,
mut original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
let resources = all_resources_cache().load();
original.linked_repo = resources
.repos
.get(&original.linked_repo)
.map(|r| r.name.clone())
.unwrap_or_default();

Ok(original.partial_diff(update))
}
}
@@ -291,8 +311,8 @@ impl ResourceSyncTrait for Procedure {
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
let resources = all_resources_cache().load();
for stage in &mut original.stages {
for execution in &mut stage.executions {
match &mut execution.execution {

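// A minimal sketch of the id -> name replacement the get_diff impls
// above now perform via the cache (the map here is a hypothetical
// stand-in for the typed resource maps):

use std::collections::HashMap;

fn id_to_name(id: &str, names_by_id: &HashMap<String, String>) -> String {
  // Unknown ids collapse to an empty string, matching the
  // .unwrap_or_default() calls above, so diffs compare names, not ids.
  names_by_id.get(id).cloned().unwrap_or_default()
}
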
@@ -22,9 +22,7 @@ use komodo_client::{
};
use partial_derive2::{MaybeNone, PartialDiff};

use crate::resource::KomodoResource;

use super::AllResourcesById;
use crate::{resource::KomodoResource, state::all_resources_cache};

pub const TOML_PRETTY_OPTIONS: toml_pretty::Options =
toml_pretty::Options {
@@ -36,10 +34,7 @@ pub const TOML_PRETTY_OPTIONS: toml_pretty::Options =

pub trait ToToml: KomodoResource {
/// Replace linked ids (server_id, build_id, etc) with the resource name.
fn replace_ids(
_resource: &mut Resource<Self::Config, Self::Info>,
_all: &AllResourcesById,
) {
fn replace_ids(_resource: &mut Resource<Self::Config, Self::Info>) {
}

fn edit_config_object(
@@ -108,10 +103,9 @@ pub fn resource_push_to_toml<R: ToToml>(
deploy: bool,
after: Vec<String>,
toml: &mut String,
all: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
) -> anyhow::Result<()> {
R::replace_ids(&mut resource, all);
R::replace_ids(&mut resource);
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
@@ -128,12 +122,11 @@ pub fn resource_to_toml<R: ToToml>(
resource: Resource<R::Config, R::Info>,
deploy: bool,
after: Vec<String>,
all: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
) -> anyhow::Result<String> {
let mut toml = String::new();
resource_push_to_toml::<R>(
resource, deploy, after, &mut toml, all, all_tags,
resource, deploy, after, &mut toml, all_tags,
)?;
Ok(toml)
}
@@ -163,14 +156,24 @@ pub fn convert_resource<R: KomodoResource>(
// These have no linked resource ids to replace
impl ToToml for Alerter {}
impl ToToml for Server {}
impl ToToml for ResourceSync {}
impl ToToml for Action {}

impl ToToml for ResourceSync {
fn replace_ids(resource: &mut Resource<Self::Config, Self::Info>) {
let all = all_resources_cache().load();
resource.config.linked_repo.clone_from(
all
.repos
.get(&resource.config.linked_repo)
.map(|r| &r.name)
.unwrap_or(&String::new()),
);
}
}

impl ToToml for Stack {
fn replace_ids(
resource: &mut Resource<Self::Config, Self::Info>,
all: &AllResourcesById,
) {
fn replace_ids(resource: &mut Resource<Self::Config, Self::Info>) {
let all = all_resources_cache().load();
resource.config.server_id.clone_from(
all
.servers
@@ -178,6 +181,13 @@ impl ToToml for Stack {
.map(|s| &s.name)
.unwrap_or(&String::new()),
);
resource.config.linked_repo.clone_from(
all
.repos
.get(&resource.config.linked_repo)
.map(|r| &r.name)
.unwrap_or(&String::new()),
);
}

fn edit_config_object(
@@ -199,10 +209,8 @@ impl ToToml for Stack {
}

impl ToToml for Deployment {
fn replace_ids(
resource: &mut Resource<Self::Config, Self::Info>,
all: &AllResourcesById,
) {
fn replace_ids(resource: &mut Resource<Self::Config, Self::Info>) {
let all = all_resources_cache().load();
resource.config.server_id.clone_from(
all
.servers
@@ -263,10 +271,8 @@ impl ToToml for Deployment {
}

impl ToToml for Build {
fn replace_ids(
resource: &mut Resource<Self::Config, Self::Info>,
all: &AllResourcesById,
) {
fn replace_ids(resource: &mut Resource<Self::Config, Self::Info>) {
let all = all_resources_cache().load();
resource.config.builder_id.clone_from(
all
.builders
@@ -274,6 +280,13 @@ impl ToToml for Build {
.map(|s| &s.name)
.unwrap_or(&String::new()),
);
resource.config.linked_repo.clone_from(
all
.repos
.get(&resource.config.linked_repo)
.map(|r| &r.name)
.unwrap_or(&String::new()),
);
}

fn edit_config_object(
@@ -308,10 +321,8 @@ impl ToToml for Build {
}

impl ToToml for Repo {
fn replace_ids(
resource: &mut Resource<Self::Config, Self::Info>,
all: &AllResourcesById,
) {
fn replace_ids(resource: &mut Resource<Self::Config, Self::Info>) {
let all = all_resources_cache().load();
resource.config.server_id.clone_from(
all
.servers
@@ -349,11 +360,9 @@ impl ToToml for Repo {
}

impl ToToml for Builder {
fn replace_ids(
resource: &mut Resource<Self::Config, Self::Info>,
all: &AllResourcesById,
) {
fn replace_ids(resource: &mut Resource<Self::Config, Self::Info>) {
if let BuilderConfig::Server(config) = &mut resource.config {
let all = all_resources_cache().load();
config.server_id.clone_from(
all
.servers
@@ -382,10 +391,8 @@ impl ToToml for Builder {
}

impl ToToml for Procedure {
fn replace_ids(
resource: &mut Resource<Self::Config, Self::Info>,
all: &AllResourcesById,
) {
fn replace_ids(resource: &mut Resource<Self::Config, Self::Info>) {
let all = all_resources_cache().load();
for stage in &mut resource.config.stages {
for execution in &mut stage.executions {
match &mut execution.execution {

@@ -34,10 +34,10 @@ use serde::Serialize;
use crate::{
api::{read::ReadArgs, write::WriteArgs},
helpers::matcher::Matcher,
state::db_client,
state::{all_resources_cache, db_client},
};

use super::{AllResourcesById, toml::TOML_PRETTY_OPTIONS};
use super::toml::TOML_PRETTY_OPTIONS;

/// Used to serialize user group
#[derive(Serialize)]
@@ -141,14 +141,12 @@ pub struct DeleteItem {
pub async fn get_updates_for_view(
user_groups: Vec<UserGroupToml>,
delete: bool,
all_resources: &AllResourcesById,
) -> anyhow::Result<Vec<DiffData>> {
let _curr = find_collect(&db_client().user_groups, None, None)
.await
.context("failed to query db for UserGroups")?;
let mut curr = Vec::with_capacity(_curr.capacity());
convert_user_groups(_curr.into_iter(), all_resources, &mut curr)
.await?;
convert_user_groups(_curr.into_iter(), &mut curr).await?;
let map = curr
.into_iter()
.map(|ug| (ug.1.name.clone(), ug))
@@ -175,17 +173,15 @@ pub async fn get_updates_for_view(
.permissions
.retain(|p| p.level > PermissionLevel::None);

user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;
user_group.permissions =
expand_user_group_permissions(user_group.permissions)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;

let (_original_id, original) =
match map.get(&user_group.name).cloned() {
@@ -229,7 +225,6 @@ pub async fn get_updates_for_view(
pub async fn get_updates_for_execution(
user_groups: Vec<UserGroupToml>,
delete: bool,
all_resources: &AllResourcesById,
) -> anyhow::Result<(
Vec<UserGroupToml>,
Vec<UpdateItem>,
@@ -285,17 +280,15 @@ pub async fn get_updates_for_execution(
.permissions
.retain(|p| p.level > PermissionLevel::None);

user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"Failed to expand user group {} permissions",
user_group.name
)
})?;
user_group.permissions =
expand_user_group_permissions(user_group.permissions)
.await
.with_context(|| {
format!(
"Failed to expand user group {} permissions",
user_group.name
)
})?;

let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
@@ -313,6 +306,8 @@ pub async fn get_updates_for_execution(
})
.collect::<Vec<_>>();

let all_resources = all_resources_cache().load();

let mut original_permissions = (ListUserTargetPermissions {
user_target: UserTarget::UserGroup(original.id),
})
@@ -789,10 +784,10 @@ async fn run_update_permissions(
/// Expands any regex defined targets into the full list
async fn expand_user_group_permissions(
permissions: Vec<PermissionToml>,
all_resources: &AllResourcesById,
) -> anyhow::Result<Vec<PermissionToml>> {
let mut expanded =
Vec::<PermissionToml>::with_capacity(permissions.capacity());
let all_resources = all_resources_cache().load();

for permission in permissions {
let (variant, id) = permission.target.extract_variant_id();
@@ -1012,7 +1007,6 @@ fn specific_equal(

pub async fn convert_user_groups(
user_groups: impl Iterator<Item = UserGroup>,
all: &AllResourcesById,
res: &mut Vec<(String, UserGroupToml)>,
) -> anyhow::Result<()> {
let db = db_client();
@@ -1023,6 +1017,8 @@ pub async fn convert_user_groups(
.map(|user| (user.id, user.username))
.collect::<HashMap<_, _>>();

let all = all_resources_cache().load();

for mut user_group in user_groups {
user_group.all.retain(|_, p| {
p.level > PermissionLevel::None || !p.specific.is_empty()

@@ -10,13 +10,12 @@ use komodo_client::entities::{
use mungos::find::find_collect;
use partial_derive2::MaybeNone;

use super::{AllResourcesById, ResourceSyncTrait};
use super::ResourceSyncTrait;

#[allow(clippy::too_many_arguments)]
pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
id_to_tags: &HashMap<String, Tag>,
@@ -68,7 +67,6 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
current_resource.clone(),
false,
vec![],
all_resources,
id_to_tags,
)?,
},
@@ -97,7 +95,6 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
let mut diff = Resource::get_diff(
current_resource.config.clone(),
proposed_resource.config,
all_resources,
)?;

Resource::validate_diff(&mut diff);
@@ -127,7 +124,6 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
current_resource.clone(),
proposed_resource.deploy,
proposed_resource.after,
all_resources,
id_to_tags,
)?,
proposed,

@@ -23,6 +23,8 @@ const ALLOWED_FILES: &[&str] = &[
"types.d.ts",
"responses.js",
"responses.d.ts",
"terminal.js",
"terminal.d.ts",
];

#[derive(Deserialize)]

@@ -10,7 +10,10 @@ use komodo_client::{
},
};

use crate::{permission::get_check_permissions, resource::get};
use crate::{
permission::get_check_permissions, resource::get,
state::stack_status_cache,
};

#[instrument(name = "ConnectStackExec", skip(ws))]
pub async fn terminal(
@@ -58,16 +61,35 @@ pub async fn terminal(
}
};

let services = stack
.info
.deployed_services
.unwrap_or(stack.info.latest_services);
let Some(status) = stack_status_cache().get(&stack.id).await
else {
debug!("could not get stack status");
let _ = client_socket
.send(Message::text(format!(
"ERROR: could not get stack status"
)))
.await;
let _ = client_socket.close().await;
return;
};

let container = match services
.into_iter()
.find(|s| s.service_name == service)
let container = match status
.curr
.services
.iter()
.find(|s| s.service == service)
.map(|s| s.container.as_ref())
{
Some(service) => service.container_name,
Some(Some(container)) => container.name.clone(),
Some(None) => {
let _ = client_socket
.send(Message::text(format!(
"ERROR: Service {service} container could not be found"
)))
.await;
let _ = client_socket.close().await;
return;
}
None => {
let _ = client_socket
.send(Message::text(format!(

@@ -14,7 +14,7 @@ use komodo_client::{
EnvironmentVar, Version,
build::{Build, BuildConfig},
environment_vars_from_str, get_image_name, optional_string,
to_docker_compatible_name, to_path_compatible_name,
to_path_compatible_name,
update::Log,
},
parsers::QUOTE_PATTERN,
@@ -124,6 +124,7 @@ impl Resolve<super::Args> for build::Build {
) -> serror::Result<Vec<Log>> {
let build::Build {
build,
repo: linked_repo,
registry_token,
additional_tags,
replacers: mut core_replacers,
@@ -152,7 +153,11 @@ impl Resolve<super::Args> for build::Build {
..
} = &build;

if !*files_on_host && repo.is_empty() && dockerfile.is_empty() {
if !*files_on_host
&& repo.is_empty()
&& linked_repo.is_none()
&& dockerfile.is_empty()
{
return Err(anyhow!("Build must be files on host mode, have a repo attached, or have dockerfile contents set to build").into());
}

@@ -178,15 +183,29 @@ impl Resolve<super::Args> for build::Build {
}
};

let name = to_docker_compatible_name(name);
let build_path = if let Some(repo) = &linked_repo {
periphery_config()
.repo_dir()
.join(to_path_compatible_name(&repo.name))
.join(build_path)
} else {
periphery_config()
.build_dir()
.join(to_path_compatible_name(&name))
.join(build_path)
}
.components()
.collect::<PathBuf>();

let build_path =
periphery_config().build_dir().join(&name).join(build_path);
let dockerfile_path = optional_string(dockerfile_path)
.unwrap_or("Dockerfile".to_owned());

// Write UI defined Dockerfile to host
if !*files_on_host && repo.is_empty() && !dockerfile.is_empty() {
if !*files_on_host
&& repo.is_empty()
&& linked_repo.is_none()
&& !dockerfile.is_empty()
{
let dockerfile = if *skip_secret_interp {
dockerfile.to_string()
} else {
@@ -200,13 +219,13 @@ impl Resolve<super::Args> for build::Build {
dockerfile
};

let full_path = build_path
let full_dockerfile_path = build_path
.join(&dockerfile_path)
.components()
.collect::<PathBuf>();

// Ensure parent directory exists
if let Some(parent) = full_path.parent() {
if let Some(parent) = full_dockerfile_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent)
.await
@@ -214,15 +233,17 @@ impl Resolve<super::Args> for build::Build {
}
}

fs::write(&full_path, dockerfile).await.with_context(|| {
fs::write(&full_dockerfile_path, dockerfile).await.with_context(|| {
format!(
"Failed to write dockerfile contents to {full_path:?}"
"Failed to write dockerfile contents to {full_dockerfile_path:?}"
)
})?;

logs.push(Log::simple(
"Write Dockerfile",
format!("Dockerfile contents written to {full_path:?}"),
format!(
"Dockerfile contents written to {full_dockerfile_path:?}"
),
));
};


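// A minimal sketch of the path normalization idiom used above for the
// build path (the directories here are hypothetical):

use std::path::PathBuf;

fn normalized(base: &str, rel: &str) -> PathBuf {
  // Collecting components drops redundant separators and `.` segments;
  // it does not resolve `..` against the filesystem.
  PathBuf::from(base).join(rel).components().collect::<PathBuf>()
}
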
@@ -14,7 +14,11 @@ use serde::{Deserialize, Serialize};
use tokio::fs;

use crate::{
compose::{WriteStackRes, compose_up, docker_compose, write_stack},
compose::{
docker_compose,
up::compose_up,
write::{WriteStackRes, write_stack},
},
config::periphery_config,
docker::docker_login,
helpers::{log_grep, pull_or_clone_stack},
@@ -240,13 +244,15 @@ impl Resolve<super::Args> for WriteCommitComposeContents {
) -> serror::Result<RepoActionResponse> {
let WriteCommitComposeContents {
stack,
repo,
username,
file_path,
contents,
git_token,
} = self;

let root = pull_or_clone_stack(&stack, git_token).await?;
let root =
pull_or_clone_stack(&stack, repo.as_ref(), git_token).await?;

let file_path = stack
.config
@@ -263,6 +269,7 @@ impl Resolve<super::Args> for WriteCommitComposeContents {

let GitRes {
logs,
path,
hash,
message,
..
@@ -277,6 +284,7 @@ impl Resolve<super::Args> for WriteCommitComposeContents {

Ok(RepoActionResponse {
logs,
path,
commit_hash: hash,
commit_message: message,
env_file_path: None,
@@ -308,13 +316,16 @@ impl Resolve<super::Args> for ComposePull {
let ComposePull {
stack,
services,
repo,
git_token,
registry_token,
} = self;
let mut res = ComposePullResponse::default();

let (run_directory, env_file_path, _replacers) =
match write_stack(&stack, git_token, &mut res).await {
match write_stack(&stack, repo.as_ref(), git_token, &mut res)
.await
{
Ok(res) => res,
Err(e) => {
res.logs.push(Log::error(
@@ -430,6 +441,7 @@ impl Resolve<super::Args> for ComposeUp {
let ComposeUp {
stack,
services,
repo,
git_token,
registry_token,
replacers,
@@ -438,6 +450,7 @@ impl Resolve<super::Args> for ComposeUp {
if let Err(e) = compose_up(
stack,
services,
repo,
git_token,
registry_token,
&mut res,

@@ -3,7 +3,6 @@ use command::run_komodo_command;
use futures::future::join_all;
use komodo_client::entities::{
docker::container::{Container, ContainerListItem, ContainerStats},
to_docker_compatible_name,
update::Log,
};
use periphery_client::api::container::*;
@@ -234,8 +233,7 @@ impl Resolve<super::Args> for RenameContainer {
curr_name,
new_name,
} = self;
let new = to_docker_compatible_name(&new_name);
let command = format!("docker rename {curr_name} {new}");
let command = format!("docker rename {curr_name} {new_name}");
Ok(run_komodo_command("Docker Rename", None, command).await)
}
}

@@ -10,7 +10,7 @@ use komodo_client::{
Conversion, Deployment, DeploymentConfig, DeploymentImage,
RestartMode, conversions_from_str, extract_registry_domain,
},
environment_vars_from_str, to_docker_compatible_name,
environment_vars_from_str,
update::Log,
},
parsers::QUOTE_PATTERN,
@@ -129,7 +129,6 @@ fn docker_run_command(
}: &Deployment,
image: &str,
) -> anyhow::Result<String> {
let name = to_docker_compatible_name(name);
let ports = parse_conversions(
&conversions_from_str(ports).context("Invalid ports")?,
"-p",

@@ -82,12 +82,14 @@ impl Resolve<super::Args> for CloneRepo {
.map(
|GitRes {
logs,
path,
hash,
message,
env_file_path,
}| {
RepoActionResponse {
logs,
path,
commit_hash: hash,
commit_message: message,
env_file_path,
@@ -152,12 +154,14 @@ impl Resolve<super::Args> for PullRepo {
.map(
|GitRes {
logs,
path,
hash,
message,
env_file_path,
}| {
RepoActionResponse {
logs,
path,
commit_hash: hash,
commit_message: message,
env_file_path,
@@ -222,12 +226,14 @@ impl Resolve<super::Args> for PullOrCloneRepo {
.map(
|GitRes {
logs,
path,
hash,
message,
env_file_path,
}| {
RepoActionResponse {
logs,
path,
commit_hash: hash,
commit_message: message,
env_file_path,

@@ -63,6 +63,7 @@ pub enum PeripheryRequest {
// Repo (Write)
CloneRepo(CloneRepo),
PullRepo(PullRepo),
PullOrCloneRepo(PullOrCloneRepo),
RenameRepo(RenameRepo),
DeleteRepo(DeleteRepo),


@@ -36,7 +36,10 @@ pub fn router() -> Router {
"/execute",
Router::new()
.route("/", post(super::terminal::execute_terminal))
.route(
"/container",
post(super::terminal::execute_container_exec),
)
.layer(middleware::from_fn(guard_request_by_passkey)),
),
)

@@ -1,5 +1,3 @@
use std::{collections::HashMap, sync::OnceLock, task::Poll};

use anyhow::{Context, anyhow};
use axum::{
extract::{
@@ -10,16 +8,12 @@ use axum::{
response::Response,
};
use bytes::Bytes;
use futures::{SinkExt, Stream, StreamExt, TryStreamExt};
use futures::{SinkExt, StreamExt, TryStreamExt};
use komodo_client::{
api::write::TerminalRecreateMode,
entities::{
KOMODO_EXIT_CODE, NoData, komodo_timestamp, server::TerminalInfo,
},
entities::{KOMODO_EXIT_CODE, NoData, server::TerminalInfo},
};
use periphery_client::api::terminal::*;
use pin_project_lite::pin_project;
use rand::Rng;
use resolver_api::Resolve;
use serror::{AddStatusCodeError, Json};
use tokio_util::sync::CancellationToken;
@@ -81,53 +75,6 @@ impl Resolve<super::Args> for CreateTerminalAuthToken {
}
}

/// Tokens valid for 3 seconds
const TOKEN_VALID_FOR_MS: i64 = 3_000;

fn auth_tokens() -> &'static AuthTokens {
static AUTH_TOKENS: OnceLock<AuthTokens> = OnceLock::new();
AUTH_TOKENS.get_or_init(Default::default)
}

#[derive(Default)]
struct AuthTokens {
map: std::sync::Mutex<HashMap<String, i64>>,
}

impl AuthTokens {
pub fn create_auth_token(&self) -> String {
let mut lock = self.map.lock().unwrap();
// clear out any old tokens here (prevent unbounded growth)
let ts = komodo_timestamp();
lock.retain(|_, valid_until| *valid_until > ts);
let token: String = rand::rng()
.sample_iter(&rand::distr::Alphanumeric)
.take(30)
.map(char::from)
.collect();
lock.insert(token.clone(), ts + TOKEN_VALID_FOR_MS);
token
}

pub fn check_token(&self, token: String) -> serror::Result<()> {
let Some(valid_until) = self.map.lock().unwrap().remove(&token)
else {
return Err(
anyhow!("Terminal auth token not found")
.status_code(StatusCode::UNAUTHORIZED),
);
};
if komodo_timestamp() <= valid_until {
Ok(())
} else {
Err(
anyhow!("Terminal token is expired")
.status_code(StatusCode::UNAUTHORIZED),
)
}
}
}

pub async fn connect_terminal(
Query(query): Query<ConnectTerminalQuery>,
ws: WebSocketUpgrade,
@@ -336,10 +283,6 @@ async fn handle_terminal_websocket(
}))
}

/// Sentinels
const START_OF_OUTPUT: &str = "__KOMODO_START_OF_OUTPUT__";
const END_OF_OUTPUT: &str = "__KOMODO_END_OF_OUTPUT__";

pub async fn execute_terminal(
Json(ExecuteTerminalBody { terminal, command }): Json<
ExecuteTerminalBody,
@@ -352,7 +295,47 @@ pub async fn execute_terminal(
);
}

let terminal = get_terminal(&terminal).await?;
execute_command_on_terminal(&terminal, &command).await
}

pub async fn execute_container_exec(
Json(ExecuteContainerExecBody {
container,
shell,
command,
}): Json<ExecuteContainerExecBody>,
) -> serror::Result<axum::body::Body> {
if periphery_config().disable_container_exec {
return Err(
anyhow!("Container exec is disabled in the periphery config")
.into(),
);
}
if container.contains("&&") || shell.contains("&&") {
return Err(
anyhow!(
"The use of '&&' is forbidden in the container name or shell"
)
.into(),
);
}
// Create terminal (recreate if shell changed)
create_terminal(
container.clone(),
format!("docker exec -it {container} {shell}"),
TerminalRecreateMode::DifferentCommand,
)
.await
.context("Failed to create terminal for container exec")?;

execute_command_on_terminal(&container, &command).await
}

async fn execute_command_on_terminal(
terminal_name: &str,
command: &str,
) -> serror::Result<axum::body::Body> {
let terminal = get_terminal(terminal_name).await?;

// Read the bytes into lines
|
||||
// This is done to check the lines for the EOF sentinal
|
||||
@@ -399,43 +382,3 @@ pub async fn execute_terminal(
|
||||
|
||||
Ok(axum::body::Body::from_stream(TerminalStream { stdout }))
|
||||
}
|
||||
|
||||
pin_project! {
|
||||
struct TerminalStream<S> { #[pin] stdout: S }
|
||||
}
|
||||
|
||||
impl<S> Stream for TerminalStream<S>
|
||||
where
|
||||
S:
|
||||
Stream<Item = Result<String, tokio_util::codec::LinesCodecError>>,
|
||||
{
|
||||
// Axum expects a stream of results
|
||||
type Item = Result<String, String>;
|
||||
|
||||
fn poll_next(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Option<Self::Item>> {
|
||||
let this = self.project();
|
||||
match this.stdout.poll_next(cx) {
|
||||
Poll::Ready(None) => {
|
||||
// This is if a None comes in before END_OF_OUTPUT.
|
||||
// This probably means the terminal has exited early,
|
||||
// and needs to be cleaned up
|
||||
tokio::spawn(async move { clean_up_terminals().await });
|
||||
Poll::Ready(None)
|
||||
}
|
||||
Poll::Ready(Some(line)) => {
|
||||
match line {
|
||||
Ok(line) if line.as_str() == END_OF_OUTPUT => {
|
||||
// Stop the stream on end sentinel
|
||||
Poll::Ready(None)
|
||||
}
|
||||
Ok(line) => Poll::Ready(Some(Ok(line + "\n"))),
|
||||
Err(e) => Poll::Ready(Some(Err(format!("{e:?}")))),
|
||||
}
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bin/periphery/src/compose/mod.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
use anyhow::anyhow;
use command::run_komodo_command;
use periphery_client::api::compose::ComposeUpResponse;

use crate::config::periphery_config;

pub mod up;
pub mod write;

pub fn docker_compose() -> &'static str {
  if periphery_config().legacy_compose_cli {
    "docker-compose"
  } else {
    "docker compose"
  }
}

async fn compose_down(
  project: &str,
  services: &[String],
  res: &mut ComposeUpResponse,
) -> anyhow::Result<()> {
  let docker_compose = docker_compose();
  let service_args = if services.is_empty() {
    String::new()
  } else {
    format!(" {}", services.join(" "))
  };
  let log = run_komodo_command(
    "Compose Down",
    None,
    format!("{docker_compose} -p {project} down{service_args}"),
  )
  .await;
  let success = log.success;
  res.logs.push(log);
  if !success {
    return Err(anyhow!(
      "Failed to bring down existing container(s) with docker compose down. Stopping run."
    ));
  }

  Ok(())
}
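As a quick illustration of the command string compose_down shells out (hypothetical project and service names, not part of the diff):

fn main() {
  // Hypothetical inputs, for illustration only.
  let project = "my-stack";
  let services = ["db".to_string(), "api".to_string()];
  let service_args = if services.is_empty() {
    String::new()
  } else {
    format!(" {}", services.join(" "))
  };
  // With the modern CLI selected by docker_compose():
  let command = format!("docker compose -p {project} down{service_args}");
  assert_eq!(command, "docker compose -p my-stack down db api");
}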

@@ -6,41 +6,30 @@ use command::{
  run_komodo_command_with_interpolation,
};
use formatting::format_serror;
use git::environment;
use komodo_client::entities::{
  CloneArgs, FileContents, all_logs_success,
  environment_vars_from_str,
  FileContents, all_logs_success,
  repo::Repo,
  stack::{
    ComposeFile, ComposeService, ComposeServiceDeploy, Stack,
    StackServiceNames,
  },
  to_path_compatible_name,
  update::Log,
};
use periphery_client::api::{
  compose::ComposeUpResponse,
  git::{CloneRepo, PullOrCloneRepo, RepoActionResponse},
};
use resolver_api::Resolve;
use periphery_client::api::compose::ComposeUpResponse;
use tokio::fs;

use crate::{
  config::periphery_config, docker::docker_login,
  helpers::parse_extra_args,
  compose::compose_down, config::periphery_config,
  docker::docker_login, helpers::parse_extra_args,
};

pub fn docker_compose() -> &'static str {
  if periphery_config().legacy_compose_cli {
    "docker-compose"
  } else {
    "docker compose"
  }
}
use super::{docker_compose, write::write_stack};

/// If this fn returns Err, the caller of `compose_up` has to write result to the log before return.
pub async fn compose_up(
  stack: Stack,
  services: Vec<String>,
  repo: Option<Repo>,
  git_token: Option<String>,
  registry_token: Option<String>,
  res: &mut ComposeUpResponse,
@@ -50,7 +39,7 @@ pub async fn compose_up(
  // Will also set additional fields on the response.
  // Use the env_file_path in the compose command.
  let (run_directory, env_file_path, periphery_replacers) =
    write_stack(&stack, git_token, &mut *res)
    write_stack(&stack, repo.as_ref(), git_token, &mut *res)
      .await
      .context("Failed to write / clone compose file")?;

@@ -394,318 +383,3 @@ pub async fn compose_up(

  Ok(())
}

pub trait WriteStackRes {
  fn logs(&mut self) -> &mut Vec<Log>;
  fn add_remote_error(&mut self, _contents: FileContents) {}
  fn set_commit_hash(&mut self, _hash: Option<String>) {}
  fn set_commit_message(&mut self, _message: Option<String>) {}
}

impl WriteStackRes for &mut ComposeUpResponse {
  fn logs(&mut self) -> &mut Vec<Log> {
    &mut self.logs
  }
  fn add_remote_error(&mut self, contents: FileContents) {
    self.remote_errors.push(contents);
  }
  fn set_commit_hash(&mut self, hash: Option<String>) {
    self.commit_hash = hash;
  }
  fn set_commit_message(&mut self, message: Option<String>) {
    self.commit_message = message;
  }
}

/// Either writes the stack file_contents to a file, or clones the repo.
/// Performs variable replacement on env and writes file.
/// Returns (run_directory, env_file_path, periphery_replacers)
pub async fn write_stack(
  stack: &Stack,
  git_token: Option<String>,
  mut res: impl WriteStackRes,
) -> anyhow::Result<(
  PathBuf,
  Option<&str>,
  Option<Vec<(String, String)>>,
)> {
  let root = periphery_config()
    .stack_dir()
    .join(to_path_compatible_name(&stack.name));
  let run_directory = root.join(&stack.config.run_directory);
  // This will remove any intermediate '/./' in the path, which is a problem for some OS.
  // Cannot use 'canonicalize' yet as directory may not exist.
  let run_directory = run_directory.components().collect::<PathBuf>();

  let (env_interpolated, env_replacers) =
    if stack.config.skip_secret_interp {
      (stack.config.environment.clone(), None)
    } else {
      let (environment, replacers) = svi::interpolate_variables(
        &stack.config.environment,
        &periphery_config().secrets,
        svi::Interpolator::DoubleBrackets,
        true,
      )
      .context(
        "Failed to interpolate Periphery secrets into Environment",
      )?;
      (environment, Some(replacers))
    };
  match &env_replacers {
    Some(replacers) if !replacers.is_empty() => {
      res.logs().push(Log::simple(
        "Interpolate - Environment (Periphery)",
        replacers
          .iter()
          .map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
          .collect::<Vec<_>>()
          .join("\n"),
      ))
    }
    _ => {}
  }

  let env_vars = environment_vars_from_str(&env_interpolated)
    .context("Invalid environment variables")?;

  if stack.config.files_on_host {
    // =============
    // FILES ON HOST
    // =============
    let env_file_path = environment::write_file_simple(
      &env_vars,
      &stack.config.env_file_path,
      run_directory.as_ref(),
      res.logs(),
    )
    .await?;
    Ok((
      run_directory,
      // Env file paths are expected to be already relative to run directory,
      // so need to pass original env_file_path here.
      env_file_path
        .is_some()
        .then_some(&stack.config.env_file_path),
      env_replacers,
    ))
  } else if stack.config.repo.is_empty() {
    if stack.config.file_contents.trim().is_empty() {
      return Err(anyhow!(
        "Must either input compose file contents directly, or use files on host / git repo options."
      ));
    }
    // ==============
    // UI BASED FILES
    // ==============
    // Ensure run directory exists
    fs::create_dir_all(&run_directory).await.with_context(|| {
      format!(
        "failed to create stack run directory at {run_directory:?}"
      )
    })?;
    let env_file_path = environment::write_file_simple(
      &env_vars,
      &stack.config.env_file_path,
      run_directory.as_ref(),
      res.logs(),
    )
    .await?;
    let file_path = run_directory
      .join(
        stack
          .config
          .file_paths
          // only need the first one, or default
          .first()
          .map(String::as_str)
          .unwrap_or("compose.yaml"),
      )
      .components()
      .collect::<PathBuf>();

    let (file_contents, file_replacers) = if !stack
      .config
      .skip_secret_interp
    {
      let (contents, replacers) = svi::interpolate_variables(
        &stack.config.file_contents,
        &periphery_config().secrets,
        svi::Interpolator::DoubleBrackets,
        true,
      )
      .context("failed to interpolate secrets into file contents")?;
      if !replacers.is_empty() {
        res.logs().push(Log::simple(
          "Interpolate - Compose file (Periphery)",
          replacers
            .iter()
            .map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
            .collect::<Vec<_>>()
            .join("\n"),
        ));
      }
      (contents, Some(replacers))
    } else {
      (stack.config.file_contents.clone(), None)
    };

    fs::write(&file_path, &file_contents).await.with_context(
      || format!("Failed to write compose file to {file_path:?}"),
    )?;

    Ok((
      run_directory,
      env_file_path
        .is_some()
        .then_some(&stack.config.env_file_path),
      match (env_replacers, file_replacers) {
        (Some(env_replacers), Some(file_replacers)) => Some(
          env_replacers.into_iter().chain(file_replacers).collect(),
        ),
        (Some(env_replacers), None) => Some(env_replacers),
        (None, Some(file_replacers)) => Some(file_replacers),
        (None, None) => None,
      },
    ))
  } else {
    // ================
    // REPO BASED FILES
    // ================
    let mut args: CloneArgs = stack.into();
    // Set the clone destination to the one created for this run
    args.destination = Some(root.display().to_string());

    let git_token = match git_token {
      Some(token) => Some(token),
      None => {
        if !stack.config.git_account.is_empty() {
          match crate::helpers::git_token(
            &stack.config.git_provider,
            &stack.config.git_account,
          ) {
            Ok(token) => Some(token.to_string()),
            Err(e) => {
              let error = format_serror(&e.into());
              res
                .logs()
                .push(Log::error("no git token", error.clone()));
              res.add_remote_error(FileContents {
                path: Default::default(),
                contents: error,
              });
              return Err(anyhow!(
                "failed to find required git token, stopping run"
              ));
            }
          }
        } else {
          None
        }
      }
    };

    let env_file_path = stack
      .config
      .run_directory
      .parse::<PathBuf>()
      .context("Invalid run_directory")?
      .join(&stack.config.env_file_path)
      .display()
      .to_string();

    let clone_or_pull_res = if stack.config.reclone {
      CloneRepo {
        args,
        git_token,
        environment: env_vars,
        env_file_path,
        // Env has already been interpolated above
        skip_secret_interp: true,
        replacers: Default::default(),
      }
      .resolve(&crate::api::Args)
      .await
    } else {
      PullOrCloneRepo {
        args,
        git_token,
        environment: env_vars,
        env_file_path,
        // Env has already been interpolated above
        skip_secret_interp: true,
        replacers: Default::default(),
      }
      .resolve(&crate::api::Args)
      .await
    };

    let RepoActionResponse {
      logs,
      commit_hash,
      commit_message,
      env_file_path,
    } = match clone_or_pull_res {
      Ok(res) => res,
      Err(e) => {
        let error = format_serror(
          &e.error.context("Failed to pull stack repo").into(),
        );
        res
          .logs()
          .push(Log::error("Pull Stack Repo", error.clone()));
        res.add_remote_error(FileContents {
          path: Default::default(),
          contents: error,
        });
        return Err(anyhow!(
          "Failed to pull stack repo, stopping run"
        ));
      }
    };

    res.logs().extend(logs);
    res.set_commit_hash(commit_hash);
    res.set_commit_message(commit_message);

    if !all_logs_success(res.logs()) {
      return Err(anyhow!("Stopped after repo pull failure"));
    }

    Ok((
      run_directory,
      env_file_path
        .is_some()
        .then_some(&stack.config.env_file_path),
      env_replacers,
    ))
  }
}

async fn compose_down(
  project: &str,
  services: &[String],
  res: &mut ComposeUpResponse,
) -> anyhow::Result<()> {
  let docker_compose = docker_compose();
  let service_args = if services.is_empty() {
    String::new()
  } else {
    format!(" {}", services.join(" "))
  };
  let log = run_komodo_command(
    "Compose Down",
    None,
    format!("{docker_compose} -p {project} down{service_args}"),
  )
  .await;
  let success = log.success;
  res.logs.push(log);
  if !success {
    return Err(anyhow!(
      "Failed to bring down existing container(s) with docker compose down. Stopping run."
    ));
  }

  Ok(())
}

bin/periphery/src/compose/write.rs (new file, 452 lines)
@@ -0,0 +1,452 @@
use std::path::PathBuf;

use anyhow::{Context, anyhow};
use formatting::format_serror;
use git::environment;
use komodo_client::entities::{
  CloneArgs, EnvironmentVar, FileContents, all_logs_success,
  environment_vars_from_str, repo::Repo, stack::Stack,
  to_path_compatible_name, update::Log,
};
use periphery_client::api::{
  compose::ComposeUpResponse,
  git::{CloneRepo, PullOrCloneRepo, RepoActionResponse},
};
use resolver_api::Resolve;
use tokio::fs;

use crate::config::periphery_config;

pub trait WriteStackRes {
  fn logs(&mut self) -> &mut Vec<Log>;
  fn add_remote_error(&mut self, _contents: FileContents) {}
  fn set_commit_hash(&mut self, _hash: Option<String>) {}
  fn set_commit_message(&mut self, _message: Option<String>) {}
}

impl WriteStackRes for &mut ComposeUpResponse {
  fn logs(&mut self) -> &mut Vec<Log> {
    &mut self.logs
  }
  fn add_remote_error(&mut self, contents: FileContents) {
    self.remote_errors.push(contents);
  }
  fn set_commit_hash(&mut self, hash: Option<String>) {
    self.commit_hash = hash;
  }
  fn set_commit_message(&mut self, message: Option<String>) {
    self.commit_message = message;
  }
}

/// Either writes the stack file_contents to a file, or clones the repo.
/// Performs variable replacement on env and writes file.
/// Returns (run_directory, env_file_path, periphery_replacers)
pub async fn write_stack<'a>(
  stack: &'a Stack,
  repo: Option<&Repo>,
  git_token: Option<String>,
  mut res: impl WriteStackRes,
) -> anyhow::Result<(
  // run_directory
  PathBuf,
  // env_file_path
  Option<&'a str>,
  // periphery_replacers
  Option<Vec<(String, String)>>,
)> {
  let (env_interpolated, env_replacers) =
    if stack.config.skip_secret_interp {
      (stack.config.environment.clone(), None)
    } else {
      let (environment, replacers) = svi::interpolate_variables(
        &stack.config.environment,
        &periphery_config().secrets,
        svi::Interpolator::DoubleBrackets,
        true,
      )
      .context(
        "Failed to interpolate Periphery secrets into Environment",
      )?;
      (environment, Some(replacers))
    };
  match &env_replacers {
    Some(replacers) if !replacers.is_empty() => {
      res.logs().push(Log::simple(
        "Interpolate - Environment (Periphery)",
        replacers
          .iter()
          .map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
          .collect::<Vec<_>>()
          .join("\n"),
      ))
    }
    _ => {}
  }

  let env_vars = environment_vars_from_str(&env_interpolated)
    .context("Invalid environment variables")?;

  if stack.config.files_on_host {
    write_stack_files_on_host(stack, env_vars, env_replacers, res)
      .await
  } else if let Some(repo) = repo {
    write_stack_linked_repo(
      stack,
      repo,
      git_token,
      env_vars,
      env_replacers,
      res,
    )
    .await
  } else if !stack.config.repo.is_empty() {
    write_stack_inline_repo(
      stack,
      git_token,
      env_vars,
      env_replacers,
      res,
    )
    .await
  } else {
    write_stack_ui_defined(stack, env_vars, env_replacers, res).await
  }
}

async fn write_stack_files_on_host(
  stack: &Stack,
  env_vars: Vec<EnvironmentVar>,
  env_replacers: Option<Vec<(String, String)>>,
  mut res: impl WriteStackRes,
) -> anyhow::Result<(
  // run_directory
  PathBuf,
  // env_file_path
  Option<&str>,
  // periphery_replacers
  Option<Vec<(String, String)>>,
)> {
  let run_directory = periphery_config()
    .stack_dir()
    .join(to_path_compatible_name(&stack.name))
    .join(&stack.config.run_directory)
    .components()
    .collect::<PathBuf>();
  let env_file_path = environment::write_file_simple(
    &env_vars,
    &stack.config.env_file_path,
    run_directory.as_ref(),
    res.logs(),
  )
  .await?;
  Ok((
    run_directory,
    // Env file paths are expected to be already relative to run directory,
    // so need to pass original env_file_path here.
    env_file_path
      .is_some()
      .then_some(&stack.config.env_file_path),
    env_replacers,
  ))
}

async fn write_stack_linked_repo<'a>(
  stack: &'a Stack,
  repo: &Repo,
  git_token: Option<String>,
  env_vars: Vec<EnvironmentVar>,
  env_replacers: Option<Vec<(String, String)>>,
  res: impl WriteStackRes,
) -> anyhow::Result<(
  // run_directory
  PathBuf,
  // env_file_path
  Option<&'a str>,
  // periphery_replacers
  Option<Vec<(String, String)>>,
)> {
  let root = periphery_config()
    .repo_dir()
    .join(to_path_compatible_name(&repo.name))
    .join(&repo.config.path)
    .components()
    .collect::<PathBuf>();

  let mut args: CloneArgs = repo.into();
  // Set the clone destination to the one created for this run
  args.destination = Some(root.display().to_string());

  write_stack_repo(
    stack,
    args,
    root,
    git_token,
    env_vars,
    env_replacers,
    res,
  )
  .await
}

async fn write_stack_inline_repo(
  stack: &Stack,
  git_token: Option<String>,
  env_vars: Vec<EnvironmentVar>,
  env_replacers: Option<Vec<(String, String)>>,
  res: impl WriteStackRes,
) -> anyhow::Result<(
  // run_directory
  PathBuf,
  // env_file_path
  Option<&str>,
  // periphery_replacers
  Option<Vec<(String, String)>>,
)> {
  let root = periphery_config()
    .stack_dir()
    .join(to_path_compatible_name(&stack.name))
    .join(&stack.config.clone_path)
    .components()
    .collect::<PathBuf>();

  let mut args: CloneArgs = stack.into();
  // Set the clone destination to the one created for this run
  args.destination = Some(root.display().to_string());

  write_stack_repo(
    stack,
    args,
    root,
    git_token,
    env_vars,
    env_replacers,
    res,
  )
  .await
}

async fn write_stack_repo(
  stack: &Stack,
  args: CloneArgs,
  root: PathBuf,
  git_token: Option<String>,
  env_vars: Vec<EnvironmentVar>,
  env_replacers: Option<Vec<(String, String)>>,
  mut res: impl WriteStackRes,
) -> anyhow::Result<(
  // run_directory
  PathBuf,
  // env_file_path
  Option<&str>,
  // periphery_replacers
  Option<Vec<(String, String)>>,
)> {
  let git_token = match git_token {
    Some(token) => Some(token),
    None => {
      if let Some(account) = &args.account {
        match crate::helpers::git_token(
          &args.provider,
          account,
        ) {
          Ok(token) => Some(token.to_string()),
          Err(e) => {
            let error = format_serror(&e.into());
            res
              .logs()
              .push(Log::error("no git token", error.clone()));
            res.add_remote_error(FileContents {
              path: Default::default(),
              contents: error,
            });
            return Err(anyhow!(
              "failed to find required git token, stopping run"
            ));
          }
        }
      } else {
        None
      }
    }
  };

  let env_file_path = root
    .join(&stack.config.run_directory)
    .join(if stack.config.env_file_path.is_empty() {
      ".env"
    } else {
      &stack.config.env_file_path
    })
    .components()
    .collect::<PathBuf>()
    .display()
    .to_string();

  let clone_or_pull_res = if stack.config.reclone {
    CloneRepo {
      args,
      git_token,
      environment: env_vars,
      env_file_path,
      // Env has already been interpolated above
      skip_secret_interp: true,
      replacers: Default::default(),
    }
    .resolve(&crate::api::Args)
    .await
  } else {
    PullOrCloneRepo {
      args,
      git_token,
      environment: env_vars,
      env_file_path,
      // Env has already been interpolated above
      skip_secret_interp: true,
      replacers: Default::default(),
    }
    .resolve(&crate::api::Args)
    .await
  };

  let RepoActionResponse {
    logs,
    commit_hash,
    commit_message,
    env_file_path,
    path: _,
  } = match clone_or_pull_res {
    Ok(res) => res,
    Err(e) => {
      let error = format_serror(
        &e.error.context("Failed to pull stack repo").into(),
      );
      res
        .logs()
        .push(Log::error("Pull Stack Repo", error.clone()));
      res.add_remote_error(FileContents {
        path: Default::default(),
        contents: error,
      });
      return Err(anyhow!("Failed to pull stack repo, stopping run"));
    }
  };

  res.logs().extend(logs);
  res.set_commit_hash(commit_hash);
  res.set_commit_message(commit_message);

  if !all_logs_success(res.logs()) {
    return Err(anyhow!("Stopped after repo pull failure"));
  }

  Ok((
    root
      .join(&stack.config.run_directory)
      .components()
      .collect(),
    env_file_path
      .is_some()
      .then_some(&stack.config.env_file_path),
    env_replacers,
  ))
}

async fn write_stack_ui_defined(
  stack: &Stack,
  env_vars: Vec<EnvironmentVar>,
  env_replacers: Option<Vec<(String, String)>>,
  mut res: impl WriteStackRes,
) -> anyhow::Result<(
  // run_directory
  PathBuf,
  // env_file_path
  Option<&str>,
  // periphery_replacers
  Option<Vec<(String, String)>>,
)> {
  if stack.config.file_contents.trim().is_empty() {
    return Err(anyhow!(
      "Must either input compose file contents directly, or use files on host / git repo options."
    ));
  }

  let run_directory = periphery_config()
    .stack_dir()
    .join(to_path_compatible_name(&stack.name))
    .components()
    .collect::<PathBuf>();

  // Ensure run directory exists
  fs::create_dir_all(&run_directory).await.with_context(|| {
    format!(
      "failed to create stack run directory at {run_directory:?}"
    )
  })?;
  let env_file_path = environment::write_file_simple(
    &env_vars,
    &stack.config.env_file_path,
    run_directory.as_ref(),
    res.logs(),
  )
  .await?;
  let file_path = run_directory
    .join(
      stack
        .config
        .file_paths
        // only need the first one, or default
        .first()
        .map(String::as_str)
        .unwrap_or("compose.yaml"),
    )
    .components()
    .collect::<PathBuf>();

  let (file_contents, file_replacers) = if !stack
    .config
    .skip_secret_interp
  {
    let (contents, replacers) = svi::interpolate_variables(
      &stack.config.file_contents,
      &periphery_config().secrets,
      svi::Interpolator::DoubleBrackets,
      true,
    )
    .context("failed to interpolate secrets into file contents")?;
    if !replacers.is_empty() {
      res.logs().push(Log::simple(
        "Interpolate - Compose file (Periphery)",
        replacers
          .iter()
          .map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
          .collect::<Vec<_>>()
          .join("\n"),
      ));
    }
    (contents, Some(replacers))
  } else {
    (stack.config.file_contents.clone(), None)
  };

  fs::write(&file_path, &file_contents)
    .await
    .with_context(|| {
      format!("Failed to write compose file to {file_path:?}")
    })?;

  Ok((
    run_directory,
    env_file_path
      .is_some()
      .then_some(&stack.config.env_file_path),
    match (env_replacers, file_replacers) {
      (Some(env_replacers), Some(file_replacers)) => Some(
        env_replacers.into_iter().chain(file_replacers).collect(),
      ),
      (Some(env_replacers), None) => Some(env_replacers),
      (None, Some(file_replacers)) => Some(file_replacers),
      (None, None) => None,
    },
  ))
}

@@ -16,7 +16,6 @@ use komodo_client::entities::{
    ContainerConfig, GraphDriverData, HealthConfig, PortBinding,
    container::*, image::*, network::*, volume::*,
  },
  to_docker_compatible_name,
  update::Log,
};
use run_command::async_run_command;
@@ -987,7 +986,6 @@ pub fn stop_container_command(
  signal: Option<TerminationSignal>,
  time: Option<i32>,
) -> String {
  let container_name = to_docker_compatible_name(container_name);
  let signal = signal
    .map(|signal| format!(" --signal {signal}"))
    .unwrap_or_default();

@@ -3,8 +3,8 @@ use std::path::PathBuf;
use anyhow::{Context, anyhow};
use komodo_client::{
  entities::{
    CloneArgs, EnvironmentVar, SearchCombinator, stack::Stack,
    to_path_compatible_name,
    CloneArgs, EnvironmentVar, SearchCombinator, repo::Repo,
    stack::Stack, to_path_compatible_name,
  },
  parsers::QUOTE_PATTERN,
};
@@ -89,6 +89,7 @@ pub fn log_grep(
/// Returns path to root directory of the stack repo.
pub async fn pull_or_clone_stack(
  stack: &Stack,
  repo: Option<&Repo>,
  git_token: Option<String>,
) -> anyhow::Result<PathBuf> {
  if stack.config.files_on_host {
@@ -96,26 +97,36 @@ pub async fn pull_or_clone_stack(
      "Wrong method called for files on host stack"
    ));
  }
  if stack.config.repo.is_empty() {
  if repo.is_none() && stack.config.repo.is_empty() {
    return Err(anyhow!("Repo is not configured"));
  }

  let root = periphery_config()
    .stack_dir()
    .join(to_path_compatible_name(&stack.name));

  let mut args: CloneArgs = stack.into();
  // Set the clone destination to the one created for this run
  let (root, mut args) = if let Some(repo) = repo {
    let root = periphery_config()
      .repo_dir()
      .join(to_path_compatible_name(&repo.name))
      .join(&repo.config.path)
      .components()
      .collect::<PathBuf>();
    let args: CloneArgs = repo.into();
    (root, args)
  } else {
    let root = periphery_config()
      .stack_dir()
      .join(to_path_compatible_name(&stack.name))
      .join(&stack.config.clone_path)
      .components()
      .collect::<PathBuf>();
    let args: CloneArgs = stack.into();
    (root, args)
  };
  args.destination = Some(root.display().to_string());

  let git_token = match git_token {
    Some(token) => Some(token),
    None => {
      if !stack.config.git_account.is_empty() {
        match crate::helpers::git_token(
          &stack.config.git_provider,
          &stack.config.git_account,
        ) {
      if let Some(account) = &args.account {
        match crate::helpers::git_token(&args.provider, account) {
          Ok(token) => Some(token.to_string()),
          Err(e) => {
            return Err(

@@ -1,15 +1,23 @@
use std::{
  collections::{HashMap, VecDeque},
  pin::Pin,
  sync::{Arc, OnceLock},
  task::Poll,
  time::Duration,
};

use anyhow::{Context, anyhow};
use axum::http::StatusCode;
use bytes::Bytes;
use futures::Stream;
use komodo_client::{
  api::write::TerminalRecreateMode, entities::server::TerminalInfo,
  api::write::TerminalRecreateMode,
  entities::{komodo_timestamp, server::TerminalInfo},
};
use pin_project_lite::pin_project;
use portable_pty::{CommandBuilder, PtySize, native_pty_system};
use rand::Rng;
use serror::AddStatusCodeError;
use tokio::sync::{broadcast, mpsc};
use tokio_util::sync::CancellationToken;

@@ -307,7 +315,7 @@ impl Terminal {
  }
}

/// 1 MiB max history size per terminal
/// 1 MiB rolling max history size per terminal
const MAX_BYTES: usize = 1024 * 1024;

pub struct History {
@@ -344,3 +352,94 @@ impl History {
    self.buf.read().unwrap().len() as f64 / 1024.0
  }
}

/// Execute Sentinels
pub const START_OF_OUTPUT: &str = "__KOMODO_START_OF_OUTPUT__";
pub const END_OF_OUTPUT: &str = "__KOMODO_END_OF_OUTPUT__";

pin_project! {
  pub struct TerminalStream<S> { #[pin] pub stdout: S }
}

impl<S> Stream for TerminalStream<S>
where
  S:
    Stream<Item = Result<String, tokio_util::codec::LinesCodecError>>,
{
  // Axum expects a stream of results
  type Item = Result<String, String>;

  fn poll_next(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> Poll<Option<Self::Item>> {
    let this = self.project();
    match this.stdout.poll_next(cx) {
      Poll::Ready(None) => {
        // This is if a None comes in before END_OF_OUTPUT.
        // This probably means the terminal has exited early,
        // and needs to be cleaned up
        tokio::spawn(async move { clean_up_terminals().await });
        Poll::Ready(None)
      }
      Poll::Ready(Some(line)) => {
        match line {
          Ok(line) if line.as_str() == END_OF_OUTPUT => {
            // Stop the stream on end sentinel
            Poll::Ready(None)
          }
          Ok(line) => Poll::Ready(Some(Ok(line + "\n"))),
          Err(e) => Poll::Ready(Some(Err(format!("{e:?}")))),
        }
      }
      Poll::Pending => Poll::Pending,
    }
  }
}
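A rough sketch of the sentinel behavior above, feeding TerminalStream from an in-memory line stream (illustrative only; the real stdout comes from the PTY):

use futures::StreamExt;

async fn sentinel_demo() {
  let lines = futures::stream::iter(vec![
    Ok::<_, tokio_util::codec::LinesCodecError>("hello".to_string()),
    Ok(END_OF_OUTPUT.to_string()),
    Ok("never yielded".to_string()),
  ]);
  let mut stream = TerminalStream { stdout: lines };
  // Yields Ok("hello\n"), then terminates at the END_OF_OUTPUT sentinel.
  assert_eq!(stream.next().await, Some(Ok("hello\n".to_string())));
  assert_eq!(stream.next().await, None);
}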

/// Tokens valid for 3 seconds
const TOKEN_VALID_FOR_MS: i64 = 3_000;

pub fn auth_tokens() -> &'static AuthTokens {
  static AUTH_TOKENS: OnceLock<AuthTokens> = OnceLock::new();
  AUTH_TOKENS.get_or_init(Default::default)
}

#[derive(Default)]
pub struct AuthTokens {
  map: std::sync::Mutex<HashMap<String, i64>>,
}

impl AuthTokens {
  pub fn create_auth_token(&self) -> String {
    let mut lock = self.map.lock().unwrap();
    // clear out any old tokens here (prevent unbounded growth)
    let ts = komodo_timestamp();
    lock.retain(|_, valid_until| *valid_until > ts);
    let token: String = rand::rng()
      .sample_iter(&rand::distr::Alphanumeric)
      .take(30)
      .map(char::from)
      .collect();
    lock.insert(token.clone(), ts + TOKEN_VALID_FOR_MS);
    token
  }

  pub fn check_token(&self, token: String) -> serror::Result<()> {
    let Some(valid_until) = self.map.lock().unwrap().remove(&token)
    else {
      return Err(
        anyhow!("Terminal auth token not found")
          .status_code(StatusCode::UNAUTHORIZED),
      );
    };
    if komodo_timestamp() <= valid_until {
      Ok(())
    } else {
      Err(
        anyhow!("Terminal token is expired")
          .status_code(StatusCode::UNAUTHORIZED),
      )
    }
  }
}
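A minimal sketch of the intended token flow (runs against the types above; timestamps are real, so this assumes the check happens within the 3 second window):

fn token_flow_demo() {
  let tokens = auth_tokens();
  let token = tokens.create_auth_token();
  // The first check consumes the token...
  assert!(tokens.check_token(token.clone()).is_ok());
  // ...so a replay is rejected even inside the validity window.
  assert!(tokens.check_token(token).is_err());
}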

@@ -108,6 +108,8 @@ pub struct GetCoreInfoResponse {
  pub github_webhook_owners: Vec<String>,
  /// Whether to disable websocket automatic reconnect.
  pub disable_websocket_reconnect: bool,
  /// TZ identifier Core is using, if manually set.
  pub timezone: String,
}

//

@@ -15,45 +15,6 @@ pub struct ConnectTerminalQuery {
  pub terminal: String,
}

/// Query to connect to a container exec session (interactive shell over websocket) on the given server.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectContainerExecQuery {
  /// Server Id or name
  pub server: String,
  /// The container name
  pub container: String,
  /// The shell to connect to
  pub shell: String,
}

/// Query to connect to a container exec session (interactive shell over websocket) on the given Deployment.
/// This call will use access to the Deployment Terminal to permission the call.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectDeploymentExecQuery {
  /// Deployment Id or name
  pub deployment: String,
  /// The shell to connect to
  pub shell: String,
}

/// Query to connect to a container exec session (interactive shell over websocket) on the given Stack / service.
/// This call will use access to the Stack Terminal to permission the call.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectStackExecQuery {
  /// Stack Id or name
  pub stack: String,
  /// The service name to connect to
  pub service: String,
  /// The shell to connect to
  pub shell: String,
}

/// Execute a terminal command on the given server.
/// TODO: Document calling.
#[typeshare]
@@ -69,3 +30,85 @@ pub struct ExecuteTerminalBody {
  /// The command to execute.
  pub command: String,
}

/// Query to connect to a container exec session (interactive shell over websocket) on the given server.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectContainerExecQuery {
  /// Server Id or name
  pub server: String,
  /// The container name
  pub container: String,
  /// The shell to use (e.g. `sh` or `bash`)
  pub shell: String,
}

/// Execute a command in the given container's shell.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteContainerExecBody {
  /// Server Id or name
  pub server: String,
  /// The container name
  pub container: String,
  /// The shell to use (e.g. `sh` or `bash`)
  pub shell: String,
  /// The command to execute.
  pub command: String,
}

/// Query to connect to a container exec session (interactive shell over websocket) on the given Deployment.
/// This call will use access to the Deployment Terminal to permission the call.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectDeploymentExecQuery {
  /// Deployment Id or name
  pub deployment: String,
  /// The shell to use (e.g. `sh` or `bash`)
  pub shell: String,
}

/// Execute a command in the given container's shell.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteDeploymentExecBody {
  /// Deployment Id or name
  pub deployment: String,
  /// The shell to use (e.g. `sh` or `bash`)
  pub shell: String,
  /// The command to execute.
  pub command: String,
}

/// Query to connect to a container exec session (interactive shell over websocket) on the given Stack / service.
/// This call will use access to the Stack Terminal to permission the call.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectStackExecQuery {
  /// Stack Id or name
  pub stack: String,
  /// The service name to connect to
  pub service: String,
  /// The shell to use (e.g. `sh` or `bash`)
  pub shell: String,
}

/// Execute a command in the given container's shell.
/// TODO: Document calling.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteStackExecBody {
  /// Stack Id or name
  pub stack: String,
  /// The service name to connect to
  pub service: String,
  /// The shell to use (e.g. `sh` or `bash`)
  pub shell: String,
  /// The command to execute.
  pub command: String,
}
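For illustration, the JSON wire shape these bodies serialize into (hypothetical values; assumes serde_json as the serializer):

fn wire_shape_demo() -> serde_json::Result<()> {
  let body = ExecuteContainerExecBody {
    server: "my-server".to_string(),
    container: "komodo-core".to_string(),
    shell: "bash".to_string(),
    command: "ls -la".to_string(),
  };
  let json = serde_json::to_string(&body)?;
  assert_eq!(
    json,
    r#"{"server":"my-server","container":"komodo-core","shell":"bash","command":"ls -la"}"#
  );
  Ok(())
}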

@@ -7,6 +7,8 @@ use serde::{Deserialize, Serialize};
use strum::{AsRefStr, Display, EnumString};
use typeshare::typeshare;

use crate::entities::MaintenanceWindow;

use super::{
  ResourceTarget,
  alert::AlertDataVariant,
@@ -64,6 +66,11 @@ pub struct AlerterConfig {
  #[serde(default)]
  #[builder(default)]
  pub except_resources: Vec<ResourceTarget>,

  /// Scheduled maintenance windows during which alerts will be suppressed.
  #[serde(default)]
  #[builder(default)]
  pub maintenance_windows: Vec<MaintenanceWindow>,
}

impl AlerterConfig {
@@ -81,6 +88,7 @@ impl Default for AlerterConfig {
      alert_types: Default::default(),
      resources: Default::default(),
      except_resources: Default::default(),
      maintenance_windows: Default::default(),
    }
  }
}

@@ -43,7 +43,11 @@ pub struct BuildListItemInfo {

  /// Whether build is in files on host mode.
  pub files_on_host: bool,
  /// Whether build has UI defined dockerfile contents
  pub dockerfile_contents: bool,

  /// Linked repo, if one is attached.
  pub linked_repo: String,
  /// The git provider domain
  pub git_provider: String,
  /// The repo used as the source of the build
@@ -53,7 +57,6 @@ pub struct BuildListItemInfo {
  /// Full link to the repo.
  pub repo_link: String,


  /// Latest built short commit hash, or null.
  pub built_hash: Option<String>,
  /// Latest short commit hash, or null. Only for repo based stacks
@@ -165,6 +168,11 @@ pub struct BuildConfig {
  #[builder(default)]
  pub links: Vec<String>,

  /// Choose a Komodo Repo (Resource) to source the build files.
  #[serde(default)]
  #[builder(default)]
  pub linked_repo: String,

  /// The git provider domain. Default: github.com
  #[serde(default = "default_git_provider")]
  #[builder(default = "default_git_provider()")]
@@ -359,6 +367,7 @@ impl Default for BuildConfig {
      image_name: Default::default(),
      image_tag: Default::default(),
      links: Default::default(),
      linked_repo: Default::default(),
      git_provider: default_git_provider(),
      git_https: default_git_https(),
      repo: Default::default(),

@@ -50,6 +50,9 @@ pub struct Env {
  pub komodo_passkey: Option<String>,
  /// Override `passkey` with file
  pub komodo_passkey_file: Option<PathBuf>,
  /// Override `timezone`
  #[serde(alias = "tz", alias = "TZ")]
  pub komodo_timezone: Option<String>,
  /// Override `first_server`
  pub komodo_first_server: Option<String>,
  /// Override `frontend_path`
@@ -268,6 +271,12 @@ pub struct CoreConfig {
  /// Should be some secure hash, maybe 20-40 chars.
  pub passkey: String,

  /// A TZ Identifier. If not provided, will use Core local timezone.
  /// https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
  /// This will be populated by TZ env variable in addition to KOMODO_TIMEZONE.
  #[serde(default)]
  pub timezone: String,

  /// Disable user ability to use the UI to update resource configuration.
  #[serde(default)]
  pub ui_write_disabled: bool,
@@ -566,7 +575,7 @@ fn default_prune_days() -> u64 {
}

fn default_poll_interval() -> Timelength {
  Timelength::FiveMinutes
  Timelength::OneHour
}

fn default_monitoring_interval() -> Timelength {
@@ -590,6 +599,7 @@ impl CoreConfig {
      port: config.port,
      bind_ip: config.bind_ip,
      passkey: empty_or_redacted(&config.passkey),
      timezone: config.timezone,
      first_server: config.first_server,
      frontend_path: config.frontend_path,
      jwt_secret: empty_or_redacted(&config.jwt_secret),

@@ -145,9 +145,9 @@ pub fn get_image_name(
  }: &build::Build,
) -> anyhow::Result<String> {
  let name = if image_name.is_empty() {
    to_docker_compatible_name(name)
    name
  } else {
    to_docker_compatible_name(image_name)
    image_name
  };
  let name = match (
    !domain.is_empty(),
@@ -156,24 +156,36 @@ pub fn get_image_name(
  ) {
    // If organization and account provided, name under organization.
    (true, true, true) => {
      format!("{domain}/{}/{name}", organization.to_lowercase())
      format!("{domain}/{}/{name}", organization)
    }
    // Just domain / account provided
    (true, false, true) => format!("{domain}/{account}/{name}"),
    // Otherwise, just use name
    _ => name,
    _ => name.to_string(),
  };
  Ok(name)
}
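The resulting naming rule, restated as a standalone sketch with illustrative values (note the new version no longer lowercases here):

fn image_name(domain: &str, organization: &str, account: &str, name: &str) -> String {
  match (!domain.is_empty(), !organization.is_empty(), !account.is_empty()) {
    // organization takes priority over account
    (true, true, true) => format!("{domain}/{organization}/{name}"),
    (true, false, true) => format!("{domain}/{account}/{name}"),
    _ => name.to_string(),
  }
}

fn naming_demo() {
  assert_eq!(image_name("ghcr.io", "my-org", "me", "app"), "ghcr.io/my-org/app");
  assert_eq!(image_name("ghcr.io", "", "me", "app"), "ghcr.io/me/app");
  assert_eq!(image_name("", "", "", "app"), "app");
}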

pub fn to_general_name(name: &str) -> String {
  name.replace('\n', "_").trim().to_string()
  name.trim().replace('\n', "_").to_string()
}

pub fn to_path_compatible_name(name: &str) -> String {
  name.replace([' ', '\n'], "_").trim().to_string()
  name.trim().replace([' ', '\n'], "_").to_string()
}
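Why the reordering matters: replacing first turns surrounding spaces into underscores, which trim can then no longer strip. A small sketch:

fn trim_order_demo() {
  let name = " my app ";
  // old order: replace, then trim -> the underscores survive
  assert_eq!(name.replace([' ', '\n'], "_").trim(), "_my_app_");
  // new order: trim, then replace
  assert_eq!(name.trim().replace([' ', '\n'], "_"), "my_app");
}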

/// Enforce common container naming rules.
/// [a-zA-Z0-9_.-]
pub fn to_container_compatible_name(name: &str) -> String {
  name.trim().replace([' ', ',', '\n', '&'], "_").to_string()
}

/// Enforce common docker naming rules, such as only lowercase, and no '.'.
/// These apply to:
/// - Stacks (docker project name)
/// - Builds (docker image name)
/// - Networks
/// - Volumes
pub fn to_docker_compatible_name(name: &str) -> String {
  name
    .to_lowercase()
@@ -388,6 +400,48 @@ pub struct FileContents {
  pub contents: String,
}

/// Represents a scheduled maintenance window
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct MaintenanceWindow {
  /// Name for the maintenance window (required)
  pub name: String,
  /// Description of what maintenance is performed (optional)
  #[serde(default)]
  pub description: String,
  /// The type of maintenance schedule:
  /// - Daily (default)
  /// - Weekly
  /// - OneTime
  #[serde(default)]
  pub schedule_type: MaintenanceScheduleType,
  /// For Weekly schedules: Specify the day of the week (Monday, Tuesday, etc.)
  #[serde(default)]
  pub day_of_week: String,
  /// For OneTime window: ISO 8601 date format (YYYY-MM-DD)
  #[serde(default)]
  pub date: String,
  /// Start hour in 24-hour format (0-23) (optional, defaults to 0)
  #[serde(default)]
  pub hour: u8,
  /// Start minute (0-59) (optional, defaults to 0)
  #[serde(default)]
  pub minute: u8,
  /// Duration of the maintenance window in minutes (required)
  pub duration_minutes: u32,
  /// Timezone for maintenance window specification.
  /// If empty, will use Core timezone.
  #[serde(default)]
  pub timezone: String,
  /// Whether this maintenance window is currently enabled
  #[serde(default = "default_enabled")]
  pub enabled: bool,
}

fn default_enabled() -> bool {
  true
}
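A sketch of a typical window under this schema (illustrative values): a weekly Sunday window starting 02:00 in an explicit timezone, running 90 minutes.

fn maintenance_window_demo() -> MaintenanceWindow {
  MaintenanceWindow {
    name: "weekly-patching".to_string(),
    description: "OS package updates".to_string(),
    schedule_type: MaintenanceScheduleType::Weekly,
    day_of_week: "Sunday".to_string(),
    // `date` only applies to OneTime schedules.
    date: String::new(),
    hour: 2,
    minute: 0,
    duration_minutes: 90,
    timezone: "America/New_York".to_string(),
    enabled: true,
  }
}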

#[typeshare]
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct CloneArgs {
@@ -416,12 +470,15 @@ pub struct CloneArgs {
}

impl CloneArgs {
  pub fn path(&self, repo_dir: &Path) -> PathBuf {
    let path = match &self.destination {
      Some(destination) => PathBuf::from(&destination),
      None => repo_dir.join(to_path_compatible_name(&self.name)),
    };
    path.components().collect::<PathBuf>()
  pub fn path(&self, root_repo_dir: &Path) -> PathBuf {
    match &self.destination {
      Some(destination) => root_repo_dir
        .join(to_path_compatible_name(&self.name))
        .join(destination),
      None => root_repo_dir.join(to_path_compatible_name(&self.name)),
    }
    .components()
    .collect()
  }
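Illustrative resolution under the new rule, for a repo named "My Repo" and a hypothetical root of /etc/komodo/repos:

// destination: Some("stacks/prod".into())
//   -> /etc/komodo/repos/My_Repo/stacks/prod
// destination: None
//   -> /etc/komodo/repos/My_Repo
// Previously, Some(destination) was used verbatim as the full path.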

  pub fn remote_url(
@@ -455,7 +512,9 @@ impl CloneArgs {
      .join(self.provider.replace('/', "-"))
      .join(repo.replace('/', "-"))
      .join(self.branch.replace('/', "-"))
      .join(self.commit.as_deref().unwrap_or("latest"));
      .join(self.commit.as_deref().unwrap_or("latest"))
      .components()
      .collect();
    Ok(res)
  }
}
@@ -531,7 +590,7 @@ impl From<&self::stack::Stack> for CloneArgs {
        .unwrap_or_else(|| String::from("main")),
      commit: optional_string(&stack.config.commit),
      is_build: false,
      destination: None,
      destination: optional_string(&stack.config.clone_path),
      on_clone: None,
      on_pull: None,
      https: stack.config.git_https,
@@ -633,6 +692,291 @@ impl TryInto<async_timing_util::Timelength> for Timelength {
  }
}

/// Days of the week
#[typeshare]
#[derive(
  Debug,
  Clone,
  Copy,
  PartialEq,
  Eq,
  Default,
  EnumString,
  Serialize,
  Deserialize,
)]
pub enum DayOfWeek {
  #[default]
  #[serde(alias = "monday", alias = "Mon", alias = "mon")]
  #[strum(serialize = "monday", serialize = "Mon", serialize = "mon")]
  Monday,
  #[serde(alias = "tuesday", alias = "Tue", alias = "tue")]
  #[strum(
    serialize = "tuesday",
    serialize = "Tue",
    serialize = "tue"
  )]
  Tuesday,
  #[serde(alias = "wednesday", alias = "Wed", alias = "wed")]
  #[strum(
    serialize = "wednesday",
    serialize = "Wed",
    serialize = "wed"
  )]
  Wednesday,
  #[serde(alias = "thursday", alias = "Thurs", alias = "thurs")]
  #[strum(
    serialize = "thursday",
    serialize = "Thurs",
    serialize = "thurs"
  )]
  Thursday,
  #[serde(alias = "friday", alias = "Fri", alias = "fri")]
  #[strum(serialize = "friday", serialize = "Fri", serialize = "fri")]
  Friday,
  #[serde(alias = "saturday", alias = "Sat", alias = "sat")]
  #[strum(
    serialize = "saturday",
    serialize = "Sat",
    serialize = "sat"
  )]
  Saturday,
  #[serde(alias = "sunday", alias = "Sun", alias = "sun")]
  #[strum(serialize = "sunday", serialize = "Sun", serialize = "sun")]
  Sunday,
}

/// Types of maintenance schedules
#[typeshare]
#[derive(
  Debug,
  Clone,
  Copy,
  PartialEq,
  Default,
  EnumString,
  Serialize,
  Deserialize,
)]
pub enum MaintenanceScheduleType {
  /// Daily at the specified time
  #[default]
  Daily,
  /// Weekly on the specified day and time
  Weekly,
  /// One-time maintenance on a specific date and time
  OneTime, // ISO 8601 date format (YYYY-MM-DD)
}
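Since both enums derive EnumString, the listed serialize values double as accepted FromStr inputs (sketch; "mon" is one of the aliases above):

use std::str::FromStr;

fn parse_demo() {
  assert_eq!(DayOfWeek::from_str("mon").unwrap(), DayOfWeek::Monday);
  assert_eq!(
    MaintenanceScheduleType::from_str("Weekly").unwrap(),
    MaintenanceScheduleType::Weekly
  );
}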

/// One representative IANA zone for each distinct base UTC offset in the tz database.
/// https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
///
/// The `serde`/`strum` renames ensure the canonical identifier is used
/// when serializing or parsing from a string such as `"Etc/UTC"`.
#[typeshare]
#[derive(
  Debug,
  Clone,
  Copy,
  PartialEq,
  Default,
  EnumString,
  Serialize,
  Deserialize,
)]
pub enum IanaTimezone {
  /// UTC−12:00
  #[serde(rename = "Etc/GMT+12")]
  #[strum(serialize = "Etc/GMT+12")]
  EtcGmtMinus12,

  /// UTC−11:00
  #[serde(rename = "Pacific/Pago_Pago")]
  #[strum(serialize = "Pacific/Pago_Pago")]
  PacificPagoPago,

  /// UTC−10:00
  #[serde(rename = "Pacific/Honolulu")]
  #[strum(serialize = "Pacific/Honolulu")]
  PacificHonolulu,

  /// UTC−09:30
  #[serde(rename = "Pacific/Marquesas")]
  #[strum(serialize = "Pacific/Marquesas")]
  PacificMarquesas,

  /// UTC−09:00
  #[serde(rename = "America/Anchorage")]
  #[strum(serialize = "America/Anchorage")]
  AmericaAnchorage,

  /// UTC−08:00
  #[serde(rename = "America/Los_Angeles")]
  #[strum(serialize = "America/Los_Angeles")]
  AmericaLosAngeles,

  /// UTC−07:00
  #[serde(rename = "America/Denver")]
  #[strum(serialize = "America/Denver")]
  AmericaDenver,

  /// UTC−06:00
  #[serde(rename = "America/Chicago")]
  #[strum(serialize = "America/Chicago")]
  AmericaChicago,

  /// UTC−05:00
  #[serde(rename = "America/New_York")]
  #[strum(serialize = "America/New_York")]
  AmericaNewYork,

  /// UTC−04:00
  #[serde(rename = "America/Halifax")]
  #[strum(serialize = "America/Halifax")]
  AmericaHalifax,

  /// UTC−03:30
  #[serde(rename = "America/St_Johns")]
  #[strum(serialize = "America/St_Johns")]
  AmericaStJohns,

  /// UTC−03:00
  #[serde(rename = "America/Sao_Paulo")]
  #[strum(serialize = "America/Sao_Paulo")]
  AmericaSaoPaulo,

  /// UTC−02:00
  #[serde(rename = "America/Noronha")]
  #[strum(serialize = "America/Noronha")]
  AmericaNoronha,

  /// UTC−01:00
  #[serde(rename = "Atlantic/Azores")]
  #[strum(serialize = "Atlantic/Azores")]
  AtlanticAzores,

  /// UTC±00:00
  #[default]
  #[serde(rename = "Etc/UTC")]
  #[strum(serialize = "Etc/UTC")]
  EtcUtc,

  /// UTC+01:00
  #[serde(rename = "Europe/Berlin")]
  #[strum(serialize = "Europe/Berlin")]
  EuropeBerlin,

  /// UTC+02:00
  #[serde(rename = "Europe/Bucharest")]
  #[strum(serialize = "Europe/Bucharest")]
  EuropeBucharest,

  /// UTC+03:00
  #[serde(rename = "Europe/Moscow")]
  #[strum(serialize = "Europe/Moscow")]
  EuropeMoscow,

  /// UTC+03:30
  #[serde(rename = "Asia/Tehran")]
  #[strum(serialize = "Asia/Tehran")]
  AsiaTehran,

  /// UTC+04:00
  #[serde(rename = "Asia/Dubai")]
  #[strum(serialize = "Asia/Dubai")]
  AsiaDubai,

  /// UTC+04:30
  #[serde(rename = "Asia/Kabul")]
  #[strum(serialize = "Asia/Kabul")]
  AsiaKabul,

  /// UTC+05:00
  #[serde(rename = "Asia/Karachi")]
  #[strum(serialize = "Asia/Karachi")]
  AsiaKarachi,

  /// UTC+05:30
  #[serde(rename = "Asia/Kolkata")]
  #[strum(serialize = "Asia/Kolkata")]
  AsiaKolkata,

  /// UTC+05:45
  #[serde(rename = "Asia/Kathmandu")]
  #[strum(serialize = "Asia/Kathmandu")]
  AsiaKathmandu,

  /// UTC+06:00
  #[serde(rename = "Asia/Dhaka")]
  #[strum(serialize = "Asia/Dhaka")]
  AsiaDhaka,

  /// UTC+06:30
  #[serde(rename = "Asia/Yangon")]
  #[strum(serialize = "Asia/Yangon")]
  AsiaYangon,

  /// UTC+07:00
  #[serde(rename = "Asia/Bangkok")]
  #[strum(serialize = "Asia/Bangkok")]
  AsiaBangkok,

  /// UTC+08:00
  #[serde(rename = "Asia/Shanghai")]
  #[strum(serialize = "Asia/Shanghai")]
  AsiaShanghai,

  /// UTC+08:45
  #[serde(rename = "Australia/Eucla")]
  #[strum(serialize = "Australia/Eucla")]
  AustraliaEucla,

  /// UTC+09:00
  #[serde(rename = "Asia/Tokyo")]
  #[strum(serialize = "Asia/Tokyo")]
  AsiaTokyo,

  /// UTC+09:30
  #[serde(rename = "Australia/Adelaide")]
  #[strum(serialize = "Australia/Adelaide")]
  AustraliaAdelaide,

  /// UTC+10:00
  #[serde(rename = "Australia/Sydney")]
  #[strum(serialize = "Australia/Sydney")]
  AustraliaSydney,

  /// UTC+10:30
  #[serde(rename = "Australia/Lord_Howe")]
  #[strum(serialize = "Australia/Lord_Howe")]
  AustraliaLordHowe,

  /// UTC+11:00
  #[serde(rename = "Pacific/Port_Moresby")]
  #[strum(serialize = "Pacific/Port_Moresby")]
  PacificPortMoresby,

  /// UTC+12:00
  #[serde(rename = "Pacific/Auckland")]
  #[strum(serialize = "Pacific/Auckland")]
  PacificAuckland,

  /// UTC+12:45
  #[serde(rename = "Pacific/Chatham")]
  #[strum(serialize = "Pacific/Chatham")]
  PacificChatham,

  /// UTC+13:00
  #[serde(rename = "Pacific/Tongatapu")]
  #[strum(serialize = "Pacific/Tongatapu")]
  PacificTongatapu,

  /// UTC+14:00
  #[serde(rename = "Pacific/Kiritimati")]
  #[strum(serialize = "Pacific/Kiritimati")]
  PacificKiritimati,
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Debug,
|
||||
|
||||
@@ -3,7 +3,9 @@ use std::fmt::Write;

 use derive_variants::EnumVariants;
 use indexmap::IndexSet;
 use serde::{Deserialize, Serialize};
-use strum::{AsRefStr, Display, EnumString, IntoStaticStr, VariantArray};
+use strum::{
+  AsRefStr, Display, EnumString, IntoStaticStr, VariantArray,
+};
 use typeshare::typeshare;

 use super::{MongoId, ResourceTarget};
@@ -277,7 +279,11 @@ impl PermissionLevelAndSpecifics {
   pub fn specifics_for_log(&self) -> String {
     let mut res = String::new();
     for specific in self.specific.iter() {
-      write!(&mut res, ", {specific}").unwrap();
+      if res.is_empty() {
+        write!(&mut res, "{specific}").unwrap();
+      } else {
+        write!(&mut res, ", {specific}").unwrap();
+      }
     }
     res
   }
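The hunk above fixes a stray leading ", " in permission log output: the old loop unconditionally prefixed every entry. A minimal TypeScript sketch of the corrected join behavior (the function name here is illustrative, not from the codebase):

```ts
// Illustrative only: mirrors the corrected comma-joining from specifics_for_log.
function joinSpecifics(specifics: string[]): string {
  let res = "";
  for (const specific of specifics) {
    // The old code always wrote ", " first, so logs began with a dangling comma.
    res = res.length === 0 ? specific : `${res}, ${specific}`;
  }
  return res;
}

console.log(joinSpecifics(["Terminal", "Attach"])); // "Terminal, Attach"
```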
@@ -156,6 +156,10 @@ pub struct RepoConfig {
   pub commit: String,

   /// Explicitly specify the folder to clone the repo in.
+  /// - If absolute (has leading '/')
+  ///   - Used directly as the path
+  /// - If relative
+  ///   - Taken relative to Periphery `repo_dir` (ie `${root_directory}/repos`)
   #[serde(default)]
   #[builder(default)]
   pub path: String,
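The new `path` doc above describes how the clone folder resolves. A small sketch of that rule, assuming Node's path helpers and a hypothetical `repoDir` standing in for Periphery's `${root_directory}/repos`:

```ts
import { isAbsolute, join } from "node:path";

// Sketch of the documented rule; `repoDir` stands in for Periphery's repo_dir.
function resolveClonePath(repoDir: string, path: string): string {
  return isAbsolute(path) ? path : join(repoDir, path);
}

console.log(resolveClonePath("/etc/komodo/repos", "my-repo"));
// -> "/etc/komodo/repos/my-repo"
console.log(resolveClonePath("/etc/komodo/repos", "/srv/clones/my-repo"));
// -> "/srv/clones/my-repo"
```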
@@ -5,8 +5,11 @@ use partial_derive2::Partial;
 use serde::{Deserialize, Serialize};
 use typeshare::typeshare;

-use crate::deserializers::{
-  option_string_list_deserializer, string_list_deserializer,
+use crate::{
+  deserializers::{
+    option_string_list_deserializer, string_list_deserializer,
+  },
+  entities::MaintenanceWindow,
 };

 use super::{
use super::{
|
||||
@@ -30,6 +33,8 @@ pub struct ServerListItemInfo {
|
||||
pub region: String,
|
||||
/// Address of the server.
|
||||
pub address: String,
|
||||
/// The Komodo Periphery version of the server.
|
||||
pub version: String,
|
||||
/// Whether server is configured to send unreachable alerts.
|
||||
pub send_unreachable_alerts: bool,
|
||||
/// Whether server is configured to send cpu alerts.
|
||||
@@ -180,6 +185,11 @@ pub struct ServerConfig {
   #[builder(default = "default_disk_critical()")]
   #[partial_default(default_disk_critical())]
   pub disk_critical: f64,
+
+  /// Scheduled maintenance windows during which alerts will be suppressed.
+  #[serde(default)]
+  #[builder(default)]
+  pub maintenance_windows: Vec<MaintenanceWindow>,
 }

 impl ServerConfig {

@@ -258,6 +268,7 @@ impl Default for ServerConfig {
       mem_critical: default_mem_critical(),
       disk_warning: default_disk_warning(),
       disk_critical: default_disk_critical(),
+      maintenance_windows: Default::default(),
     }
   }
 }
@@ -19,7 +19,6 @@ use super::{
   FileContents, SystemCommand,
   docker::container::ContainerListItem,
   resource::{Resource, ResourceListItem, ResourceQuery},
-  to_docker_compatible_name,
 };

 #[typeshare]

@@ -35,9 +34,9 @@ impl Stack {
       }
     }
     if self.config.project_name.is_empty() {
-      to_docker_compatible_name(&self.name)
+      self.name.clone()
     } else {
-      to_docker_compatible_name(&self.config.project_name)
+      self.config.project_name.clone()
     }
   }
@@ -68,6 +67,8 @@ pub struct StackListItemInfo {
   pub files_on_host: bool,
   /// Whether stack has file contents defined.
   pub file_contents: bool,
+  /// Linked repo, if one is attached.
+  pub linked_repo: String,
   /// The git provider domain
   pub git_provider: String,
   /// The configured repo
@@ -272,6 +273,11 @@ pub struct StackConfig {
   #[builder(default)]
   pub skip_secret_interp: bool,

+  /// Choose a Komodo Repo (Resource) to source the compose files.
+  #[serde(default)]
+  #[builder(default)]
+  pub linked_repo: String,
+
   /// The git provider domain. Default: github.com
   #[serde(default = "default_git_provider")]
   #[builder(default = "default_git_provider()")]

@@ -295,7 +301,8 @@ pub struct StackConfig {
   #[builder(default)]
   pub git_account: String,

-  /// The Github repo used as the source of the build.
+  /// The repo used as the source of the build.
+  /// {namespace}/{repo_name}
   #[serde(default)]
   #[builder(default)]
   pub repo: String,

@@ -311,6 +318,11 @@ pub struct StackConfig {
   #[builder(default)]
   pub commit: String,

+  /// Optionally set a specific clone path
+  #[serde(default)]
+  #[builder(default)]
+  pub clone_path: String,
+
   /// By default, the Stack will `git pull` the repo after it is first cloned.
   /// If this option is enabled, the repo folder will be deleted and recloned instead.
   #[serde(default)]

@@ -520,11 +532,13 @@ impl Default for StackConfig {
       destroy_before_deploy: Default::default(),
       build_extra_args: Default::default(),
       skip_secret_interp: Default::default(),
+      linked_repo: Default::default(),
       git_provider: default_git_provider(),
       git_https: default_git_https(),
       repo: Default::default(),
       branch: default_branch(),
       commit: Default::default(),
+      clone_path: Default::default(),
       reclone: Default::default(),
       git_account: Default::default(),
       webhook_enabled: default_webhook_enabled(),
@@ -33,6 +33,8 @@ pub struct ResourceSyncListItemInfo {
   pub managed: bool,
   /// Resource paths to the files.
   pub resource_path: Vec<String>,
+  /// Linked repo, if one is attached.
+  pub linked_repo: String,
   /// The git provider domain.
   pub git_provider: String,
   /// The Github repo used as the source of the sync resources

@@ -161,6 +163,11 @@ pub type _PartialResourceSyncConfig = PartialResourceSyncConfig;
 #[partial_derive(Debug, Clone, Default, Serialize, Deserialize)]
 #[partial(skip_serializing_none, from, diff)]
 pub struct ResourceSyncConfig {
+  /// Choose a Komodo Repo (Resource) to source the sync files.
+  #[serde(default)]
+  #[builder(default)]
+  pub linked_repo: String,
+
   /// The git provider domain. Default: github.com
   #[serde(default = "default_git_provider")]
   #[builder(default = "default_git_provider()")]

@@ -334,6 +341,7 @@ fn default_pending_alert() -> bool {
 impl Default for ResourceSyncConfig {
   fn default() -> Self {
     Self {
+      linked_repo: Default::default(),
       git_provider: default_git_provider(),
       git_https: default_git_https(),
       repo: Default::default(),
@@ -1,6 +1,6 @@
 {
   "name": "komodo_client",
-  "version": "1.18.1",
+  "version": "1.18.2",
   "description": "Komodo client package",
   "homepage": "https://komo.do",
   "main": "dist/lib.js",
@@ -5,12 +5,15 @@ import {
   UserResponses,
   WriteResponses,
 } from "./responses.js";
+import {
+  terminal_methods,
+  ConnectExecQuery,
+  ExecuteExecBody,
+  TerminalCallbacks,
+} from "./terminal.js";
 import {
   AuthRequest,
   BatchExecutionResponse,
-  ConnectContainerExecQuery,
-  ConnectDeploymentExecQuery,
-  ConnectStackExecQuery,
   ConnectTerminalQuery,
   ExecuteRequest,
   ExecuteTerminalBody,

@@ -25,6 +28,8 @@ import {

 export * as Types from "./types.js";

+export type { ConnectExecQuery, ExecuteExecBody, TerminalCallbacks };
+
 export type InitOptions =
   | { type: "jwt"; params: { jwt: string } }
   | { type: "api-key"; params: { key: string; secret: string } };

@@ -39,30 +44,15 @@ export class CancelToken {
   }
 }

-export type ContainerExecQuery =
-  | {
-      type: "container";
-      query: ConnectContainerExecQuery;
-    }
-  | {
-      type: "deployment";
-      query: ConnectDeploymentExecQuery;
-    }
-  | {
-      type: "stack";
-      query: ConnectStackExecQuery;
-    };
-
-export type TerminalCallbacks = {
-  on_message?: (e: MessageEvent<any>) => void;
-  on_login?: () => void;
-  on_open?: () => void;
-  on_close?: () => void;
-};
+export type ClientState = {
+  jwt: string | undefined;
+  key: string | undefined;
+  secret: string | undefined;
+};

 /** Initialize a new client for Komodo */
 export function KomodoClient(url: string, options: InitOptions) {
-  const state = {
+  const state: ClientState = {
     jwt: options.type === "jwt" ? options.params.jwt : undefined,
     key: options.type === "api-key" ? options.params.key : undefined,
     secret: options.type === "api-key" ? options.params.secret : undefined,
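For context, `InitOptions` drives how `state` is populated; a minimal sketch of constructing a client with each auth mode (the URL and credentials are placeholders):

```ts
import { KomodoClient } from "komodo_client";

// Placeholders throughout; either auth mode fills the corresponding ClientState fields.
const withJwt = KomodoClient("https://komodo.example.com", {
  type: "jwt",
  params: { jwt: "<jwt>" },
});

const withApiKey = KomodoClient("https://komodo.example.com", {
  type: "api-key",
  params: { key: "<key>", secret: "<secret>" },
});
```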
@@ -332,203 +322,21 @@ export function KomodoClient(url: string, options: InitOptions) {
     }
   };

-  const connect_terminal = ({
-    query,
-    on_message,
-    on_login,
-    on_open,
-    on_close,
-  }: {
-    query: ConnectTerminalQuery;
-  } & TerminalCallbacks) => {
-    const url_query = new URLSearchParams(
-      query as any as Record<string, string>
-    ).toString();
-    const ws = new WebSocket(
-      url.replace("http", "ws") + "/ws/terminal?" + url_query
-    );
-    // Handle login on websocket open
-    ws.onopen = () => {
-      const login_msg: WsLoginMessage =
-        options.type === "jwt"
-          ? {
-              type: "Jwt",
-              params: {
-                jwt: options.params.jwt,
-              },
-            }
-          : {
-              type: "ApiKeys",
-              params: {
-                key: options.params.key,
-                secret: options.params.secret,
-              },
-            };
-      ws.send(JSON.stringify(login_msg));
-      on_open?.();
-    };
-
-    ws.onmessage = (e) => {
-      if (e.data == "LOGGED_IN") {
-        ws.binaryType = "arraybuffer";
-        ws.onmessage = (e) => on_message?.(e);
-        on_login?.();
-        return;
-      } else {
-        on_message?.(e);
-      }
-    };
-
-    ws.onclose = () => on_close?.();
-
-    return ws;
-  };
-
-  const connect_container_exec = ({
-    query: { type, query },
-    on_message,
-    on_login,
-    on_open,
-    on_close,
-  }: {
-    query: ContainerExecQuery;
-  } & TerminalCallbacks) => {
-    const url_query = new URLSearchParams(
-      query as any as Record<string, string>
-    ).toString();
-    const ws = new WebSocket(
-      url.replace("http", "ws") + `/ws/${type}/terminal?` + url_query
-    );
-    // Handle login on websocket open
-    ws.onopen = () => {
-      const login_msg: WsLoginMessage =
-        options.type === "jwt"
-          ? {
-              type: "Jwt",
-              params: {
-                jwt: options.params.jwt,
-              },
-            }
-          : {
-              type: "ApiKeys",
-              params: {
-                key: options.params.key,
-                secret: options.params.secret,
-              },
-            };
-      ws.send(JSON.stringify(login_msg));
-      on_open?.();
-    };
-
-    ws.onmessage = (e) => {
-      if (e.data == "LOGGED_IN") {
-        ws.binaryType = "arraybuffer";
-        ws.onmessage = (e) => on_message?.(e);
-        on_login?.();
-        return;
-      } else {
-        on_message?.(e);
-      }
-    };
-
-    ws.onclose = () => on_close?.();
-
-    return ws;
-  };
-
-  const execute_terminal_stream = (request: ExecuteTerminalBody) =>
-    new Promise<AsyncIterable<string>>(async (res, rej) => {
-      try {
-        let response = await fetch(url + "/terminal/execute", {
-          method: "POST",
-          body: JSON.stringify(request),
-          headers: {
-            ...(state.jwt
-              ? {
-                  authorization: state.jwt,
-                }
-              : state.key && state.secret
-                ? {
-                    "x-api-key": state.key,
-                    "x-api-secret": state.secret,
-                  }
-                : {}),
-            "content-type": "application/json",
-          },
-        });
-        if (response.status === 200) {
-          if (response.body) {
-            const stream = response.body
-              .pipeThrough(new TextDecoderStream("utf-8"))
-              .pipeThrough(
-                new TransformStream<string, string>({
-                  start(_controller) {
-                    this.tail = "";
-                  },
-                  transform(chunk, controller) {
-                    const data = this.tail + chunk; // prepend any carry-over
-                    const parts = data.split(/\r?\n/); // split on CRLF or LF
-                    this.tail = parts.pop()!; // last item may be incomplete
-                    for (const line of parts) controller.enqueue(line);
-                  },
-                  flush(controller) {
-                    if (this.tail) controller.enqueue(this.tail); // final unterminated line
-                  },
-                } as Transformer<string, string> & { tail: string })
-              );
-            res(stream);
-          } else {
-            rej({
-              status: response.status,
-              result: { error: "No response body", trace: [] },
-            });
-          }
-        } else {
-          try {
-            const result = await response.json();
-            rej({ status: response.status, result });
-          } catch (error) {
-            rej({
-              status: response.status,
-              result: {
-                error: "Failed to get response body",
-                trace: [JSON.stringify(error)],
-              },
-              error,
-            });
-          }
-        }
-      } catch (error) {
-        rej({
-          status: 1,
-          result: {
-            error: "Request failed with error",
-            trace: [JSON.stringify(error)],
-          },
-          error,
-        });
-      }
-    });
-
-  const execute_terminal = async (
-    request: ExecuteTerminalBody,
-    callbacks?: {
-      onLine?: (line: string) => void | Promise<void>;
-      onFinish?: (code: string) => void | Promise<void>;
-    }
-  ) => {
-    const stream = await execute_terminal_stream(request);
-    for await (const line of stream) {
-      if (line.startsWith("__KOMODO_EXIT_CODE")) {
-        await callbacks?.onFinish?.(line.split(":")[1]);
-        return;
-      } else {
-        await callbacks?.onLine?.(line);
-      }
-    }
-    // This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
-    await callbacks?.onFinish?.("Early exit without code");
-  };
+  const {
+    connect_terminal,
+    execute_terminal,
+    execute_terminal_stream,
+    connect_exec,
+    connect_container_exec,
+    execute_container_exec,
+    execute_container_exec_stream,
+    connect_deployment_exec,
+    execute_deployment_exec,
+    execute_deployment_exec_stream,
+    connect_stack_exec,
+    execute_stack_exec,
+    execute_stack_exec_stream,
+  } = terminal_methods(url, state);

   return {
     /**
@@ -631,12 +439,24 @@ export function KomodoClient(url: string, options: InitOptions) {
      */
     connect_terminal,
     /**
-     * Subscribes to container exec io over websocket message,
-     * for use with xtermjs. Can connect to Deployment, Stack,
-     * or any container on a Server. The permission used to allow the connection
-     * depends on `query.type`.
+     * Executes a command on a given Server / terminal,
+     * and gives a callback to handle the output as it comes in.
+     *
+     * ```ts
+     * const stream = await komodo.execute_terminal(
+     *   {
+     *     server: "my-server",
+     *     terminal: "name",
+     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
+     *   },
+     *   {
+     *     onLine: (line) => console.log(line),
+     *     onFinish: (code) => console.log("Finished:", code),
+     *   }
+     * );
+     * ```
      */
-    connect_container_exec,
+    execute_terminal,
     /**
      * Executes a command on a given Server / terminal,
      * and returns a stream to process the output as it comes in.

@@ -662,14 +482,28 @@ export function KomodoClient(url: string, options: InitOptions) {
      */
     execute_terminal_stream,
     /**
-     * Executes a command on a given Server / terminal,
+     * Subscribes to container exec io over websocket message,
+     * for use with xtermjs. Can connect to container on a Server,
+     * or associated with a Deployment or Stack.
+     * Terminal permission on connecting resource required.
+     */
+    connect_exec,
+    /**
+     * Subscribes to container exec io over websocket message,
+     * for use with xtermjs. Can connect to Container on a Server.
+     * Server Terminal permission required.
+     */
+    connect_container_exec,
+    /**
+     * Executes a command on a given container,
      * and gives a callback to handle the output as it comes in.
      *
      * ```ts
-     * const stream = await komodo.execute_terminal(
+     * const stream = await komodo.execute_container_exec(
      *   {
      *     server: "my-server",
-     *     terminal: "name",
+     *     container: "name",
+     *     shell: "bash",
      *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
      *   },
      *   {

@@ -679,6 +513,131 @@ export function KomodoClient(url: string, options: InitOptions) {
      * );
      * ```
      */
-    execute_terminal,
+    execute_container_exec,
+    /**
+     * Executes a command on a given container,
+     * and returns a stream to process the output as it comes in.
+     *
+     * Note. The final line of the stream will usually be
+     * `__KOMODO_EXIT_CODE__:0`. The number
+     * is the exit code of the command.
+     *
+     * If this line is NOT present, it means the stream
+     * was terminated early, ie like running `exit`.
+     *
+     * ```ts
+     * const stream = await komodo.execute_container_exec_stream({
+     *   server: "my-server",
+     *   container: "name",
+     *   shell: "bash",
+     *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
+     * });
+     *
+     * for await (const line of stream) {
+     *   console.log(line);
+     * }
+     * ```
+     */
+    execute_container_exec_stream,
+    /**
+     * Subscribes to deployment container exec io over websocket message,
+     * for use with xtermjs. Can connect to Deployment container.
+     * Deployment Terminal permission required.
+     */
+    connect_deployment_exec,
+    /**
+     * Executes a command on a given deployment container,
+     * and gives a callback to handle the output as it comes in.
+     *
+     * ```ts
+     * const stream = await komodo.execute_deployment_exec(
+     *   {
+     *     deployment: "my-deployment",
+     *     shell: "bash",
+     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
+     *   },
+     *   {
+     *     onLine: (line) => console.log(line),
+     *     onFinish: (code) => console.log("Finished:", code),
+     *   }
+     * );
+     * ```
+     */
+    execute_deployment_exec,
+    /**
+     * Executes a command on a given deployment container,
+     * and returns a stream to process the output as it comes in.
+     *
+     * Note. The final line of the stream will usually be
+     * `__KOMODO_EXIT_CODE__:0`. The number
+     * is the exit code of the command.
+     *
+     * If this line is NOT present, it means the stream
+     * was terminated early, ie like running `exit`.
+     *
+     * ```ts
+     * const stream = await komodo.execute_deployment_exec_stream({
+     *   deployment: "my-deployment",
+     *   shell: "bash",
+     *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
+     * });
+     *
+     * for await (const line of stream) {
+     *   console.log(line);
+     * }
+     * ```
+     */
+    execute_deployment_exec_stream,
+    /**
+     * Subscribes to container exec io over websocket message,
+     * for use with xtermjs. Can connect to Stack service container.
+     * Stack Terminal permission required.
+     */
+    connect_stack_exec,
+    /**
+     * Executes a command on a given stack service container,
+     * and gives a callback to handle the output as it comes in.
+     *
+     * ```ts
+     * const stream = await komodo.execute_stack_exec(
+     *   {
+     *     stack: "my-stack",
+     *     service: "database",
+     *     shell: "bash",
+     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
+     *   },
+     *   {
+     *     onLine: (line) => console.log(line),
+     *     onFinish: (code) => console.log("Finished:", code),
+     *   }
+     * );
+     * ```
+     */
+    execute_stack_exec,
+    /**
+     * Executes a command on a given stack service container,
+     * and returns a stream to process the output as it comes in.
+     *
+     * Note. The final line of the stream will usually be
+     * `__KOMODO_EXIT_CODE__:0`. The number
+     * is the exit code of the command.
+     *
+     * If this line is NOT present, it means the stream
+     * was terminated early, ie like running `exit`.
+     *
+     * ```ts
+     * const stream = await komodo.execute_stack_exec_stream({
+     *   stack: "my-stack",
+     *   service: "service1",
+     *   shell: "bash",
+     *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
+     * });
+     *
+     * for await (const line of stream) {
+     *   console.log(line);
+     * }
+     * ```
+     */
+    execute_stack_exec_stream,
   };
 }
client/core/ts/src/terminal.ts (new file, 334 lines)
@@ -0,0 +1,334 @@
import { ClientState, InitOptions } from "./lib";
import {
  ConnectContainerExecQuery,
  ConnectDeploymentExecQuery,
  ConnectStackExecQuery,
  ConnectTerminalQuery,
  ExecuteContainerExecBody,
  ExecuteDeploymentExecBody,
  ExecuteStackExecBody,
  ExecuteTerminalBody,
  WsLoginMessage,
} from "./types";

export type TerminalCallbacks = {
  on_message?: (e: MessageEvent<any>) => void;
  on_login?: () => void;
  on_open?: () => void;
  on_close?: () => void;
};

export type ConnectExecQuery =
  | {
      type: "container";
      query: ConnectContainerExecQuery;
    }
  | {
      type: "deployment";
      query: ConnectDeploymentExecQuery;
    }
  | {
      type: "stack";
      query: ConnectStackExecQuery;
    };

export type ExecuteExecBody =
  | {
      type: "container";
      body: ExecuteContainerExecBody;
    }
  | {
      type: "deployment";
      body: ExecuteDeploymentExecBody;
    }
  | {
      type: "stack";
      body: ExecuteStackExecBody;
    };

export type ExecuteCallbacks = {
  onLine?: (line: string) => void | Promise<void>;
  onFinish?: (code: string) => void | Promise<void>;
};

export const terminal_methods = (url: string, state: ClientState) => {
  const connect_terminal = ({
    query,
    on_message,
    on_login,
    on_open,
    on_close,
  }: {
    query: ConnectTerminalQuery;
  } & TerminalCallbacks) => {
    const url_query = new URLSearchParams(
      query as any as Record<string, string>
    ).toString();
    const ws = new WebSocket(
      url.replace("http", "ws") + "/ws/terminal?" + url_query
    );
    // Handle login on websocket open
    ws.onopen = () => {
      const login_msg: WsLoginMessage = state.jwt
        ? {
            type: "Jwt",
            params: {
              jwt: state.jwt,
            },
          }
        : {
            type: "ApiKeys",
            params: {
              key: state.key!,
              secret: state.secret!,
            },
          };
      ws.send(JSON.stringify(login_msg));
      on_open?.();
    };

    ws.onmessage = (e) => {
      if (e.data == "LOGGED_IN") {
        ws.binaryType = "arraybuffer";
        ws.onmessage = (e) => on_message?.(e);
        on_login?.();
        return;
      } else {
        on_message?.(e);
      }
    };

    ws.onclose = () => on_close?.();

    return ws;
  };

  const execute_terminal = async (
    request: ExecuteTerminalBody,
    callbacks?: ExecuteCallbacks
  ) => {
    const stream = await execute_terminal_stream(request);
    for await (const line of stream) {
      if (line.startsWith("__KOMODO_EXIT_CODE")) {
        await callbacks?.onFinish?.(line.split(":")[1]);
        return;
      } else {
        await callbacks?.onLine?.(line);
      }
    }
    // This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
    await callbacks?.onFinish?.("Early exit without code");
  };

  const execute_terminal_stream = (request: ExecuteTerminalBody) =>
    execute_stream("/terminal/execute", request);

  const connect_container_exec = ({
    query,
    ...callbacks
  }: {
    query: ConnectContainerExecQuery;
  } & TerminalCallbacks) =>
    connect_exec({ query: { type: "container", query }, ...callbacks });

  const connect_deployment_exec = ({
    query,
    ...callbacks
  }: {
    query: ConnectDeploymentExecQuery;
  } & TerminalCallbacks) =>
    connect_exec({ query: { type: "deployment", query }, ...callbacks });

  const connect_stack_exec = ({
    query,
    ...callbacks
  }: {
    query: ConnectStackExecQuery;
  } & TerminalCallbacks) =>
    connect_exec({ query: { type: "stack", query }, ...callbacks });

  const connect_exec = ({
    query: { type, query },
    on_message,
    on_login,
    on_open,
    on_close,
  }: {
    query: ConnectExecQuery;
  } & TerminalCallbacks) => {
    const url_query = new URLSearchParams(
      query as any as Record<string, string>
    ).toString();
    const ws = new WebSocket(
      url.replace("http", "ws") + `/ws/${type}/terminal?` + url_query
    );
    // Handle login on websocket open
    ws.onopen = () => {
      const login_msg: WsLoginMessage = state.jwt
        ? {
            type: "Jwt",
            params: {
              jwt: state.jwt,
            },
          }
        : {
            type: "ApiKeys",
            params: {
              key: state.key!,
              secret: state.secret!,
            },
          };
      ws.send(JSON.stringify(login_msg));
      on_open?.();
    };

    ws.onmessage = (e) => {
      if (e.data == "LOGGED_IN") {
        ws.binaryType = "arraybuffer";
        ws.onmessage = (e) => on_message?.(e);
        on_login?.();
        return;
      } else {
        on_message?.(e);
      }
    };

    ws.onclose = () => on_close?.();

    return ws;
  };

  const execute_container_exec = (
    body: ExecuteContainerExecBody,
    callbacks?: ExecuteCallbacks
  ) => execute_exec({ type: "container", body }, callbacks);

  const execute_deployment_exec = (
    body: ExecuteDeploymentExecBody,
    callbacks?: ExecuteCallbacks
  ) => execute_exec({ type: "deployment", body }, callbacks);

  const execute_stack_exec = (
    body: ExecuteStackExecBody,
    callbacks?: ExecuteCallbacks
  ) => execute_exec({ type: "stack", body }, callbacks);

  const execute_exec = async (
    request: ExecuteExecBody,
    callbacks?: ExecuteCallbacks
  ) => {
    const stream = await execute_exec_stream(request);
    for await (const line of stream) {
      if (line.startsWith("__KOMODO_EXIT_CODE")) {
        await callbacks?.onFinish?.(line.split(":")[1]);
        return;
      } else {
        await callbacks?.onLine?.(line);
      }
    }
    // This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
    await callbacks?.onFinish?.("Early exit without code");
  };

  const execute_container_exec_stream = (body: ExecuteContainerExecBody) =>
    execute_exec_stream({ type: "container", body });

  const execute_deployment_exec_stream = (body: ExecuteDeploymentExecBody) =>
    execute_exec_stream({ type: "deployment", body });

  const execute_stack_exec_stream = (body: ExecuteStackExecBody) =>
    execute_exec_stream({ type: "stack", body });

  const execute_exec_stream = (request: ExecuteExecBody) =>
    execute_stream(`/terminal/execute/${request.type}`, request.body);

  const execute_stream = (path: string, request: any) =>
    new Promise<AsyncIterable<string>>(async (res, rej) => {
      try {
        let response = await fetch(url + path, {
          method: "POST",
          body: JSON.stringify(request),
          headers: {
            ...(state.jwt
              ? {
                  authorization: state.jwt,
                }
              : state.key && state.secret
                ? {
                    "x-api-key": state.key,
                    "x-api-secret": state.secret,
                  }
                : {}),
            "content-type": "application/json",
          },
        });
        if (response.status === 200) {
          if (response.body) {
            const stream = response.body
              .pipeThrough(new TextDecoderStream("utf-8"))
              .pipeThrough(
                new TransformStream<string, string>({
                  start(_controller) {
                    this.tail = "";
                  },
                  transform(chunk, controller) {
                    const data = this.tail + chunk; // prepend any carry-over
                    const parts = data.split(/\r?\n/); // split on CRLF or LF
                    this.tail = parts.pop()!; // last item may be incomplete
                    for (const line of parts) controller.enqueue(line);
                  },
                  flush(controller) {
                    if (this.tail) controller.enqueue(this.tail); // final unterminated line
                  },
                } as Transformer<string, string> & { tail: string })
              );
            res(stream);
          } else {
            rej({
              status: response.status,
              result: { error: "No response body", trace: [] },
            });
          }
        } else {
          try {
            const result = await response.json();
            rej({ status: response.status, result });
          } catch (error) {
            rej({
              status: response.status,
              result: {
                error: "Failed to get response body",
                trace: [JSON.stringify(error)],
              },
              error,
            });
          }
        }
      } catch (error) {
        rej({
          status: 1,
          result: {
            error: "Request failed with error",
            trace: [JSON.stringify(error)],
          },
          error,
        });
      }
    });

  return {
    connect_terminal,
    execute_terminal,
    execute_terminal_stream,
    connect_exec,
    connect_container_exec,
    execute_container_exec,
    execute_container_exec_stream,
    connect_deployment_exec,
    execute_deployment_exec,
    execute_deployment_exec_stream,
    connect_stack_exec,
    execute_stack_exec,
    execute_stack_exec_stream,
  };
};
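Since the exec websockets are designed "for use with xtermjs", here is a hypothetical wiring of `connect_container_exec` into an xterm `Terminal`. The `@xterm/xterm` import, the server/container names, and forwarding keystrokes with `ws.send` are assumptions, not confirmed by this diff:

```ts
import { Terminal } from "@xterm/xterm";
import { KomodoClient } from "komodo_client";

const komodo = KomodoClient("https://komodo.example.com", {
  type: "api-key",
  params: { key: "<key>", secret: "<secret>" },
});

const term = new Terminal();
const ws = komodo.connect_container_exec({
  query: { server: "my-server", container: "my-container", shell: "bash" },
  // After "LOGGED_IN" the socket switches to arraybuffer frames (see above).
  on_message: (e) => term.write(new Uint8Array(e.data as ArrayBuffer)),
  on_login: () => term.write("connected\r\n"),
  on_close: () => term.write("\r\nconnection closed\r\n"),
});

// Assumed: forward keystrokes from xterm into the exec session.
term.onData((data) => ws.send(data));
```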
@@ -213,6 +213,48 @@ export type ResourceTarget =
   | { type: "Alerter", id: string }
   | { type: "ResourceSync", id: string };

+/** Types of maintenance schedules */
+export enum MaintenanceScheduleType {
+  /** Daily at the specified time */
+  Daily = "Daily",
+  /** Weekly on the specified day and time */
+  Weekly = "Weekly",
+  /** One-time maintenance on a specific date and time */
+  OneTime = "OneTime",
+}
+
+/** Represents a scheduled maintenance window */
+export interface MaintenanceWindow {
+  /** Name for the maintenance window (required) */
+  name: string;
+  /** Description of what maintenance is performed (optional) */
+  description?: string;
+  /**
+   * The type of maintenance schedule:
+   * - Daily (default)
+   * - Weekly
+   * - OneTime
+   */
+  schedule_type?: MaintenanceScheduleType;
+  /** For Weekly schedules: Specify the day of the week (Monday, Tuesday, etc.) */
+  day_of_week?: string;
+  /** For OneTime window: ISO 8601 date format (YYYY-MM-DD) */
+  date?: string;
+  /** Start hour in 24-hour format (0-23) (optional, defaults to 0) */
+  hour?: number;
+  /** Start minute (0-59) (optional, defaults to 0) */
+  minute?: number;
+  /** Duration of the maintenance window in minutes (required) */
+  duration_minutes: number;
+  /**
+   * Timezone for maintenance window specification.
+   * If empty, will use Core timezone.
+   */
+  timezone?: string;
+  /** Whether this maintenance window is currently enabled */
+  enabled: boolean;
+}
+
 export interface AlerterConfig {
   /** Whether the alerter is enabled */
   enabled?: boolean;

@@ -234,6 +276,8 @@ export interface AlerterConfig {
   resources?: ResourceTarget[];
   /** DON'T send alerts on these resources. */
   except_resources?: ResourceTarget[];
+  /** Scheduled maintenance windows during which alerts will be suppressed. */
+  maintenance_windows?: MaintenanceWindow[];
 }

 export type Alerter = Resource<AlerterConfig, undefined>;
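To make the shape concrete, a sketch of a `MaintenanceWindow` as it could appear in `maintenance_windows` on an alerter or server config (the values are illustrative):

```ts
import { Types } from "komodo_client";

// Illustrative values: suppress alerts every Sunday 02:00-03:30 UTC.
const window: Types.MaintenanceWindow = {
  name: "weekly-patching",
  description: "OS updates and reboots",
  schedule_type: Types.MaintenanceScheduleType.Weekly,
  day_of_week: "Sunday",
  hour: 2,
  minute: 0,
  duration_minutes: 90,
  timezone: "Etc/UTC",
  enabled: true,
};
```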
@@ -521,6 +565,8 @@ export interface BuildConfig {
   image_tag?: string;
   /** Configure quick links that are displayed in the resource header */
   links?: string[];
+  /** Choose a Komodo Repo (Resource) to source the build files. */
+  linked_repo?: string;
   /** The git provider domain. Default: github.com */
   git_provider: string;
   /**

@@ -654,6 +700,10 @@ export interface BuildListItemInfo {
   builder_id: string;
   /** Whether build is in files on host mode. */
   files_on_host: boolean;
+  /** Whether build has UI defined dockerfile contents */
+  dockerfile_contents: boolean;
+  /** Linked repo, if one is attached. */
+  linked_repo: string;
   /** The git provider domain */
   git_provider: string;
   /** The repo used as the source of the build */

@@ -1592,7 +1642,13 @@ export interface RepoConfig {
   branch: string;
   /** Optionally set a specific commit hash. */
   commit?: string;
-  /** Explicitly specify the folder to clone the repo in. */
+  /**
+   * Explicitly specify the folder to clone the repo in.
+   * - If absolute (has leading '/')
+   *   - Used directly as the path
+   * - If relative
+   *   - Taken relative to Periphery `repo_dir` (ie `${root_directory}/repos`)
+   */
   path?: string;
   /** Whether incoming webhooks actually trigger action. */
   webhook_enabled: boolean;

@@ -1659,6 +1715,8 @@ export type GetResourceSyncActionStateResponse = ResourceSyncActionState;

 /** The sync configuration. */
 export interface ResourceSyncConfig {
+  /** Choose a Komodo Repo (Resource) to source the sync files. */
+  linked_repo?: string;
   /** The git provider domain. Default: github.com */
   git_provider: string;
   /**

@@ -1907,6 +1965,8 @@ export interface ServerConfig {
   disk_warning: number;
   /** The percentage threshold which triggers CRITICAL state for DISK. */
   disk_critical: number;
+  /** Scheduled maintenance windows during which alerts will be suppressed. */
+  maintenance_windows?: MaintenanceWindow[];
 }

 export type Server = Resource<ServerConfig, undefined>;

@@ -1973,6 +2033,8 @@ export interface StackConfig {
   destroy_before_deploy?: boolean;
   /** Whether to skip secret interpolation into the stack environment variables. */
   skip_secret_interp?: boolean;
+  /** Choose a Komodo Repo (Resource) to source the compose files. */
+  linked_repo?: string;
   /** The git provider domain. Default: github.com */
   git_provider: string;
   /**

@@ -1989,12 +2051,17 @@ export interface StackConfig {
    * for the configured git provider.
    */
   git_account?: string;
-  /** The Github repo used as the source of the build. */
+  /**
+   * The repo used as the source of the build.
+   * {namespace}/{repo_name}
+   */
   repo?: string;
   /** The branch of the repo. */
   branch: string;
   /** Optionally set a specific commit hash. */
   commit?: string;
+  /** Optionally set a specific clone path */
+  clone_path?: string;
   /**
    * By default, the Stack will `git pull` the repo after it is first cloned.
    * If this option is enabled, the repo folder will be deleted and recloned instead.

@@ -3513,6 +3580,8 @@ export interface ResourceSyncListItemInfo {
   managed: boolean;
   /** Resource paths to the files. */
   resource_path: string[];
+  /** Linked repo, if one is attached. */
+  linked_repo: string;
   /** The git provider domain. */
   git_provider: string;
   /** The Github repo used as the source of the sync resources */

@@ -3580,6 +3649,8 @@ export interface ServerListItemInfo {
   region: string;
   /** Address of the server. */
   address: string;
+  /** The Komodo Periphery version of the server. */
+  version: string;
   /** Whether server is configured to send unreachable alerts. */
   send_unreachable_alerts: boolean;
   /** Whether server is configured to send cpu alerts. */

@@ -3651,6 +3722,8 @@ export interface StackListItemInfo {
   files_on_host: boolean;
   /** Whether stack has file contents defined. */
   file_contents: boolean;
+  /** Linked repo, if one is attached. */
+  linked_repo: string;
   /** The git provider domain */
   git_provider: string;
   /** The configured repo */

@@ -4237,7 +4310,7 @@ export interface ConnectContainerExecQuery {
   server: string;
   /** The container name */
   container: string;
-  /** The shell to connect to */
+  /** The shell to use (eg. `sh` or `bash`) */
   shell: string;
 }

@@ -4249,7 +4322,7 @@ export interface ConnectDeploymentExecQuery {
   /** Deployment Id or name */
   deployment: string;
-  /** The shell to connect to */
+  /** The shell to use (eg. `sh` or `bash`) */
   shell: string;
 }

@@ -4263,7 +4336,7 @@ export interface ConnectStackExecQuery {
   stack: string;
   /** The service name to connect to */
   service: string;
-  /** The shell to connect to */
+  /** The shell to use (eg. `sh` or `bash`) */
   shell: string;
 }

@@ -5101,6 +5174,49 @@ export interface ExchangeForJwt {
   token: string;
 }

+/**
+ * Execute a command in the given container's shell.
+ * TODO: Document calling.
+ */
+export interface ExecuteContainerExecBody {
+  /** Server Id or name */
+  server: string;
+  /** The container name */
+  container: string;
+  /** The shell to use (eg. `sh` or `bash`) */
+  shell: string;
+  /** The command to execute. */
+  command: string;
+}
+
+/**
+ * Execute a command in the given container's shell.
+ * TODO: Document calling.
+ */
+export interface ExecuteDeploymentExecBody {
+  /** Deployment Id or name */
+  deployment: string;
+  /** The shell to use (eg. `sh` or `bash`) */
+  shell: string;
+  /** The command to execute. */
+  command: string;
+}
+
+/**
+ * Execute a command in the given container's shell.
+ * TODO: Document calling.
+ */
+export interface ExecuteStackExecBody {
+  /** Stack Id or name */
+  stack: string;
+  /** The service name to connect to */
+  service: string;
+  /** The shell to use (eg. `sh` or `bash`) */
+  shell: string;
+  /** The command to execute. */
+  command: string;
+}
+
 /**
  * Execute a terminal command on the given server.
  * TODO: Document calling.

@@ -5366,6 +5482,8 @@ export interface GetCoreInfoResponse {
   github_webhook_owners: string[];
   /** Whether to disable websocket automatic reconnect. */
   disable_websocket_reconnect: boolean;
+  /** TZ identifier Core is using, if manually set. */
+  timezone: string;
 }

 /** Get a specific deployment by name or id. Response: [Deployment]. */

@@ -7750,6 +7868,17 @@ export type AuthRequest =
   | { type: "ExchangeForJwt", params: ExchangeForJwt }
   | { type: "GetUser", params: GetUser };

+/** Days of the week */
+export enum DayOfWeek {
+  Monday = "Monday",
+  Tuesday = "Tuesday",
+  Wednesday = "Wednesday",
+  Thursday = "Thursday",
+  Friday = "Friday",
+  Saturday = "Saturday",
+  Sunday = "Sunday",
+}
+
 export type ExecuteRequest =
   | { type: "StartContainer", params: StartContainer }
   | { type: "RestartContainer", params: RestartContainer }
@@ -7812,6 +7941,92 @@ export type ExecuteRequest =
   | { type: "TestAlerter", params: TestAlerter }
   | { type: "RunSync", params: RunSync };

+/**
+ * One representative IANA zone for each distinct base UTC offset in the tz database.
+ * https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
+ *
+ * The `serde`/`strum` renames ensure the canonical identifier is used
+ * when serializing or parsing from a string such as `"Etc/UTC"`.
+ */
+export enum IanaTimezone {
+  /** UTC−12:00 */
+  EtcGmtMinus12 = "Etc/GMT+12",
+  /** UTC−11:00 */
+  PacificPagoPago = "Pacific/Pago_Pago",
+  /** UTC−10:00 */
+  PacificHonolulu = "Pacific/Honolulu",
+  /** UTC−09:30 */
+  PacificMarquesas = "Pacific/Marquesas",
+  /** UTC−09:00 */
+  AmericaAnchorage = "America/Anchorage",
+  /** UTC−08:00 */
+  AmericaLosAngeles = "America/Los_Angeles",
+  /** UTC−07:00 */
+  AmericaDenver = "America/Denver",
+  /** UTC−06:00 */
+  AmericaChicago = "America/Chicago",
+  /** UTC−05:00 */
+  AmericaNewYork = "America/New_York",
+  /** UTC−04:00 */
+  AmericaHalifax = "America/Halifax",
+  /** UTC−03:30 */
+  AmericaStJohns = "America/St_Johns",
+  /** UTC−03:00 */
+  AmericaSaoPaulo = "America/Sao_Paulo",
+  /** UTC−02:00 */
+  AmericaNoronha = "America/Noronha",
+  /** UTC−01:00 */
+  AtlanticAzores = "Atlantic/Azores",
+  /** UTC±00:00 */
+  EtcUtc = "Etc/UTC",
+  /** UTC+01:00 */
+  EuropeBerlin = "Europe/Berlin",
+  /** UTC+02:00 */
+  EuropeBucharest = "Europe/Bucharest",
+  /** UTC+03:00 */
+  EuropeMoscow = "Europe/Moscow",
+  /** UTC+03:30 */
+  AsiaTehran = "Asia/Tehran",
+  /** UTC+04:00 */
+  AsiaDubai = "Asia/Dubai",
+  /** UTC+04:30 */
+  AsiaKabul = "Asia/Kabul",
+  /** UTC+05:00 */
+  AsiaKarachi = "Asia/Karachi",
+  /** UTC+05:30 */
+  AsiaKolkata = "Asia/Kolkata",
+  /** UTC+05:45 */
+  AsiaKathmandu = "Asia/Kathmandu",
+  /** UTC+06:00 */
+  AsiaDhaka = "Asia/Dhaka",
+  /** UTC+06:30 */
+  AsiaYangon = "Asia/Yangon",
+  /** UTC+07:00 */
+  AsiaBangkok = "Asia/Bangkok",
+  /** UTC+08:00 */
+  AsiaShanghai = "Asia/Shanghai",
+  /** UTC+08:45 */
+  AustraliaEucla = "Australia/Eucla",
+  /** UTC+09:00 */
+  AsiaTokyo = "Asia/Tokyo",
+  /** UTC+09:30 */
+  AustraliaAdelaide = "Australia/Adelaide",
+  /** UTC+10:00 */
+  AustraliaSydney = "Australia/Sydney",
+  /** UTC+10:30 */
+  AustraliaLordHowe = "Australia/Lord_Howe",
+  /** UTC+11:00 */
+  PacificPortMoresby = "Pacific/Port_Moresby",
+  /** UTC+12:00 */
+  PacificAuckland = "Pacific/Auckland",
+  /** UTC+12:45 */
+  PacificChatham = "Pacific/Chatham",
+  /** UTC+13:00 */
+  PacificTongatapu = "Pacific/Tongatapu",
+  /** UTC+14:00 */
+  PacificKiritimati = "Pacific/Kiritimati",
+}
+
 /** Configuration for the registry to push the built image to. */
 export type ImageRegistryLegacy1_14 =
   /** Don't push the image to any registry */
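Because each enum value is the canonical IANA identifier, the member can be dropped into any field that takes a timezone string, for example the `timezone` on a maintenance window (a small sketch):

```ts
import { Types } from "komodo_client";

// The enum member is a plain string equal to its canonical identifier.
const tz: string = Types.IanaTimezone.AmericaNewYork; // "America/New_York"
```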
@@ -1,4 +1,6 @@
-use komodo_client::entities::{FileContents, update::Log};
+use komodo_client::entities::{
+  FileContents, repo::Repo, update::Log,
+};
 use resolver_api::Resolve;
 use serde::{Deserialize, Serialize};

@@ -7,6 +9,8 @@ use serde::{Deserialize, Serialize};
 #[error(serror::Error)]
 pub struct Build {
   pub build: komodo_client::entities::build::Build,
+  /// Send the linked repo if it exists.
+  pub repo: Option<Repo>,
   /// Override registry token with one sent from core.
   pub registry_token: Option<String>,
   /// Propagate any secret replacers from core interpolation.
@@ -1,5 +1,6 @@
 use komodo_client::entities::{
   FileContents, SearchCombinator,
+  repo::Repo,
   stack::{ComposeProject, Stack, StackServiceNames},
   update::Log,
 };

@@ -119,6 +120,8 @@ pub struct WriteComposeContentsToHost {
 pub struct WriteCommitComposeContents {
   /// The stack to write to.
   pub stack: Stack,
+  /// Optional linked repo.
+  pub repo: Option<Repo>,
   /// The username of user which committed the file.
   pub username: Option<String>,
   /// Relative to the stack folder + run directory.

@@ -143,9 +146,11 @@ pub struct ComposePull {
   /// If empty, will pull all services.
   #[serde(default)]
   pub services: Vec<String>,
-  /// If provided, use it to log in. Otherwise check periphery local registries.
+  /// The linked repo, if it exists.
+  pub repo: Option<Repo>,
+  /// If provided, use it to log in. Otherwise check periphery local git providers.
   pub git_token: Option<String>,
+  /// If provided, use it to log in. Otherwise check periphery local registry providers.
   pub registry_token: Option<String>,
 }

@@ -168,6 +173,8 @@ pub struct ComposeUp {
   /// If empty, will deploy all services.
   #[serde(default)]
   pub services: Vec<String>,
-  /// If provided, use it to log in. Otherwise check periphery local registries.
+  /// The linked repo, if it exists.
+  pub repo: Option<Repo>,
+  /// If provided, use it to log in. Otherwise check periphery local git providers.

@@ -82,8 +82,13 @@ pub struct PullOrCloneRepo {

 #[derive(Debug, Clone, Default, Serialize, Deserialize)]
 pub struct RepoActionResponse {
   /// Response logs
   pub logs: Vec<Log>,
+  /// Absolute path to the repo root on the host.
+  pub path: PathBuf,
   /// Latest short commit hash, if it could be retrieved
   pub commit_hash: Option<String>,
   /// Latest commit message, if it could be retrieved
   pub commit_message: Option<String>,
   /// Don't need to send this one to core, it's only needed for calls local to single periphery
   #[serde(skip_serializing)]
@@ -76,6 +76,7 @@ pub struct ConnectTerminalQuery {

 //

+/// Note: The `terminal` must already exist, created by [CreateTerminal].
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub struct ExecuteTerminalBody {
   /// Specify the terminal to execute the command on.

@@ -86,8 +87,6 @@ pub struct ExecuteTerminalBody {

 //

-//
-
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub struct ConnectContainerExecQuery {
   /// Use [CreateTerminalAuthToken] to create a single-use

@@ -101,6 +100,20 @@ pub struct ConnectContainerExecQuery {
   pub shell: String,
 }

+//
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ExecuteContainerExecBody {
+  /// The name of the container to execute command in.
+  pub container: String,
+  /// The shell to start inside container.
+  /// Default: `sh`
+  #[serde(default = "default_container_shell")]
+  pub shell: String,
+  /// The command to execute.
+  pub command: String,
+}
+
 fn default_container_shell() -> String {
   String::from("sh")
 }
@@ -2,17 +2,12 @@ use std::sync::Arc;

 use anyhow::Context;
 use komodo_client::terminal::TerminalStreamResponse;
+use reqwest::RequestBuilder;
 use rustls::{ClientConfig, client::danger::ServerCertVerifier};
 use tokio::net::TcpStream;
 use tokio_tungstenite::{Connector, MaybeTlsStream, WebSocketStream};

-use crate::{
-  PeripheryClient,
-  api::terminal::{
-    ConnectContainerExecQuery, ConnectTerminalQuery,
-    CreateTerminalAuthToken, ExecuteTerminalBody,
-  },
-};
+use crate::{PeripheryClient, api::terminal::*};

 impl PeripheryClient {
   /// Handles ws connect and login.

@@ -44,6 +39,35 @@ impl PeripheryClient {
     connect_websocket(&url).await
   }

+  /// Executes command on specified terminal,
+  /// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
+  /// sentinel value as the expected final line of the stream.
+  ///
+  /// Example final line:
+  /// ```
+  /// __KOMODO_EXIT_CODE:0
+  /// ```
+  ///
+  /// This means the command exited with code 0 (success).
+  ///
+  /// If this value is NOT the final item before stream closes, it means
+  /// the terminal exited mid command, before giving status. Example: running `exit`.
+  #[tracing::instrument(level = "debug", skip(self))]
+  pub async fn execute_terminal(
+    &self,
+    terminal: String,
+    command: String,
+  ) -> anyhow::Result<TerminalStreamResponse> {
+    tracing::trace!(
+      "sending request | type: ExecuteTerminal | terminal name: {terminal} | command: {command}",
+    );
+    let req = crate::periphery_http_client()
+      .post(format!("{}/terminal/execute", self.address))
+      .json(&ExecuteTerminalBody { terminal, command })
+      .header("authorization", &self.passkey);
+    terminal_stream_response(req).await
+  }
+
   /// Handles ws connect and login.
   /// Does not handle reconnect.
   pub async fn connect_container_exec(

@@ -75,7 +99,7 @@ impl PeripheryClient {
     connect_websocket(&url).await
   }

-  /// Executes command on specified terminal,
+  /// Executes command on specified container,
   /// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
   /// sentinel value as the expected final line of the stream.
   ///

@@ -87,42 +111,26 @@ impl PeripheryClient {
   /// This means the command exited with code 0 (success).
   ///
   /// If this value is NOT the final item before stream closes, it means
-  /// the terminal exited mid command, before giving status. Example: running `exit`.
+  /// the container shell exited mid command, before giving status. Example: running `exit`.
   #[tracing::instrument(level = "debug", skip(self))]
-  pub async fn execute_terminal(
+  pub async fn execute_container_exec(
     &self,
-    terminal: String,
+    container: String,
+    shell: String,
     command: String,
   ) -> anyhow::Result<TerminalStreamResponse> {
     tracing::trace!(
-      "sending request | type: ExecuteTerminal | terminal name: {terminal} | command: {command}",
+      "sending request | type: ExecuteContainerExec | container: {container} | shell: {shell} | command: {command}",
     );
     let req = crate::periphery_http_client()
-      .post(format!("{}/terminal/execute", self.address))
-      .json(&ExecuteTerminalBody { terminal, command })
+      .post(format!("{}/terminal/execute/container", self.address))
+      .json(&ExecuteContainerExecBody {
+        container,
+        shell,
+        command,
+      })
       .header("authorization", &self.passkey);
-    let res =
-      req.send().await.context("Failed at request to periphery")?;
-    let status = res.status();
-    tracing::debug!(
-      "got response | type: ExecuteTerminal | {status} | response: {res:?}",
-    );
-    if status.is_success() {
-      Ok(TerminalStreamResponse(res))
-    } else {
-      tracing::debug!("response is non-200");
-
-      let text = res
-        .text()
-        .await
-        .context("Failed to convert response to text")?;
-
-      tracing::debug!("got response text, deserializing error");
-
-      let error = serror::deserialize_error(text).context(status);
-
-      Err(error)
-    }
+    terminal_stream_response(req).await
   }
 }

@@ -144,16 +152,47 @@ async fn connect_websocket(
       ))),
     )
     .await
-    .context("failed to connect to websocket")?
+    .with_context(|| {
+      format!("failed to connect to websocket | url: {url}")
+    })?
   } else {
     tokio_tungstenite::connect_async(url)
       .await
-      .context("failed to connect to websocket")?
+      .with_context(|| {
+        format!("failed to connect to websocket | url: {url}")
+      })?
   };

   Ok(stream)
 }

+async fn terminal_stream_response(
+  req: RequestBuilder,
+) -> anyhow::Result<TerminalStreamResponse> {
+  let res =
+    req.send().await.context("Failed at request to periphery")?;
+  let status = res.status();
+  tracing::debug!(
+    "got response | type: ExecuteTerminal | {status} | response: {res:?}",
+  );
+  if status.is_success() {
+    Ok(TerminalStreamResponse(res))
+  } else {
+    tracing::debug!("response is non-200");
+
+    let text = res
+      .text()
+      .await
+      .context("Failed to convert response to text")?;
+
+    tracing::debug!("got response text, deserializing error");
+
+    let error = serror::deserialize_error(text).context(status);
+
+    Err(error)
+  }
+}
+
 #[derive(Debug)]
 struct InsecureVerifier;
@@ -43,14 +43,13 @@ KOMODO_DISABLE_CONFIRM_DIALOG=false

 ## Rate Komodo polls your servers for
 ## status / container status / system stats / alerting.
-## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min.
+## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min, 15-min
 ## Default: 15-sec
 KOMODO_MONITORING_INTERVAL="15-sec"
-## Rate Komodo polls Resources for updates,
-## like outdated commit hash.
-## Options: 1-min, 5-min, 15-min, 30-min, 1-hr.
-## Default: 5-min
-KOMODO_RESOURCE_POLL_INTERVAL="5-min"
+## Interval at which to poll Resources for any updates / automated actions.
+## Options: 15-min, 1-hr, 2-hr, 6-hr, 12-hr, 1-day
+## Default: 1-hr
+KOMODO_RESOURCE_POLL_INTERVAL="1-hr"

 ## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE
 KOMODO_WEBHOOK_SECRET=a_random_secret
@@ -244,6 +244,23 @@ github_oauth.id = ""
## Required if github_oauth is enabled.
github_oauth.secret = ""

##################
# POLL INTERVALS #
##################

## Controls the rate at which servers are polled for health, system stats, and container status.
## This affects network usage, and the size of the stats stored in mongo.
## Env: KOMODO_MONITORING_INTERVAL
## Options: 1-sec, 5-sec, 15-sec, 30-sec, 1-min, 2-min, 5-min, 15-min
## Default: 15-sec
monitoring_interval = "15-sec"

## Interval at which to poll Resources for any updates / automated actions.
## Env: KOMODO_RESOURCE_POLL_INTERVAL
## Options: `15-min`, `1-hr`, `2-hr`, `6-hr`, `12-hr`, `1-day`
## Default: 1-hr
resource_poll_interval = "1-hr"

############
# Security #
############
@@ -392,23 +409,6 @@ keep_stats_for_days = 14
## Default: 14
keep_alerts_for_days = 14

##################
# POLL INTERVALS #
##################

## Controls the rate at which servers are polled for health, system stats, and container status.
## This affects network usage, and the size of the stats stored in mongo.
## Env: KOMODO_MONITORING_INTERVAL
## Options: 1-sec, 5-sec, 15-sec, 30-sec, 1-min, 2-min, 5-min, 15-min
## Default: 15-sec
monitoring_interval = "15-sec"

## Interval at which to poll Resources for any updates / automated actions.
## Env: KOMODO_RESOURCE_POLL_INTERVAL
## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`.
## Default: 5-min
resource_poll_interval = "5-min"

###################
# CLOUD PROVIDERS #
###################

@@ -40,7 +40,9 @@ Some features are additionally gated behind a specific permission for that feature
  - If given on a `Stack` or `Deployment`, this allows container exec terminal (even without `Terminal` on `Server`).
- **`Attach`**: User can "attach" *other resources* to the resource.
  - If given on a `Server`, allows users to attach `Stacks`, `Deployments`, `Repos`, and `Builders`.
  - If given on a `Builder`, allows users to attach `Builds`.
  - If given on a `Builder`, allows users to attach `Builds` and `Repos`.
  - If given on a `Build`, allows users to attach it to `Deployments`.
  - If given on a `Repo`, allows users to attach it to `Stacks`, `Builds`, and `Resource Syncs`.
- **`Processes`**: User can retrieve the full running process list on the `Server`.

## Permissioning by Resource Type

@@ -2,6 +2,4 @@

Most version upgrades only require a redeployment of the Core container after pulling the latest version, and are fully backward compatible with the periphery clients, which may be updated later on as convenient. This is the default, and will be the case unless specifically mentioned in the [version release notes](https://github.com/moghtech/komodo/releases).

Some Core API upgrades may change behavior such as building / cloning, and require updating the Periphery binaries to match the Core version before this functionality can be restored. This will be specifically mentioned in the release notes.

Additionally, some Core API upgrades may include database schema changes, and require a database migration. This can be accomplished by using the [komodo migrator](https://github.com/moghtech/komodo/blob/main/bin/migrator/README.md) for the particular version upgrade before upgrading the Core API container.
frontend/public/client/lib.d.ts (vendored, 205 lines changed)
@@ -1,6 +1,8 @@
import { AuthResponses, ExecuteResponses, ReadResponses, UserResponses, WriteResponses } from "./responses.js";
import { AuthRequest, ConnectContainerExecQuery, ConnectDeploymentExecQuery, ConnectStackExecQuery, ConnectTerminalQuery, ExecuteRequest, ExecuteTerminalBody, ReadRequest, Update, UpdateListItem, UserRequest, WriteRequest } from "./types.js";
import { ConnectExecQuery, ExecuteExecBody, TerminalCallbacks } from "./terminal.js";
import { AuthRequest, ConnectTerminalQuery, ExecuteRequest, ExecuteTerminalBody, ReadRequest, Update, UpdateListItem, UserRequest, WriteRequest } from "./types.js";
export * as Types from "./types.js";
export type { ConnectExecQuery, ExecuteExecBody, TerminalCallbacks };
export type InitOptions = {
    type: "jwt";
    params: {
@@ -18,21 +20,10 @@ export declare class CancelToken {
    constructor();
    cancel(): void;
}
export type ContainerExecQuery = {
    type: "container";
    query: ConnectContainerExecQuery;
} | {
    type: "deployment";
    query: ConnectDeploymentExecQuery;
} | {
    type: "stack";
    query: ConnectStackExecQuery;
};
export type TerminalCallbacks = {
    on_message?: (e: MessageEvent<any>) => void;
    on_login?: () => void;
    on_open?: () => void;
    on_close?: () => void;
};
export type ClientState = {
    jwt: string | undefined;
    key: string | undefined;
    secret: string | undefined;
};
/** Initialize a new client for Komodo */
export declare function KomodoClient(url: string, options: InitOptions): {
@@ -167,14 +158,24 @@ export declare function KomodoClient(url: string, options: InitOptions): {
        query: ConnectTerminalQuery;
    } & TerminalCallbacks) => WebSocket;
    /**
     * Subscribes to container exec io over websocket message,
     * for use with xtermjs. Can connect to Deployment, Stack,
     * or any container on a Server. The permission used to allow the connection
     * depends on `query.type`.
     * Executes a command on a given Server / terminal,
     * and gives a callback to handle the output as it comes in.
     *
     * ```ts
     * const stream = await komodo.execute_terminal(
     *   {
     *     server: "my-server",
     *     terminal: "name",
     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     *   },
     *   {
     *     onLine: (line) => console.log(line),
     *     onFinish: (code) => console.log("Finished:", code),
     *   }
     * );
     * ```
     */
    connect_container_exec: ({ query: { type, query }, on_message, on_login, on_open, on_close, }: {
        query: ContainerExecQuery;
    } & TerminalCallbacks) => WebSocket;
    execute_terminal: (request: ExecuteTerminalBody, callbacks?: import("./terminal.js").ExecuteCallbacks) => Promise<void>;
    /**
     * Executes a command on a given Server / terminal,
     * and returns a stream to process the output as it comes in.
@@ -200,14 +201,32 @@ export declare function KomodoClient(url: string, options: InitOptions): {
     */
    execute_terminal_stream: (request: ExecuteTerminalBody) => Promise<AsyncIterable<string>>;
    /**
     * Executes a command on a given Server / terminal,
     * Subscribes to container exec io over websocket message,
     * for use with xtermjs. Can connect to container on a Server,
     * or associated with a Deployment or Stack.
     * Terminal permission on connecting resource required.
     */
    connect_exec: ({ query: { type, query }, on_message, on_login, on_open, on_close, }: {
        query: ConnectExecQuery;
    } & TerminalCallbacks) => WebSocket;
    /**
     * Subscribes to container exec io over websocket message,
     * for use with xtermjs. Can connect to Container on a Server.
     * Server Terminal permission required.
     */
    connect_container_exec: ({ query, ...callbacks }: {
        query: import("./types.js").ConnectContainerExecQuery;
    } & TerminalCallbacks) => WebSocket;
    /**
     * Executes a command on a given container,
     * and gives a callback to handle the output as it comes in.
     *
     * ```ts
     * const stream = await komodo.execute_terminal(
     * const stream = await komodo.execute_container_exec(
     *   {
     *     server: "my-server",
     *     terminal: "name",
     *     container: "name",
     *     shell: "bash",
     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     *   },
     *   {
@@ -217,8 +236,134 @@ export declare function KomodoClient(url: string, options: InitOptions): {
     * );
     * ```
     */
    execute_terminal: (request: ExecuteTerminalBody, callbacks?: {
        onLine?: (line: string) => void | Promise<void>;
        onFinish?: (code: string) => void | Promise<void>;
    }) => Promise<void>;
    execute_container_exec: (body: import("./types.js").ExecuteContainerExecBody, callbacks?: import("./terminal.js").ExecuteCallbacks) => Promise<void>;
    /**
     * Executes a command on a given container,
     * and returns a stream to process the output as it comes in.
     *
     * Note. The final line of the stream will usually be
     * `__KOMODO_EXIT_CODE__:0`. The number
     * is the exit code of the command.
     *
     * If this line is NOT present, it means the stream
     * was terminated early, i.e. like running `exit`.
     *
     * ```ts
     * const stream = await komodo.execute_container_exec_stream({
     *   server: "my-server",
     *   container: "name",
     *   shell: "bash",
     *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     * });
     *
     * for await (const line of stream) {
     *   console.log(line);
     * }
     * ```
     */
    execute_container_exec_stream: (body: import("./types.js").ExecuteContainerExecBody) => Promise<AsyncIterable<string>>;
    /**
     * Subscribes to deployment container exec io over websocket message,
     * for use with xtermjs. Can connect to Deployment container.
     * Deployment Terminal permission required.
     */
    connect_deployment_exec: ({ query, ...callbacks }: {
        query: import("./types.js").ConnectDeploymentExecQuery;
    } & TerminalCallbacks) => WebSocket;
    /**
     * Executes a command on a given deployment container,
     * and gives a callback to handle the output as it comes in.
     *
     * ```ts
     * const stream = await komodo.execute_deployment_exec(
     *   {
     *     deployment: "my-deployment",
     *     shell: "bash",
     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     *   },
     *   {
     *     onLine: (line) => console.log(line),
     *     onFinish: (code) => console.log("Finished:", code),
     *   }
     * );
     * ```
     */
    execute_deployment_exec: (body: import("./types.js").ExecuteDeploymentExecBody, callbacks?: import("./terminal.js").ExecuteCallbacks) => Promise<void>;
    /**
     * Executes a command on a given deployment container,
     * and returns a stream to process the output as it comes in.
     *
     * Note. The final line of the stream will usually be
     * `__KOMODO_EXIT_CODE__:0`. The number
     * is the exit code of the command.
     *
     * If this line is NOT present, it means the stream
     * was terminated early, i.e. like running `exit`.
     *
     * ```ts
     * const stream = await komodo.execute_deployment_exec_stream({
     *   deployment: "my-deployment",
     *   shell: "bash",
     *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     * });
     *
     * for await (const line of stream) {
     *   console.log(line);
     * }
     * ```
     */
    execute_deployment_exec_stream: (body: import("./types.js").ExecuteDeploymentExecBody) => Promise<AsyncIterable<string>>;
    /**
     * Subscribes to container exec io over websocket message,
     * for use with xtermjs. Can connect to Stack service container.
     * Stack Terminal permission required.
     */
    connect_stack_exec: ({ query, ...callbacks }: {
        query: import("./types.js").ConnectStackExecQuery;
    } & TerminalCallbacks) => WebSocket;
    /**
     * Executes a command on a given stack service container,
     * and gives a callback to handle the output as it comes in.
     *
     * ```ts
     * const stream = await komodo.execute_stack_exec(
     *   {
     *     stack: "my-stack",
     *     service: "database",
     *     shell: "bash",
     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     *   },
     *   {
     *     onLine: (line) => console.log(line),
     *     onFinish: (code) => console.log("Finished:", code),
     *   }
     * );
     * ```
     */
    execute_stack_exec: (body: import("./types.js").ExecuteStackExecBody, callbacks?: import("./terminal.js").ExecuteCallbacks) => Promise<void>;
    /**
     * Executes a command on a given stack service container,
     * and returns a stream to process the output as it comes in.
     *
     * Note. The final line of the stream will usually be
     * `__KOMODO_EXIT_CODE__:0`. The number
     * is the exit code of the command.
     *
     * If this line is NOT present, it means the stream
     * was terminated early, i.e. like running `exit`.
     *
     * ```ts
     * const stream = await komodo.execute_stack_exec_stream({
     *   stack: "my-stack",
     *   service: "service1",
     *   shell: "bash",
     *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     * });
     *
     * for await (const line of stream) {
     *   console.log(line);
     * }
     * ```
     */
    execute_stack_exec_stream: (body: import("./types.js").ExecuteStackExecBody) => Promise<AsyncIterable<string>>;
};

frontend/public/client/lib.js
@@ -1,3 +1,4 @@
import { terminal_methods, } from "./terminal.js";
import { UpdateStatus, } from "./types.js";
export * as Types from "./types.js";
export class CancelToken {
@@ -172,168 +173,7 @@ export function KomodoClient(url, options) {
            }
        }
    };
    const connect_terminal = ({ query, on_message, on_login, on_open, on_close, }) => {
        const url_query = new URLSearchParams(query).toString();
        const ws = new WebSocket(url.replace("http", "ws") + "/ws/terminal?" + url_query);
        // Handle login on websocket open
        ws.onopen = () => {
            const login_msg = options.type === "jwt"
                ? {
                    type: "Jwt",
                    params: {
                        jwt: options.params.jwt,
                    },
                }
                : {
                    type: "ApiKeys",
                    params: {
                        key: options.params.key,
                        secret: options.params.secret,
                    },
                };
            ws.send(JSON.stringify(login_msg));
            on_open?.();
        };
        ws.onmessage = (e) => {
            if (e.data == "LOGGED_IN") {
                ws.binaryType = "arraybuffer";
                ws.onmessage = (e) => on_message?.(e);
                on_login?.();
                return;
            }
            else {
                on_message?.(e);
            }
        };
        ws.onclose = () => on_close?.();
        return ws;
    };
    const connect_container_exec = ({ query: { type, query }, on_message, on_login, on_open, on_close, }) => {
        const url_query = new URLSearchParams(query).toString();
        const ws = new WebSocket(url.replace("http", "ws") + `/ws/${type}/terminal?` + url_query);
        // Handle login on websocket open
        ws.onopen = () => {
            const login_msg = options.type === "jwt"
                ? {
                    type: "Jwt",
                    params: {
                        jwt: options.params.jwt,
                    },
                }
                : {
                    type: "ApiKeys",
                    params: {
                        key: options.params.key,
                        secret: options.params.secret,
                    },
                };
            ws.send(JSON.stringify(login_msg));
            on_open?.();
        };
        ws.onmessage = (e) => {
            if (e.data == "LOGGED_IN") {
                ws.binaryType = "arraybuffer";
                ws.onmessage = (e) => on_message?.(e);
                on_login?.();
                return;
            }
            else {
                on_message?.(e);
            }
        };
        ws.onclose = () => on_close?.();
        return ws;
    };
    const execute_terminal_stream = (request) => new Promise(async (res, rej) => {
        try {
            let response = await fetch(url + "/terminal/execute", {
                method: "POST",
                body: JSON.stringify(request),
                headers: {
                    ...(state.jwt
                        ? {
                            authorization: state.jwt,
                        }
                        : state.key && state.secret
                            ? {
                                "x-api-key": state.key,
                                "x-api-secret": state.secret,
                            }
                            : {}),
                    "content-type": "application/json",
                },
            });
            if (response.status === 200) {
                if (response.body) {
                    const stream = response.body
                        .pipeThrough(new TextDecoderStream("utf-8"))
                        .pipeThrough(new TransformStream({
                        start(_controller) {
                            this.tail = "";
                        },
                        transform(chunk, controller) {
                            const data = this.tail + chunk; // prepend any carry-over
                            const parts = data.split(/\r?\n/); // split on CRLF or LF
                            this.tail = parts.pop(); // last item may be incomplete
                            for (const line of parts)
                                controller.enqueue(line);
                        },
                        flush(controller) {
                            if (this.tail)
                                controller.enqueue(this.tail); // final unterminated line
                        },
                    }));
                    res(stream);
                }
                else {
                    rej({
                        status: response.status,
                        result: { error: "No response body", trace: [] },
                    });
                }
            }
            else {
                try {
                    const result = await response.json();
                    rej({ status: response.status, result });
                }
                catch (error) {
                    rej({
                        status: response.status,
                        result: {
                            error: "Failed to get response body",
                            trace: [JSON.stringify(error)],
                        },
                        error,
                    });
                }
            }
        }
        catch (error) {
            rej({
                status: 1,
                result: {
                    error: "Request failed with error",
                    trace: [JSON.stringify(error)],
                },
                error,
            });
        }
    });
    const execute_terminal = async (request, callbacks) => {
        const stream = await execute_terminal_stream(request);
        for await (const line of stream) {
            if (line.startsWith("__KOMODO_EXIT_CODE")) {
                await callbacks?.onFinish?.(line.split(":")[1]);
                return;
            }
            else {
                await callbacks?.onLine?.(line);
            }
        }
        // This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
        await callbacks?.onFinish?.("Early exit without code");
    };
    const { connect_terminal, execute_terminal, execute_terminal_stream, connect_exec, connect_container_exec, execute_container_exec, execute_container_exec_stream, connect_deployment_exec, execute_deployment_exec, execute_deployment_exec_stream, connect_stack_exec, execute_stack_exec, execute_stack_exec_stream, } = terminal_methods(url, state);
    return {
        /**
         * Call the `/auth` api.
@@ -435,12 +275,24 @@ export function KomodoClient(url, options) {
         */
        connect_terminal,
        /**
         * Subscribes to container exec io over websocket message,
         * for use with xtermjs. Can connect to Deployment, Stack,
         * or any container on a Server. The permission used to allow the connection
         * depends on `query.type`.
         * Executes a command on a given Server / terminal,
         * and gives a callback to handle the output as it comes in.
         *
         * ```ts
         * const stream = await komodo.execute_terminal(
         *   {
         *     server: "my-server",
         *     terminal: "name",
         *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
         *   },
         *   {
         *     onLine: (line) => console.log(line),
         *     onFinish: (code) => console.log("Finished:", code),
         *   }
         * );
         * ```
         */
        connect_container_exec,
        execute_terminal,
        /**
         * Executes a command on a given Server / terminal,
         * and returns a stream to process the output as it comes in.
@@ -466,14 +318,28 @@ export function KomodoClient(url, options) {
         */
        execute_terminal_stream,
        /**
         * Executes a command on a given Server / terminal,
         * Subscribes to container exec io over websocket message,
         * for use with xtermjs. Can connect to container on a Server,
         * or associated with a Deployment or Stack.
         * Terminal permission on connecting resource required.
         */
        connect_exec,
        /**
         * Subscribes to container exec io over websocket message,
         * for use with xtermjs. Can connect to Container on a Server.
         * Server Terminal permission required.
         */
        connect_container_exec,
        /**
         * Executes a command on a given container,
         * and gives a callback to handle the output as it comes in.
         *
         * ```ts
         * const stream = await komodo.execute_terminal(
         * const stream = await komodo.execute_container_exec(
         *   {
         *     server: "my-server",
         *     terminal: "name",
         *     container: "name",
         *     shell: "bash",
         *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
         *   },
         *   {
@@ -483,6 +349,131 @@ export function KomodoClient(url, options) {
         * );
         * ```
         */
        execute_terminal,
        execute_container_exec,
        /**
         * Executes a command on a given container,
         * and returns a stream to process the output as it comes in.
         *
         * Note. The final line of the stream will usually be
         * `__KOMODO_EXIT_CODE__:0`. The number
         * is the exit code of the command.
         *
         * If this line is NOT present, it means the stream
         * was terminated early, i.e. like running `exit`.
         *
         * ```ts
         * const stream = await komodo.execute_container_exec_stream({
         *   server: "my-server",
         *   container: "name",
         *   shell: "bash",
         *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
         * });
         *
         * for await (const line of stream) {
         *   console.log(line);
         * }
         * ```
         */
        execute_container_exec_stream,
        /**
         * Subscribes to deployment container exec io over websocket message,
         * for use with xtermjs. Can connect to Deployment container.
         * Deployment Terminal permission required.
         */
        connect_deployment_exec,
        /**
         * Executes a command on a given deployment container,
         * and gives a callback to handle the output as it comes in.
         *
         * ```ts
         * const stream = await komodo.execute_deployment_exec(
         *   {
         *     deployment: "my-deployment",
         *     shell: "bash",
         *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
         *   },
         *   {
         *     onLine: (line) => console.log(line),
         *     onFinish: (code) => console.log("Finished:", code),
         *   }
         * );
         * ```
         */
        execute_deployment_exec,
        /**
         * Executes a command on a given deployment container,
         * and returns a stream to process the output as it comes in.
         *
         * Note. The final line of the stream will usually be
         * `__KOMODO_EXIT_CODE__:0`. The number
         * is the exit code of the command.
         *
         * If this line is NOT present, it means the stream
         * was terminated early, i.e. like running `exit`.
         *
         * ```ts
         * const stream = await komodo.execute_deployment_exec_stream({
         *   deployment: "my-deployment",
         *   shell: "bash",
         *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
         * });
         *
         * for await (const line of stream) {
         *   console.log(line);
         * }
         * ```
         */
        execute_deployment_exec_stream,
        /**
         * Subscribes to container exec io over websocket message,
         * for use with xtermjs. Can connect to Stack service container.
         * Stack Terminal permission required.
         */
        connect_stack_exec,
        /**
         * Executes a command on a given stack service container,
         * and gives a callback to handle the output as it comes in.
         *
         * ```ts
         * const stream = await komodo.execute_stack_exec(
         *   {
         *     stack: "my-stack",
         *     service: "database",
         *     shell: "bash",
         *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
         *   },
         *   {
         *     onLine: (line) => console.log(line),
         *     onFinish: (code) => console.log("Finished:", code),
         *   }
         * );
         * ```
         */
        execute_stack_exec,
        /**
         * Executes a command on a given stack service container,
         * and returns a stream to process the output as it comes in.
         *
         * Note. The final line of the stream will usually be
         * `__KOMODO_EXIT_CODE__:0`. The number
         * is the exit code of the command.
         *
         * If this line is NOT present, it means the stream
         * was terminated early, i.e. like running `exit`.
         *
         * ```ts
         * const stream = await komodo.execute_stack_exec_stream({
         *   stack: "my-stack",
         *   service: "service1",
         *   shell: "bash",
         *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
         * });
         *
         * for await (const line of stream) {
         *   console.log(line);
         * }
         * ```
         */
        execute_stack_exec_stream,
    };
}

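The refactor above moves every terminal/exec method into `terminal.js` and re-exports them from the client. A minimal usage sketch tying it together (the URL and JWT are placeholders, not values from this diff):

```ts
import { KomodoClient } from "komodo_client";

// Assumption for the example: a reachable Core instance and a valid JWT.
const komodo = KomodoClient("https://komodo.example.com", {
  type: "jwt",
  params: { jwt: "<jwt>" },
});

// Runs a command in a named server terminal, streaming output line by line.
await komodo.execute_terminal(
  { server: "my-server", terminal: "name", command: "echo hi" },
  { onLine: (line) => console.log(line) }
);
```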
frontend/public/client/terminal.d.ts (vendored, new file, 57 lines)
@@ -0,0 +1,57 @@
import { ClientState } from "./lib";
import { ConnectContainerExecQuery, ConnectDeploymentExecQuery, ConnectStackExecQuery, ConnectTerminalQuery, ExecuteContainerExecBody, ExecuteDeploymentExecBody, ExecuteStackExecBody, ExecuteTerminalBody } from "./types";
export type TerminalCallbacks = {
    on_message?: (e: MessageEvent<any>) => void;
    on_login?: () => void;
    on_open?: () => void;
    on_close?: () => void;
};
export type ConnectExecQuery = {
    type: "container";
    query: ConnectContainerExecQuery;
} | {
    type: "deployment";
    query: ConnectDeploymentExecQuery;
} | {
    type: "stack";
    query: ConnectStackExecQuery;
};
export type ExecuteExecBody = {
    type: "container";
    body: ExecuteContainerExecBody;
} | {
    type: "deployment";
    body: ExecuteDeploymentExecBody;
} | {
    type: "stack";
    body: ExecuteStackExecBody;
};
export type ExecuteCallbacks = {
    onLine?: (line: string) => void | Promise<void>;
    onFinish?: (code: string) => void | Promise<void>;
};
export declare const terminal_methods: (url: string, state: ClientState) => {
    connect_terminal: ({ query, on_message, on_login, on_open, on_close, }: {
        query: ConnectTerminalQuery;
    } & TerminalCallbacks) => WebSocket;
    execute_terminal: (request: ExecuteTerminalBody, callbacks?: ExecuteCallbacks) => Promise<void>;
    execute_terminal_stream: (request: ExecuteTerminalBody) => Promise<AsyncIterable<string>>;
    connect_exec: ({ query: { type, query }, on_message, on_login, on_open, on_close, }: {
        query: ConnectExecQuery;
    } & TerminalCallbacks) => WebSocket;
    connect_container_exec: ({ query, ...callbacks }: {
        query: ConnectContainerExecQuery;
    } & TerminalCallbacks) => WebSocket;
    execute_container_exec: (body: ExecuteContainerExecBody, callbacks?: ExecuteCallbacks) => Promise<void>;
    execute_container_exec_stream: (body: ExecuteContainerExecBody) => Promise<AsyncIterable<string>>;
    connect_deployment_exec: ({ query, ...callbacks }: {
        query: ConnectDeploymentExecQuery;
    } & TerminalCallbacks) => WebSocket;
    execute_deployment_exec: (body: ExecuteDeploymentExecBody, callbacks?: ExecuteCallbacks) => Promise<void>;
    execute_deployment_exec_stream: (body: ExecuteDeploymentExecBody) => Promise<AsyncIterable<string>>;
    connect_stack_exec: ({ query, ...callbacks }: {
        query: ConnectStackExecQuery;
    } & TerminalCallbacks) => WebSocket;
    execute_stack_exec: (body: ExecuteStackExecBody, callbacks?: ExecuteCallbacks) => Promise<void>;
    execute_stack_exec_stream: (body: ExecuteStackExecBody) => Promise<AsyncIterable<string>>;
};
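The `ConnectExecQuery` / `ExecuteExecBody` unions route one generic method to the right endpoint via the `type` tag. A small sketch of constructing the "stack" variant (values are illustrative only):

```ts
import type { ConnectExecQuery } from "./terminal";

// The "stack" variant carries a ConnectStackExecQuery payload.
const query: ConnectExecQuery = {
  type: "stack",
  query: { stack: "my-stack", service: "database", shell: "sh" },
};
```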
frontend/public/client/terminal.js (new file, 204 lines)
@@ -0,0 +1,204 @@
export const terminal_methods = (url, state) => {
    const connect_terminal = ({ query, on_message, on_login, on_open, on_close, }) => {
        const url_query = new URLSearchParams(query).toString();
        const ws = new WebSocket(url.replace("http", "ws") + "/ws/terminal?" + url_query);
        // Handle login on websocket open
        ws.onopen = () => {
            const login_msg = state.jwt
                ? {
                    type: "Jwt",
                    params: {
                        jwt: state.jwt,
                    },
                }
                : {
                    type: "ApiKeys",
                    params: {
                        key: state.key,
                        secret: state.secret,
                    },
                };
            ws.send(JSON.stringify(login_msg));
            on_open?.();
        };
        ws.onmessage = (e) => {
            if (e.data == "LOGGED_IN") {
                ws.binaryType = "arraybuffer";
                ws.onmessage = (e) => on_message?.(e);
                on_login?.();
                return;
            }
            else {
                on_message?.(e);
            }
        };
        ws.onclose = () => on_close?.();
        return ws;
    };
    const execute_terminal = async (request, callbacks) => {
        const stream = await execute_terminal_stream(request);
        for await (const line of stream) {
            if (line.startsWith("__KOMODO_EXIT_CODE")) {
                await callbacks?.onFinish?.(line.split(":")[1]);
                return;
            }
            else {
                await callbacks?.onLine?.(line);
            }
        }
        // This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
        await callbacks?.onFinish?.("Early exit without code");
    };
    const execute_terminal_stream = (request) => execute_stream("/terminal/execute", request);
    const connect_container_exec = ({ query, ...callbacks }) => connect_exec({ query: { type: "container", query }, ...callbacks });
    const connect_deployment_exec = ({ query, ...callbacks }) => connect_exec({ query: { type: "deployment", query }, ...callbacks });
    const connect_stack_exec = ({ query, ...callbacks }) => connect_exec({ query: { type: "stack", query }, ...callbacks });
    const connect_exec = ({ query: { type, query }, on_message, on_login, on_open, on_close, }) => {
        const url_query = new URLSearchParams(query).toString();
        const ws = new WebSocket(url.replace("http", "ws") + `/ws/${type}/terminal?` + url_query);
        // Handle login on websocket open
        ws.onopen = () => {
            const login_msg = state.jwt
                ? {
                    type: "Jwt",
                    params: {
                        jwt: state.jwt,
                    },
                }
                : {
                    type: "ApiKeys",
                    params: {
                        key: state.key,
                        secret: state.secret,
                    },
                };
            ws.send(JSON.stringify(login_msg));
            on_open?.();
        };
        ws.onmessage = (e) => {
            if (e.data == "LOGGED_IN") {
                ws.binaryType = "arraybuffer";
                ws.onmessage = (e) => on_message?.(e);
                on_login?.();
                return;
            }
            else {
                on_message?.(e);
            }
        };
        ws.onclose = () => on_close?.();
        return ws;
    };
    const execute_container_exec = (body, callbacks) => execute_exec({ type: "container", body }, callbacks);
    const execute_deployment_exec = (body, callbacks) => execute_exec({ type: "deployment", body }, callbacks);
    const execute_stack_exec = (body, callbacks) => execute_exec({ type: "stack", body }, callbacks);
    const execute_exec = async (request, callbacks) => {
        const stream = await execute_exec_stream(request);
        for await (const line of stream) {
            if (line.startsWith("__KOMODO_EXIT_CODE")) {
                await callbacks?.onFinish?.(line.split(":")[1]);
                return;
            }
            else {
                await callbacks?.onLine?.(line);
            }
        }
        // This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
        await callbacks?.onFinish?.("Early exit without code");
    };
    const execute_container_exec_stream = (body) => execute_exec_stream({ type: "container", body });
    const execute_deployment_exec_stream = (body) => execute_exec_stream({ type: "deployment", body });
    const execute_stack_exec_stream = (body) => execute_exec_stream({ type: "stack", body });
    const execute_exec_stream = (request) => execute_stream(`/terminal/execute/${request.type}`, request.body);
    const execute_stream = (path, request) => new Promise(async (res, rej) => {
        try {
            let response = await fetch(url + path, {
                method: "POST",
                body: JSON.stringify(request),
                headers: {
                    ...(state.jwt
                        ? {
                            authorization: state.jwt,
                        }
                        : state.key && state.secret
                            ? {
                                "x-api-key": state.key,
                                "x-api-secret": state.secret,
                            }
                            : {}),
                    "content-type": "application/json",
                },
            });
            if (response.status === 200) {
                if (response.body) {
                    const stream = response.body
                        .pipeThrough(new TextDecoderStream("utf-8"))
                        .pipeThrough(new TransformStream({
                        start(_controller) {
                            this.tail = "";
                        },
                        transform(chunk, controller) {
                            const data = this.tail + chunk; // prepend any carry-over
                            const parts = data.split(/\r?\n/); // split on CRLF or LF
                            this.tail = parts.pop(); // last item may be incomplete
                            for (const line of parts)
                                controller.enqueue(line);
                        },
                        flush(controller) {
                            if (this.tail)
                                controller.enqueue(this.tail); // final unterminated line
                        },
                    }));
                    res(stream);
                }
                else {
                    rej({
                        status: response.status,
                        result: { error: "No response body", trace: [] },
                    });
                }
            }
            else {
                try {
                    const result = await response.json();
                    rej({ status: response.status, result });
                }
                catch (error) {
                    rej({
                        status: response.status,
                        result: {
                            error: "Failed to get response body",
                            trace: [JSON.stringify(error)],
                        },
                        error,
                    });
                }
            }
        }
        catch (error) {
            rej({
                status: 1,
                result: {
                    error: "Request failed with error",
                    trace: [JSON.stringify(error)],
                },
                error,
            });
        }
    });
    return {
        connect_terminal,
        execute_terminal,
        execute_terminal_stream,
        connect_exec,
        connect_container_exec,
        execute_container_exec,
        execute_container_exec_stream,
        connect_deployment_exec,
        execute_deployment_exec,
        execute_deployment_exec_stream,
        connect_stack_exec,
        execute_stack_exec,
        execute_stack_exec_stream,
    };
};
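`terminal_methods` closes over the client's base `url` and mutable auth `state`, so the returned functions always see the current credentials. A sketch of using it directly (normally `KomodoClient` does this wiring; the URL and JWT are placeholders):

```ts
import { terminal_methods } from "./terminal.js";

// Assumption: JWT auth; key/secret would be set instead for API keys.
const { execute_container_exec_stream } = terminal_methods(
  "https://komodo.example.com",
  { jwt: "<jwt>", key: undefined, secret: undefined }
);

const stream = await execute_container_exec_stream({
  server: "my-server",
  container: "name",
  shell: "bash",
  command: "uname -a",
});
for await (const line of stream) console.log(line);
```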
frontend/public/client/types.d.ts (vendored, 218 lines changed)
@@ -226,6 +226,46 @@ export type ResourceTarget = {
    type: "ResourceSync";
    id: string;
};
/** Types of maintenance schedules */
export declare enum MaintenanceScheduleType {
    /** Daily at the specified time */
    Daily = "Daily",
    /** Weekly on the specified day and time */
    Weekly = "Weekly",
    /** One-time maintenance on a specific date and time */
    OneTime = "OneTime"
}
/** Represents a scheduled maintenance window */
export interface MaintenanceWindow {
    /** Name for the maintenance window (required) */
    name: string;
    /** Description of what maintenance is performed (optional) */
    description?: string;
    /**
     * The type of maintenance schedule:
     * - Daily (default)
     * - Weekly
     * - OneTime
     */
    schedule_type?: MaintenanceScheduleType;
    /** For Weekly schedules: Specify the day of the week (Monday, Tuesday, etc.) */
    day_of_week?: string;
    /** For OneTime window: ISO 8601 date format (YYYY-MM-DD) */
    date?: string;
    /** Start hour in 24-hour format (0-23) (optional, defaults to 0) */
    hour?: number;
    /** Start minute (0-59) (optional, defaults to 0) */
    minute?: number;
    /** Duration of the maintenance window in minutes (required) */
    duration_minutes: number;
    /**
     * Timezone for maintenance window specification.
     * If empty, will use Core timezone.
     */
    timezone?: string;
    /** Whether this maintenance window is currently enabled */
    enabled: boolean;
}
export interface AlerterConfig {
    /** Whether the alerter is enabled */
    enabled?: boolean;
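Putting the fields together, a weekly window might look like this (a sketch; the values are invented for illustration):

```ts
import { Types } from "komodo_client";

// Suppress alerts every Sunday 02:00-03:30, interpreted in New York time.
const window: Types.MaintenanceWindow = {
  name: "weekly-db-backup",
  description: "Planned database maintenance",
  schedule_type: Types.MaintenanceScheduleType.Weekly,
  day_of_week: "Sunday",
  hour: 2,
  minute: 0,
  duration_minutes: 90,
  timezone: "America/New_York",
  enabled: true,
};
```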
@@ -247,6 +287,8 @@ export interface AlerterConfig {
    resources?: ResourceTarget[];
    /** DON'T send alerts on these resources. */
    except_resources?: ResourceTarget[];
    /** Scheduled maintenance windows during which alerts will be suppressed. */
    maintenance_windows?: MaintenanceWindow[];
}
export type Alerter = Resource<AlerterConfig, undefined>;
export interface AlerterListItemInfo {
@@ -522,6 +564,8 @@ export interface BuildConfig {
    image_tag?: string;
    /** Configure quick links that are displayed in the resource header */
    links?: string[];
    /** Choose a Komodo Repo (Resource) to source the build files. */
    linked_repo?: string;
    /** The git provider domain. Default: github.com */
    git_provider: string;
    /**
@@ -651,6 +695,10 @@ export interface BuildListItemInfo {
    builder_id: string;
    /** Whether build is in files on host mode. */
    files_on_host: boolean;
    /** Whether build has UI defined dockerfile contents */
    dockerfile_contents: boolean;
    /** Linked repo, if one is attached. */
    linked_repo: string;
    /** The git provider domain */
    git_provider: string;
    /** The repo used as the source of the build */
@@ -1713,7 +1761,13 @@ export interface RepoConfig {
    branch: string;
    /** Optionally set a specific commit hash. */
    commit?: string;
    /** Explicitly specify the folder to clone the repo in. */
    /**
     * Explicitly specify the folder to clone the repo in.
     * - If absolute (has leading '/')
     *   - Used directly as the path
     * - If relative
     *   - Taken relative to Periphery `repo_dir` (ie `${root_directory}/repos`)
     */
    path?: string;
    /** Whether incoming webhooks actually trigger action. */
    webhook_enabled: boolean;
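A quick illustration of that resolution rule (paths invented; assumes a Periphery `root_directory` of `/etc/komodo`):

```ts
// Relative path: cloned under the Periphery repo_dir.
const relative = { path: "my-repo" }; // -> /etc/komodo/repos/my-repo
// Absolute path: used directly, as-is.
const absolute = { path: "/srv/repos/my-repo" }; // -> /srv/repos/my-repo
```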
@@ -1774,6 +1828,8 @@ export interface ResourceSyncActionState {
export type GetResourceSyncActionStateResponse = ResourceSyncActionState;
/** The sync configuration. */
export interface ResourceSyncConfig {
    /** Choose a Komodo Repo (Resource) to source the sync files. */
    linked_repo?: string;
    /** The git provider domain. Default: github.com */
    git_provider: string;
    /**
@@ -2019,6 +2075,8 @@ export interface ServerConfig {
    disk_warning: number;
    /** The percentage threshold which triggers CRITICAL state for DISK. */
    disk_critical: number;
    /** Scheduled maintenance windows during which alerts will be suppressed. */
    maintenance_windows?: MaintenanceWindow[];
}
export type Server = Resource<ServerConfig, undefined>;
export type GetServerResponse = Server;
@@ -2079,6 +2137,8 @@ export interface StackConfig {
    destroy_before_deploy?: boolean;
    /** Whether to skip secret interpolation into the stack environment variables. */
    skip_secret_interp?: boolean;
    /** Choose a Komodo Repo (Resource) to source the compose files. */
    linked_repo?: string;
    /** The git provider domain. Default: github.com */
    git_provider: string;
    /**
@@ -2095,12 +2155,17 @@ export interface StackConfig {
     * for the configured git provider.
     */
    git_account?: string;
    /** The Github repo used as the source of the build. */
    /**
     * The repo used as the source of the build.
     * {namespace}/{repo_name}
     */
    repo?: string;
    /** The branch of the repo. */
    branch: string;
    /** Optionally set a specific commit hash. */
    commit?: string;
    /** Optionally set a specific clone path */
    clone_path?: string;
    /**
     * By default, the Stack will `git pull` the repo after it is first cloned.
     * If this option is enabled, the repo folder will be deleted and recloned instead.
@@ -3484,6 +3549,8 @@ export interface ResourceSyncListItemInfo {
    managed: boolean;
    /** Resource paths to the files. */
    resource_path: string[];
    /** Linked repo, if one is attached. */
    linked_repo: string;
    /** The git provider domain. */
    git_provider: string;
    /** The Github repo used as the source of the sync resources */
@@ -3544,6 +3611,8 @@ export interface ServerListItemInfo {
    region: string;
    /** Address of the server. */
    address: string;
    /** The Komodo Periphery version of the server. */
    version: string;
    /** Whether server is configured to send unreachable alerts. */
    send_unreachable_alerts: boolean;
    /** Whether server is configured to send cpu alerts. */
@@ -3608,6 +3677,8 @@ export interface StackListItemInfo {
    files_on_host: boolean;
    /** Whether stack has file contents defined. */
    file_contents: boolean;
    /** Linked repo, if one is attached. */
    linked_repo: string;
    /** The git provider domain */
    git_provider: string;
    /** The configured repo */
@@ -4110,7 +4181,7 @@ export interface ConnectContainerExecQuery {
    server: string;
    /** The container name */
    container: string;
    /** The shell to connect to */
    /** The shell to use (eg. `sh` or `bash`) */
    shell: string;
}
/**
@@ -4121,7 +4192,7 @@ export interface ConnectContainerExecQuery {
export interface ConnectDeploymentExecQuery {
    /** Deployment Id or name */
    deployment: string;
    /** The shell to connect to */
    /** The shell to use (eg. `sh` or `bash`) */
    shell: string;
}
/**
@@ -4134,7 +4205,7 @@ export interface ConnectStackExecQuery {
    stack: string;
    /** The service name to connect to */
    service: string;
    /** The shell to connect to */
    /** The shell to use (eg. `sh` or `bash`) */
    shell: string;
}
/**
@@ -4896,6 +4967,46 @@ export interface ExchangeForJwt {
    /** The 'exchange token' */
    token: string;
}
/**
 * Execute a command in the given container's shell.
 * TODO: Document calling.
 */
export interface ExecuteContainerExecBody {
    /** Server Id or name */
    server: string;
    /** The container name */
    container: string;
    /** The shell to use (eg. `sh` or `bash`) */
    shell: string;
    /** The command to execute. */
    command: string;
}
/**
 * Execute a command in the given container's shell.
 * TODO: Document calling.
 */
export interface ExecuteDeploymentExecBody {
    /** Deployment Id or name */
    deployment: string;
    /** The shell to use (eg. `sh` or `bash`) */
    shell: string;
    /** The command to execute. */
    command: string;
}
/**
 * Execute a command in the given container's shell.
 * TODO: Document calling.
 */
export interface ExecuteStackExecBody {
    /** Stack Id or name */
    stack: string;
    /** The service name to connect to */
    service: string;
    /** The shell to use (eg. `sh` or `bash`) */
    shell: string;
    /** The command to execute. */
    command: string;
}
/**
 * Execute a terminal command on the given server.
 * TODO: Document calling.
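While the "Document calling" TODOs stand, the lib.d.ts docs earlier in this diff show the calling convention; condensed into a sketch (names invented for the example, assuming `komodo` is an initialized client):

```ts
import { Types } from "komodo_client";

const body: Types.ExecuteStackExecBody = {
  stack: "my-stack",
  service: "database",
  shell: "sh",
  command: "pg_isready",
};
await komodo.execute_stack_exec(body, {
  onLine: (line) => console.log(line),
  onFinish: (code) => console.log("exit:", code),
});
```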
@@ -5136,6 +5247,8 @@ export interface GetCoreInfoResponse {
    github_webhook_owners: string[];
    /** Whether to disable websocket automatic reconnect. */
    disable_websocket_reconnect: boolean;
    /** TZ identifier Core is using, if manually set. */
    timezone: string;
}
/** Get a specific deployment by name or id. Response: [Deployment]. */
export interface GetDeployment {
@@ -7298,6 +7411,16 @@ export type AuthRequest = {
    type: "GetUser";
    params: GetUser;
};
/** Days of the week */
export declare enum DayOfWeek {
    Monday = "Monday",
    Tuesday = "Tuesday",
    Wednesday = "Wednesday",
    Thursday = "Thursday",
    Friday = "Friday",
    Saturday = "Saturday",
    Sunday = "Sunday"
}
export type ExecuteRequest = {
    type: "StartContainer";
    params: StartContainer;
@@ -7479,6 +7602,91 @@ export type ExecuteRequest = {
    type: "RunSync";
    params: RunSync;
};
/**
 * One representative IANA zone for each distinct base UTC offset in the tz database.
 * https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
 *
 * The `serde`/`strum` renames ensure the canonical identifier is used
 * when serializing or parsing from a string such as `"Etc/UTC"`.
 */
export declare enum IanaTimezone {
    /** UTC−12:00 */
    EtcGmtMinus12 = "Etc/GMT+12",
    /** UTC−11:00 */
    PacificPagoPago = "Pacific/Pago_Pago",
    /** UTC−10:00 */
    PacificHonolulu = "Pacific/Honolulu",
    /** UTC−09:30 */
    PacificMarquesas = "Pacific/Marquesas",
    /** UTC−09:00 */
    AmericaAnchorage = "America/Anchorage",
    /** UTC−08:00 */
    AmericaLosAngeles = "America/Los_Angeles",
    /** UTC−07:00 */
    AmericaDenver = "America/Denver",
    /** UTC−06:00 */
    AmericaChicago = "America/Chicago",
    /** UTC−05:00 */
    AmericaNewYork = "America/New_York",
    /** UTC−04:00 */
    AmericaHalifax = "America/Halifax",
    /** UTC−03:30 */
    AmericaStJohns = "America/St_Johns",
    /** UTC−03:00 */
    AmericaSaoPaulo = "America/Sao_Paulo",
    /** UTC−02:00 */
    AmericaNoronha = "America/Noronha",
    /** UTC−01:00 */
    AtlanticAzores = "Atlantic/Azores",
    /** UTC±00:00 */
    EtcUtc = "Etc/UTC",
    /** UTC+01:00 */
    EuropeBerlin = "Europe/Berlin",
    /** UTC+02:00 */
    EuropeBucharest = "Europe/Bucharest",
    /** UTC+03:00 */
    EuropeMoscow = "Europe/Moscow",
    /** UTC+03:30 */
    AsiaTehran = "Asia/Tehran",
    /** UTC+04:00 */
    AsiaDubai = "Asia/Dubai",
    /** UTC+04:30 */
    AsiaKabul = "Asia/Kabul",
    /** UTC+05:00 */
    AsiaKarachi = "Asia/Karachi",
    /** UTC+05:30 */
    AsiaKolkata = "Asia/Kolkata",
    /** UTC+05:45 */
    AsiaKathmandu = "Asia/Kathmandu",
    /** UTC+06:00 */
    AsiaDhaka = "Asia/Dhaka",
    /** UTC+06:30 */
    AsiaYangon = "Asia/Yangon",
    /** UTC+07:00 */
    AsiaBangkok = "Asia/Bangkok",
    /** UTC+08:00 */
    AsiaShanghai = "Asia/Shanghai",
    /** UTC+08:45 */
    AustraliaEucla = "Australia/Eucla",
    /** UTC+09:00 */
    AsiaTokyo = "Asia/Tokyo",
    /** UTC+09:30 */
    AustraliaAdelaide = "Australia/Adelaide",
    /** UTC+10:00 */
    AustraliaSydney = "Australia/Sydney",
    /** UTC+10:30 */
    AustraliaLordHowe = "Australia/Lord_Howe",
    /** UTC+11:00 */
    PacificPortMoresby = "Pacific/Port_Moresby",
    /** UTC+12:00 */
    PacificAuckland = "Pacific/Auckland",
    /** UTC+12:45 */
    PacificChatham = "Pacific/Chatham",
    /** UTC+13:00 */
    PacificTongatapu = "Pacific/Tongatapu",
    /** UTC+14:00 */
    PacificKiritimati = "Pacific/Kiritimati"
}
/** Configuration for the registry to push the built image to. */
export type ImageRegistryLegacy1_14 =
/** Don't push the image to any registry */

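Since each enum value is a canonical IANA identifier, it can be passed anywhere a TZ string is expected, including the browser's Intl API (a small sketch):

```ts
import { Types } from "komodo_client";

const tz: string = Types.IanaTimezone.AmericaNewYork; // "America/New_York"
// Valid input for Intl, and for MaintenanceWindow.timezone:
const fmt = new Intl.DateTimeFormat("en-US", { timeZone: tz });
console.log(fmt.format(new Date()));
```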
frontend/public/client/types.js (vendored)
@@ -36,6 +36,16 @@ export var TagBehavior;
    /** Returns resources which have one or more of the tags */
    TagBehavior["Any"] = "Any";
})(TagBehavior || (TagBehavior = {}));
/** Types of maintenance schedules */
export var MaintenanceScheduleType;
(function (MaintenanceScheduleType) {
    /** Daily at the specified time */
    MaintenanceScheduleType["Daily"] = "Daily";
    /** Weekly on the specified day and time */
    MaintenanceScheduleType["Weekly"] = "Weekly";
    /** One-time maintenance on a specific date and time */
    MaintenanceScheduleType["OneTime"] = "OneTime";
})(MaintenanceScheduleType || (MaintenanceScheduleType = {}));
export var Operation;
(function (Operation) {
    Operation["None"] = "None";
@@ -519,6 +529,103 @@ export var SearchCombinator;
    SearchCombinator["Or"] = "Or";
    SearchCombinator["And"] = "And";
})(SearchCombinator || (SearchCombinator = {}));
/** Days of the week */
export var DayOfWeek;
(function (DayOfWeek) {
    DayOfWeek["Monday"] = "Monday";
    DayOfWeek["Tuesday"] = "Tuesday";
    DayOfWeek["Wednesday"] = "Wednesday";
    DayOfWeek["Thursday"] = "Thursday";
    DayOfWeek["Friday"] = "Friday";
    DayOfWeek["Saturday"] = "Saturday";
    DayOfWeek["Sunday"] = "Sunday";
})(DayOfWeek || (DayOfWeek = {}));
/**
 * One representative IANA zone for each distinct base UTC offset in the tz database.
 * https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
 *
 * The `serde`/`strum` renames ensure the canonical identifier is used
 * when serializing or parsing from a string such as `"Etc/UTC"`.
 */
export var IanaTimezone;
(function (IanaTimezone) {
    /** UTC−12:00 */
    IanaTimezone["EtcGmtMinus12"] = "Etc/GMT+12";
    /** UTC−11:00 */
    IanaTimezone["PacificPagoPago"] = "Pacific/Pago_Pago";
    /** UTC−10:00 */
    IanaTimezone["PacificHonolulu"] = "Pacific/Honolulu";
    /** UTC−09:30 */
    IanaTimezone["PacificMarquesas"] = "Pacific/Marquesas";
    /** UTC−09:00 */
    IanaTimezone["AmericaAnchorage"] = "America/Anchorage";
    /** UTC−08:00 */
    IanaTimezone["AmericaLosAngeles"] = "America/Los_Angeles";
    /** UTC−07:00 */
    IanaTimezone["AmericaDenver"] = "America/Denver";
    /** UTC−06:00 */
    IanaTimezone["AmericaChicago"] = "America/Chicago";
    /** UTC−05:00 */
    IanaTimezone["AmericaNewYork"] = "America/New_York";
    /** UTC−04:00 */
    IanaTimezone["AmericaHalifax"] = "America/Halifax";
    /** UTC−03:30 */
    IanaTimezone["AmericaStJohns"] = "America/St_Johns";
    /** UTC−03:00 */
    IanaTimezone["AmericaSaoPaulo"] = "America/Sao_Paulo";
    /** UTC−02:00 */
    IanaTimezone["AmericaNoronha"] = "America/Noronha";
    /** UTC−01:00 */
    IanaTimezone["AtlanticAzores"] = "Atlantic/Azores";
    /** UTC±00:00 */
    IanaTimezone["EtcUtc"] = "Etc/UTC";
    /** UTC+01:00 */
    IanaTimezone["EuropeBerlin"] = "Europe/Berlin";
    /** UTC+02:00 */
    IanaTimezone["EuropeBucharest"] = "Europe/Bucharest";
    /** UTC+03:00 */
    IanaTimezone["EuropeMoscow"] = "Europe/Moscow";
    /** UTC+03:30 */
    IanaTimezone["AsiaTehran"] = "Asia/Tehran";
    /** UTC+04:00 */
    IanaTimezone["AsiaDubai"] = "Asia/Dubai";
    /** UTC+04:30 */
    IanaTimezone["AsiaKabul"] = "Asia/Kabul";
    /** UTC+05:00 */
    IanaTimezone["AsiaKarachi"] = "Asia/Karachi";
    /** UTC+05:30 */
    IanaTimezone["AsiaKolkata"] = "Asia/Kolkata";
    /** UTC+05:45 */
    IanaTimezone["AsiaKathmandu"] = "Asia/Kathmandu";
    /** UTC+06:00 */
    IanaTimezone["AsiaDhaka"] = "Asia/Dhaka";
    /** UTC+06:30 */
    IanaTimezone["AsiaYangon"] = "Asia/Yangon";
    /** UTC+07:00 */
    IanaTimezone["AsiaBangkok"] = "Asia/Bangkok";
    /** UTC+08:00 */
    IanaTimezone["AsiaShanghai"] = "Asia/Shanghai";
    /** UTC+08:45 */
    IanaTimezone["AustraliaEucla"] = "Australia/Eucla";
    /** UTC+09:00 */
    IanaTimezone["AsiaTokyo"] = "Asia/Tokyo";
    /** UTC+09:30 */
    IanaTimezone["AustraliaAdelaide"] = "Australia/Adelaide";
    /** UTC+10:00 */
    IanaTimezone["AustraliaSydney"] = "Australia/Sydney";
    /** UTC+10:30 */
    IanaTimezone["AustraliaLordHowe"] = "Australia/Lord_Howe";
    /** UTC+11:00 */
    IanaTimezone["PacificPortMoresby"] = "Pacific/Port_Moresby";
    /** UTC+12:00 */
    IanaTimezone["PacificAuckland"] = "Pacific/Auckland";
    /** UTC+12:45 */
    IanaTimezone["PacificChatham"] = "Pacific/Chatham";
    /** UTC+13:00 */
    IanaTimezone["PacificTongatapu"] = "Pacific/Tongatapu";
    /** UTC+14:00 */
    IanaTimezone["PacificKiritimati"] = "Pacific/Kiritimati";
})(IanaTimezone || (IanaTimezone = {}));
/** The specific types of permission that a User or UserGroup can have on a resource. */
export var SpecificPermission;
(function (SpecificPermission) {

frontend/public/index.d.ts (vendored, 10 lines changed)
@@ -1,4 +1,4 @@
import { KomodoClient, Types as KomodoTypes } from "./client/lib.js";
import { KomodoClient as Client, Types as KomodoTypes } from "./client/lib.js";
import "./deno.d.ts";

declare global {
@@ -753,7 +753,11 @@ declare global {
    }

    /** Pre initialized Komodo client */
    var komodo: ReturnType<typeof KomodoClient>;
    var komodo: ReturnType<typeof Client>;
    /** KomodoClient initializer */
    var KomodoClient: typeof Client;
    /** All Komodo Types */
    export import Types = KomodoTypes;
    /** YAML parsing utilities */
    var YAML: {
    /**
@@ -918,6 +922,4 @@ declare global {
     */
    parseCargoToml: (cargoToml: string) => CargoToml;
    };
    /** All Komodo Types */
    export import Types = KomodoTypes;
}

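These globals let an Action script use the pre-initialized `komodo` client and also initialize a client against another Core instance. A sketch (URL and JWT are placeholders, and the `read` call assumes the client's standard read helper):

```ts
// Inside a Komodo Action (globals provided by the runtime):
const info = await komodo.read("GetCoreInfo", {});
console.log("core timezone:", info.timezone);

// Assumption for the example: a second instance and credentials for it.
const other = KomodoClient("https://other-core.example.com", {
  type: "jwt",
  params: { jwt: "<jwt>" },
});
```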
frontend/src/components/config/linked_repo.tsx (new file, 58 lines)
@@ -0,0 +1,58 @@
import { ResourceLink, ResourceSelector } from "@components/resources/common";
import { ConfigItem } from "./util";

export const LinkedRepoConfig = ({
  linked_repo,
  repo_linked,
  set,
  disabled,
}: {
  linked_repo: string | undefined;
  repo_linked: boolean;
  set: (update: {
    linked_repo: string;
    // Set other props back to default.
    git_provider: string;
    git_account: string;
    git_https: boolean;
    repo: string;
    branch: string;
    commit: string;
  }) => void;
  disabled: boolean;
}) => {
  return (
    <ConfigItem
      label={
        linked_repo ? (
          <div className="flex gap-3 text-lg font-bold">
            Repo:
            <ResourceLink type="Repo" id={linked_repo} />
          </div>
        ) : (
          "Select Repo"
        )
      }
      description={`Select an existing Repo to attach${!repo_linked ? ", or configure the repo below" : ""}.`}
    >
      <ResourceSelector
        type="Repo"
        selected={linked_repo}
        onSelect={(linked_repo) =>
          set({
            linked_repo,
            // Set other props back to default.
            git_provider: "github.com",
            git_account: "",
            git_https: true,
            repo: linked_repo ? "" : "namespace/repo",
            branch: "main",
            commit: "",
          })
        }
        disabled={disabled}
        align="start"
      />
    </ConfigItem>
  );
};
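A usage sketch for the consolidated component (the surrounding `update`/`config` state plumbing is assumed, not part of this diff):

```tsx
<LinkedRepoConfig
  linked_repo={update.linked_repo ?? config.linked_repo}
  repo_linked={!!(update.linked_repo ?? config.linked_repo)}
  set={(partial) => setUpdate({ ...update, ...partial })}
  disabled={disabled}
/>
```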
frontend/src/components/config/maintenance.tsx (new file, 511 lines)
@@ -0,0 +1,511 @@
import { Button } from "@ui/button";
import { Input } from "@ui/input";
import { Switch } from "@ui/switch";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@ui/select";
import {
  Dialog,
  DialogContent,
  DialogFooter,
  DialogHeader,
  DialogTitle,
  DialogTrigger,
} from "@ui/dialog";
import { Badge } from "@ui/badge";
import { DataTable, SortableHeader } from "@ui/data-table";
import { Types } from "komodo_client";
import { useState } from "react";
import {
  PlusCircle,
  Pen,
  Trash2,
  Clock,
  Calendar,
  CalendarDays,
} from "lucide-react";
import { TimezoneSelector } from "@components/util";

export const MaintenanceWindows = ({
  windows,
  onUpdate,
  disabled,
}: {
  windows: Types.MaintenanceWindow[];
  onUpdate: (windows: Types.MaintenanceWindow[]) => void;
  disabled: boolean;
}) => {
  const [isCreating, setIsCreating] = useState(false);
  const [editingWindow, setEditingWindow] = useState<
    [number, Types.MaintenanceWindow] | null
  >(null);

  const addWindow = (newWindow: Types.MaintenanceWindow) => {
    onUpdate([...windows, newWindow]);
    setIsCreating(false);
  };

  const updateWindow = (
    index: number,
    updatedWindow: Types.MaintenanceWindow
  ) => {
    onUpdate(windows.map((w, i) => (i === index ? updatedWindow : w)));
    setEditingWindow(null);
  };

  const deleteWindow = (index: number) => {
    onUpdate(windows.filter((_, i) => i !== index));
  };

  const toggleWindow = (index: number, enabled: boolean) => {
    onUpdate(windows.map((w, i) => (i === index ? { ...w, enabled } : w)));
  };

  return (
    <div className="space-y-4">
      {!disabled && (
        <Dialog open={isCreating} onOpenChange={setIsCreating}>
          <DialogTrigger asChild>
            <Button variant="secondary" className="flex items-center gap-2">
              <PlusCircle className="w-4 h-4" />
              Add Maintenance Window
            </Button>
          </DialogTrigger>
          <DialogContent className="max-w-2xl">
            <MaintenanceWindowForm
              onSave={addWindow}
              onCancel={() => setIsCreating(false)}
            />
          </DialogContent>
        </Dialog>
      )}

      {windows.length > 0 && (
        <DataTable
          tableKey="maintenance-windows"
          data={windows}
          columns={[
            {
              accessorKey: "name",
              header: ({ column }) => (
                <SortableHeader column={column} title="Name" />
              ),
              cell: ({ row }) => (
                <div className="flex items-center gap-2">
                  <ScheduleIcon
                    scheduleType={
                      row.original.schedule_type ??
                      Types.MaintenanceScheduleType.Daily
                    }
                  />
                  <span className="font-medium">{row.original.name}</span>
                </div>
              ),
              size: 200,
            },
            {
              accessorKey: "schedule_type",
              header: ({ column }) => (
                <SortableHeader column={column} title="Schedule" />
              ),
              cell: ({ row }) => (
                <span className="text-sm">
                  <ScheduleDescription window={row.original} />
                </span>
              ),
              size: 150,
            },
            {
              accessorKey: "start_time",
              header: ({ column }) => (
                <SortableHeader column={column} title="Start Time" />
              ),
              cell: ({ row }) => (
                <span className="text-sm font-mono">
                  {formatTime(row.original)}
                </span>
              ),
              size: 180,
            },
            {
              accessorKey: "duration_minutes",
              header: ({ column }) => (
                <SortableHeader column={column} title="Duration" />
              ),
              cell: ({ row }) => (
                <span className="text-sm">
                  {row.original.duration_minutes} min
                </span>
              ),
              size: 100,
            },
            {
              accessorKey: "enabled",
              header: ({ column }) => (
                <SortableHeader column={column} title="Status" />
              ),
              cell: ({ row }) => (
                <div className="flex items-center gap-2">
                  <Badge
                    variant={row.original.enabled ? "default" : "secondary"}
                  >
                    {row.original.enabled ? "Enabled" : "Disabled"}
                  </Badge>
                  {!disabled && (
                    <Switch
                      checked={row.original.enabled}
                      onCheckedChange={(enabled) =>
                        toggleWindow(row.index, enabled)
                      }
                    />
                  )}
                </div>
              ),
              size: 120,
            },
            {
              id: "actions",
              header: "Actions",
              cell: ({ row }) =>
                !disabled && (
                  <div className="flex items-center gap-1">
                    <Button
                      variant="ghost"
                      size="sm"
                      onClick={() =>
                        setEditingWindow([row.index, row.original])
                      }
                      className="h-8 w-8 p-0"
                    >
                      <Pen className="w-4 h-4" />
                    </Button>
                    <Button
                      variant="ghost"
                      size="sm"
                      onClick={() => deleteWindow(row.index)}
                      className="h-8 w-8 p-0 text-destructive hover:text-destructive"
                    >
                      <Trash2 className="w-4 h-4" />
                    </Button>
                  </div>
                ),
              size: 100,
            },
          ]}
        />
      )}

      {editingWindow && (
        <Dialog
          open={!!editingWindow}
          onOpenChange={() => setEditingWindow(null)}
        >
          <DialogContent className="max-w-2xl">
            <MaintenanceWindowForm
              initialData={editingWindow[1]}
              onSave={(window) => updateWindow(editingWindow[0], window)}
              onCancel={() => setEditingWindow(null)}
            />
          </DialogContent>
        </Dialog>
      )}
    </div>
  );
};

const ScheduleIcon = ({
  scheduleType,
}: {
  scheduleType: Types.MaintenanceScheduleType;
}) => {
  switch (scheduleType) {
    case "Daily":
      return <Clock className="w-4 h-4" />;
    case "Weekly":
      return <Calendar className="w-4 h-4" />;
    case "OneTime":
      return <CalendarDays className="w-4 h-4" />;
    default:
      return <Clock className="w-4 h-4" />;
  }
};

const ScheduleDescription = ({
  window,
}: {
  window: Types.MaintenanceWindow;
}): string => {
  switch (window.schedule_type) {
    case "Daily":
      return "Daily";
    case "Weekly":
      return `Weekly (${window.day_of_week || "Monday"})`;
    case "OneTime":
      return `One-time (${window.date || "No date"})`;
    default:
      return "Unknown";
  }
};

const formatTime = (window: Types.MaintenanceWindow) => {
  const hours = window.hour!.toString().padStart(2, "0");
  const minutes = window.minute!.toString().padStart(2, "0");
  return `${hours}:${minutes} ${window.timezone ? `(${window.timezone})` : ""}`;
};

interface MaintenanceWindowFormProps {
  initialData?: Types.MaintenanceWindow;
  onSave: (window: Types.MaintenanceWindow) => void;
  onCancel: () => void;
}

const MaintenanceWindowForm = ({
  initialData,
  onSave,
  onCancel,
}: MaintenanceWindowFormProps) => {
  const [formData, setFormData] = useState<Types.MaintenanceWindow>(
    initialData || {
      name: "",
      description: "",
      schedule_type: Types.MaintenanceScheduleType.Daily,
      day_of_week: "",
      date: "",
      hour: 5,
      minute: 0,
      timezone: "",
      duration_minutes: 60,
      enabled: true,
    }
  );

  const [errors, setErrors] = useState<Record<string, string>>({});

  const validate = (): boolean => {
    const newErrors: Record<string, string> = {};

    if (!formData.name.trim()) {
      newErrors.name = "Name is required";
    }

    if (formData.hour! < 0 || formData.hour! > 23) {
      newErrors.hour = "Hour must be between 0 and 23";
    }

    if (formData.minute! < 0 || formData.minute! > 59) {
      newErrors.minute = "Minute must be between 0 and 59";
    }

    if (formData.duration_minutes <= 0) {
      newErrors.duration = "Duration must be greater than 0";
    }

    if (formData.schedule_type && formData.schedule_type === "OneTime") {
      const date = formData.date;
      if (!date || !/^\d{4}-\d{2}-\d{2}$/.test(date)) {
        newErrors.date = "Date must be in YYYY-MM-DD format";
      }
    }

    setErrors(newErrors);
    return Object.keys(newErrors).length === 0;
  };

  const handleSave = () => {
    if (validate()) {
      onSave(formData);
    }
  };

  const updateScheduleType = (schedule_type: Types.MaintenanceScheduleType) => {
    setFormData((data) => ({
      ...data,
      schedule_type,
      day_of_week:
        schedule_type === Types.MaintenanceScheduleType.Weekly ? "Monday" : "",
      date:
        schedule_type === Types.MaintenanceScheduleType.OneTime
          ? new Date().toISOString().split("T")[0]
          : "",
    }));
  };

  return (
    <>
      <DialogHeader>
        <DialogTitle>
          {initialData
            ? "Edit Maintenance Window"
            : "Create Maintenance Window"}
        </DialogTitle>
      </DialogHeader>

      <div className="space-y-4">
        <div>
          <label className="text-sm font-medium">Name</label>
          <Input
            value={formData.name}
            onChange={(e) =>
              setFormData((data) => ({ ...data, name: e.target.value }))
            }
            placeholder="e.g., Daily Backup"
            className={errors.name ? "border-destructive" : ""}
          />
          {errors.name && (
            <p className="text-sm text-destructive mt-1">{errors.name}</p>
          )}
        </div>

        <div>
          <label className="text-sm font-medium">Schedule Type</label>
          <Select
            value={formData.schedule_type}
            onValueChange={(value: Types.MaintenanceScheduleType) =>
              updateScheduleType(value)
            }
          >
            <SelectTrigger>
              <SelectValue />
            </SelectTrigger>
            <SelectContent>
              {Object.values(Types.MaintenanceScheduleType).map(
                (schedule_type) => (
                  <SelectItem key={schedule_type} value={schedule_type}>
                    {schedule_type}
                  </SelectItem>
                )
              )}
            </SelectContent>
          </Select>
        </div>

        {formData.schedule_type === "Weekly" && (
          <div>
            <label className="text-sm font-medium">Day of Week</label>
            <Select
              value={formData.day_of_week || "Monday"}
              onValueChange={(value: Types.DayOfWeek) =>
                setFormData((data) => ({
                  ...data,
                  day_of_week: value,
                }))
              }
            >
              <SelectTrigger>
                <SelectValue />
              </SelectTrigger>
              <SelectContent>
                {Object.values(Types.DayOfWeek).map((day_of_week) => (
                  <SelectItem key={day_of_week} value={day_of_week}>
                    {day_of_week}
                  </SelectItem>
                ))}
              </SelectContent>
            </Select>
          </div>
        )}

        {formData.schedule_type === "OneTime" && (
          <div>
            <label className="text-sm font-medium">Date</label>
            <Input
              type="date"
              value={formData.date || new Date().toISOString().split("T")[0]}
              onChange={(e) =>
                setFormData({
                  ...formData,
                  date: e.target.value,
                })
              }
              className={errors.date ? "border-destructive" : ""}
            />
            {errors.date && (
              <p className="text-sm text-destructive mt-1">{errors.date}</p>
            )}
          </div>
        )}

        <div className="grid grid-cols-2 gap-4">
          <div>
            <label className="text-sm font-medium">Start Time</label>
            <Input
              type="time"
              value={`${formData.hour!.toString().padStart(2, "0")}:${formData.minute!.toString().padStart(2, "0")}`}
              onChange={(e) => {
                const [hour, minute] = e.target.value
                  .split(":")
                  .map((n) => parseInt(n) || 0);
                setFormData({
                  ...formData,
                  hour,
                  minute,
                });
              }}
              className={
                errors.hour || errors.minute ? "border-destructive" : ""
              }
            />
            {(errors.hour || errors.minute) && (
              <p className="text-sm text-destructive mt-1">
                {errors.hour || errors.minute}
              </p>
            )}
          </div>
          <div>
            <label className="text-sm font-medium">Timezone</label>
            <TimezoneSelector
              timezone={formData.timezone ?? ""}
              onChange={(timezone) =>
                setFormData((data) => ({ ...data, timezone }))
              }
              triggerClassName="w-full"
            />
          </div>
        </div>

        <div>
          <label className="text-sm font-medium">Duration (minutes)</label>
          <Input
            type="number"
            min={1}
            value={formData.duration_minutes}
            onChange={(e) =>
              setFormData((data) => ({
                ...data,
                duration_minutes: parseInt(e.target.value) || 60,
              }))
            }
            className={errors.duration ? "border-destructive" : ""}
          />
          {errors.duration && (
            <p className="text-sm text-destructive mt-1">{errors.duration}</p>
          )}
        </div>

        <div>
          <label className="text-sm font-medium">Description (optional)</label>
          <Input
            value={formData.description}
            onChange={(e) =>
              setFormData((data) => ({ ...data, description: e.target.value }))
            }
            placeholder="e.g., Automated backup process"
          />
        </div>
      </div>

      <DialogFooter>
        <Button variant="outline" onClick={onCancel}>
          Cancel
        </Button>
        <Button onClick={handleSave}>
          {initialData ? "Update" : "Create"}
        </Button>
      </DialogFooter>
    </>
  );
};
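The component above only edits the window list; the actual alert suppression runs in Core. As a rough TypeScript sketch of the semantics (a hypothetical helper, not part of this diff; it ignores the `timezone` field and windows that cross midnight, both of which Core resolves properly):

// Hypothetical: does `now` fall inside an enabled window?
function isWindowActive(w: Types.MaintenanceWindow, now: Date): boolean {
  if (!w.enabled) return false;
  const matchesDay =
    w.schedule_type === Types.MaintenanceScheduleType.Daily ||
    (w.schedule_type === Types.MaintenanceScheduleType.Weekly &&
      now.toLocaleDateString("en-US", { weekday: "long" }) === w.day_of_week) ||
    (w.schedule_type === Types.MaintenanceScheduleType.OneTime &&
      now.toISOString().slice(0, 10) === w.date);
  if (!matchesDay) return false;
  // Compare minutes-since-midnight against [start, start + duration).
  const start = (w.hour ?? 0) * 60 + (w.minute ?? 0);
  const current = now.getHours() * 60 + now.getMinutes();
  return current >= start && current < start + w.duration_minutes;
}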
@@ -28,6 +28,7 @@ import {
   SelectTrigger,
   SelectValue,
 } from "@ui/select";
+import { TimezoneSelector } from "@components/util";

 const ACTION_GIT_PROVIDER = "Action";

@@ -189,11 +190,21 @@ export const ActionConfig = ({ id }: { id: string }) => {
             ? "0 0 0 ? * SUN"
             : "Enter English expression",
         },
-        schedule_timezone: {
-          label: "Timezone",
-          description:
-            "Optional. Enter specific IANA timezone for schedule expression. If not provided, uses the Core timezone.",
-          placeholder: "Enter IANA timezone",
+        schedule_timezone: (timezone, set) => {
+          return (
+            <ConfigItem
+              label="Timezone"
+              description="Select specific IANA timezone for schedule expression."
+            >
+              <TimezoneSelector
+                timezone={timezone ?? ""}
+                onChange={(schedule_timezone) =>
+                  set({ schedule_timezone })
+                }
+                disabled={disabled}
+              />
+            </ConfigItem>
+          );
         },
         schedule_alert: {
           description: "Send an alert when the scheduled run occurs",
@@ -4,6 +4,7 @@ import { Types } from "komodo_client";
 import { EndpointConfig } from "./endpoint";
 import { AlertTypeConfig } from "./alert_types";
 import { ResourcesConfig } from "./resources";
+import { MaintenanceWindows } from "@components/config/maintenance";

 export const AlerterConfig = ({ id }: { id: string }) => {
   const { canWrite } = usePermissions({ type: "Alerter", id });
@@ -82,6 +83,31 @@ export const AlerterConfig = ({ id }: { id: string }) => {
             ),
           },
         },
+        {
+          label: "Maintenance",
+          boldLabel: false,
+          description: (
+            <>
+              Configure maintenance windows to temporarily disable alerts
+              during scheduled maintenance periods. When a maintenance window
+              is active, alerts which would be sent by this alerter will be
+              suppressed.
+            </>
+          ),
+          components: {
+            maintenance_windows: (values, set) => {
+              return (
+                <MaintenanceWindows
+                  windows={values ?? []}
+                  onUpdate={(maintenance_windows) =>
+                    set({ maintenance_windows })
+                  }
+                  disabled={disabled}
+                />
+              );
+            },
+          },
+        },
       ],
     }}
   />
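At dispatch time, the suppression described above amounts to a filter over the alerter's windows. A condensed, hypothetical sketch, reusing the `isWindowActive` helper sketched after maintenance.tsx (the `alerter` variable stands in for the loaded resource):

// Skip sending if any enabled maintenance window is currently active.
const inMaintenance = (alerter.config.maintenance_windows ?? []).some((w) =>
  isWindowActive(w, new Date())
);
if (inMaintenance) return; // alert suppressed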
@@ -38,6 +38,7 @@ import {
   SelectTrigger,
   SelectValue,
 } from "@ui/select";
+import { LinkedRepoConfig } from "@components/config/linked_repo";

 type BuildMode = "UI Defined" | "Files On Server" | "Git Repo" | undefined;
 const BUILD_MODES: BuildMode[] = ["UI Defined", "Files On Server", "Git Repo"];
@@ -47,7 +48,11 @@ function getBuildMode(
   config: Types.BuildConfig
 ): BuildMode {
   if (update.files_on_host ?? config.files_on_host) return "Files On Server";
-  if (update.repo ?? config.repo) return "Git Repo";
+  if (
+    (update.repo ?? config.repo) ||
+    (update.linked_repo ?? config.linked_repo)
+  )
+    return "Git Repo";
   if (update.dockerfile ?? config.dockerfile) return "UI Defined";
   return undefined;
 }
@@ -420,6 +425,7 @@ export const BuildConfig = ({
       advanced,
     };
   } else if (mode === "Git Repo") {
+    const repo_linked = !!(update.linked_repo ?? config.linked_repo);
     components = {
       "": [
         builder_component,
@@ -434,46 +440,59 @@
           />
         ),
         components: {
-          git_provider: (provider, set) => {
-            const https = update.git_https ?? config.git_https;
-            return (
-              <ProviderSelectorConfig
-                account_type="git"
-                selected={provider}
-                disabled={disabled}
-                onSelect={(git_provider) => set({ git_provider })}
-                https={https}
-                onHttpsSwitch={() => set({ git_https: !https })}
-              />
-            );
-          },
-          git_account: (account, set) => (
-            <AccountSelectorConfig
-              id={update.builder_id ?? config.builder_id ?? undefined}
-              type="Builder"
-              account_type="git"
-              provider={update.git_provider ?? config.git_provider}
-              selected={account}
-              onSelect={(git_account) => set({ git_account })}
-              disabled={disabled}
-              placeholder="None"
-            />
-          ),
-          repo: {
-            placeholder: "Enter repo",
-            description:
-              "The repo path on the provider. {namespace}/{repo_name}",
-          },
-          branch: {
-            placeholder: "Enter branch",
-            description: "Select a custom branch, or default to 'main'.",
-          },
-          commit: {
-            label: "Commit Hash",
-            placeholder: "Input commit hash",
-            description:
-              "Optional. Switch to a specific commit hash after cloning the branch.",
-          },
+          linked_repo: (linked_repo, set) => (
+            <LinkedRepoConfig
+              linked_repo={linked_repo}
+              repo_linked={repo_linked}
+              set={set}
+              disabled={disabled}
+            />
+          ),
+          ...(!repo_linked
+            ? {
+                git_provider: (provider, set) => {
+                  const https = update.git_https ?? config.git_https;
+                  return (
+                    <ProviderSelectorConfig
+                      account_type="git"
+                      selected={provider}
+                      disabled={disabled}
+                      onSelect={(git_provider) => set({ git_provider })}
+                      https={https}
+                      onHttpsSwitch={() => set({ git_https: !https })}
+                    />
+                  );
+                },
+                git_account: (account, set) => (
+                  <AccountSelectorConfig
+                    id={update.builder_id ?? config.builder_id ?? undefined}
+                    type="Builder"
+                    account_type="git"
+                    provider={update.git_provider ?? config.git_provider}
+                    selected={account}
+                    onSelect={(git_account) => set({ git_account })}
+                    disabled={disabled}
+                    placeholder="None"
+                  />
+                ),
+                repo: {
+                  placeholder: "Enter repo",
+                  description:
+                    "The repo path on the provider. {namespace}/{repo_name}",
+                },
+                branch: {
+                  placeholder: "Enter branch",
+                  description:
+                    "Select a custom branch, or default to 'main'.",
+                },
+                commit: {
+                  label: "Commit Hash",
+                  placeholder: "Input commit hash",
+                  description:
+                    "Optional. Switch to a specific commit hash after cloning the branch.",
+                },
              }
            : {}),
         },
       },
       {
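The `getBuildMode` change above means a Build now counts as "Git Repo" when either an inline repo or a linked Repo is set, with "Files On Server" still taking precedence. A quick sketch, assuming the function were callable in isolation (only the fields it reads are populated; the cast keeps the example short):

// Baseline config with no source configured.
const config = {
  files_on_host: false,
  repo: "",
  linked_repo: "",
  dockerfile: "",
} as Types.BuildConfig;

getBuildMode({}, { ...config, repo: "namespace/repo" });    // "Git Repo"
getBuildMode({}, { ...config, linked_repo: "repo-id" });    // "Git Repo" (new in this PR)
getBuildMode({ files_on_host: true }, config);              // "Files On Server" still wins
getBuildMode({}, { ...config, dockerfile: "FROM alpine" }); // "UI Defined"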
@@ -58,7 +58,8 @@ export const BuildInfo = ({
   ).data;

   const file_on_host = build?.config?.files_on_host ?? false;
-  const git_repo = build?.config?.repo ? true : false;
+  const git_repo =
+    build?.config?.repo || build?.config?.linked_repo ? true : false;
   const canEdit = canWrite && (file_on_host || git_repo);

   const remote_path = build?.info?.remote_path;
@@ -131,7 +131,7 @@ export const ResourceSelector = ({
       <PopoverTrigger asChild>
         <Button
           variant="secondary"
-          className="flex justify-start gap-2 w-fit max-w-[200px]"
+          className="flex justify-start gap-2 w-fit max-w-[350px]"
           disabled={disabled}
         >
           {name || `Select ${type}`}
@@ -153,6 +153,17 @@ export const ResourceSelector = ({
           </CommandEmpty>

           <CommandGroup>
+            {!search && (
+              <CommandItem
+                onSelect={() => {
+                  onSelect && onSelect("");
+                  setOpen(false);
+                }}
+                className="flex items-center justify-between cursor-pointer"
+              >
+                <div className="p-1">None</div>
+              </CommandItem>
+            )}
             {filtered.map((resource) => (
               <CommandItem
                 key={resource.id}
@@ -391,6 +402,7 @@ export const StandardSource = ({
 }: {
   info:
     | {
+        linked_repo: string;
         files_on_host: boolean;
         repo: string;
         repo_link: string;
@@ -408,6 +420,9 @@ export const StandardSource = ({
       </div>
     );
   }
+  if (info.linked_repo) {
+    return <ResourceLink type="Repo" id={info.linked_repo} />;
+  }
   if (info.repo) {
     return <RepoLink repo={info.repo} link={info.repo_link} />;
   }
@@ -29,7 +29,7 @@ import {
   SearchX,
 } from "lucide-react";
 import { useToast } from "@ui/use-toast";
-import { TextUpdateMenuMonaco } from "@components/util";
+import { TextUpdateMenuMonaco, TimezoneSelector } from "@components/util";
 import { Card } from "@ui/card";
 import { filterBySplit } from "@lib/utils";
 import { Popover, PopoverContent, PopoverTrigger } from "@ui/popover";
@@ -330,11 +330,21 @@ export const ProcedureConfig = ({ id }: { id: string }) => {
             ? "0 0 0 ? * SUN"
             : "Enter English expression",
         },
-        schedule_timezone: {
-          label: "Timezone",
-          description:
-            "Optional. Enter specific IANA timezone for schedule expression. If not provided, uses the Core timezone.",
-          placeholder: "Enter IANA timezone",
+        schedule_timezone: (timezone, set) => {
+          return (
+            <ConfigItem
+              label="Timezone"
+              description="Select specific IANA timezone for schedule expression."
+            >
+              <TimezoneSelector
+                timezone={timezone ?? ""}
+                onChange={(schedule_timezone) =>
+                  set({ schedule_timezone })
+                }
+                disabled={disabled}
+              />
+            </ConfigItem>
+          );
         },
         schedule_alert: {
           description: "Send an alert when the scheduled run occurs",
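The selector replaces free-text timezone entry in both the Procedure and Action schedule config. The value it pushes is still a plain IANA string, and per the previous field description an empty value falls back to the Core timezone. For example:

// What the TimezoneSelector's onChange pushes through `set`:
set({ schedule_timezone: "Europe/Berlin" }); // explicit IANA zone
set({ schedule_timezone: "" });              // fall back to the Core timezone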
@@ -176,8 +176,18 @@ export const RepoConfig = ({ id }: { id: string }) => {
           label: "Clone Path",
           boldLabel: true,
           placeholder: "/clone/path/on/host",
-          description:
-            "Explicitly specify the folder on the host to clone the repo in.",
+          description: (
+            <div className="flex flex-col gap-0">
+              <div>
+                Explicitly specify the folder on the host to clone the
+                repo in.
+              </div>
+              <div>
+                If <span className="font-bold">relative</span> (no leading
+                '/'), relative to {"$root_directory/repos/" + repo.name}
+              </div>
+            </div>
+          ),
         },
       },
     },
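To make the relative clone path rule above concrete, here is a hypothetical helper mirroring the stated behavior (the real resolution happens in Core/Periphery, and the `/etc/komodo` root below is an assumed example value):

// Hypothetical resolution following the description in the config above.
function resolveClonePath(root: string, repoName: string, clonePath?: string) {
  const base = `${root}/repos/${repoName}`;
  if (!clonePath) return base;                     // default clone location
  if (clonePath.startsWith("/")) return clonePath; // absolute: used as-is
  return `${base}/${clonePath}`;                   // relative to the default
}

// resolveClonePath("/etc/komodo", "my-repo")             => "/etc/komodo/repos/my-repo"
// resolveClonePath("/etc/komodo", "my-repo", "/srv/app") => "/srv/app"
// resolveClonePath("/etc/komodo", "my-repo", "sub/dir")  => "/etc/komodo/repos/my-repo/sub/dir"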
Some files were not shown because too many files have changed in this diff.