Compare commits

..

42 Commits

Author SHA1 Message Date
Maxwell Becker
41d1ff9760 1.15.9 (#127)
* add close alert threshold to prevent Ok - Warning back and forth

* remove part about repo being deleted, no longer behavior

* resource sync share general common

* remove this changelog. use releases

* remove changelog from readme

* write commit file clean up path

* docs: supports any git provider repo

* fix docs: authorization

* multiline command supports escaped newlines

* move webhook to build config advanced

* parser comments with escaped newline

* improve parser

* save use Enter. escape monaco using escape

* improve logic when deployment / stack action buttons shown

* used_mem = total - available

* Fix unrecognized path have 404

* webhooks will 404 if misconfigured

* move update logger / alerter

* delete migrator

* update examples

* publish typescript client komodo_client
2024-10-14 23:04:49 -07:00
mbecker20
dfafadf57b demo / build username pw 2024-10-14 11:49:44 -04:00
mbecker20
538a79b8b5 fix upausing all container action state 2024-10-13 18:11:09 -04:00
Maxwell Becker
5088dc5c3c 1.15.8 (#124)
* fix all containers restart and unpause

* add CommitSync to Procedure

* validate resource query tags causes failure on non exist

* files on host init working. match tags fail if tag doesnt exist

* intelligent sync match tag selector

* fix linting

* Wait for user initialize file on host
2024-10-13 15:03:16 -07:00
mbecker20
581d7e0b2c fix Procedure sync log 2024-10-13 04:21:03 -04:00
mbecker20
657298041f remove unneeded syncs volume 2024-10-13 04:03:09 -04:00
mbecker20
d71e9dca11 fix version 2024-10-13 03:21:56 -04:00
Maxwell Becker
165131bdf8 1.15.7 (#119)
* 1.15.7-dev ensure git config set

* add username to commit msg
2024-10-13 00:01:14 -07:00
mbecker20
0a81d2a0d0 add labels to mongo compose 2024-10-13 00:57:13 -04:00
Maxwell Becker
44ab5eb804 1.15.6 (#117)
* add periphery.skip label, skip in StopAllContainers

* add core config sync directory

* deploy stack if changed

* fix stack env_file_path when git repo and using run_directory

* deploy stack if changed

* write sync contents

* commit to git based sync, managed git based sync

* can sync non UI defined resource syncs

* sync UI control

* clippy

* init new stack compose file in repo

* better error message when attached Server / Builder invalid

* specify multiple resource file paths (mixed files + folders)

* use react charts

* tweak stats charts

* add Containers page

* 1.15.6

* stack deploy check if deployes vs remote has changed

* improve ux with loading indicators

* sync diff accounts for deploy / after

* fix new chart time axes
2024-10-12 21:42:46 -07:00
Maxwell Becker
e3d8e603ec 1.15.5 (#116)
* 1.15.5
- Update your user's username and password
- **Admin**: Delete Users

* update username / password / delete user backend

* bump version

* alerter default disabled

* delete users and update username / password

* set password "" after update
2024-10-11 19:42:43 -07:00
mbecker20
8b5c179473 account recover note 2024-10-11 19:16:01 -04:00
mbecker20
8582bc92da fix Destroy Before Deploy config 2024-10-10 04:17:17 -04:00
Maxwell Becker
8ee270d045 1.15.4 (#114)
* stack destroy before deploy option

* add timestamps. Fix log polling even when poll not selected

* Add build [[$VERSION]] support. VERSION build arg default

* fix clippy lint

* initialize `first_builder`

* run_komodo_command uses parse_multiline_command

* comment UI for $VERSION and new command feature

* bump some deps

* support multiline commands in pre_deploy / pre_build
2024-10-10 00:37:23 -07:00
Maxwell Becker
2cfae525e9 1.15.3 (#109)
* fix parser support single quote '

* add stack reclone toggle

* git clone with token uses token:<TOKEN> for gitlab compatability

* support stack pre deploy shell command

* rename compose down update log stage

* deployment configure registry login account

* local testing setup

* bump version to 1.15.3

* new resources auto assign server if only one

* better error log when try to create resource with duplicate name

* end description with .

* ConfirmUpdate multi language

* fix compose write to host logic

* improve instrumentation

* improve update diff when small array

improve 2

* fix compose env file passing when repo_dir is not absolute
2024-10-08 23:07:38 -07:00
mbecker20
80e5d2a972 frontend dev setup guide 2024-10-08 16:55:24 -04:00
mbecker20
6f22c011a6 builder / server template add correct additional line if empty params 2024-10-07 22:55:48 -04:00
mbecker20
401cccee79 config nav buttons secondary 2024-10-07 21:55:14 -04:00
mbecker20
654b923f98 fix broken link to periphery setup 2024-10-07 18:56:14 -04:00
mbecker20
61261be70f update docs, split connecting servers out of Core Setup 2024-10-07 18:54:00 -04:00
mbecker20
46418125e3 update docs for periphery systemd --user install 2024-10-07 18:53:43 -04:00
mbecker20
e029e94f0d 1.15.2 Pass KOMODO_OIDC_ADDITIONAL_AUDIENCES 2024-10-07 15:44:51 -04:00
mbecker20
3be2b5163b 1.15.1 do not add trailing slash OIDC provider 2024-10-07 13:23:40 -04:00
mbecker20
6a145f58ff pass provider as-is. Authentik users should add a trailing slash 2024-10-07 13:16:25 -04:00
mbecker20
f1cede2ebd update dark / light stack screenshot to have action buttons 2024-10-07 08:05:39 -04:00
mbecker20
a5cfa1d412 update screenshots 2024-10-07 07:30:18 -04:00
mbecker20
a0674654c1 update screenshots 2024-10-07 07:30:11 -04:00
mbecker20
3faa1c58c1 update screenshots 2024-10-07 07:30:05 -04:00
mbecker20
7e296f34af screenshots 2024-10-07 07:29:58 -04:00
mbecker20
9f8ced190c update screenshots 2024-10-07 07:29:02 -04:00
mbecker20
c194bb16d8 update screenshots 2024-10-07 07:28:45 -04:00
mbecker20
39fec9b55e update screenshots 2024-10-07 07:27:52 -04:00
mbecker20
e97ed9888d update screenshots 1 2024-10-07 07:27:16 -04:00
mbecker20
559102ffe3 update readme 2024-10-07 07:25:36 -04:00
mbecker20
6bf80ddcc7 update screenshots readme 2024-10-07 07:25:24 -04:00
mbecker20
89dbe1b4d9 stack file_contents editor respects readOnly / disabled 2024-10-07 06:58:00 -04:00
mbecker20
334e16d646 OIDC use preferred username 2024-10-07 06:35:46 -04:00
mbecker20
a7bbe519f4 add build server link 2024-10-07 06:15:53 -04:00
mbecker20
5827486c5a add redirect uri for OIDC 2024-10-07 06:15:00 -04:00
mbecker20
8ca8f7eddd add context to oidc init error 2024-10-07 06:10:12 -04:00
mbecker20
0600276b43 fix parse KOMODO_MONGO_ in envs 2024-10-07 05:43:09 -04:00
mbecker20
a77a1495c7 active resources mb-12 not always there 2024-10-07 05:14:54 -04:00
306 changed files with 6885 additions and 3604 deletions

View File

@@ -5,4 +5,10 @@ LICENSE
*.code-workspace
*/node_modules
*/dist
*/dist
creds.toml
.core-repos
.repos
.stacks
.ssl

8
.gitignore vendored
View File

@@ -1,11 +1,11 @@
target
/frontend/build
node_modules
/lib/ts_client/build
node_modules
dist
.env
.env.development
.DS_Store
creds.toml
.syncs
.stacks
.DS_Store
.komodo

118
Cargo.lock generated
View File

@@ -41,7 +41,7 @@ dependencies = [
[[package]]
name = "alerter"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"axum",
@@ -201,9 +201,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "aws-config"
version = "1.5.7"
version = "1.5.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8191fb3091fa0561d1379ef80333c3c7191c6f0435d986e85821bcf7acbd1126"
checksum = "7198e6f03240fdceba36656d8be440297b6b82270325908c7381f37d826a74f6"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -268,9 +268,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ec2"
version = "1.75.0"
version = "1.77.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6787d920877cca6a4ee3953093f6a47cefe26de95a4f7b3681c5850bfe657b4"
checksum = "4bb6f841697b994ec3a020c560b52693bc9fcb7b9c69210088ab56e03df23b5e"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -292,9 +292,9 @@ dependencies = [
[[package]]
name = "aws-sdk-sso"
version = "1.44.0"
version = "1.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b90cfe6504115e13c41d3ea90286ede5aa14da294f3fe077027a6e83850843c"
checksum = "e33ae899566f3d395cbf42858e433930682cc9c1889fa89318896082fef45efb"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -314,9 +314,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ssooidc"
version = "1.45.0"
version = "1.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "167c0fad1f212952084137308359e8e4c4724d1c643038ce163f06de9662c1d0"
checksum = "f39c09e199ebd96b9f860b0fce4b6625f211e064ad7c8693b72ecf7ef03881e0"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -336,9 +336,9 @@ dependencies = [
[[package]]
name = "aws-sdk-sts"
version = "1.44.0"
version = "1.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cb5f98188ec1435b68097daa2a37d74b9d17c9caa799466338a8d1544e71b9d"
checksum = "3d95f93a98130389eb6233b9d615249e543f6c24a68ca1f109af9ca5164a8765"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -887,9 +887,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.19"
version = "4.5.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615"
checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8"
dependencies = [
"clap_builder",
"clap_derive",
@@ -897,9 +897,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.19"
version = "4.5.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b"
checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54"
dependencies = [
"anstream",
"anstyle",
@@ -943,7 +943,7 @@ dependencies = [
[[package]]
name = "command"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"komodo_client",
"run_command",
@@ -1149,18 +1149,18 @@ dependencies = [
[[package]]
name = "derive_builder"
version = "0.20.1"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b"
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
dependencies = [
"derive_builder_macro",
]
[[package]]
name = "derive_builder_core"
version = "0.20.1"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38"
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
dependencies = [
"darling",
"proc-macro2",
@@ -1170,9 +1170,9 @@ dependencies = [
[[package]]
name = "derive_builder_macro"
version = "0.20.1"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc"
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
dependencies = [
"derive_builder_core",
"syn 2.0.77",
@@ -1355,7 +1355,7 @@ dependencies = [
[[package]]
name = "environment_file"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"thiserror",
]
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "formatting"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"serror",
]
@@ -1452,9 +1452,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
[[package]]
name = "futures"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
@@ -1467,9 +1467,9 @@ dependencies = [
[[package]]
name = "futures-channel"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
"futures-sink",
@@ -1477,15 +1477,15 @@ dependencies = [
[[package]]
name = "futures-core"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-executor"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
dependencies = [
"futures-core",
"futures-task",
@@ -1494,15 +1494,15 @@ dependencies = [
[[package]]
name = "futures-io"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
@@ -1511,21 +1511,21 @@ dependencies = [
[[package]]
name = "futures-sink"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
[[package]]
name = "futures-task"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
[[package]]
name = "futures-util"
version = "0.3.30"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
@@ -1571,7 +1571,7 @@ checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd"
[[package]]
name = "git"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"command",
@@ -2192,7 +2192,7 @@ dependencies = [
[[package]]
name = "komodo_cli"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"clap",
@@ -2208,7 +2208,7 @@ dependencies = [
[[package]]
name = "komodo_client"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2239,7 +2239,7 @@ dependencies = [
[[package]]
name = "komodo_core"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2296,7 +2296,7 @@ dependencies = [
[[package]]
name = "komodo_periphery"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2306,6 +2306,7 @@ dependencies = [
"bollard",
"clap",
"command",
"derive_variants",
"dotenvy",
"environment_file",
"envy",
@@ -2382,7 +2383,7 @@ dependencies = [
[[package]]
name = "logger"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"komodo_client",
@@ -2444,19 +2445,6 @@ dependencies = [
"toml",
]
[[package]]
name = "migrator"
version = "1.15.0"
dependencies = [
"anyhow",
"dotenvy",
"envy",
"logger",
"serde",
"tokio",
"tracing",
]
[[package]]
name = "mime"
version = "0.3.17"
@@ -3101,7 +3089,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "periphery_client"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"komodo_client",
@@ -4249,9 +4237,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.31.4"
version = "0.32.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be"
checksum = "e3b5ae3f4f7d64646c46c4cae4e3f01d1c5d255c7406fdd7c7f999a94e488791"
dependencies = [
"core-foundation-sys",
"libc",
@@ -4879,7 +4867,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "update_logger"
version = "1.15.0"
version = "1.15.9"
dependencies = [
"anyhow",
"komodo_client",

View File

@@ -1,9 +1,15 @@
[workspace]
resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
members = [
"bin/*",
"lib/*",
"example/*",
"client/core/rs",
"client/periphery/rs",
]
[workspace.package]
version = "1.15.0"
version = "1.15.9"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -15,7 +21,7 @@ homepage = "https://komo.do"
[workspace.dependencies]
# LOCAL
# komodo_client = "1.14.3"
# komodo_client = "1.15.6"
komodo_client = { path = "client/core/rs" }
periphery_client = { path = "client/periphery/rs" }
environment_file = { path = "lib/environment_file" }
@@ -44,8 +50,8 @@ svi = "1.0.1"
reqwest = { version = "0.12.8", features = ["json"] }
tokio = { version = "1.38.1", features = ["full"] }
tokio-util = "0.7.12"
futures = "0.3.30"
futures-util = "0.3.30"
futures = "0.3.31"
futures-util = "0.3.31"
# SERVER
axum-extra = { version = "0.9.4", features = ["typed-header"] }
@@ -76,7 +82,7 @@ opentelemetry = "0.25.0"
tracing = "0.1.40"
# CONFIG
clap = { version = "4.5.19", features = ["derive"] }
clap = { version = "4.5.20", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
@@ -95,17 +101,17 @@ hex = "0.4.3"
# SYSTEM
bollard = "0.17.1"
sysinfo = "0.31.4"
sysinfo = "0.32.0"
# CLOUD
aws-config = "1.5.7"
aws-sdk-ec2 = "1.75.0"
aws-config = "1.5.8"
aws-sdk-ec2 = "1.77.0"
# MISC
derive_builder = "0.20.1"
derive_builder = "0.20.2"
typeshare = "1.0.3"
octorust = "0.7.0"
dashmap = "6.1.0"
colored = "2.1.0"
regex = "1.11.0"
bson = "2.13.0"
bson = "2.13.0"

View File

@@ -129,9 +129,15 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunSync(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CommitSync(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStackIfChanged(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -270,9 +276,15 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunSync(request) => {
komodo_client().execute(request).await
}
Execution::CommitSync(request) => {
komodo_client().write(request).await
}
Execution::DeployStack(request) => {
komodo_client().execute(request).await
}
Execution::DeployStackIfChanged(request) => {
komodo_client().execute(request).await
}
Execution::StartStack(request) => {
komodo_client().execute(request).await
}

View File

@@ -16,7 +16,7 @@ WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM alpine:3.20

View File

@@ -10,7 +10,7 @@ WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM debian:bullseye-slim

View File

@@ -10,36 +10,42 @@ use komodo_client::entities::{
ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use tracing::Instrument;
use crate::{config::core_config, state::db_client};
mod discord;
mod slack;
#[instrument]
pub async fn send_alerts(alerts: &[Alert]) {
if alerts.is_empty() {
return;
}
let Ok(alerters) = find_collect(
&db_client().alerters,
doc! { "config.enabled": true },
None,
)
.await
.inspect_err(|e| {
error!(
let span =
info_span!("send_alerts", alerts = format!("{alerts:?}"));
async {
let Ok(alerters) = find_collect(
&db_client().alerters,
doc! { "config.enabled": true },
None,
)
.await
.inspect_err(|e| {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
)
}) else {
return;
};
}) else {
return;
};
let handles =
alerts.iter().map(|alert| send_alert(&alerters, alert));
let handles =
alerts.iter().map(|alert| send_alert(&alerters, alert));
join_all(handles).await;
join_all(handles).await;
}
.instrument(span)
.await
}
#[instrument(level = "debug")]

View File

@@ -64,7 +64,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
PermissionLevel::Execute,
)
.await?;
let vars_and_secrets = get_variables_and_secrets().await?;
let mut vars_and_secrets = get_variables_and_secrets().await?;
if build.config.builder_id.is_empty() {
return Err(anyhow!("Must attach builder to RunBuild"));
@@ -85,6 +85,14 @@ impl Resolve<RunBuild, (User, Update)> for State {
update.version = build.config.version;
update_update(update.clone()).await?;
// Add the $VERSION to variables. Use with [[$VERSION]]
if !vars_and_secrets.variables.contains_key("$VERSION") {
vars_and_secrets.variables.insert(
String::from("$VERSION"),
build.config.version.to_string(),
);
}
let git_token = git_token(
&build.config.git_provider,
&build.config.git_account,
@@ -171,7 +179,6 @@ impl Resolve<RunBuild, (User, Update)> for State {
};
// CLONE REPO
let secret_replacers = if !build.config.skip_secret_interp {
// Interpolate variables / secrets into pre build command
let mut global_replacers = HashSet::new();

View File

@@ -2,6 +2,7 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use komodo_client::{
api::execute::*,
@@ -33,7 +34,10 @@ mod stack;
mod sync;
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolver, EnumVariants,
)]
#[variant_derive(Debug)]
#[resolver_target(State)]
#[resolver_args((User, Update))]
#[serde(tag = "type", content = "params")]
@@ -72,6 +76,7 @@ pub enum ExecuteRequest {
// ==== STACK ====
DeployStack(DeployStack),
DeployStackIfChanged(DeployStackIfChanged),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
@@ -154,7 +159,15 @@ async fn handler(
Ok(Json(update))
}
#[instrument(name = "ExecuteRequest", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
name = "ExecuteRequest",
skip(user, update),
fields(
user_id = user.id,
update_id = update.id,
request = format!("{:?}", request.extract_variant()))
)
]
async fn task(
req_id: Uuid,
request: ExecuteRequest,

View File

@@ -425,7 +425,7 @@ impl Resolve<RestartAllContainers, (User, Update)> for State {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::StartAllContainers {})
.request(api::container::RestartAllContainers {})
.await
.context("failed to restart all containers on host")?;
@@ -520,12 +520,12 @@ impl Resolve<UnpauseAllContainers, (User, Update)> for State {
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.starting_containers = true)?;
.update(|state| state.unpausing_containers = true)?;
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::StartAllContainers {})
.request(api::container::UnpauseAllContainers {})
.await
.context("failed to unpause all containers on host")?;

View File

@@ -3,9 +3,11 @@ use std::collections::HashSet;
use anyhow::Context;
use formatting::format_serror;
use komodo_client::{
api::execute::*,
api::{execute::*, write::RefreshStackCache},
entities::{
permission::PermissionLevel, stack::StackInfo, update::Update,
permission::PermissionLevel,
stack::{Stack, StackInfo},
update::Update,
user::User,
},
};
@@ -19,12 +21,14 @@ use crate::{
add_interp_update_log,
interpolate_variables_secrets_into_extra_args,
interpolate_variables_secrets_into_string,
interpolate_variables_secrets_into_system_command,
},
periphery_client,
query::get_variables_and_secrets,
update::update_update,
update::{add_update_without_send, update_update},
},
monitor::update_cache_for_server,
resource,
stack::{
execute::execute_compose, get_stack_and_server,
services::extract_services_into_res,
@@ -102,6 +106,13 @@ impl Resolve<DeployStack, (User, Update)> for State {
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut stack.config.pre_deploy,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
@@ -235,6 +246,77 @@ impl Resolve<DeployStack, (User, Update)> for State {
}
}
impl Resolve<DeployStackIfChanged, (User, Update)> for State {
async fn resolve(
&self,
DeployStackIfChanged { stack, stop_time }: DeployStackIfChanged,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Execute,
)
.await?;
State
.resolve(
RefreshStackCache {
stack: stack.id.clone(),
},
user.clone(),
)
.await?;
let stack = resource::get::<Stack>(&stack.id).await?;
let changed = match (
&stack.info.deployed_contents,
&stack.info.remote_contents,
) {
(Some(deployed_contents), Some(latest_contents)) => {
let changed = || {
for latest in latest_contents {
let Some(deployed) = deployed_contents
.iter()
.find(|c| c.path == latest.path)
else {
return true;
};
if latest.contents != deployed.contents {
return true;
}
}
false
};
changed()
}
(None, _) => true,
_ => false,
};
if !changed {
update.push_simple_log(
"Diff compose files",
String::from("Deploy cancelled after no changes detected."),
);
update.finalize();
return Ok(update);
}
// Don't actually send it here, let the handler send it after it can set action state.
// This is usually done in crate::helpers::update::init_execution_update.
update.id = add_update_without_send(&update).await?;
State
.resolve(
DeployStack {
stack: stack.name,
stop_time,
},
(user, update),
)
.await
}
}
impl Resolve<StartStack, (User, Update)> for State {
#[instrument(name = "StartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(

View File

@@ -1,4 +1,4 @@
use std::collections::HashMap;
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
@@ -20,23 +20,27 @@ use komodo_client::{
sync::ResourceSync,
update::{Log, Update},
user::{sync_user, User},
ResourceTargetVariant,
},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{oid::ObjectId, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::{query::get_id_to_tags, update::update_update},
resource::{self, refresh_resource_sync_state_cache},
state::{db_client, State},
state::{action_states, db_client, State},
sync::{
deploy::{
build_deploy_cache, deploy_from_cache, SyncDeployParams,
},
execute::{get_updates_for_execution, ExecuteResourceSync},
remote::RemoteResources,
AllResourcesById,
AllResourcesById, ResourceSyncTrait,
},
};
@@ -44,7 +48,11 @@ impl Resolve<RunSync, (User, Update)> for State {
#[instrument(name = "RunSync", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunSync { sync }: RunSync,
RunSync {
sync,
resource_type: match_resource_type,
resources: match_resources,
}: RunSync,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let sync = resource::get_check_permissions::<
@@ -52,6 +60,17 @@ impl Resolve<RunSync, (User, Update)> for State {
>(&sync, &user, PermissionLevel::Execute)
.await?;
// get the action state for the sync (or insert default).
let action_state = action_states()
.resource_sync
.get_or_insert_default(&sync.id)
.await;
// This will set action state back to default when dropped.
// Will also check to ensure sync not already busy before updating.
let _action_guard =
action_state.update(|state| state.syncing = true)?;
// Send update here for FE to recheck action state
update_update(update.clone()).await?;
@@ -70,22 +89,101 @@ impl Resolve<RunSync, (User, Update)> for State {
update_update(update.clone()).await?;
if !file_errors.is_empty() {
return Err(anyhow!("Found file errors. Cannot execute sync."))
return Err(anyhow!("Found file errors. Cannot execute sync."));
}
let resources = resources?;
let id_to_tags = get_id_to_tags(None).await?;
let all_resources = AllResourcesById::load().await?;
// Convert all match_resources to names
let match_resources = match_resources.map(|resources| {
resources
.into_iter()
.filter_map(|name_or_id| {
let Some(resource_type) = match_resource_type else {
return Some(name_or_id);
};
match ObjectId::from_str(&name_or_id) {
Ok(_) => match resource_type {
ResourceTargetVariant::Alerter => all_resources
.alerters
.get(&name_or_id)
.map(|a| a.name.clone()),
ResourceTargetVariant::Build => all_resources
.builds
.get(&name_or_id)
.map(|b| b.name.clone()),
ResourceTargetVariant::Builder => all_resources
.builders
.get(&name_or_id)
.map(|b| b.name.clone()),
ResourceTargetVariant::Deployment => all_resources
.deployments
.get(&name_or_id)
.map(|d| d.name.clone()),
ResourceTargetVariant::Procedure => all_resources
.procedures
.get(&name_or_id)
.map(|p| p.name.clone()),
ResourceTargetVariant::Repo => all_resources
.repos
.get(&name_or_id)
.map(|r| r.name.clone()),
ResourceTargetVariant::Server => all_resources
.servers
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::ServerTemplate => all_resources
.templates
.get(&name_or_id)
.map(|t| t.name.clone()),
ResourceTargetVariant::Stack => all_resources
.stacks
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::ResourceSync => all_resources
.syncs
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::System => None,
},
Err(_) => Some(name_or_id),
}
})
.collect::<Vec<_>>()
});
let deployments_by_name = all_resources
.deployments
.values()
.filter(|deployment| {
Deployment::include_resource(
&deployment.name,
&deployment.config,
match_resource_type,
match_resources.as_deref(),
&deployment.tags,
&id_to_tags,
&sync.config.match_tags,
)
})
.map(|deployment| (deployment.name.clone(), deployment.clone()))
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.filter(|stack| {
Stack::include_resource(
&stack.name,
&stack.config,
match_resource_type,
match_resources.as_deref(),
&stack.tags,
&id_to_tags,
&sync.config.match_tags,
)
})
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
@@ -105,6 +203,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.servers,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -117,6 +217,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.deployments,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -126,6 +228,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.stacks,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -135,6 +239,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.builds,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -144,6 +250,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.repos,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -156,6 +264,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.procedures,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -165,6 +275,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.builders,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -174,6 +286,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.alerters,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -186,6 +300,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.server_templates,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -198,31 +314,48 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.resource_syncs,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
variables_to_create,
variables_to_update,
variables_to_delete,
) = crate::sync::variables::get_updates_for_execution(
resources.variables,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
)
.await?;
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.match_tags.is_empty()
{
crate::sync::variables::get_updates_for_execution(
resources.variables,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
)
.await?
} else {
Default::default()
};
let (
user_groups_to_create,
user_groups_to_update,
user_groups_to_delete,
) = crate::sync::user_groups::get_updates_for_execution(
resources.user_groups,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
&all_resources,
)
.await?;
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.match_tags.is_empty()
{
crate::sync::user_groups::get_updates_for_execution(
resources.user_groups,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
&all_resources,
)
.await?
} else {
Default::default()
};
if deploy_cache.is_empty()
&& resource_syncs_to_create.is_empty()

View File

@@ -12,6 +12,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{db_client, State},
};
@@ -37,7 +38,12 @@ impl Resolve<ListAlerters, User> for State {
ListAlerters { query }: ListAlerters,
user: User,
) -> anyhow::Result<Vec<AlerterListItem>> {
resource::list_for_user::<Alerter>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Alerter>(query, &user, &all_tags).await
}
}
@@ -47,7 +53,13 @@ impl Resolve<ListFullAlerters, User> for State {
ListFullAlerters { query }: ListFullAlerters,
user: User,
) -> anyhow::Result<ListFullAlertersResponse> {
resource::list_full_for_user::<Alerter>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Alerter>(query, &user, &all_tags)
.await
}
}

View File

@@ -22,6 +22,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{
action_states, build_state_cache, db_client, github_client, State,
@@ -49,7 +50,12 @@ impl Resolve<ListBuilds, User> for State {
ListBuilds { query }: ListBuilds,
user: User,
) -> anyhow::Result<Vec<BuildListItem>> {
resource::list_for_user::<Build>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Build>(query, &user, &all_tags).await
}
}
@@ -59,7 +65,13 @@ impl Resolve<ListFullBuilds, User> for State {
ListFullBuilds { query }: ListFullBuilds,
user: User,
) -> anyhow::Result<ListFullBuildsResponse> {
resource::list_full_for_user::<Build>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Build>(query, &user, &all_tags)
.await
}
}
@@ -94,6 +106,7 @@ impl Resolve<GetBuildsSummary, User> for State {
let builds = resource::list_full_for_user::<Build>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get all builds")?;
@@ -252,9 +265,15 @@ impl Resolve<ListCommonBuildExtraArgs, User> for State {
ListCommonBuildExtraArgs { query }: ListCommonBuildExtraArgs,
user: User,
) -> anyhow::Result<ListCommonBuildExtraArgsResponse> {
let builds = resource::list_full_for_user::<Build>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let builds =
resource::list_full_for_user::<Build>(query, &user, &all_tags)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();

View File

@@ -12,6 +12,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{db_client, State},
};
@@ -37,7 +38,12 @@ impl Resolve<ListBuilders, User> for State {
ListBuilders { query }: ListBuilders,
user: User,
) -> anyhow::Result<Vec<BuilderListItem>> {
resource::list_for_user::<Builder>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Builder>(query, &user, &all_tags).await
}
}
@@ -47,7 +53,13 @@ impl Resolve<ListFullBuilders, User> for State {
ListFullBuilders { query }: ListFullBuilders,
user: User,
) -> anyhow::Result<ListFullBuildersResponse> {
resource::list_full_for_user::<Builder>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Builder>(query, &user, &all_tags)
.await
}
}

View File

@@ -19,7 +19,7 @@ use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::periphery_client,
helpers::{periphery_client, query::get_all_tags},
resource,
state::{action_states, deployment_status_cache, State},
};
@@ -45,7 +45,13 @@ impl Resolve<ListDeployments, User> for State {
ListDeployments { query }: ListDeployments,
user: User,
) -> anyhow::Result<Vec<DeploymentListItem>> {
resource::list_for_user::<Deployment>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Deployment>(query, &user, &all_tags)
.await
}
}
@@ -55,7 +61,15 @@ impl Resolve<ListFullDeployments, User> for State {
ListFullDeployments { query }: ListFullDeployments,
user: User,
) -> anyhow::Result<ListFullDeploymentsResponse> {
resource::list_full_for_user::<Deployment>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Deployment>(
query, &user, &all_tags,
)
.await
}
}
@@ -88,7 +102,11 @@ const MAX_LOG_LENGTH: u64 = 5000;
impl Resolve<GetDeploymentLog, User> for State {
async fn resolve(
&self,
GetDeploymentLog { deployment, tail }: GetDeploymentLog,
GetDeploymentLog {
deployment,
tail,
timestamps,
}: GetDeploymentLog,
user: User,
) -> anyhow::Result<Log> {
let Deployment {
@@ -109,6 +127,7 @@ impl Resolve<GetDeploymentLog, User> for State {
.request(api::container::GetContainerLog {
name,
tail: cmp::min(tail, MAX_LOG_LENGTH),
timestamps,
})
.await
.context("failed at call to periphery")
@@ -123,6 +142,7 @@ impl Resolve<SearchDeploymentLog, User> for State {
terms,
combinator,
invert,
timestamps,
}: SearchDeploymentLog,
user: User,
) -> anyhow::Result<Log> {
@@ -146,6 +166,7 @@ impl Resolve<SearchDeploymentLog, User> for State {
terms,
combinator,
invert,
timestamps,
})
.await
.context("failed at call to periphery")
@@ -210,6 +231,7 @@ impl Resolve<GetDeploymentsSummary, User> for State {
let deployments = resource::list_full_for_user::<Deployment>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get deployments from db")?;
@@ -247,10 +269,16 @@ impl Resolve<ListCommonDeploymentExtraArgs, User> for State {
ListCommonDeploymentExtraArgs { query }: ListCommonDeploymentExtraArgs,
user: User,
) -> anyhow::Result<ListCommonDeploymentExtraArgsResponse> {
let deployments =
resource::list_full_for_user::<Deployment>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let deployments = resource::list_full_for_user::<Deployment>(
query, &user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();

View File

@@ -111,6 +111,7 @@ enum ReadRequest {
InspectDockerImage(InspectDockerImage),
ListDockerImageHistory(ListDockerImageHistory),
InspectDockerVolume(InspectDockerVolume),
ListAllDockerContainers(ListAllDockerContainers),
#[to_string_resolver]
ListDockerContainers(ListDockerContainers),
#[to_string_resolver]
@@ -402,12 +403,18 @@ impl Resolve<ListGitProvidersFromConfig, User> for State {
let (builds, repos, syncs) = tokio::try_join!(
resource::list_full_for_user::<Build>(
Default::default(),
&user
&user,
&[]
),
resource::list_full_for_user::<Repo>(
Default::default(),
&user,
&[]
),
resource::list_full_for_user::<Repo>(Default::default(), &user),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user
&user,
&[]
),
)?;

View File

@@ -10,6 +10,7 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{action_states, procedure_state_cache, State},
};
@@ -35,7 +36,13 @@ impl Resolve<ListProcedures, User> for State {
ListProcedures { query }: ListProcedures,
user: User,
) -> anyhow::Result<ListProceduresResponse> {
resource::list_for_user::<Procedure>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Procedure>(query, &user, &all_tags)
.await
}
}
@@ -45,7 +52,13 @@ impl Resolve<ListFullProcedures, User> for State {
ListFullProcedures { query }: ListFullProcedures,
user: User,
) -> anyhow::Result<ListFullProceduresResponse> {
resource::list_full_for_user::<Procedure>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Procedure>(query, &user, &all_tags)
.await
}
}
@@ -58,6 +71,7 @@ impl Resolve<GetProceduresSummary, User> for State {
let procedures = resource::list_full_for_user::<Procedure>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get procedures from db")?;

View File

@@ -12,6 +12,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{action_states, github_client, repo_state_cache, State},
};
@@ -37,7 +38,12 @@ impl Resolve<ListRepos, User> for State {
ListRepos { query }: ListRepos,
user: User,
) -> anyhow::Result<Vec<RepoListItem>> {
resource::list_for_user::<Repo>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Repo>(query, &user, &all_tags).await
}
}
@@ -47,7 +53,13 @@ impl Resolve<ListFullRepos, User> for State {
ListFullRepos { query }: ListFullRepos,
user: User,
) -> anyhow::Result<ListFullReposResponse> {
resource::list_full_for_user::<Repo>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Repo>(query, &user, &all_tags)
.await
}
}
@@ -79,10 +91,13 @@ impl Resolve<GetReposSummary, User> for State {
GetReposSummary {}: GetReposSummary,
user: User,
) -> anyhow::Result<GetReposSummaryResponse> {
let repos =
resource::list_full_for_user::<Repo>(Default::default(), &user)
.await
.context("failed to get repos from db")?;
let repos = resource::list_full_for_user::<Repo>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get repos from db")?;
let mut res = GetReposSummaryResponse::default();

View File

@@ -13,7 +13,7 @@ use komodo_client::{
entities::{
deployment::Deployment,
docker::{
container::Container,
container::{Container, ContainerListItem},
image::{Image, ImageHistoryResponseItem},
network::Network,
volume::Volume,
@@ -43,7 +43,7 @@ use resolver_api::{Resolve, ResolveToString};
use tokio::sync::Mutex;
use crate::{
helpers::periphery_client,
helpers::{periphery_client, query::get_all_tags},
resource,
stack::compose_container_match_regex,
state::{action_states, db_client, server_status_cache, State},
@@ -55,9 +55,12 @@ impl Resolve<GetServersSummary, User> for State {
GetServersSummary {}: GetServersSummary,
user: User,
) -> anyhow::Result<GetServersSummaryResponse> {
let servers =
resource::list_for_user::<Server>(Default::default(), &user)
.await?;
let servers = resource::list_for_user::<Server>(
Default::default(),
&user,
&[],
)
.await?;
let mut res = GetServersSummaryResponse::default();
for server in servers {
res.total += 1;
@@ -119,7 +122,12 @@ impl Resolve<ListServers, User> for State {
ListServers { query }: ListServers,
user: User,
) -> anyhow::Result<Vec<ServerListItem>> {
resource::list_for_user::<Server>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Server>(query, &user, &all_tags).await
}
}
@@ -129,7 +137,13 @@ impl Resolve<ListFullServers, User> for State {
ListFullServers { query }: ListFullServers,
user: User,
) -> anyhow::Result<ListFullServersResponse> {
resource::list_full_for_user::<Server>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Server>(query, &user, &all_tags)
.await
}
}
@@ -289,7 +303,7 @@ impl ResolveToString<ListSystemProcesses, User> for State {
}
}
const STATS_PER_PAGE: i64 = 500;
const STATS_PER_PAGE: i64 = 200;
impl Resolve<GetHistoricalServerStats, User> for State {
async fn resolve(
@@ -368,6 +382,40 @@ impl ResolveToString<ListDockerContainers, User> for State {
}
}
/// Aggregates the cached container lists for every server visible to the
/// requesting user, optionally narrowed by the `servers` filter.
impl Resolve<ListAllDockerContainers, User> for State {
async fn resolve(
&self,
// `servers`: optional filter list; entries may be either server ids or
// server names (both are checked below). Empty means "all servers".
ListAllDockerContainers { servers }: ListAllDockerContainers,
user: User,
) -> anyhow::Result<Vec<ContainerListItem>> {
// List only servers the user is permitted to see (default query, no
// tag filtering — hence the empty `&[]` all_tags argument).
let servers = resource::list_for_user::<Server>(
Default::default(),
&user,
&[],
)
.await?
.into_iter()
.filter(|server| {
// An empty filter matches everything; otherwise match by id OR name.
servers.is_empty()
|| servers.contains(&server.id)
|| servers.contains(&server.name)
});
let mut containers = Vec::<ContainerListItem>::new();
for server in servers {
// Read from the in-memory status cache rather than querying each
// periphery agent — results are only as fresh as the cache.
// NOTE(review): `get_or_insert_default` presumably creates an empty
// cache entry for servers not yet polled (yielding no containers for
// them) — confirm against the cache implementation.
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(more_containers) = &cache.containers {
containers.extend(more_containers.clone());
}
}
Ok(containers)
}
}
impl Resolve<InspectDockerContainer, User> for State {
async fn resolve(
&self,
@@ -404,6 +452,7 @@ impl Resolve<GetContainerLog, User> for State {
server,
container,
tail,
timestamps,
}: GetContainerLog,
user: User,
) -> anyhow::Result<Log> {
@@ -417,6 +466,7 @@ impl Resolve<GetContainerLog, User> for State {
.request(periphery::container::GetContainerLog {
name: container,
tail: cmp::min(tail, MAX_LOG_LENGTH),
timestamps,
})
.await
.context("failed at call to periphery")
@@ -432,6 +482,7 @@ impl Resolve<SearchContainerLog, User> for State {
terms,
combinator,
invert,
timestamps,
}: SearchContainerLog,
user: User,
) -> anyhow::Result<Log> {
@@ -447,6 +498,7 @@ impl Resolve<SearchContainerLog, User> for State {
terms,
combinator,
invert,
timestamps,
})
.await
.context("failed at call to periphery")

View File

@@ -11,6 +11,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{db_client, State},
};
@@ -36,7 +37,13 @@ impl Resolve<ListServerTemplates, User> for State {
ListServerTemplates { query }: ListServerTemplates,
user: User,
) -> anyhow::Result<ListServerTemplatesResponse> {
resource::list_for_user::<ServerTemplate>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<ServerTemplate>(query, &user, &all_tags)
.await
}
}
@@ -46,7 +53,15 @@ impl Resolve<ListFullServerTemplates, User> for State {
ListFullServerTemplates { query }: ListFullServerTemplates,
user: User,
) -> anyhow::Result<ListFullServerTemplatesResponse> {
resource::list_full_for_user::<ServerTemplate>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<ServerTemplate>(
query, &user, &all_tags,
)
.await
}
}

View File

@@ -17,7 +17,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::periphery_client,
helpers::{periphery_client, query::get_all_tags},
resource,
stack::get_stack_and_server,
state::{action_states, github_client, stack_status_cache, State},
@@ -70,6 +70,7 @@ impl Resolve<GetStackServiceLog, User> for State {
stack,
service,
tail,
timestamps,
}: GetStackServiceLog,
user: User,
) -> anyhow::Result<GetStackServiceLogResponse> {
@@ -85,6 +86,7 @@ impl Resolve<GetStackServiceLog, User> for State {
project: stack.project_name(false),
service,
tail,
timestamps,
})
.await
.context("failed to get stack service log from periphery")
@@ -100,6 +102,7 @@ impl Resolve<SearchStackServiceLog, User> for State {
terms,
combinator,
invert,
timestamps,
}: SearchStackServiceLog,
user: User,
) -> anyhow::Result<SearchStackServiceLogResponse> {
@@ -117,6 +120,7 @@ impl Resolve<SearchStackServiceLog, User> for State {
terms,
combinator,
invert,
timestamps,
})
.await
.context("failed to get stack service log from periphery")
@@ -129,9 +133,15 @@ impl Resolve<ListCommonStackExtraArgs, User> for State {
ListCommonStackExtraArgs { query }: ListCommonStackExtraArgs,
user: User,
) -> anyhow::Result<ListCommonStackExtraArgsResponse> {
let stacks = resource::list_full_for_user::<Stack>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let stacks =
resource::list_full_for_user::<Stack>(query, &user, &all_tags)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
@@ -154,9 +164,15 @@ impl Resolve<ListCommonStackBuildExtraArgs, User> for State {
ListCommonStackBuildExtraArgs { query }: ListCommonStackBuildExtraArgs,
user: User,
) -> anyhow::Result<ListCommonStackBuildExtraArgsResponse> {
let stacks = resource::list_full_for_user::<Stack>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let stacks =
resource::list_full_for_user::<Stack>(query, &user, &all_tags)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
@@ -179,7 +195,12 @@ impl Resolve<ListStacks, User> for State {
ListStacks { query }: ListStacks,
user: User,
) -> anyhow::Result<Vec<StackListItem>> {
resource::list_for_user::<Stack>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Stack>(query, &user, &all_tags).await
}
}
@@ -189,7 +210,13 @@ impl Resolve<ListFullStacks, User> for State {
ListFullStacks { query }: ListFullStacks,
user: User,
) -> anyhow::Result<ListFullStacksResponse> {
resource::list_full_for_user::<Stack>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Stack>(query, &user, &all_tags)
.await
}
}
@@ -224,6 +251,7 @@ impl Resolve<GetStacksSummary, User> for State {
let stacks = resource::list_full_for_user::<Stack>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get stacks from db")?;

View File

@@ -15,6 +15,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{
action_states, github_client, resource_sync_state_cache, State,
@@ -42,7 +43,13 @@ impl Resolve<ListResourceSyncs, User> for State {
ListResourceSyncs { query }: ListResourceSyncs,
user: User,
) -> anyhow::Result<Vec<ResourceSyncListItem>> {
resource::list_for_user::<ResourceSync>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<ResourceSync>(query, &user, &all_tags)
.await
}
}
@@ -52,7 +59,15 @@ impl Resolve<ListFullResourceSyncs, User> for State {
ListFullResourceSyncs { query }: ListFullResourceSyncs,
user: User,
) -> anyhow::Result<ListFullResourceSyncsResponse> {
resource::list_full_for_user::<ResourceSync>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<ResourceSync>(
query, &user, &all_tags,
)
.await
}
}
@@ -88,6 +103,7 @@ impl Resolve<GetResourceSyncsSummary, User> for State {
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get resource_syncs from db")?;

View File

@@ -29,7 +29,9 @@ use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
helpers::query::{get_id_to_tags, get_user_user_group_ids},
helpers::query::{
get_all_tags, get_id_to_tags, get_user_user_group_ids,
},
resource,
state::{db_client, State},
sync::{
@@ -46,10 +48,17 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
) -> anyhow::Result<ExportAllResourcesToTomlResponse> {
let mut targets = Vec::<ResourceTarget>::new();
let all_tags = if tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
targets.extend(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -59,6 +68,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -68,6 +78,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -77,6 +88,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -86,6 +98,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -95,6 +108,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -104,6 +118,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -113,6 +128,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -122,6 +138,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -131,6 +148,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -184,9 +202,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
PermissionLevel::Read,
)
.await?;
res
.alerters
.push(convert_resource::<Alerter>(alerter, &id_to_tags))
res.alerters.push(convert_resource::<Alerter>(
alerter,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::ResourceSync(id) => {
let sync = resource::get_check_permissions::<ResourceSync>(
@@ -201,6 +222,8 @@ impl Resolve<ExportResourcesToToml, User> for State {
{
res.resource_syncs.push(convert_resource::<ResourceSync>(
sync,
false,
vec![],
&id_to_tags,
))
}
@@ -213,7 +236,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
res.server_templates.push(
convert_resource::<ServerTemplate>(template, &id_to_tags),
convert_resource::<ServerTemplate>(
template,
false,
vec![],
&id_to_tags,
),
)
}
ResourceTarget::Server(id) => {
@@ -223,9 +251,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
PermissionLevel::Read,
)
.await?;
res
.servers
.push(convert_resource::<Server>(server, &id_to_tags))
res.servers.push(convert_resource::<Server>(
server,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Builder(id) => {
let mut builder =
@@ -236,9 +267,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Builder::replace_ids(&mut builder, &all);
res
.builders
.push(convert_resource::<Builder>(builder, &id_to_tags))
res.builders.push(convert_resource::<Builder>(
builder,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Build(id) => {
let mut build = resource::get_check_permissions::<Build>(
@@ -248,9 +282,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Build::replace_ids(&mut build, &all);
res
.builds
.push(convert_resource::<Build>(build, &id_to_tags))
res.builds.push(convert_resource::<Build>(
build,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Deployment(id) => {
let mut deployment = resource::get_check_permissions::<
@@ -262,6 +299,8 @@ impl Resolve<ExportResourcesToToml, User> for State {
Deployment::replace_ids(&mut deployment, &all);
res.deployments.push(convert_resource::<Deployment>(
deployment,
false,
vec![],
&id_to_tags,
))
}
@@ -273,7 +312,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Repo::replace_ids(&mut repo, &all);
res.repos.push(convert_resource::<Repo>(repo, &id_to_tags))
res.repos.push(convert_resource::<Repo>(
repo,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Stack(id) => {
let mut stack = resource::get_check_permissions::<Stack>(
@@ -283,9 +327,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Stack::replace_ids(&mut stack, &all);
res
.stacks
.push(convert_resource::<Stack>(stack, &id_to_tags))
res.stacks.push(convert_resource::<Stack>(
stack,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Procedure(id) => {
let mut procedure = resource::get_check_permissions::<
@@ -297,6 +344,8 @@ impl Resolve<ExportResourcesToToml, User> for State {
Procedure::replace_ids(&mut procedure, &all);
res.procedures.push(convert_resource::<Procedure>(
procedure,
false,
vec![],
&id_to_tags,
));
}

View File

@@ -28,6 +28,7 @@ mod service_user;
mod stack;
mod sync;
mod tag;
mod user;
mod user_group;
mod variable;
@@ -40,6 +41,11 @@ mod variable;
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
pub enum WriteRequest {
// ==== USER ====
UpdateUserUsername(UpdateUserUsername),
UpdateUserPassword(UpdateUserPassword),
DeleteUser(DeleteUser),
// ==== SERVICE USER ====
CreateServiceUser(CreateServiceUser),
UpdateServiceUserDescription(UpdateServiceUserDescription),
@@ -124,8 +130,9 @@ pub enum WriteRequest {
CopyResourceSync(CopyResourceSync),
DeleteResourceSync(DeleteResourceSync),
UpdateResourceSync(UpdateResourceSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
WriteSyncFileContents(WriteSyncFileContents),
CommitSync(CommitSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),
@@ -188,7 +195,10 @@ async fn handler(
#[instrument(
name = "WriteRequest",
skip(user, request),
fields(user_id = user.id, request = format!("{:?}", request.extract_variant()))
fields(
user_id = user.id,
request = format!("{:?}", request.extract_variant())
)
)]
async fn task(
req_id: Uuid,

View File

@@ -22,14 +22,14 @@ use octorust::types::{
};
use periphery_client::api::compose::{
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
WriteComposeContentsToHost,
WriteCommitComposeContents, WriteComposeContentsToHost,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{
periphery_client,
git_token, periphery_client,
query::get_server_with_state,
update::{add_update, make_update},
},
@@ -143,7 +143,7 @@ impl Resolve<WriteStackFileContents, User> for State {
}: WriteStackFileContents,
user: User,
) -> anyhow::Result<Update> {
let (stack, server) = get_stack_and_server(
let (mut stack, server) = get_stack_and_server(
&stack,
&user,
PermissionLevel::Write,
@@ -151,9 +151,9 @@ impl Resolve<WriteStackFileContents, User> for State {
)
.await?;
if !stack.config.files_on_host {
if !stack.config.files_on_host && stack.config.repo.is_empty() {
return Err(anyhow!(
"Stack is not configured to use files on host, can't write file contents"
"Stack is not configured to use Files on Host or Git Repo, can't write file contents"
));
}
@@ -162,30 +162,72 @@ impl Resolve<WriteStackFileContents, User> for State {
update.push_simple_log("File contents to write", &contents);
match periphery_client(&server)?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
file_path,
contents,
})
.await
.context("Failed to write contents to host")
{
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update.push_error_log(
"Write file contents",
format_serror(&e.into()),
);
}
};
let stack_id = stack.id.clone();
if stack.config.files_on_host {
match periphery_client(&server)?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
file_path,
contents,
})
.await
.context("Failed to write contents to host")
{
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update.push_error_log(
"Write file contents",
format_serror(&e.into()),
);
}
};
} else {
let git_token = if !stack.config.git_account.is_empty() {
git_token(
&stack.config.git_provider,
&stack.config.git_account,
|https| stack.config.git_https = https,
)
.await
.with_context(|| {
format!(
"Failed to get git token. | {} | {}",
stack.config.git_account, stack.config.git_provider
)
})?
} else {
None
};
match periphery_client(&server)?
.request(WriteCommitComposeContents {
stack,
username: Some(user.username),
file_path,
contents,
git_token,
})
.await
.context("Failed to write contents to host")
{
Ok(res) => {
update.logs.extend(res.logs);
}
Err(e) => {
update.push_error_log(
"Write file contents",
format_serror(&e.into()),
);
}
};
}
if let Err(e) = State
.resolve(
RefreshStackCache { stack: stack.id },
RefreshStackCache { stack: stack_id },
stack_user().to_owned(),
)
.await
@@ -227,10 +269,11 @@ impl Resolve<RefreshStackCache, User> for State {
.await?;
let file_contents_empty = stack.config.file_contents.is_empty();
let repo_empty = stack.config.repo.is_empty();
if !stack.config.files_on_host
&& file_contents_empty
&& stack.config.repo.is_empty()
&& repo_empty
{
// Nothing to do without one of these
return Ok(NoData {});
@@ -297,7 +340,7 @@ impl Resolve<RefreshStackCache, User> for State {
(services, Some(contents), Some(errors), None, None)
}
}
} else if file_contents_empty {
} else if !repo_empty {
// ================
// REPO BASED STACK
// ================

View File

@@ -8,6 +8,7 @@ use komodo_client::{
self,
alert::{Alert, AlertData, SeverityLevel},
alerter::Alerter,
all_logs_success,
build::Build,
builder::Builder,
config::core::CoreConfig,
@@ -22,9 +23,10 @@ use komodo_client::{
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
},
update::Log,
to_komodo_name,
update::{Log, Update},
user::{sync_user, User},
NoData, Operation, ResourceTarget,
CloneArgs, NoData, Operation, ResourceTarget,
},
};
use mungos::{
@@ -35,6 +37,7 @@ use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
alert::send_alerts,
@@ -103,6 +106,286 @@ impl Resolve<UpdateResourceSync, User> for State {
}
}
// Writes caller-provided file contents for a ResourceSync that is either
// "files on host" or repo-backed, records every step in an Update, and then
// refreshes the sync's pending diff. UI-defined syncs (neither mode) are
// rejected up front.
impl Resolve<WriteSyncFileContents, User> for State {
  async fn resolve(
    &self,
    WriteSyncFileContents {
      sync,
      resource_path,
      file_path,
      contents,
    }: WriteSyncFileContents,
    user: User,
  ) -> anyhow::Result<Update> {
    // Caller needs Write permission on the target sync.
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // This endpoint only makes sense when the file lives on disk
    // (files_on_host) or in a cloned repo.
    if !sync.config.files_on_host && sync.config.repo.is_empty() {
      return Err(anyhow!(
        "This method is only for files on host, or repo based syncs."
      ));
    }
    let mut update =
      make_update(&sync, Operation::WriteSyncContents, &user);
    // Keep the raw contents in the update log for auditability.
    update.push_simple_log("File contents", &contents);
    // Root directory differs by mode: the configured sync directory for
    // files-on-host, or the unique clone path for repo-based syncs.
    let root = if sync.config.files_on_host {
      core_config()
        .sync_directory
        .join(to_komodo_name(&sync.name))
    } else {
      let clone_args: CloneArgs = (&sync).into();
      clone_args.unique_path(&core_config().repo_directory)?
    };
    let file_path =
      file_path.parse::<PathBuf>().context("Invalid file path")?;
    let resource_path = resource_path
      .parse::<PathBuf>()
      .context("Invalid resource path")?;
    let full_path = root.join(&resource_path).join(&file_path);
    // Best-effort: make sure parent directories exist before writing.
    if let Some(parent) = full_path.parent() {
      let _ = fs::create_dir_all(parent).await;
    }
    if let Err(e) =
      fs::write(&full_path, &contents).await.with_context(|| {
        format!("Failed to write file contents to {full_path:?}")
      })
    {
      update.push_error_log("Write file", format_serror(&e.into()));
    } else {
      update.push_simple_log(
        "Write file",
        format!("File written to {full_path:?}"),
      );
    };
    // If the write failed, finalize and return without committing or
    // refreshing — the error log above carries the reason.
    if !all_logs_success(&update.logs) {
      update.finalize();
      update.id = add_update(update.clone()).await?;
      return Ok(update);
    }
    // Files-on-host mode has no git commit step: just refresh pending
    // state and return.
    if sync.config.files_on_host {
      if let Err(e) = State
        .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
        .await
      {
        update
          .push_error_log("Refresh failed", format_serror(&e.into()));
      }
      update.finalize();
      update.id = add_update(update.clone()).await?;
      return Ok(update);
    }
    // Repo mode: commit the written file, attributing the commit message
    // to the requesting user.
    let commit_res = git::commit_file(
      &format!("{}: Commit Resource File", user.username),
      &root,
      &resource_path.join(&file_path),
    )
    .await;
    update.logs.extend(commit_res.logs);
    // Refresh failure is logged, not fatal — the file write/commit
    // already happened.
    if let Err(e) = State
      .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
      .await
    {
      update
        .push_error_log("Refresh failed", format_serror(&e.into()));
    }
    update.finalize();
    update.id = add_update(update.clone()).await?;
    Ok(update)
  }
}
// Exports all resources (filtered by the sync's match_tags) to TOML and
// commits the result to the sync's backing store: a file on the Core host,
// a git repo, or the `file_contents` field in the database. Afterwards the
// sync's pending diff is refreshed and the Update is broadcast.
impl Resolve<CommitSync, User> for State {
  #[instrument(name = "CommitSync", skip(self, user))]
  async fn resolve(
    &self,
    CommitSync { sync }: CommitSync,
    user: User,
  ) -> anyhow::Result<Update> {
    // Caller needs Write permission on the sync.
    let sync = resource::get_check_permissions::<
      entities::sync::ResourceSync,
    >(&sync, &user, PermissionLevel::Write)
    .await?;
    let file_contents_empty = sync.config.file_contents_empty();
    // A "fresh" sync (no mode and no contents yet) may be committed to
    // even without managed mode, to bootstrap it.
    let fresh_sync = !sync.config.files_on_host
      && sync.config.repo.is_empty()
      && file_contents_empty;
    if !sync.config.managed && !fresh_sync {
      return Err(anyhow!(
        "Cannot commit to sync. Enabled 'managed' mode."
      ));
    }
    // Get this here so it can fail before update created.
    let resource_path =
      if sync.config.files_on_host || !sync.config.repo.is_empty() {
        let resource_path = sync
          .config
          .resource_path
          .first()
          .context("Sync does not have resource path configured.")?
          .parse::<PathBuf>()
          .context("Invalid resource path")?;
        // Only '.toml' targets are accepted for resource files.
        if resource_path
          .extension()
          .context("Resource path missing '.toml' extension")?
          != "toml"
        {
          return Err(anyhow!(
            "Resource path missing '.toml' extension"
          ));
        }
        Some(resource_path)
      } else {
        None
      };
    // Serialize all matching resources to TOML before creating the update.
    let res = State
      .resolve(
        ExportAllResourcesToToml {
          tags: sync.config.match_tags.clone(),
        },
        sync_user().to_owned(),
      )
      .await?;
    let mut update = make_update(&sync, Operation::CommitSync, &user);
    update.id = add_update(update.clone()).await?;
    update.logs.push(Log::simple("Resources", res.toml.clone()));
    if sync.config.files_on_host {
      let Some(resource_path) = resource_path else {
        // Resource path checked above for files_on_host mode.
        unreachable!()
      };
      let file_path = core_config()
        .sync_directory
        .join(to_komodo_name(&sync.name))
        .join(&resource_path);
      // Best-effort parent directory creation before the write.
      if let Some(parent) = file_path.parent() {
        let _ = tokio::fs::create_dir_all(&parent).await;
      };
      if let Err(e) = tokio::fs::write(&file_path, &res.toml)
        .await
        .with_context(|| {
          format!("Failed to write resource file to {file_path:?}",)
        })
      {
        // Write failed: record error, finalize, and bail early.
        update.push_error_log(
          "Write resource file",
          format_serror(&e.into()),
        );
        update.finalize();
        add_update(update.clone()).await?;
        return Ok(update);
      } else {
        update.push_simple_log(
          "Write contents",
          format!("File contents written to {file_path:?}"),
        );
      }
    } else if !sync.config.repo.is_empty() {
      let Some(resource_path) = resource_path else {
        // Resource path checked above for repo mode.
        unreachable!()
      };
      // GIT REPO
      let args: CloneArgs = (&sync).into();
      let root = args.unique_path(&core_config().repo_directory)?;
      // Write the file into the clone and commit it in one step.
      match git::write_commit_file(
        "Commit Sync",
        &root,
        &resource_path,
        &res.toml,
      )
      .await
      {
        Ok(res) => update.logs.extend(res.logs),
        Err(e) => {
          update.push_error_log(
            "Write resource file",
            format_serror(&e.into()),
          );
          update.finalize();
          add_update(update.clone()).await?;
          return Ok(update);
        }
      }
      // ===========
      // UI DEFINED
    } else if let Err(e) = db_client()
      .resource_syncs
      .update_one(
        doc! { "name": &sync.name },
        doc! { "$set": { "config.file_contents": res.toml } },
      )
      .await
      .context("failed to update file_contents on db")
    {
      update.push_error_log(
        "Write resource to database",
        format_serror(&e.into()),
      );
      update.finalize();
      add_update(update.clone()).await?;
      return Ok(update);
    }
    // Refresh failure is logged but does not fail the commit itself.
    if let Err(e) = State
      .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
      .await
    {
      update.push_error_log(
        "Refresh sync pending",
        format_serror(&(&e).into()),
      );
    };
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db_client().updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_resource_sync_state_cache().await;
    }
    update_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<RefreshResourceSyncPending, User> for State {
#[instrument(
name = "RefreshResourceSyncPending",
@@ -190,6 +473,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.servers,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -199,6 +484,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.stacks,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -208,6 +495,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.deployments,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -217,6 +506,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.builds,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -226,6 +517,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.repos,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -235,6 +528,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.procedures,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -244,6 +539,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.builders,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -253,6 +550,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.alerters,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -262,6 +561,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.server_templates,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -271,6 +572,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.resource_syncs,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -278,22 +581,28 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
.await?;
}
let variable_updates =
let variable_updates = if sync.config.match_tags.is_empty() {
crate::sync::variables::get_updates_for_view(
&resources.variables,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
)
.await?;
.await?
} else {
Default::default()
};
let user_group_updates =
let user_group_updates = if sync.config.match_tags.is_empty() {
crate::sync::user_groups::get_updates_for_view(
resources.user_groups,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
&all_resources,
)
.await?;
.await?
} else {
Default::default()
};
anyhow::Ok((
diffs,
@@ -418,135 +727,6 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
}
}
// Legacy CommitSync handler (returns the ResourceSync rather than the
// Update). Exports all resources matching the sync's tags to TOML, writes
// the result either to the host file (files_on_host) or to the db-backed
// `file_contents`, then refreshes the sync's pending diff.
impl Resolve<CommitSync, User> for State {
  #[instrument(name = "CommitSync", skip(self, user))]
  async fn resolve(
    &self,
    CommitSync { sync }: CommitSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    // Caller needs Write permission on the sync.
    let sync = resource::get_check_permissions::<
      entities::sync::ResourceSync,
    >(&sync, &user, PermissionLevel::Write)
    .await?;
    // A completely empty sync may be committed to without managed mode.
    let fresh_sync = !sync.config.files_on_host
      && sync.config.file_contents.is_empty()
      && sync.config.repo.is_empty();
    if !sync.config.managed && !fresh_sync {
      return Err(anyhow!(
        "Cannot commit to sync. Enabled 'managed' mode."
      ));
    }
    // Serialize all matching resources to TOML.
    let res = State
      .resolve(
        ExportAllResourcesToToml {
          tags: sync.config.match_tags,
        },
        sync_user().to_owned(),
      )
      .await?;
    let mut update = make_update(
      ResourceTarget::ResourceSync(sync.id),
      Operation::CommitSync,
      &user,
    );
    update.id = add_update(update.clone()).await?;
    if sync.config.files_on_host {
      // Validate the configured resource path is a '.toml' file.
      let path = sync
        .config
        .resource_path
        .parse::<PathBuf>()
        .context("Resource path is not valid file path")?;
      let extension = path
        .extension()
        .context("Resource path missing '.toml' extension")?;
      if extension != "toml" {
        return Err(anyhow!("Wrong file extension. Expected '.toml', got '.{extension:?}'"));
      }
      // Best-effort parent directory creation before the write.
      if let Some(parent) = path.parent() {
        let _ = tokio::fs::create_dir_all(&parent).await;
      };
      if let Err(e) =
        tokio::fs::write(&sync.config.resource_path, &res.toml)
          .await
          .with_context(|| {
            format!(
              "Failed to write resource file to {}",
              sync.config.resource_path
            )
          })
      {
        // Write failed: record the error, finalize, and return the
        // current sync state from the db.
        update.push_error_log(
          "Write resource file",
          format_serror(&e.into()),
        );
        update.finalize();
        add_update(update).await?;
        return resource::get::<ResourceSync>(&sync.name).await;
      }
    } else if let Err(e) = db_client()
      .resource_syncs
      .update_one(
        doc! { "name": &sync.name },
        doc! { "$set": { "config.file_contents": &res.toml } },
      )
      .await
      .context("failed to update file_contents on db")
    {
      update.push_error_log(
        "Write resource to database",
        format_serror(&e.into()),
      );
      update.finalize();
      add_update(update).await?;
      return resource::get::<ResourceSync>(&sync.name).await;
    }
    update
      .logs
      .push(Log::simple("Committed resources", res.toml));
    // Refresh pending state; failure is logged on the update and the
    // error is propagated to the caller after finalization below.
    let res = match State
      .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
      .await
    {
      Ok(sync) => Ok(sync),
      Err(e) => {
        update.push_error_log(
          "Refresh sync pending",
          format_serror(&(&e).into()),
        );
        Err(e)
      }
    };
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db_client().updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_resource_sync_state_cache().await;
    }
    update_update(update).await?;
    res
  }
}
impl Resolve<CreateSyncWebhook, User> for State {
#[instrument(name = "CreateSyncWebhook", skip(self, user))]
async fn resolve(

View File

@@ -0,0 +1,130 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::{
DeleteUser, DeleteUserResponse, UpdateUserPassword,
UpdateUserPasswordResponse, UpdateUserUsername,
UpdateUserUsernameResponse,
},
entities::{
user::{User, UserConfig},
NoData,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
helpers::hash_password,
state::{db_client, State},
};
//
// Renames the calling user, enforcing non-empty and unique usernames.
impl Resolve<UpdateUserUsername, User> for State {
  async fn resolve(
    &self,
    UpdateUserUsername { username }: UpdateUserUsername,
    user: User,
  ) -> anyhow::Result<UpdateUserUsernameResponse> {
    // Reject empty usernames up front.
    if username.is_empty() {
      return Err(anyhow!("Username cannot be empty."));
    }
    let client = db_client();
    // Enforce uniqueness: bail if any user already holds this username.
    let existing = client
      .users
      .find_one(doc! { "username": &username })
      .await
      .context("Failed to query for existing users")?;
    if existing.is_some() {
      return Err(anyhow!("Username already taken."));
    }
    let user_id = ObjectId::from_str(&user.id)
      .context("User id not valid ObjectId.")?;
    // Persist the new username on the caller's own document.
    client
      .users
      .update_one(
        doc! { "_id": user_id },
        doc! { "$set": { "username": username } },
      )
      .await
      .context("Failed to update user username on database.")?;
    Ok(NoData {})
  }
}
//
// Updates the calling user's password. Only valid for local-auth users;
// the password is bcrypt-hashed before storage.
impl Resolve<UpdateUserPassword, User> for State {
  async fn resolve(
    &self,
    UpdateUserPassword { password }: UpdateUserPassword,
    user: User,
  ) -> anyhow::Result<UpdateUserPasswordResponse> {
    // Passwords only exist for local users (not OAuth / OIDC).
    if !matches!(user.config, UserConfig::Local { .. }) {
      return Err(anyhow!("User is not local user"));
    }
    if password.is_empty() {
      return Err(anyhow!("Password cannot be empty."));
    }
    // Never store the plaintext — hash first.
    let hashed = hash_password(password)?;
    let user_id = ObjectId::from_str(&user.id)
      .context("User id not valid ObjectId.")?;
    db_client()
      .users
      .update_one(
        doc! { "_id": user_id },
        doc! { "$set": {
          "config.data.password": hashed
        } },
      )
      .await
      .context("Failed to update user password on database.")?;
    Ok(NoData {})
  }
}
//
// Deletes a user (looked up by id or username). Admin-only; guards
// against self-deletion, super-admin deletion, and admins deleting
// other admins unless the caller is super admin. Returns the deleted user.
impl Resolve<DeleteUser, User> for State {
  async fn resolve(
    &self,
    DeleteUser { user }: DeleteUser,
    admin: User,
  ) -> anyhow::Result<DeleteUserResponse> {
    // Only admins may delete users at all.
    if !admin.admin {
      return Err(anyhow!("Calling user is not admin."));
    }
    // Self-deletion is blocked whether the target was given by
    // username or by id.
    if admin.username == user || admin.id == user {
      return Err(anyhow!("User cannot delete themselves."));
    }
    // Accept either an ObjectId or a username as the identifier.
    let filter = match ObjectId::from_str(&user) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "username": user },
    };
    let client = db_client();
    let Some(target) = client
      .users
      .find_one(filter.clone())
      .await
      .context("Failed to query database for users.")?
    else {
      return Err(anyhow!("No user found with given id / username"));
    };
    // Super admins can never be deleted through this endpoint.
    if target.super_admin {
      return Err(anyhow!("Cannot delete a super admin user."));
    }
    // Deleting an admin requires super admin privileges.
    if target.admin && !admin.super_admin {
      return Err(anyhow!(
        "Only a Super Admin can delete an admin user."
      ));
    }
    client
      .users
      .delete_one(filter)
      .await
      .context("Failed to delete user from database")?;
    Ok(target)
  }
}

View File

@@ -16,12 +16,10 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
state::State,
state::{db_client, jwt_client},
helpers::hash_password,
state::{db_client, jwt_client, State},
};
const BCRYPT_COST: u32 = 10;
impl Resolve<CreateLocalUser, HeaderMap> for State {
#[instrument(name = "CreateLocalUser", skip(self))]
async fn resolve(
@@ -47,8 +45,7 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
return Err(anyhow!("Password cannot be empty string"));
}
let password = bcrypt::hash(password, BCRYPT_COST)
.context("failed to hash password")?;
let hashed_password = hash_password(password)?;
let no_users_exist =
db_client().users.find_one(Document::new()).await?.is_none();
@@ -71,7 +68,9 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
last_update_view: 0,
recents: Default::default(),
all: Default::default(),
config: UserConfig::Local { password },
config: UserConfig::Local {
password: hashed_password,
},
};
let user_id = db_client()

View File

@@ -32,17 +32,15 @@ pub async fn init_default_oidc_client() {
return;
}
async {
let provider = config.oidc_provider.to_string();
// Use OpenID Connect Discovery to fetch the provider metadata.
let provider_metadata = CoreProviderMetadata::discover_async(
IssuerUrl::new(if provider.ends_with('/') {
provider
} else {
provider + "/"
})?,
IssuerUrl::new(config.oidc_provider.clone())?,
async_http_client,
)
.await?;
.await
.context(
"Failed to get OIDC /.well-known/openid-configuration",
)?;
// Create an OpenID Connect client by specifying the client ID, client secret, authorization URL
// and token URL.

View File

@@ -135,7 +135,9 @@ async fn callback(
.context("CSRF Token invalid")?;
if komodo_timestamp() > valid_until {
return Err(anyhow!("CSRF token invalid (Timed out). The token must be "));
return Err(anyhow!(
"CSRF token invalid (Timed out). The token must be "
));
}
let token_response = client
@@ -150,8 +152,21 @@ async fn callback(
let id_token = token_response
.id_token()
.context("OIDC Server did not return an ID token")?;
// Some providers attach additional audiences, they must be added here
// so token verification succeeds.
let verifier = client.id_token_verifier();
let additional_audiences = &core_config().oidc_additional_audiences;
let verifier = if additional_audiences.is_empty() {
verifier
} else {
verifier.set_other_audience_verifier_fn(|aud| {
additional_audiences.contains(aud)
})
};
let claims = id_token
.claims(&client.id_token_verifier(), &nonce)
.claims(&verifier, &nonce)
.context("Failed to verify token claims")?;
// Verify the access token hash to ensure that the access token hasn't been substituted for
@@ -191,20 +206,25 @@ async fn callback(
if !no_users_exist && core_config.disable_user_registration {
return Err(anyhow!("User registration is disabled"));
}
// Email will use user_id if it isn't available.
let email = claims
.email()
.map(|email| email.as_str())
.unwrap_or(user_id);
let username = if core_config.oidc_use_full_email {
email
} else {
email
.split_once('@')
.map(|(username, _)| username)
.unwrap_or(email)
}
.to_string();
// Will use preferred_username, then email, then user_id if it isn't available.
let username = claims
.preferred_username()
.map(|username| username.to_string())
.unwrap_or_else(|| {
let email = claims
.email()
.map(|email| email.as_str())
.unwrap_or(user_id);
if core_config.oidc_use_full_email {
email
} else {
email
.split_once('@')
.map(|(username, _)| username)
.unwrap_or(email)
}
.to_string()
});
let user = User {
id: Default::default(),
username,

View File

@@ -87,6 +87,9 @@ pub fn core_config() -> &'static CoreConfig {
.unwrap_or(config.oidc_client_secret),
oidc_use_full_email: env.komodo_oidc_use_full_email
.unwrap_or(config.oidc_use_full_email),
oidc_additional_audiences: maybe_read_list_from_file(env.komodo_oidc_additional_audiences_file,env
.komodo_oidc_additional_audiences)
.unwrap_or(config.oidc_additional_audiences),
google_oauth: OauthCredentials {
enabled: env
.komodo_google_oauth_enabled
@@ -141,6 +144,9 @@ pub fn core_config() -> &'static CoreConfig {
jwt_ttl: env
.komodo_jwt_ttl
.unwrap_or(config.jwt_ttl),
sync_directory: env
.komodo_sync_directory
.unwrap_or(config.sync_directory),
repo_directory: env
.komodo_repo_directory
.unwrap_or(config.repo_directory),

View File

@@ -3,8 +3,9 @@ use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use komodo_client::{
api::write::CreateServer,
api::write::{CreateBuilder, CreateServer},
entities::{
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
komodo_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::{PartialServerConfig, Server},
@@ -65,6 +66,15 @@ pub fn random_string(length: usize) -> String {
.collect()
}
const BCRYPT_COST: u32 = 10;
/// Hashes a password with bcrypt at the module's standard cost
/// (`BCRYPT_COST`).
///
/// Accepts any byte-like input (`&str`, `String`, `&[u8]`, ...).
///
/// # Errors
/// Returns an error if bcrypt fails to produce a hash.
pub fn hash_password<P>(password: P) -> anyhow::Result<String>
where
  P: AsRef<[u8]>,
{
  let hashed = bcrypt::hash(password, BCRYPT_COST)
    .context("failed to hash password")?;
  Ok(hashed)
}
/// First checks db for token, then checks core config.
/// Only errors if db call errors.
/// Returns (token, use_https)
@@ -280,8 +290,8 @@ async fn startup_open_alert_cleanup() {
}
}
/// Ensures a default server exists with the defined address
pub async fn ensure_first_server() {
/// Ensures a default server / builder exists with the defined address
pub async fn ensure_first_server_and_builder() {
let first_server = &core_config().first_server;
if first_server.is_empty() {
return;
@@ -295,23 +305,49 @@ pub async fn ensure_first_server() {
else {
return;
};
if server.is_some() {
return;
}
let server = if let Some(server) = server {
server
} else {
match State
.resolve(
CreateServer {
name: format!("server-{}", random_string(5)),
config: PartialServerConfig {
address: Some(first_server.to_string()),
enabled: Some(true),
..Default::default()
},
},
system_user().to_owned(),
)
.await
{
Ok(server) => server,
Err(e) => {
error!("Failed to initialize 'first_server'. Failed to CreateServer. {e:?}");
return;
}
}
};
let Ok(None) = db.builders
.find_one(Document::new()).await
.inspect_err(|e| error!("Failed to initialize 'first_builder'. Failed to query db. {e:?}")) else {
return;
};
if let Err(e) = State
.resolve(
CreateServer {
name: format!("server-{}", random_string(5)),
config: PartialServerConfig {
address: Some(first_server.to_string()),
enabled: Some(true),
..Default::default()
},
CreateBuilder {
name: String::from("local"),
config: PartialBuilderConfig::Server(
PartialServerBuilderConfig {
server_id: Some(server.id),
},
),
},
system_user().to_owned(),
)
.await
{
error!("Failed to initialize 'first_server'. Failed to CreateServer. {e:?}");
error!("Failed to initialize 'first_builder'. Failed to CreateBuilder. {e:?}");
}
}

View File

@@ -706,6 +706,11 @@ async fn execute_execution(
)
.await?
}
// Exception: This is a write operation.
Execution::CommitSync(req) => State
.resolve(req, user)
.await
.context("Failed at CommitSync")?,
Execution::DeployStack(req) => {
let req = ExecuteRequest::DeployStack(req);
let update = init_execution_update(&req, &user).await?;
@@ -722,6 +727,22 @@ async fn execute_execution(
)
.await?
}
Execution::DeployStackIfChanged(req) => {
let req = ExecuteRequest::DeployStackIfChanged(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DeployStackIfChanged(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("Failed at DeployStackIfChanged"),
&update_id,
)
.await?
}
Execution::StartStack(req) => {
let req = ExecuteRequest::StartStack(req);
let update = init_execution_update(&req, &user).await?;

View File

@@ -201,6 +201,14 @@ pub async fn get_tag_check_owner(
Err(anyhow!("user must be tag owner or admin"))
}
/// Fetches every tag matching `filter` from the database.
/// Pass `None` (or an empty filter) to fetch all tags.
///
/// # Errors
/// Returns an error if the database query fails.
pub async fn get_all_tags(
  filter: impl Into<Option<Document>>,
) -> anyhow::Result<Vec<Tag>> {
  let tags = find_collect(&db_client().tags, filter, None)
    .await
    .context("failed to query db for tags")?;
  Ok(tags)
}
pub async fn get_id_to_tags(
filter: impl Into<Option<Document>>,
) -> anyhow::Result<HashMap<String, Tag>> {

View File

@@ -370,6 +370,12 @@ pub async fn init_execution_update(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::DeployStackIfChanged(data) => (
Operation::DeployStack,
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::StartStack(data) => (
if data.service.is_some() {
Operation::StartStackService
@@ -429,7 +435,10 @@ pub async fn init_execution_update(
};
let mut update = make_update(target, operation, user);
update.in_progress();
// Don't actually send it here, let the handlers send it after they can set action state.
update.id = add_update_without_send(&update).await?;
// Hold off on even adding update for DeployStackIfChanged
if !matches!(&request, ExecuteRequest::DeployStackIfChanged(_)) {
// Don't actually send it here, let the handlers send it after they can set action state.
update.id = add_update_without_send(&update).await?;
}
Ok(update)
}

View File

@@ -6,7 +6,9 @@ use komodo_client::{
api::execute::RunBuild,
entities::{build::Build, user::git_webhook_user},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
api::execute::ExecuteRequest,
@@ -20,22 +22,30 @@ fn build_locks() -> &'static ListenerLockCache {
BUILD_LOCKS.get_or_init(Default::default)
}
pub async fn handle_build_webhook(
build_id: String,
/// Authenticates an incoming build webhook before any work is queued.
///
/// Looks up the build by id (missing build -> 404) and verifies the
/// GitHub HMAC signature in `headers` against the request `body` using
/// the build's configured webhook secret (bad signature -> 401).
/// Returns the build on success so the handler can reuse it.
pub async fn auth_build_webhook(
  build_id: &str,
  headers: HeaderMap,
  body: &str,
) -> serror::Result<Build> {
  let build = resource::get::<Build>(build_id)
    .await
    .status_code(StatusCode::NOT_FOUND)?;
  let secret = &build.config.webhook_secret;
  verify_gh_signature(headers, body, secret)
    .await
    .status_code(StatusCode::UNAUTHORIZED)?;
  Ok(build)
}
pub async fn handle_build_webhook(
build: Build,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = build_locks().get_or_insert_default(&build_id).await;
let lock = build_locks().get_or_insert_default(&build.id).await;
let _lock = lock.lock().await;
let build = resource::get::<Build>(&build_id).await?;
verify_gh_signature(headers, &body, &build.config.webhook_secret)
.await?;
if !build.config.webhook_enabled {
return Err(anyhow!("build does not have webhook enabled"));
}
@@ -46,7 +56,7 @@ pub async fn handle_build_webhook(
}
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::RunBuild(RunBuild { build: build_id });
let req = ExecuteRequest::RunBuild(RunBuild { build: build.id });
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunBuild(req) = req else {
unreachable!()

View File

@@ -39,10 +39,11 @@ pub fn router() -> Router {
"/build/:id",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let build = build::auth_build_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("build_webhook", id);
async {
let res = build::handle_build_webhook(id.clone(), headers, body).await;
let res = build::handle_build_webhook(build, body).await;
if let Err(e) = res {
warn!("failed to run build webook for build {id} | {e:#}");
}
@@ -50,6 +51,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
),
)
@@ -57,10 +59,11 @@ pub fn router() -> Router {
"/repo/:id/clone",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let repo = repo::auth_repo_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("repo_clone_webhook", id);
async {
let res = repo::handle_repo_clone_webhook(id.clone(), headers, body).await;
let res = repo::handle_repo_clone_webhook(repo, body).await;
if let Err(e) = res {
warn!("failed to run repo clone webook for repo {id} | {e:#}");
}
@@ -68,6 +71,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)
@@ -75,10 +79,11 @@ pub fn router() -> Router {
"/repo/:id/pull",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let repo = repo::auth_repo_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("repo_pull_webhook", id);
async {
let res = repo::handle_repo_pull_webhook(id.clone(), headers, body).await;
let res = repo::handle_repo_pull_webhook(repo, body).await;
if let Err(e) = res {
warn!("failed to run repo pull webook for repo {id} | {e:#}");
}
@@ -86,6 +91,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)
@@ -93,10 +99,11 @@ pub fn router() -> Router {
"/repo/:id/build",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let repo = repo::auth_repo_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("repo_build_webhook", id);
async {
let res = repo::handle_repo_build_webhook(id.clone(), headers, body).await;
let res = repo::handle_repo_build_webhook(repo, body).await;
if let Err(e) = res {
warn!("failed to run repo build webook for repo {id} | {e:#}");
}
@@ -104,6 +111,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)
@@ -111,10 +119,11 @@ pub fn router() -> Router {
"/stack/:id/refresh",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let stack = stack::auth_stack_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("stack_clone_webhook", id);
async {
let res = stack::handle_stack_refresh_webhook(id.clone(), headers, body).await;
let res = stack::handle_stack_refresh_webhook(stack, body).await;
if let Err(e) = res {
warn!("failed to run stack clone webook for stack {id} | {e:#}");
}
@@ -122,6 +131,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)
@@ -129,10 +139,11 @@ pub fn router() -> Router {
"/stack/:id/deploy",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let stack = stack::auth_stack_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("stack_pull_webhook", id);
async {
let res = stack::handle_stack_deploy_webhook(id.clone(), headers, body).await;
let res = stack::handle_stack_deploy_webhook(stack, body).await;
if let Err(e) = res {
warn!("failed to run stack pull webook for stack {id} | {e:#}");
}
@@ -140,6 +151,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)
@@ -147,13 +159,13 @@ pub fn router() -> Router {
"/procedure/:id/:branch",
post(
|Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
let procedure = procedure::auth_procedure_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("procedure_webhook", id, branch);
async {
let res = procedure::handle_procedure_webhook(
id.clone(),
procedure,
branch.unwrap_or_else(|| String::from("main")),
headers,
body
).await;
if let Err(e) = res {
@@ -163,6 +175,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)
@@ -170,12 +183,12 @@ pub fn router() -> Router {
"/sync/:id/refresh",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let sync = sync::auth_sync_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("sync_refresh_webhook", id);
async {
let res = sync::handle_sync_refresh_webhook(
id.clone(),
headers,
sync,
body
).await;
if let Err(e) = res {
@@ -185,6 +198,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)
@@ -192,12 +206,12 @@ pub fn router() -> Router {
"/sync/:id/sync",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
let sync = sync::auth_sync_webhook(&id, headers, &body).await?;
tokio::spawn(async move {
let span = info_span!("sync_execute_webhook", id);
async {
let res = sync::handle_sync_execute_webhook(
id.clone(),
headers,
sync,
body
).await;
if let Err(e) = res {
@@ -207,6 +221,7 @@ pub fn router() -> Router {
.instrument(span)
.await
});
serror::Result::Ok(())
},
)
)

View File

@@ -6,7 +6,9 @@ use komodo_client::{
api::execute::RunProcedure,
entities::{procedure::Procedure, user::git_webhook_user},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
api::execute::ExecuteRequest,
@@ -20,28 +22,36 @@ fn procedure_locks() -> &'static ListenerLockCache {
BUILD_LOCKS.get_or_init(Default::default)
}
pub async fn handle_procedure_webhook(
procedure_id: String,
target_branch: String,
/// Authenticates an incoming procedure webhook before any work is queued.
///
/// Looks up the procedure by id (missing procedure -> 404) and verifies
/// the GitHub HMAC signature in `headers` against the request `body`
/// using the procedure's configured webhook secret (bad signature -> 401).
/// Returns the procedure on success so the handler can reuse it.
pub async fn auth_procedure_webhook(
  procedure_id: &str,
  headers: HeaderMap,
  body: &str,
) -> serror::Result<Procedure> {
  let procedure = resource::get::<Procedure>(procedure_id)
    .await
    .status_code(StatusCode::NOT_FOUND)?;
  let secret = &procedure.config.webhook_secret;
  verify_gh_signature(headers, body, secret)
    .await
    .status_code(StatusCode::UNAUTHORIZED)?;
  Ok(procedure)
}
pub async fn handle_procedure_webhook(
procedure: Procedure,
target_branch: String,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock =
procedure_locks().get_or_insert_default(&procedure_id).await;
procedure_locks().get_or_insert_default(&procedure.id).await;
let _lock = lock.lock().await;
let procedure = resource::get::<Procedure>(&procedure_id).await?;
verify_gh_signature(
headers,
&body,
&procedure.config.webhook_secret,
)
.await?;
if !procedure.config.webhook_enabled {
return Err(anyhow!("procedure does not have webhook enabled"));
}
@@ -53,7 +63,7 @@ pub async fn handle_procedure_webhook(
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::RunProcedure(RunProcedure {
procedure: procedure_id,
procedure: procedure.id,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunProcedure(req) = req else {

View File

@@ -6,7 +6,9 @@ use komodo_client::{
api::execute::{BuildRepo, CloneRepo, PullRepo},
entities::{repo::Repo, user::git_webhook_user},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::update::init_execution_update, resource, state::State,
@@ -19,22 +21,30 @@ fn repo_locks() -> &'static ListenerLockCache {
REPO_LOCKS.get_or_init(Default::default)
}
pub async fn handle_repo_clone_webhook(
repo_id: String,
pub async fn auth_repo_webhook(
repo_id: &str,
headers: HeaderMap,
body: &str,
) -> serror::Result<Repo> {
let repo = resource::get::<Repo>(repo_id)
.await
.status_code(StatusCode::NOT_FOUND)?;
verify_gh_signature(headers, body, &repo.config.webhook_secret)
.await
.status_code(StatusCode::UNAUTHORIZED)?;
Ok(repo)
}
pub async fn handle_repo_clone_webhook(
repo: Repo,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let lock = repo_locks().get_or_insert_default(&repo.id).await;
let _lock = lock.lock().await;
let repo = resource::get::<Repo>(&repo_id).await?;
verify_gh_signature(headers, &body, &repo.config.webhook_secret)
.await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
@@ -47,7 +57,7 @@ pub async fn handle_repo_clone_webhook(
let user = git_webhook_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo {
repo: repo_id,
repo: repo.id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
@@ -59,21 +69,15 @@ pub async fn handle_repo_clone_webhook(
}
pub async fn handle_repo_pull_webhook(
repo_id: String,
headers: HeaderMap,
repo: Repo,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let lock = repo_locks().get_or_insert_default(&repo.id).await;
let _lock = lock.lock().await;
let repo = resource::get::<Repo>(&repo_id).await?;
verify_gh_signature(headers, &body, &repo.config.webhook_secret)
.await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
@@ -85,7 +89,7 @@ pub async fn handle_repo_pull_webhook(
let user = git_webhook_user().to_owned();
let req = crate::api::execute::ExecuteRequest::PullRepo(PullRepo {
repo: repo_id,
repo: repo.id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
@@ -96,21 +100,15 @@ pub async fn handle_repo_pull_webhook(
}
pub async fn handle_repo_build_webhook(
repo_id: String,
headers: HeaderMap,
repo: Repo,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let lock = repo_locks().get_or_insert_default(&repo.id).await;
let _lock = lock.lock().await;
let repo = resource::get::<Repo>(&repo_id).await?;
verify_gh_signature(headers, &body, &repo.config.webhook_secret)
.await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
@@ -123,7 +121,7 @@ pub async fn handle_repo_build_webhook(
let user = git_webhook_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::BuildRepo(BuildRepo {
repo: repo_id,
repo: repo.id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::BuildRepo(req) = req

View File

@@ -3,10 +3,15 @@ use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use komodo_client::{
api::{execute::DeployStack, write::RefreshStackCache},
api::{
execute::{DeployStack, DeployStackIfChanged},
write::RefreshStackCache,
},
entities::{stack::Stack, user::git_webhook_user},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
api::execute::ExecuteRequest,
@@ -20,22 +25,30 @@ fn stack_locks() -> &'static ListenerLockCache {
STACK_LOCKS.get_or_init(Default::default)
}
pub async fn handle_stack_refresh_webhook(
stack_id: String,
pub async fn auth_stack_webhook(
stack_id: &str,
headers: HeaderMap,
body: &str,
) -> serror::Result<Stack> {
let stack = resource::get::<Stack>(stack_id)
.await
.status_code(StatusCode::NOT_FOUND)?;
verify_gh_signature(headers, body, &stack.config.webhook_secret)
.await
.status_code(StatusCode::UNAUTHORIZED)?;
Ok(stack)
}
pub async fn handle_stack_refresh_webhook(
stack: Stack,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through, from "action state busy".
let lock = stack_locks().get_or_insert_default(&stack_id).await;
let lock = stack_locks().get_or_insert_default(&stack.id).await;
let _lock = lock.lock().await;
let stack = resource::get::<Stack>(&stack_id).await?;
verify_gh_signature(headers, &body, &stack.config.webhook_secret)
.await?;
if !stack.config.webhook_enabled {
return Err(anyhow!("stack does not have webhook enabled"));
}
@@ -53,21 +66,15 @@ pub async fn handle_stack_refresh_webhook(
}
pub async fn handle_stack_deploy_webhook(
stack_id: String,
headers: HeaderMap,
stack: Stack,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = stack_locks().get_or_insert_default(&stack_id).await;
let lock = stack_locks().get_or_insert_default(&stack.id).await;
let _lock = lock.lock().await;
let stack = resource::get::<Stack>(&stack_id).await?;
verify_gh_signature(headers, &body, &stack.config.webhook_secret)
.await?;
if !stack.config.webhook_enabled {
return Err(anyhow!("stack does not have webhook enabled"));
}
@@ -78,14 +85,28 @@ pub async fn handle_stack_deploy_webhook(
}
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::DeployStack(DeployStack {
stack: stack_id,
stop_time: None,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DeployStack(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
if stack.config.webhook_force_deploy {
let req = ExecuteRequest::DeployStack(DeployStack {
stack: stack.id,
stop_time: None,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DeployStack(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
} else {
let req =
ExecuteRequest::DeployStackIfChanged(DeployStackIfChanged {
stack: stack.id,
stop_time: None,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DeployStackIfChanged(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
}
Ok(())
}

View File

@@ -6,7 +6,9 @@ use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{sync::ResourceSync, user::git_webhook_user},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
api::execute::ExecuteRequest,
@@ -20,22 +22,30 @@ fn sync_locks() -> &'static ListenerLockCache {
SYNC_LOCKS.get_or_init(Default::default)
}
pub async fn handle_sync_refresh_webhook(
sync_id: String,
pub async fn auth_sync_webhook(
sync_id: &str,
headers: HeaderMap,
body: &str,
) -> serror::Result<ResourceSync> {
let sync = resource::get::<ResourceSync>(sync_id)
.await
.status_code(StatusCode::NOT_FOUND)?;
verify_gh_signature(headers, body, &sync.config.webhook_secret)
.await
.status_code(StatusCode::UNAUTHORIZED)?;
Ok(sync)
}
pub async fn handle_sync_refresh_webhook(
sync: ResourceSync,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let lock = sync_locks().get_or_insert_default(&sync.id).await;
let _lock = lock.lock().await;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
verify_gh_signature(headers, &body, &sync.config.webhook_secret)
.await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
@@ -47,27 +57,21 @@ pub async fn handle_sync_refresh_webhook(
let user = git_webhook_user().to_owned();
State
.resolve(RefreshResourceSyncPending { sync: sync_id }, user)
.resolve(RefreshResourceSyncPending { sync: sync.id }, user)
.await?;
Ok(())
}
pub async fn handle_sync_execute_webhook(
sync_id: String,
headers: HeaderMap,
sync: ResourceSync,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let lock = sync_locks().get_or_insert_default(&sync.id).await;
let _lock = lock.lock().await;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
verify_gh_signature(headers, &body, &sync.config.webhook_secret)
.await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
@@ -78,7 +82,11 @@ pub async fn handle_sync_execute_webhook(
}
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::RunSync(RunSync { sync: sync_id });
let req = ExecuteRequest::RunSync(RunSync {
sync: sync.id,
resource_type: None,
resources: None,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunSync(req) = req else {
unreachable!()

View File

@@ -44,7 +44,7 @@ async fn app() -> anyhow::Result<()> {
);
tokio::join!(
// Maybe initialize first server
helpers::ensure_first_server(),
helpers::ensure_first_server_and_builder(),
// Cleanup open updates / invalid alerts
helpers::startup_cleanup(),
);

View File

@@ -2,9 +2,7 @@ use std::collections::HashMap;
use anyhow::Context;
use komodo_client::entities::{
resource::ResourceQuery,
server::{Server, ServerListItem},
user::User,
resource::ResourceQuery, server::Server, user::User,
};
use crate::resource;
@@ -32,16 +30,16 @@ pub async fn check_alerts(ts: i64) {
}
#[instrument(level = "debug")]
async fn get_all_servers_map() -> anyhow::Result<(
HashMap<String, ServerListItem>,
HashMap<String, String>,
)> {
let servers = resource::list_for_user::<Server>(
async fn get_all_servers_map(
) -> anyhow::Result<(HashMap<String, Server>, HashMap<String, String>)>
{
let servers = resource::list_full_for_user::<Server>(
ResourceQuery::default(),
&User {
admin: true,
..Default::default()
},
&[],
)
.await
.context("failed to get servers from db (in alert_servers)")?;

View File

@@ -5,7 +5,7 @@ use derive_variants::ExtractVariant;
use komodo_client::entities::{
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
komodo_timestamp, optional_string,
server::{ServerListItem, ServerState},
server::{Server, ServerState},
ResourceTarget,
};
use mongo_indexed::Indexed;
@@ -28,7 +28,7 @@ type OpenDiskAlertMap = OpenAlertMap<PathBuf>;
#[instrument(level = "debug")]
pub async fn alert_servers(
ts: i64,
mut servers: HashMap<String, ServerListItem>,
mut servers: HashMap<String, Server>,
) {
let server_statuses = server_status_cache().get_list().await;
@@ -70,12 +70,12 @@ pub async fn alert_servers(
data: AlertData::ServerUnreachable {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
err: server_status.err.clone(),
},
};
alerts_to_open
.push((alert, server.info.send_unreachable_alerts))
.push((alert, server.config.send_unreachable_alerts))
}
(ServerState::NotOk, Some(alert)) => {
// update alert err
@@ -102,8 +102,10 @@ pub async fn alert_servers(
// Close an open alert
(ServerState::Ok | ServerState::Disabled, Some(alert)) => {
alert_ids_to_close
.push((alert.clone(), server.info.send_unreachable_alerts));
alert_ids_to_close.push((
alert.clone(),
server.config.send_unreachable_alerts,
));
}
_ => {}
}
@@ -119,20 +121,21 @@ pub async fn alert_servers(
.as_ref()
.and_then(|alerts| alerts.get(&AlertDataVariant::ServerCpu))
.cloned();
match (health.cpu, cpu_alert) {
(SeverityLevel::Warning | SeverityLevel::Critical, None) => {
match (health.cpu.level, cpu_alert, health.cpu.should_close_alert)
{
(SeverityLevel::Warning | SeverityLevel::Critical, None, _) => {
// open alert
let alert = Alert {
id: Default::default(),
ts,
resolved: false,
resolved_ts: None,
level: health.cpu,
level: health.cpu.level,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.as_ref()
@@ -140,41 +143,44 @@ pub async fn alert_servers(
.unwrap_or(0.0),
},
};
alerts_to_open.push((alert, server.info.send_cpu_alerts));
alerts_to_open.push((alert, server.config.send_cpu_alerts));
}
(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
_,
) => {
// modify alert level only if it has increased
if alert.level < health.cpu {
alert.level = health.cpu;
if alert.level < health.cpu.level {
alert.level = health.cpu.level;
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
};
alerts_to_update.push((alert, server.info.send_cpu_alerts));
alerts_to_update
.push((alert, server.config.send_cpu_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => {
(SeverityLevel::Ok, Some(alert), true) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_cpu_alerts))
alert_ids_to_close
.push((alert, server.config.send_cpu_alerts))
}
_ => {}
}
@@ -186,20 +192,21 @@ pub async fn alert_servers(
.as_ref()
.and_then(|alerts| alerts.get(&AlertDataVariant::ServerMem))
.cloned();
match (health.mem, mem_alert) {
(SeverityLevel::Warning | SeverityLevel::Critical, None) => {
match (health.mem.level, mem_alert, health.mem.should_close_alert)
{
(SeverityLevel::Warning | SeverityLevel::Critical, None, _) => {
// open alert
let alert = Alert {
id: Default::default(),
ts,
resolved: false,
resolved_ts: None,
level: health.mem,
level: health.mem.level,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.as_ref()
@@ -212,19 +219,20 @@ pub async fn alert_servers(
.unwrap_or(0.0),
},
};
alerts_to_open.push((alert, server.info.send_mem_alerts));
alerts_to_open.push((alert, server.config.send_mem_alerts));
}
(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
_,
) => {
// modify alert level only if it has increased
if alert.level < health.mem {
alert.level = health.mem;
if alert.level < health.mem.level {
alert.level = health.mem.level;
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.as_ref()
@@ -236,15 +244,16 @@ pub async fn alert_servers(
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
};
alerts_to_update.push((alert, server.info.send_mem_alerts));
alerts_to_update
.push((alert, server.config.send_mem_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => {
(SeverityLevel::Ok, Some(alert), true) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.as_ref()
@@ -256,7 +265,8 @@ pub async fn alert_servers(
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_mem_alerts))
alert_ids_to_close
.push((alert, server.config.send_mem_alerts))
}
_ => {}
}
@@ -273,8 +283,12 @@ pub async fn alert_servers(
.as_ref()
.and_then(|alerts| alerts.get(path))
.cloned();
match (*health, disk_alert) {
(SeverityLevel::Warning | SeverityLevel::Critical, None) => {
match (health.level, disk_alert, health.should_close_alert) {
(
SeverityLevel::Warning | SeverityLevel::Critical,
None,
_,
) => {
let disk = server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
@@ -283,58 +297,60 @@ pub async fn alert_servers(
ts,
resolved: false,
resolved_ts: None,
level: *health,
level: health.level,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
},
};
alerts_to_open.push((alert, server.info.send_disk_alerts));
alerts_to_open
.push((alert, server.config.send_disk_alerts));
}
(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
_,
) => {
// Disk is persistent, update alert if health changes regardless of direction
if *health != alert.level {
if health.level != alert.level {
let disk =
server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = *health;
alert.level = health.level;
alert.data = AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
};
alerts_to_update
.push((alert, server.info.send_disk_alerts));
.push((alert, server.config.send_disk_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => {
(SeverityLevel::Ok, Some(alert), true) => {
let mut alert = alert.clone();
let disk = server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = *health;
alert.level = health.level;
alert.data = AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
};
alert_ids_to_close
.push((alert, server.info.send_disk_alerts))
.push((alert, server.config.send_disk_alerts))
}
_ => {}
}
@@ -347,7 +363,7 @@ pub async fn alert_servers(
let mut alert = alert.clone();
alert.level = SeverityLevel::Ok;
alert_ids_to_close
.push((alert, server.info.send_disk_alerts));
.push((alert, server.config.send_disk_alerts));
}
}
}

View File

@@ -6,7 +6,10 @@ use komodo_client::entities::{
network::NetworkListItem, volume::VolumeListItem,
},
repo::Repo,
server::{Server, ServerConfig, ServerHealth, ServerState},
server::{
Server, ServerConfig, ServerHealth, ServerHealthState,
ServerState,
},
stack::{ComposeProject, Stack, StackState},
stats::{SingleDiskUsage, SystemStats},
};
@@ -126,6 +129,8 @@ pub async fn insert_server_status(
.await;
}
const ALERT_PERCENTAGE_THRESHOLD: f32 = 5.0;
fn get_server_health(
server: &Server,
SystemStats {
@@ -148,16 +153,22 @@ fn get_server_health(
let mut health = ServerHealth::default();
if cpu_perc >= cpu_critical {
health.cpu = SeverityLevel::Critical
health.cpu.level = SeverityLevel::Critical;
} else if cpu_perc >= cpu_warning {
health.cpu = SeverityLevel::Warning
health.cpu.level = SeverityLevel::Warning
} else if *cpu_perc < cpu_warning - ALERT_PERCENTAGE_THRESHOLD {
health.cpu.should_close_alert = true
}
let mem_perc = 100.0 * mem_used_gb / mem_total_gb;
if mem_perc >= *mem_critical {
health.mem = SeverityLevel::Critical
health.mem.level = SeverityLevel::Critical
} else if mem_perc >= *mem_warning {
health.mem = SeverityLevel::Warning
health.mem.level = SeverityLevel::Warning
} else if mem_perc
< mem_warning - (ALERT_PERCENTAGE_THRESHOLD as f64)
{
health.mem.should_close_alert = true
}
for SingleDiskUsage {
@@ -168,14 +179,17 @@ fn get_server_health(
} in disks
{
let perc = 100.0 * used_gb / total_gb;
let stats_state = if perc >= *disk_critical {
SeverityLevel::Critical
let mut state = ServerHealthState::default();
if perc >= *disk_critical {
state.level = SeverityLevel::Critical;
} else if perc >= *disk_warning {
SeverityLevel::Warning
} else {
SeverityLevel::Ok
state.level = SeverityLevel::Warning;
} else if perc
< disk_warning - (ALERT_PERCENTAGE_THRESHOLD as f64)
{
state.should_close_alert = true;
};
health.disks.insert(mount.clone(), stats_state);
health.disks.insert(mount.clone(), state);
}
health

View File

@@ -206,7 +206,10 @@ pub async fn update_cache_for_server(server: &Server) {
};
match lists::get_docker_lists(&periphery).await {
Ok((containers, networks, images, volumes, projects)) => {
Ok((mut containers, networks, images, volumes, projects)) => {
containers.iter_mut().for_each(|container| {
container.server_id = Some(server.id.clone())
});
tokio::join!(
resources::update_deployment_cache(deployments, &containers),
resources::update_stack_cache(stacks, &containers),

View File

@@ -57,6 +57,7 @@ impl super::KomodoResource for Build {
version: build.config.version,
builder_id: build.config.builder_id,
git_provider: build.config.git_provider,
image_registry_domain: build.config.image_registry.domain,
repo: build.config.repo,
branch: build.config.branch,
built_hash: build.info.built_hash,
@@ -179,9 +180,13 @@ async fn validate_config(
) -> anyhow::Result<()> {
if let Some(builder_id) = &config.builder_id {
if !builder_id.is_empty() {
let builder = super::get_check_permissions::<Builder>(builder_id, user, PermissionLevel::Read)
.await
.context("cannot create build using this builder. user must have at least read permissions on the builder.")?;
let builder = super::get_check_permissions::<Builder>(
builder_id,
user,
PermissionLevel::Read,
)
.await
.context("Cannot attach Build to this Builder")?;
config.builder_id = Some(builder.id)
}
}

View File

@@ -262,9 +262,13 @@ async fn validate_config(
) -> anyhow::Result<()> {
if let Some(server_id) = &config.server_id {
if !server_id.is_empty() {
let server = get_check_permissions::<Server>(server_id, user, PermissionLevel::Write)
.await
.context("cannot create deployment on this server. user must have update permissions on the server to perform this action.")?;
let server = get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Write,
)
.await
.context("Cannot attach Deployment to this Server")?;
config.server_id = Some(server.id);
}
}
@@ -272,9 +276,15 @@ async fn validate_config(
&config.image
{
if !build_id.is_empty() {
let build = get_check_permissions::<Build>(build_id, user, PermissionLevel::Read)
.await
.context("cannot create deployment with this build attached. user must have at least read permissions on the build to perform this action.")?;
let build = get_check_permissions::<Build>(
build_id,
user,
PermissionLevel::Read,
)
.await
.context(
"Cannot update deployment with this build attached.",
)?;
config.image = Some(DeploymentImage::Build {
build_id: build.id,
version: *version,

View File

@@ -15,7 +15,7 @@ use komodo_client::{
tag::Tag,
to_komodo_name,
update::Update,
user::User,
user::{system_user, User},
Operation, ResourceTarget, ResourceTargetVariant,
},
};
@@ -228,7 +228,7 @@ pub async fn get_check_permissions<T: KomodoResource>(
Ok(resource)
} else {
Err(anyhow!(
"user does not have required permissions on this {}",
"User does not have required permissions on this {}. Must have at least {permission_level} permissions",
T::resource_type()
))
}
@@ -382,8 +382,9 @@ pub async fn get_user_permission_on_resource<T: KomodoResource>(
pub async fn list_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
validate_resource_query_tags(&mut query).await;
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_for_user_using_document::<T>(filters, user).await
@@ -404,8 +405,9 @@ pub async fn list_for_user_using_document<T: KomodoResource>(
pub async fn list_full_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
validate_resource_query_tags(&mut query).await;
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_full_for_user_using_document::<T>(filters, user).await
@@ -508,6 +510,17 @@ pub async fn create<T: KomodoResource>(
return Err(anyhow!("valid ObjectIds cannot be used as names."));
}
// Ensure an existing resource with same name doesn't already exist
// The database indexing also ensures this but doesn't give a good error message.
if list_full_for_user::<T>(Default::default(), system_user(), &[])
.await
.context("Failed to list all resources for duplicate name check")?
.into_iter()
.any(|r| r.name == name)
{
return Err(anyhow!("Must provide unique name for resource."));
}
let start_ts = komodo_timestamp();
T::validate_create_config(&mut config, user).await?;
@@ -782,14 +795,24 @@ pub async fn delete<T: KomodoResource>(
// =======
#[instrument(level = "debug")]
pub async fn validate_resource_query_tags<
T: Default + std::fmt::Debug,
>(
pub fn validate_resource_query_tags<T: Default + std::fmt::Debug>(
query: &mut ResourceQuery<T>,
) {
let futures = query.tags.iter().map(|tag| get_tag(tag));
let res = join_all(futures).await;
query.tags = res.into_iter().flatten().map(|tag| tag.id).collect();
all_tags: &[Tag],
) -> anyhow::Result<()> {
query.tags = query
.tags
.iter()
.map(|tag| {
all_tags
.iter()
.find(|t| t.name == *tag || t.id == *tag)
.map(|tag| tag.id.clone())
.with_context(|| {
format!("No tag found matching name or id: {}", tag)
})
})
.collect::<anyhow::Result<Vec<_>>>()?;
Ok(())
}
#[instrument]

View File

@@ -494,6 +494,16 @@ async fn validate_config(
.await?;
params.sync = sync.id;
}
Execution::CommitSync(params) => {
// This one is actually a write operation.
let sync = super::get_check_permissions::<ResourceSync>(
&params.sync,
user,
PermissionLevel::Write,
)
.await?;
params.sync = sync.id;
}
Execution::DeployStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
@@ -503,6 +513,15 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::DeployStackIfChanged(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
}
Execution::StartStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,

View File

@@ -213,15 +213,19 @@ async fn validate_config(
PermissionLevel::Write,
)
.await
.context("Cannot attach repo to this server. User must have write permissions on the server.")?;
.context("Cannot attach Repo to this Server")?;
config.server_id = Some(server.id);
}
}
if let Some(builder_id) = &config.builder_id {
if !builder_id.is_empty() {
let builder = super::get_check_permissions::<Builder>(builder_id, user, PermissionLevel::Read)
.await
.context("Cannot attach repo to this builder. User must have at least read permissions on the builder.")?;
let builder = super::get_check_permissions::<Builder>(
builder_id,
user,
PermissionLevel::Read,
)
.await
.context("Cannot attach Repo to this Builder")?;
config.builder_id = Some(builder.id);
}
}

View File

@@ -301,9 +301,13 @@ async fn validate_config(
) -> anyhow::Result<()> {
if let Some(server_id) = &config.server_id {
if !server_id.is_empty() {
let server = get_check_permissions::<Server>(server_id, user, PermissionLevel::Write)
.await
.context("cannot create stack on this server. user must have update permissions on the server to perform this action.")?;
let server = get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Write,
)
.await
.context("Cannot attach stack to this Server")?;
// in case it comes in as name
config.server_id = Some(server.id);
}

View File

@@ -18,7 +18,7 @@ use komodo_client::{
toml::ResourceToml,
update::Log,
user::sync_user,
ResourceTarget,
FileContents, ResourceTarget,
},
};
use resolver_api::Resolve;
@@ -26,7 +26,6 @@ use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update,
stack::remote::ensure_remote_repo,
state::{deployment_status_cache, stack_status_cache, State},
};
@@ -541,7 +540,45 @@ fn build_cache_for_stack<'a>(
StackState::Running => {
// Here can diff the changes, to see if they merit a redeploy.
// First merge toml resource config (partial) onto default resource config.
// See if any remote contents don't match deployed contents
match (
&original.info.deployed_contents,
&original.info.remote_contents,
) {
(Some(deployed_contents), Some(remote_contents)) => {
for FileContents { path, contents } in remote_contents {
if let Some(deployed) =
deployed_contents.iter().find(|c| &c.path == path)
{
if &deployed.contents != contents {
cache.insert(
target,
Some((
format!(
"File contents for {path} have changed"
),
after,
)),
);
return Ok(());
}
} else {
cache.insert(
target,
Some((
format!("New file contents at {path}"),
after,
)),
);
return Ok(());
}
}
}
// Maybe should handle other cases
_ => {}
}
// Merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: StackConfig = stack.config.clone().into();
let mut config: PartialStackConfig = config.into();
@@ -589,40 +626,6 @@ fn build_cache_for_stack<'a>(
}
};
// We know the config hasn't changed at this point, but still need
// to check if its a repo based stack, and the hash has updated.
// Can use 'original' for this (config hasn't changed)
if stack.latest_hash {
if let Some(deployed_hash) = &original.info.deployed_hash {
let (_, _, hash, _) = ensure_remote_repo(original.into())
.await
.context("failed to get latest hash for repo based stack")
.with_context(|| {
format!(
"Stack {} {}",
bold(&stack.name),
colored("has errors", Color::Red)
)
})?;
if let Some(hash) = hash {
if &hash != deployed_hash {
cache.insert(
target,
Some((
format!(
"outdated hash. deployed: {} -> latest: {}",
colored(deployed_hash, Color::Red),
colored(hash, Color::Green)
),
after,
)),
);
return Ok(());
}
}
}
}
// Check 'after' to see if they deploy.
insert_target_using_after_list(
target,

View File

@@ -6,6 +6,7 @@ use komodo_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
tag::Tag, toml::ResourceToml, update::Log, user::sync_user,
ResourceTargetVariant,
},
};
use mungos::find::find_collect;
@@ -26,6 +27,8 @@ pub async fn get_updates_for_execution<
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
@@ -35,11 +38,31 @@ pub async fn get_updates_for_execution<
.into_iter()
.filter(|r| {
Resource::include_resource(
&r.config, &r.tags, id_to_tags, match_tags,
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let resources = resources
.into_iter()
.filter(|r| {
Resource::include_resource_partial(
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.collect::<Vec<_>>();
let mut to_create = ToCreate::<Resource::PartialConfig>::new();
let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
@@ -54,15 +77,6 @@ pub async fn get_updates_for_execution<
}
for mut resource in resources {
// only resource that might not be included is resource sync
if !Resource::include_resource_partial(
&resource.config,
&resource.tags,
id_to_tags,
match_tags,
) {
continue;
}
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.

View File

@@ -1,91 +1,217 @@
use std::{fs, path::Path};
use std::{
fs,
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, muted, Color};
use formatting::{bold, colored, format_serror, muted, Color};
use komodo_client::entities::{
sync::SyncFileContents,
toml::{ResourceToml, ResourcesToml},
update::Log,
FileContents,
};
pub fn read_resources(
path: &Path,
root_path: &Path,
resource_path: &[String],
match_tags: &[String],
logs: &mut Vec<Log>,
files: &mut Vec<FileContents>,
file_errors: &mut Vec<FileContents>,
files: &mut Vec<SyncFileContents>,
file_errors: &mut Vec<SyncFileContents>,
) -> anyhow::Result<ResourcesToml> {
let mut res = ResourcesToml::default();
let mut log =
format!("{}: reading resources from {path:?}", muted("INFO"));
if let Err(e) = read_resources_recursive(
path,
match_tags,
&mut res,
&mut log,
files,
file_errors,
)
.with_context(|| format!("failed to read resources from {path:?}"))
{
file_errors.push(FileContents {
path: path.display().to_string(),
contents: format_serror(&e.into()),
});
logs.push(Log::error("read remote resources", log));
} else {
logs.push(Log::simple("read remote resources", log));
};
Ok(res)
let mut resources = ResourcesToml::default();
for resource_path in resource_path {
let resource_path = resource_path
.parse::<PathBuf>()
.context("Invalid resource path")?;
let full_path = root_path
.join(&resource_path)
.components()
.collect::<PathBuf>();
let mut log = format!(
"{}: reading resources from {full_path:?}",
muted("INFO")
);
if full_path.is_file() {
if let Err(e) = read_resource_file(
root_path,
None,
&resource_path,
match_tags,
&mut resources,
&mut log,
files,
)
.with_context(|| {
format!("failed to read resources from {full_path:?}")
}) {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(&e.into()),
});
logs.push(Log::error("Read remote resources", log));
} else {
logs.push(Log::simple("Read remote resources", log));
};
} else if full_path.is_dir() {
if let Err(e) = read_resources_directory(
root_path,
&resource_path,
&PathBuf::new(),
match_tags,
&mut resources,
&mut log,
files,
file_errors,
)
.with_context(|| {
format!("Failed to read resources from {full_path:?}")
}) {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(&e.into()),
});
logs.push(Log::error("Read remote resources", log));
} else {
logs.push(Log::simple("Read remote resources", log));
};
} else if !full_path.exists() {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(
&anyhow!("Initialize the file to proceed.")
.context(format!("Path {full_path:?} does not exist."))
.into(),
),
});
log.push_str(&format!(
"{}: Resoure path {} does not exist.",
colored("ERROR", Color::Red),
bold(resource_path.display())
));
logs.push(Log::error("Read remote resources", log));
} else {
log.push_str(&format!(
"{}: Resoure path {} exists, but is neither a file nor a directory.",
colored("WARN", Color::Red),
bold(resource_path.display())
));
logs.push(Log::error("Read remote resources", log));
}
}
Ok(resources)
}
fn read_resources_recursive(
path: &Path,
/// Use when incoming resource path is a file.
fn read_resource_file(
root_path: &Path,
// relative to root path.
resource_path: Option<&Path>,
// relative to resource path if provided, or root path.
file_path: &Path,
match_tags: &[String],
resources: &mut ResourcesToml,
log: &mut String,
files: &mut Vec<FileContents>,
file_errors: &mut Vec<FileContents>,
files: &mut Vec<SyncFileContents>,
) -> anyhow::Result<()> {
let res =
fs::metadata(path).context("failed to get path metadata")?;
if res.is_file() {
if !path
.extension()
.map(|ext| ext == "toml")
.unwrap_or_default()
{
return Ok(());
}
let contents = std::fs::read_to_string(path)
.context("failed to read file contents")?;
files.push(FileContents {
path: path.display().to_string(),
contents: contents.clone(),
});
let more = toml::from_str::<ResourcesToml>(&contents)
// the error without this comes through with multiple lines (\n) and looks bad
.map_err(|e| anyhow!("{e:#}"))
.context("failed to parse resource file contents")?;
let full_path = if let Some(resource_path) = resource_path {
root_path.join(resource_path).join(file_path)
} else {
root_path.join(file_path)
};
if !full_path
.extension()
.map(|ext| ext == "toml")
.unwrap_or_default()
{
return Ok(());
}
let contents = std::fs::read_to_string(&full_path)
.context("failed to read file contents")?;
log.push('\n');
log.push_str(&format!(
"{}: {} from {}",
muted("INFO"),
colored("adding resources", Color::Green),
colored(path.display(), Color::Blue)
));
files.push(SyncFileContents {
resource_path: resource_path
.map(|path| path.display().to_string())
.unwrap_or_default(),
path: file_path.display().to_string(),
contents: contents.clone(),
});
let more = toml::from_str::<ResourcesToml>(&contents)
// the error without this comes through with multiple lines (\n) and looks bad
.map_err(|e| anyhow!("{e:#}"))
.context("failed to parse resource file contents")?;
log.push('\n');
let path_for_view =
if let Some(resource_path) = resource_path.as_ref() {
resource_path.join(file_path)
} else {
file_path.to_path_buf()
};
log.push_str(&format!(
"{}: {} from {}",
muted("INFO"),
colored("adding resources", Color::Green),
colored(path_for_view.display(), Color::Blue)
));
extend_resources(resources, more, match_tags);
extend_resources(resources, more, match_tags);
Ok(())
} else if res.is_dir() {
let directory = fs::read_dir(path)
.context("failed to read directory contents")?;
for entry in directory.into_iter().flatten() {
let path = entry.path();
if let Err(e) = read_resources_recursive(
&path,
Ok(())
}
/// Reads down into directories.
fn read_resources_directory(
root_path: &Path,
// relative to root path.
resource_path: &Path,
// relative to resource path. start as empty path
curr_path: &Path,
match_tags: &[String],
resources: &mut ResourcesToml,
log: &mut String,
files: &mut Vec<SyncFileContents>,
file_errors: &mut Vec<SyncFileContents>,
) -> anyhow::Result<()> {
let full_resource_path = root_path.join(resource_path);
let full_path = full_resource_path.join(curr_path);
let directory = fs::read_dir(&full_path).with_context(|| {
format!("Failed to read directory contents at {full_path:?}")
})?;
for entry in directory.into_iter().flatten() {
let path = entry.path();
let curr_path =
path.strip_prefix(&full_resource_path).unwrap_or(&path);
if path.is_file() {
if let Err(e) = read_resource_file(
root_path,
Some(resource_path),
curr_path,
match_tags,
resources,
log,
files,
)
.with_context(|| {
format!("failed to read resources from {full_path:?}")
}) {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(&e.into()),
});
};
} else if path.is_dir() {
if let Err(e) = read_resources_directory(
root_path,
resource_path,
curr_path,
match_tags,
resources,
log,
@@ -95,8 +221,9 @@ fn read_resources_recursive(
.with_context(|| {
format!("failed to read resources from {path:?}")
}) {
file_errors.push(FileContents {
path: path.display().to_string(),
file_errors.push(SyncFileContents {
resource_path: resource_path.display().to_string(),
path: curr_path.display().to_string(),
contents: format_serror(&e.into()),
});
log.push('\n');
@@ -108,10 +235,8 @@ fn read_resources_recursive(
));
}
}
Ok(())
} else {
Err(anyhow!("resources path is neither file nor directory"))
}
Ok(())
}
pub fn extend_resources(

View File

@@ -5,10 +5,13 @@ use komodo_client::entities::{
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, tag::Tag, toml::ResourceToml, ResourceTarget,
ResourceTargetVariant,
};
use mungos::mongodb::bson::oid::ObjectId;
use toml::ToToml;
use crate::resource::KomodoResource;
pub mod deploy;
pub mod execute;
pub mod file;
@@ -38,22 +41,44 @@ pub trait ResourceSyncTrait: ToToml + Sized {
/// To exclude resource syncs with "file_contents" (they aren't compatible)
fn include_resource(
name: &String,
_config: &Self::Config,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
include_resource_by_tags(resource_tags, id_to_tags, match_tags)
include_resource_by_resource_type_and_name::<Self>(
match_resource_type,
match_resources,
name,
) && include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
)
}
/// To exclude resource syncs with "file_contents" (they aren't compatible)
fn include_resource_partial(
name: &String,
_config: &Self::PartialConfig,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
include_resource_by_tags(resource_tags, id_to_tags, match_tags)
include_resource_by_resource_type_and_name::<Self>(
match_resource_type,
match_resources,
name,
) && include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
)
}
/// Apply any changes to incoming toml partial config
@@ -90,6 +115,31 @@ pub fn include_resource_by_tags(
match_tags.iter().all(|tag| tag_names.contains(&tag))
}
/// Decide whether a resource passes the optional resource-type and
/// resource-name filters used by sync execution / view building.
///
/// A filter that is `None` always passes. When `resource_type` is
/// given, the resource's type (via `T::resource_type()`) must equal
/// it; when `resources` is given, `name` must be one of the listed
/// names.
pub fn include_resource_by_resource_type_and_name<
  T: KomodoResource,
>(
  resource_type: Option<ResourceTargetVariant>,
  resources: Option<&[String]>,
  name: &String,
) -> bool {
  // Type filter: absent => pass, present => must match exactly.
  if let Some(resource_type) = resource_type {
    if T::resource_type() != resource_type {
      return false;
    }
  }
  // Name filter: absent => pass, present => name must be listed.
  match resources {
    Some(resources) => resources.contains(name),
    None => true,
  }
}
pub struct AllResourcesById {
pub servers: HashMap<String, Server>,
pub deployments: HashMap<String, Deployment>,

View File

@@ -1,10 +1,11 @@
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use git::GitRes;
use komodo_client::entities::{
sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs,
FileContents,
sync::{ResourceSync, SyncFileContents},
to_komodo_name,
toml::ResourcesToml,
update::Log,
CloneArgs,
};
use crate::{config::core_config, helpers::git_token};
@@ -13,8 +14,8 @@ use super::file::extend_resources;
pub struct RemoteResources {
pub resources: anyhow::Result<ResourcesToml>,
pub files: Vec<FileContents>,
pub file_errors: Vec<FileContents>,
pub files: Vec<SyncFileContents>,
pub file_errors: Vec<SyncFileContents>,
pub logs: Vec<Log>,
pub hash: Option<String>,
pub message: Option<String>,
@@ -28,15 +29,14 @@ pub async fn get_remote_resources(
// =============
// FILES ON HOST
// =============
let path = sync
.config
.resource_path
.parse::<PathBuf>()
.context("Resource path is not valid path")?;
let root_path = core_config()
.sync_directory
.join(to_komodo_name(&sync.name));
let (mut logs, mut files, mut file_errors) =
(Vec::new(), Vec::new(), Vec::new());
let resources = super::file::read_resources(
&path,
&root_path,
&sync.config.resource_path,
&sync.config.match_tags,
&mut logs,
&mut files,
@@ -50,9 +50,7 @@ pub async fn get_remote_resources(
hash: None,
message: None,
});
} else if sync.config.managed
|| !sync.config.file_contents.is_empty()
{
} else if sync.config.repo.is_empty() {
// ==========
// UI DEFINED
// ==========
@@ -72,10 +70,10 @@ pub async fn get_remote_resources(
Ok(resources)
};
// filter_by_
return Ok(RemoteResources {
resources,
files: vec![FileContents {
files: vec![SyncFileContents {
resource_path: String::new(),
path: "database file".to_string(),
contents: sync.config.file_contents.clone(),
}],
@@ -101,10 +99,10 @@ pub async fn get_remote_resources(
let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
} else {
None
};
@@ -139,11 +137,10 @@ pub async fn get_remote_resources(
let message =
message.context("failed to get commit hash message")?;
let resource_path = repo_path.join(&sync.config.resource_path);
let (mut files, mut file_errors) = (Vec::new(), Vec::new());
let resources = super::file::read_resources(
&resource_path,
&repo_path,
&sync.config.resource_path,
&sync.config.match_tags,
&mut logs,
&mut files,

View File

@@ -17,7 +17,7 @@ use komodo_client::{
tag::Tag,
update::Log,
user::sync_user,
ResourceTarget,
ResourceTarget, ResourceTargetVariant,
},
};
use partial_derive2::{MaybeNone, PartialDiff};
@@ -31,8 +31,10 @@ use crate::{
};
use super::{
execute::ExecuteResourceSync, include_resource_by_tags,
AllResourcesById, ResourceSyncTrait, ToCreate, ToDelete, ToUpdate,
execute::ExecuteResourceSync,
include_resource_by_resource_type_and_name,
include_resource_by_tags, AllResourcesById, ResourceSyncTrait,
ToCreate, ToDelete, ToUpdate,
};
impl ResourceSyncTrait for Server {
@@ -237,12 +239,19 @@ impl ResourceSyncTrait for ResourceSync {
}
fn include_resource(
name: &String,
config: &Self::Config,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
if !include_resource_by_tags(
if !include_resource_by_resource_type_and_name::<ResourceSync>(
match_resource_type,
match_resources,
name,
) || !include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
@@ -259,17 +268,24 @@ impl ResourceSyncTrait for ResourceSync {
}
// The file contents MUST be empty
contents_empty &&
// The sync must be files on host mode OR NOT managed
(config.files_on_host || !config.managed)
// The sync must be files on host mode OR git repo mode
(config.files_on_host || !config.repo.is_empty())
}
fn include_resource_partial(
name: &String,
config: &Self::PartialConfig,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
if !include_resource_by_tags(
if !include_resource_by_resource_type_and_name::<ResourceSync>(
match_resource_type,
match_resources,
name,
) || !include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
@@ -291,8 +307,8 @@ impl ResourceSyncTrait for ResourceSync {
}
// The file contents MUST be empty
contents_empty &&
// The sync must be files on host mode OR NOT managed
(files_on_host || !config.managed.unwrap_or_default())
// The sync must be files on host mode OR git repo mode
(files_on_host || !config.repo.as_deref().unwrap_or_default().is_empty())
}
fn get_diff(
@@ -572,6 +588,13 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::CommitSync(config) => {
config.sync = resources
.syncs
.get(&config.sync)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::DeployStack(config) => {
config.stack = resources
.stacks
@@ -579,6 +602,13 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::DeployStackIfChanged(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::StartStack(config) => {
config.stack = resources
.stacks
@@ -652,14 +682,14 @@ impl ExecuteResourceSync for Procedure {
{
has_error = true;
log.push_str(&format!(
"{}: failed to delete {} '{}' | {e:#}",
"\n{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name),
))
} else {
log.push_str(&format!(
"{}: {} {} '{}'",
"\n{}: {} {} '{}'",
muted("INFO"),
colored("deleted", Color::Red),
Self::resource_type(),

View File

@@ -105,6 +105,8 @@ pub fn resource_toml_to_toml_string<R: ToToml>(
pub fn resource_push_to_toml<R: ToToml>(
mut resource: Resource<R::Config, R::Info>,
deploy: bool,
after: Vec<String>,
toml: &mut String,
all: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
@@ -116,7 +118,7 @@ pub fn resource_push_to_toml<R: ToToml>(
toml
.push_str(&format!("[[{}]]\n", R::resource_type().toml_header()));
R::push_to_toml_string(
convert_resource::<R>(resource, all_tags),
convert_resource::<R>(resource, deploy, after, all_tags),
toml,
)?;
Ok(())
@@ -124,16 +126,22 @@ pub fn resource_push_to_toml<R: ToToml>(
pub fn resource_to_toml<R: ToToml>(
resource: Resource<R::Config, R::Info>,
deploy: bool,
after: Vec<String>,
all: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
) -> anyhow::Result<String> {
let mut toml = String::new();
resource_push_to_toml::<R>(resource, &mut toml, all, all_tags)?;
resource_push_to_toml::<R>(
resource, deploy, after, &mut toml, all, all_tags,
)?;
Ok(toml)
}
pub fn convert_resource<R: KomodoResource>(
resource: Resource<R::Config, R::Info>,
deploy: bool,
after: Vec<String>,
all_tags: &HashMap<String, Tag>,
) -> ResourceToml<R::PartialConfig> {
ResourceToml {
@@ -144,9 +152,8 @@ pub fn convert_resource<R: KomodoResource>(
.filter_map(|t| all_tags.get(t).map(|t| t.name.clone()))
.collect(),
description: resource.description,
deploy: false,
after: Default::default(),
latest_hash: false,
deploy,
after,
// The config still needs to be minimized.
// This happens in ToToml::push_to_toml
config: resource.config.into(),
@@ -274,7 +281,7 @@ impl ToToml for Build {
config
.into_iter()
.map(|(key, value)| match key.as_str() {
"builder_id" => return Ok((String::from("builder"), value)),
"builder_id" => Ok((String::from("builder"), value)),
"version" => {
match (
&resource.config.version,
@@ -353,7 +360,7 @@ impl ToToml for ServerTemplate {
if empty_params {
// toml_pretty will remove empty map
// but in this case its needed to deserialize the enums.
toml.push_str("\nconfig.params = {}");
toml.push_str("\nparams = {}");
}
}
}
@@ -385,7 +392,7 @@ impl ToToml for Builder {
if empty_params {
// toml_pretty will remove empty map
// but in this case its needed to deserialize the enums.
toml.push_str("\nconfig.params = {}");
toml.push_str("\nparams = {}");
}
}
}
@@ -680,6 +687,13 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::CommitSync(exec) => exec.sync.clone_from(
all
.syncs
.get(&exec.sync)
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::DeployStack(exec) => exec.stack.clone_from(
all
.stacks
@@ -687,6 +701,15 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::DeployStackIfChanged(exec) => {
exec.stack.clone_from(
all
.stacks
.get(&exec.stack)
.map(|r| &r.name)
.unwrap_or(&String::new()),
)
}
Execution::StartStack(exec) => exec.stack.clone_from(
all
.stacks

View File

@@ -94,6 +94,7 @@ pub async fn get_updates_for_view(
Some(original) => original,
None => {
diffs.push(DiffData::Create {
name: user_group.name.clone(),
proposed: format!(
"[[user_group]]\n{}",
toml_pretty::to_string(&user_group, TOML_PRETTY_OPTIONS)

View File

@@ -75,6 +75,7 @@ pub async fn get_updates_for_view(
}
None => {
diffs.push(DiffData::Create {
name: variable.name.clone(),
proposed: format!(
"[[variable]]\n{}",
toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS)

View File

@@ -5,6 +5,7 @@ use komodo_client::entities::{
sync::{DiffData, ResourceDiff},
tag::Tag,
toml::ResourceToml,
ResourceTargetVariant,
};
use mungos::find::find_collect;
use partial_derive2::MaybeNone;
@@ -15,7 +16,9 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
diffs: &mut Vec<ResourceDiff>,
) -> anyhow::Result<()> {
@@ -25,12 +28,33 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
.into_iter()
.filter(|r| {
Resource::include_resource(
&r.config, &r.tags, all_tags, match_tags,
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let resources = resources
.into_iter()
.filter(|r| {
Resource::include_resource_partial(
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.collect::<Vec<_>>();
if delete {
for current_resource in current_map.values() {
if !resources.iter().any(|r| r.name == current_resource.name) {
@@ -41,8 +65,10 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
data: DiffData::Delete {
current: super::toml::resource_to_toml::<Resource>(
current_resource.clone(),
false,
vec![],
all_resources,
all_tags,
id_to_tags,
)?,
},
});
@@ -51,15 +77,6 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
}
for mut proposed_resource in resources {
// only resource that might not be included is resource sync
if !Resource::include_resource_partial(
&proposed_resource.config,
&proposed_resource.tags,
all_tags,
match_tags,
) {
continue;
}
match current_map.get(&proposed_resource.name) {
Some(current_resource) => {
// First merge toml resource config (partial) onto default resource config.
@@ -87,7 +104,7 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
let current_tags = current_resource
.tags
.iter()
.filter_map(|id| all_tags.get(id).map(|t| t.name.clone()))
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
// Only proceed if there are any fields to update,
@@ -105,12 +122,14 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
current_resource.id.clone(),
),
data: DiffData::Update {
proposed,
current: super::toml::resource_to_toml::<Resource>(
current_resource.clone(),
proposed_resource.deploy,
proposed_resource.after,
all_resources,
all_tags,
id_to_tags,
)?,
proposed,
},
});
}
@@ -120,6 +139,7 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
target: Resource::resource_target(String::new()),
data: DiffData::Create {
name: proposed_resource.name.clone(),
proposed: super::toml::resource_toml_to_toml_string::<
Resource,
>(proposed_resource)?,

View File

@@ -1,23 +0,0 @@
[package]
name = "migrator"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# komodo_client.workspace = true
logger.workspace = true
#
# mungos.workspace = true
#
tokio.workspace = true
anyhow.workspace = true
dotenvy.workspace = true
envy.workspace = true
serde.workspace = true
tracing.workspace = true

View File

@@ -1,16 +0,0 @@
FROM rust:1.80.1-bookworm AS builder
WORKDIR /builder
COPY . .
RUN cargo build -p migrator --release
# Final Image
FROM gcr.io/distroless/cc-debian12
COPY --from=builder /builder/target/release/migrator /
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo
LABEL org.opencontainers.image.description="Database migrator for Komodo version upgrades"
LABEL org.opencontainers.image.licenses=GPL-3.0
CMD ["./migrator"]

View File

@@ -1,25 +0,0 @@
# Migrator
Performs schema changes on the Komodo database
## v1.7 - v1.11 migration
Run this before upgrading to latest from versions 1.7 to 1.11.
```sh
docker run --rm --name komodo-migrator \
--network "host" \
--env MIGRATION="v1.11" \
--env TARGET_URI="mongodb://<USERNAME>:<PASSWORD>@<ADDRESS>" \
--env TARGET_DB_NAME="<DB_NAME>" \
ghcr.io/mbecker20/komodo_migrator
```
## v1.0 - v1.6 migration
Run this before upgrading to latest from versions 1.0 to 1.6.
```sh
docker run --rm --name komodo-migrator \
--network "host" \
--env MIGRATION="v1.6" \
--env TARGET_URI="mongodb://<USERNAME>:<PASSWORD>@<ADDRESS>" \
--env TARGET_DB_NAME="<DB_NAME>" \
ghcr.io/mbecker20/komodo_migrator
```

View File

@@ -1,2 +0,0 @@
// Legacy v1.11 schema module; not every item in it is referenced by
// the migrator, hence the blanket allow(unused).
#[allow(unused)]
pub mod v1_11;

View File

@@ -1,261 +0,0 @@
use komodo_client::entities::{
build::StandardRegistryConfig, EnvironmentVar, NoData,
SystemCommand, Version, I64,
};
use serde::{Deserialize, Serialize};
use super::resource::Resource;
pub type Build = Resource<BuildConfig, BuildInfo>;
/// Convert the legacy `Build` resource into the current
/// `komodo_client` build entity.
impl From<Build> for komodo_client::entities::build::Build {
  fn from(value: Build) -> Self {
    komodo_client::entities::build::Build {
      id: value.id,
      name: value.name,
      description: value.description,
      updated_at: value.updated_at,
      tags: value.tags,
      // The legacy info only tracked the last build time; commit
      // hash / message metadata starts out empty after migration.
      info: komodo_client::entities::build::BuildInfo {
        last_built_at: value.info.last_built_at,
        built_hash: None,
        built_message: None,
        latest_hash: None,
        latest_message: None,
      },
      config: value.config.into(),
      base_permission: Default::default(),
    }
  }
}
/// Legacy build info: only the timestamp of the last build was stored.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct BuildInfo {
  /// Unix timestamp (ms) of the last build run.
  pub last_built_at: I64,
}
/// Legacy build configuration schema, kept so old database documents
/// can be deserialized and converted to the current
/// `komodo_client::entities::build::BuildConfig`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BuildConfig {
  /// Which builder is used to build the image.
  #[serde(default, alias = "builder")]
  pub builder_id: String,
  /// The current version of the build.
  #[serde(default)]
  pub version: Version,
  /// The Github repo used as the source of the build.
  #[serde(default)]
  pub repo: String,
  /// The branch of the repo.
  #[serde(default = "default_branch")]
  pub branch: String,
  /// Optionally set a specific commit hash.
  #[serde(default)]
  pub commit: String,
  /// The github account used to clone (used to access private repos).
  /// Empty string is public clone (only public repos).
  #[serde(default)]
  pub github_account: String,
  /// The optional command run after repo clone and before docker build.
  #[serde(default)]
  pub pre_build: SystemCommand,
  /// Configuration for the registry to push the built image to.
  #[serde(default)]
  pub image_registry: ImageRegistry,
  /// The path of the docker build context relative to the root of the repo.
  /// Default: "." (the root of the repo).
  #[serde(default = "default_build_path")]
  pub build_path: String,
  /// The path of the dockerfile relative to the build path.
  #[serde(default = "default_dockerfile_path")]
  pub dockerfile_path: String,
  /// Whether to skip secret interpolation in the build_args.
  #[serde(default)]
  pub skip_secret_interp: bool,
  /// Whether to use buildx to build (eg `docker buildx build ...`)
  #[serde(default)]
  pub use_buildx: bool,
  /// Whether incoming webhooks actually trigger action.
  #[serde(default = "default_webhook_enabled")]
  pub webhook_enabled: bool,
  /// Any extra docker cli arguments to be included in the build command
  #[serde(default)]
  pub extra_args: Vec<String>,
  /// Docker build arguments.
  ///
  /// These values are visible in the final image by running `docker inspect`.
  #[serde(
    default,
    deserialize_with = "komodo_client::entities::env_vars_deserializer"
  )]
  pub build_args: Vec<EnvironmentVar>,
  /// Secret arguments.
  ///
  /// These values remain hidden in the final image by using
  /// docker secret mounts. See `<https://docs.docker.com/build/building/secrets>`.
  ///
  /// The values can be used in RUN commands:
  /// ```text
  /// RUN --mount=type=secret,id=SECRET_KEY \
  ///   SECRET_KEY=$(cat /run/secrets/SECRET_KEY) ...
  /// ```
  #[serde(
    default,
    deserialize_with = "komodo_client::entities::env_vars_deserializer"
  )]
  pub secret_args: Vec<EnvironmentVar>,
  /// Docker labels
  #[serde(
    default,
    deserialize_with = "komodo_client::entities::env_vars_deserializer"
  )]
  pub labels: Vec<EnvironmentVar>,
}
/// Convert the legacy build config into the current schema.
impl From<BuildConfig>
  for komodo_client::entities::build::BuildConfig
{
  fn from(value: BuildConfig) -> Self {
    komodo_client::entities::build::BuildConfig {
      builder_id: value.builder_id,
      skip_secret_interp: value.skip_secret_interp,
      version: komodo_client::entities::Version {
        major: value.version.major,
        minor: value.version.minor,
        patch: value.version.patch,
      },
      links: Default::default(),
      auto_increment_version: true,
      image_name: Default::default(),
      image_tag: Default::default(),
      // The legacy schema only supported Github (`github_account`),
      // so the provider is fixed to github.com over https.
      git_provider: String::from("github.com"),
      git_https: true,
      repo: value.repo,
      branch: value.branch,
      commit: value.commit,
      git_account: value.github_account,
      pre_build: komodo_client::entities::SystemCommand {
        path: value.pre_build.path,
        command: value.pre_build.command,
      },
      build_path: value.build_path,
      dockerfile_path: value.dockerfile_path,
      build_args: value
        .build_args
        .into_iter()
        .map(Into::into)
        .collect(),
      // NOTE(review): legacy secret_args are discarded rather than
      // migrated — confirm this loss is intentional.
      secret_args: Default::default(),
      labels: value.labels.into_iter().map(Into::into).collect(),
      extra_args: value.extra_args,
      use_buildx: value.use_buildx,
      webhook_enabled: value.webhook_enabled,
      webhook_secret: Default::default(),
      image_registry: value.image_registry.into(),
    }
  }
}
/// Serde default: builds track the `main` branch unless specified.
fn default_branch() -> String {
  "main".to_string()
}

/// Serde default: the docker build context is the repo root.
fn default_build_path() -> String {
  ".".to_string()
}

/// Serde default: the dockerfile is named `Dockerfile`,
/// relative to the build path.
fn default_dockerfile_path() -> String {
  "Dockerfile".to_string()
}

/// Serde default: incoming webhooks trigger action unless disabled.
fn default_webhook_enabled() -> bool {
  true
}
/// Legacy registry selection for pushing built images.
/// Serialized adjacently tagged as `{ "type": ..., "params": ... }`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", content = "params")]
pub enum ImageRegistry {
  /// Don't push the image to any registry
  None(NoData),
  /// Push the image to DockerHub
  DockerHub(CloudRegistryConfig),
  /// Push the image to the Github Container Registry.
  ///
  /// See [the Github docs](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#pushing-container-images)
  /// for information on creating an access token
  Ghcr(CloudRegistryConfig),
  /// Push the image to Aws Elastic Container Registry
  ///
  /// The string held in 'params' should match a label of an `aws_ecr_registry` in the core config.
  AwsEcr(String),
  /// Todo. Will point to a custom "Registry" resource by id
  Custom(String),
}
/// An unconfigured registry defaults to pushing nowhere.
impl Default for ImageRegistry {
  fn default() -> Self {
    Self::None(NoData {})
  }
}
/// Convert the legacy registry config into the current schema.
///
/// DockerHub / Ghcr map onto the generic `Standard` registry with
/// their respective domains. `None`, `Custom`, and `AwsEcr` all map
/// to `None`: the current schema has no equivalent for the legacy
/// `Custom` and `AwsEcr` variants, so their payloads are dropped.
impl From<ImageRegistry>
  for komodo_client::entities::build::ImageRegistry
{
  fn from(value: ImageRegistry) -> Self {
    match value {
      // Wildcard patterns avoid binding the unused Custom id /
      // AwsEcr label payloads (fixes an unused-variable warning).
      ImageRegistry::None(_)
      | ImageRegistry::Custom(_)
      | ImageRegistry::AwsEcr(_) => {
        komodo_client::entities::build::ImageRegistry::None(NoData {})
      }
      ImageRegistry::DockerHub(params) => {
        komodo_client::entities::build::ImageRegistry::Standard(
          StandardRegistryConfig {
            domain: String::from("docker.io"),
            account: params.account,
            organization: params.organization,
          },
        )
      }
      ImageRegistry::Ghcr(params) => {
        komodo_client::entities::build::ImageRegistry::Standard(
          StandardRegistryConfig {
            domain: String::from("ghcr.io"),
            account: params.account,
            organization: params.organization,
          },
        )
      }
    }
  }
}
/// Account / organization parameters for a cloud registry
/// (used by the `DockerHub` and `Ghcr` variants of [ImageRegistry]).
#[derive(
  Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct CloudRegistryConfig {
  /// Specify an account to use with the cloud registry.
  #[serde(default)]
  pub account: String,
  /// Optional. Specify an organization to push the image under.
  /// Empty string means no organization.
  #[serde(default)]
  pub organization: String,
}

View File

@@ -1,168 +0,0 @@
use komodo_client::entities::{
deployment::{
conversions_deserializer, term_labels_deserializer, Conversion,
DeploymentImage, RestartMode, TerminationSignalLabel,
},
env_vars_deserializer, EnvironmentVar, TerminationSignal,
};
use serde::{Deserialize, Serialize};
use super::{build::ImageRegistry, resource::Resource};
/// A deployment resource: [Resource] parameterized with
/// [DeploymentConfig] and no extra info (`()`).
pub type Deployment = Resource<DeploymentConfig, ()>;
impl From<Deployment>
for komodo_client::entities::deployment::Deployment
{
fn from(value: Deployment) -> Self {
komodo_client::entities::deployment::Deployment {
id: value.id,
name: value.name,
description: value.description,
updated_at: value.updated_at,
tags: value.tags,
info: (),
config: value.config.into(),
base_permission: Default::default(),
}
}
}
/// The user-configurable deployment settings: target server, image
/// source / registry, and the container runtime options passed to
/// `docker run` (network, restart mode, ports, volumes, environment,
/// labels, termination behavior, ...).
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DeploymentConfig {
  /// The id of server the deployment is deployed on.
  #[serde(default, alias = "server")]
  pub server_id: String,
  /// The image which the deployment deploys.
  /// Can either be a user inputted image, or a Komodo build.
  #[serde(default)]
  pub image: DeploymentImage,
  /// Configure the registry used to pull the image from the registry.
  /// Used with `docker login`.
  ///
  /// When using attached build as image source:
  /// - If the field is `None` variant, will use the same ImageRegistry config as the build.
  /// - Otherwise, it must match the variant of the ImageRegistry build config.
  /// - Only the account is used, the organization is not needed here
  #[serde(default)]
  pub image_registry: ImageRegistry,
  /// Whether to skip secret interpolation into the deployment environment variables.
  #[serde(default)]
  pub skip_secret_interp: bool,
  /// Whether to redeploy the deployment whenever the attached build finishes.
  #[serde(default)]
  pub redeploy_on_build: bool,
  /// Whether to send ContainerStateChange alerts for this deployment.
  #[serde(default = "default_send_alerts")]
  pub send_alerts: bool,
  /// The network attached to the container.
  /// Default is `host`.
  #[serde(default = "default_network")]
  pub network: String,
  /// The restart mode given to the container.
  #[serde(default)]
  pub restart: RestartMode,
  /// This is interpolated at the end of the `docker run` command,
  /// which means they are either passed to the containers inner process,
  /// or replaces the container command, depending on use of ENTRYPOINT or CMD in dockerfile.
  /// Empty is no command.
  #[serde(default)]
  pub command: String,
  /// The default termination signal to use to stop the deployment. Defaults to SigTerm (default docker signal).
  #[serde(default)]
  pub termination_signal: TerminationSignal,
  /// The termination timeout.
  #[serde(default = "default_termination_timeout")]
  pub termination_timeout: i32,
  /// Extra args which are interpolated into the `docker run` command,
  /// and affect the container configuration.
  #[serde(default)]
  pub extra_args: Vec<String>,
  /// Labels attached to various termination signal options.
  /// Used to specify different shutdown functionality depending on the termination signal.
  #[serde(
    default = "default_term_signal_labels",
    deserialize_with = "term_labels_deserializer"
  )]
  pub term_signal_labels: Vec<TerminationSignalLabel>,
  /// The container port mapping.
  /// Irrelevant if container network is `host`.
  /// Maps ports on host to ports on container.
  #[serde(default, deserialize_with = "conversions_deserializer")]
  pub ports: Vec<Conversion>,
  /// The container volume mapping.
  /// Maps files / folders on host to files / folders in container.
  #[serde(default, deserialize_with = "conversions_deserializer")]
  pub volumes: Vec<Conversion>,
  /// The environment variables passed to the container.
  #[serde(default, deserialize_with = "env_vars_deserializer")]
  pub environment: Vec<EnvironmentVar>,
  /// The docker labels given to the container.
  #[serde(default, deserialize_with = "env_vars_deserializer")]
  pub labels: Vec<EnvironmentVar>,
}
/// Serde default for `send_alerts`: alerts enabled.
fn default_send_alerts() -> bool {
  true
}
/// Serde default for `term_signal_labels`: a single default label.
fn default_term_signal_labels() -> Vec<TerminationSignalLabel> {
  Vec::from([TerminationSignalLabel::default()])
}
/// Serde default for `termination_timeout`: 10 (seconds — TODO confirm
/// unit against the docker stop invocation).
fn default_termination_timeout() -> i32 {
  10
}
/// Serde default for `network`: the `host` network.
fn default_network() -> String {
  "host".to_string()
}
impl From<DeploymentConfig>
  for komodo_client::entities::deployment::DeploymentConfig
{
  /// Field-for-field conversion into the current entity config.
  ///
  /// The legacy `image_registry` is flattened to just the account name:
  /// only the DockerHub / Ghcr variants carry an account, every other
  /// variant yields an empty string. `links` has no legacy source field
  /// and is defaulted.
  fn from(config: DeploymentConfig) -> Self {
    // Only the standard cloud registries hold a usable account.
    let image_registry_account = match config.image_registry {
      ImageRegistry::DockerHub(params)
      | ImageRegistry::Ghcr(params) => params.account,
      ImageRegistry::None(_)
      | ImageRegistry::AwsEcr(_)
      | ImageRegistry::Custom(_) => String::new(),
    };
    komodo_client::entities::deployment::DeploymentConfig {
      server_id: config.server_id,
      image: config.image,
      image_registry_account,
      skip_secret_interp: config.skip_secret_interp,
      redeploy_on_build: config.redeploy_on_build,
      send_alerts: config.send_alerts,
      network: config.network,
      restart: config.restart,
      command: config.command,
      termination_signal: config.termination_signal,
      termination_timeout: config.termination_timeout,
      extra_args: config.extra_args,
      term_signal_labels: config.term_signal_labels,
      ports: config.ports,
      volumes: config.volumes,
      environment: config.environment,
      labels: config.labels,
      links: Default::default(),
    }
  }
}

View File

@@ -1,48 +0,0 @@
// use mungos::{init::MongoBuilder, mongodb::Collection};
// use serde::{Deserialize, Serialize};
// pub mod build;
// pub mod deployment;
// pub mod resource;
// pub struct DbClient {
// pub builds: Collection<build::Build>,
// pub deployments: Collection<deployment::Deployment>,
// }
// impl DbClient {
// pub async fn new(
// legacy_uri: &str,
// legacy_db_name: &str,
// ) -> DbClient {
// let client = MongoBuilder::default()
// .uri(legacy_uri)
// .build()
// .await
// .expect("failed to init legacy mongo client");
// let db = client.database(legacy_db_name);
// DbClient {
// builds: db.collection("Build"),
// deployments: db.collection("Deployment"),
// }
// }
// }
// #[derive(
// Serialize, Deserialize, Debug, Clone, Default, PartialEq,
// )]
// pub struct Version {
// pub major: i32,
// pub minor: i32,
// pub patch: i32,
// }
// #[derive(
// Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq,
// )]
// pub struct SystemCommand {
// #[serde(default)]
// pub path: String,
// #[serde(default)]
// pub command: String,
// }

View File

@@ -1,54 +0,0 @@
use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id;
use serde::{Deserialize, Serialize};
/// A generic named resource: shared metadata (`id`, `name`,
/// `description`, `tags`, ...) wrapped around a resource-specific
/// `Config` and optional `Info` payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Resource<Config, Info: Default = ()> {
  /// The Mongo ID of the resource.
  /// This field is de/serialized from/to JSON as
  /// `{ "_id": { "$oid": "..." }, ...(rest of serialized Resource<T>) }`
  #[serde(
    default,
    rename = "_id",
    skip_serializing_if = "String::is_empty",
    with = "hex_string_as_object_id"
  )]
  pub id: String,
  /// The resource name.
  /// This is guaranteed unique among others of the same resource type.
  pub name: String,
  /// A description for the resource
  #[serde(default)]
  pub description: String,
  /// When description last updated
  #[serde(default)]
  pub updated_at: i64,
  /// Tag Ids
  #[serde(default)]
  pub tags: Vec<String>,
  /// Resource-specific information (not user configurable).
  #[serde(default)]
  pub info: Info,
  /// Resource-specific configuration.
  pub config: Config,
}
/// A flattened list view of a [Resource], without the config.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ResourceListItem<Info> {
  /// The resource id
  pub id: String,
  // The resource type, ie `Server` or `Deployment`.
  // NOTE: demoted from a `///` doc comment — with the field below
  // commented out, the doc comment was mis-attaching to `name`.
  // #[serde(rename = "type")]
  // pub resource_type: ResourceTargetVariant,
  /// The resource name
  pub name: String,
  /// Tag Ids
  pub tags: Vec<String>,
  /// Resource specific info
  pub info: Info,
}

View File

@@ -1,46 +0,0 @@
#![allow(unused)]
#[macro_use]
extern crate tracing;
use serde::Deserialize;
mod legacy;
mod migrate;
/// Which migration to run. Deserialized from the environment as part
/// of [Env] (accepts the alias `v1.11`).
#[derive(Deserialize)]
enum Migration {
  /// Migrate from a v1.11 database.
  #[serde(alias = "v1.11")]
  V1_11,
}
/// Migrator configuration, loaded from environment variables via
/// `envy::from_env` in `main`.
#[derive(Deserialize)]
struct Env {
  // Which migration to run.
  migration: Migration,
  // URI of the target database — presumably a MongoDB connection
  // string (the commented-out v1_11 client uses mungos/mongo); confirm.
  target_uri: String,
  // Name of the database at `target_uri` to migrate.
  target_db_name: String,
}
/// Migrator entry point: load env config, then dispatch on the
/// requested [Migration].
#[tokio::main]
async fn main() -> anyhow::Result<()> {
  // Load a `.env` file if present; a missing file is not an error.
  dotenvy::dotenv().ok();
  logger::init(&Default::default())?;
  info!("starting migrator");
  // Read `Env` from environment variables.
  let env: Env = envy::from_env()?;
  match env.migration {
    Migration::V1_11 => {
      // The v1.11 migration body is currently disabled (commented
      // out), so selecting it is a no-op.
      // let db = legacy::v1_11::DbClient::new(
      //   &env.target_uri,
      //   &env.target_db_name,
      // )
      // .await;
      // migrate::v1_11::migrate_all_in_place(&db).await?
    }
  }
  info!("finished!");
  Ok(())
}

View File

@@ -1 +0,0 @@
pub mod v1_11;

View File

@@ -1,70 +0,0 @@
// use anyhow::Context;
// use komodo_client::entities::{build::Build, deployment::Deployment};
// use mungos::{
// find::find_collect,
// mongodb::bson::{doc, to_document},
// };
// use crate::legacy::v1_11;
// pub async fn migrate_all_in_place(
// db: &v1_11::DbClient,
// ) -> anyhow::Result<()> {
// migrate_builds_in_place(db).await?;
// migrate_deployments_in_place(db).await?;
// Ok(())
// }
// pub async fn migrate_builds_in_place(
// db: &v1_11::DbClient,
// ) -> anyhow::Result<()> {
// let builds = find_collect(&db.builds, None, None)
// .await
// .context("failed to get builds")?
// .into_iter()
// .map(Into::into)
// .collect::<Vec<Build>>();
// info!("migrating {} builds...", builds.len());
// for build in builds {
// db.builds
// .update_one(
// doc! { "name": &build.name },
// doc! { "$set": to_document(&build)? },
// )
// .await
// .context("failed to insert builds on target")?;
// }
// info!("builds have been migrated\n");
// Ok(())
// }
// pub async fn migrate_deployments_in_place(
// db: &v1_11::DbClient,
// ) -> anyhow::Result<()> {
// let deployments = find_collect(&db.deployments, None, None)
// .await
// .context("failed to get deployments")?
// .into_iter()
// .map(Into::into)
// .collect::<Vec<Deployment>>();
// info!("migrating {} deployments...", deployments.len());
// for deployment in deployments {
// db.deployments
// .update_one(
// doc! { "name": &deployment.name },
// doc! { "$set": to_document(&deployment)? },
// )
// .await
// .context("failed to insert deployments on target")?;
// }
// info!("deployments have been migrated\n");
// Ok(())
// }

View File

@@ -26,6 +26,7 @@ git.workspace = true
serror = { workspace = true, features = ["axum"] }
merge_config_files.workspace = true
async_timing_util.workspace = true
derive_variants.workspace = true
resolver_api.workspace = true
run_command.workspace = true
svi.workspace = true

View File

@@ -87,10 +87,18 @@ impl Resolve<build::Build> for State {
// Get command parts
let image_name =
get_image_name(&build).context("failed to make image name")?;
let build_args = parse_build_args(
&environment_vars_from_str(build_args)
.context("Invalid build_args")?,
);
// Add VERSION to build args (if not already there)
let mut build_args = environment_vars_from_str(build_args)
.context("Invalid build_args")?;
if !build_args.iter().any(|a| a.variable == "VERSION") {
build_args.push(EnvironmentVar {
variable: String::from("VERSION"),
value: build.config.version.to_string(),
});
}
let build_args = parse_build_args(&build_args);
let secret_args = environment_vars_from_str(secret_args)
.context("Invalid secret_args")?;
let _secret_args =
@@ -110,13 +118,16 @@ impl Resolve<build::Build> for State {
// Construct command
let command = format!(
"cd {} && docker{buildx} build{build_args}{_secret_args}{extra_args}{labels}{image_tags} -f {dockerfile_path} .{push_command}",
build_dir.display()
"docker{buildx} build{build_args}{_secret_args}{extra_args}{labels}{image_tags} -f {dockerfile_path} .{push_command}",
);
if *skip_secret_interp {
let build_log =
run_komodo_command("docker build", command).await;
let build_log = run_komodo_command(
"docker build",
build_dir.as_ref(),
command,
)
.await;
logs.push(build_log);
} else {
// Interpolate any missing secrets
@@ -131,8 +142,12 @@ impl Resolve<build::Build> for State {
)?;
replacers.extend(core_replacers);
let mut build_log =
run_komodo_command("docker build", command).await;
let mut build_log = run_komodo_command(
"docker build",
build_dir.as_ref(),
command,
)
.await;
build_log.command =
svi::replace_in_string(&build_log.command, &replacers);
build_log.stdout =
@@ -229,7 +244,7 @@ impl Resolve<PruneBuilders> for State {
_: (),
) -> anyhow::Result<Log> {
let command = String::from("docker builder prune -a -f");
Ok(run_komodo_command("prune builders", command).await)
Ok(run_komodo_command("prune builders", None, command).await)
}
}
@@ -243,6 +258,6 @@ impl Resolve<PruneBuildx> for State {
_: (),
) -> anyhow::Result<Log> {
let command = String::from("docker buildx prune -a -f");
Ok(run_komodo_command("prune buildx", command).await)
Ok(run_komodo_command("prune buildx", None, command).await)
}
}

View File

@@ -3,10 +3,15 @@ use std::path::PathBuf;
use anyhow::{anyhow, Context};
use command::run_komodo_command;
use formatting::format_serror;
use git::{write_commit_file, GitRes};
use komodo_client::entities::{
stack::ComposeProject, to_komodo_name, update::Log, FileContents,
stack::ComposeProject, to_komodo_name, update::Log, CloneArgs,
FileContents,
};
use periphery_client::api::{
compose::*,
git::{PullOrCloneRepo, RepoActionResponse},
};
use periphery_client::api::compose::*;
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
use tokio::fs;
@@ -28,6 +33,7 @@ impl Resolve<ListComposeProjects, ()> for State {
let docker_compose = docker_compose();
let res = run_komodo_command(
"list projects",
None,
format!("{docker_compose} ls --all --format json"),
)
.await;
@@ -76,25 +82,67 @@ pub struct DockerComposeLsItem {
//
const DEFAULT_COMPOSE_CONTENTS: &str = "## 🦎 Hello Komodo 🦎
services:
hello_world:
image: hello-world
# networks:
# - default
# ports:
# - 3000:3000
# volumes:
# - data:/data
impl Resolve<GetComposeServiceLog> for State {
#[instrument(
name = "GetComposeServiceLog",
level = "debug",
skip(self)
)]
async fn resolve(
&self,
GetComposeServiceLog {
project,
service,
tail,
timestamps,
}: GetComposeServiceLog,
_: (),
) -> anyhow::Result<Log> {
let docker_compose = docker_compose();
let timestamps =
timestamps.then_some(" --timestamps").unwrap_or_default();
let command = format!(
"{docker_compose} -p {project} logs {service} --tail {tail}{timestamps}"
);
Ok(run_komodo_command("get stack log", None, command).await)
}
}
# networks:
# default: {}
impl Resolve<GetComposeServiceLogSearch> for State {
#[instrument(
name = "GetComposeServiceLogSearch",
level = "debug",
skip(self)
)]
async fn resolve(
&self,
GetComposeServiceLogSearch {
project,
service,
terms,
combinator,
invert,
timestamps,
}: GetComposeServiceLogSearch,
_: (),
) -> anyhow::Result<Log> {
let docker_compose = docker_compose();
let grep = log_grep(&terms, combinator, invert);
let timestamps =
timestamps.then_some(" --timestamps").unwrap_or_default();
let command = format!("{docker_compose} -p {project} logs {service} --tail 5000{timestamps} 2>&1 | {grep}");
Ok(run_komodo_command("get stack log grep", None, command).await)
}
}
# volumes:
# data:
";
//
impl Resolve<GetComposeContentsOnHost, ()> for State {
#[instrument(
name = "GetComposeContentsOnHost",
level = "debug",
skip(self)
)]
async fn resolve(
&self,
GetComposeContentsOnHost {
@@ -115,95 +163,36 @@ impl Resolve<GetComposeContentsOnHost, ()> for State {
.context("Failed to initialize run directory")?;
}
let file_paths = file_paths
.iter()
.map(|path| {
run_directory.join(path).components().collect::<PathBuf>()
})
.collect::<Vec<_>>();
let mut res = GetComposeContentsOnHostResponse::default();
for full_path in &file_paths {
if !full_path.exists() {
fs::write(&full_path, DEFAULT_COMPOSE_CONTENTS)
.await
.context("Failed to init missing compose file on host")?;
}
for path in file_paths {
let full_path =
run_directory.join(&path).components().collect::<PathBuf>();
match fs::read_to_string(&full_path).await.with_context(|| {
format!(
"Failed to read compose file contents at {full_path:?}"
)
}) {
Ok(contents) => {
res.contents.push(FileContents {
path: full_path.display().to_string(),
contents,
});
// The path we store here has to be the same as incoming file path in the array,
// in order for WriteComposeContentsToHost to write to the correct path.
res.contents.push(FileContents { path, contents });
}
Err(e) => {
res.errors.push(FileContents {
path: full_path.display().to_string(),
path,
contents: format_serror(&e.into()),
});
}
}
}
Ok(res)
}
}
//
impl Resolve<GetComposeServiceLog> for State {
#[instrument(
name = "GetComposeServiceLog",
level = "debug",
skip(self)
)]
async fn resolve(
&self,
GetComposeServiceLog {
project,
service,
tail,
}: GetComposeServiceLog,
_: (),
) -> anyhow::Result<Log> {
let docker_compose = docker_compose();
let command = format!(
"{docker_compose} -p {project} logs {service} --tail {tail}"
);
Ok(run_komodo_command("get stack log", command).await)
}
}
impl Resolve<GetComposeServiceLogSearch> for State {
#[instrument(
name = "GetComposeServiceLogSearch",
level = "debug",
skip(self)
)]
async fn resolve(
&self,
GetComposeServiceLogSearch {
project,
service,
terms,
combinator,
invert,
}: GetComposeServiceLogSearch,
_: (),
) -> anyhow::Result<Log> {
let docker_compose = docker_compose();
let grep = log_grep(&terms, combinator, invert);
let command = format!("{docker_compose} -p {project} logs {service} --tail 5000 2>&1 | {grep}");
Ok(run_komodo_command("get stack log grep", command).await)
}
}
//
impl Resolve<WriteComposeContentsToHost> for State {
#[instrument(name = "WriteComposeContentsToHost", skip(self))]
async fn resolve(
@@ -216,13 +205,10 @@ impl Resolve<WriteComposeContentsToHost> for State {
}: WriteComposeContentsToHost,
_: (),
) -> anyhow::Result<Log> {
let root =
periphery_config().stack_dir.join(to_komodo_name(&name));
let run_directory = root.join(&run_directory);
let run_directory = run_directory.canonicalize().context(
"failed to validate run directory on host (canonicalize error)",
)?;
let file_path = run_directory
let file_path = periphery_config()
.stack_dir
.join(to_komodo_name(&name))
.join(&run_directory)
.join(file_path)
.components()
.collect::<PathBuf>();
@@ -244,6 +230,104 @@ impl Resolve<WriteComposeContentsToHost> for State {
//
impl Resolve<WriteCommitComposeContents> for State {
#[instrument(name = "WriteCommitComposeContents", skip(self))]
async fn resolve(
&self,
WriteCommitComposeContents {
stack,
username,
file_path,
contents,
git_token,
}: WriteCommitComposeContents,
_: (),
) -> anyhow::Result<RepoActionResponse> {
if stack.config.files_on_host {
return Err(anyhow!(
"Wrong method called for files on host stack"
));
}
if stack.config.repo.is_empty() {
return Err(anyhow!("Repo is not configured"));
}
let root = periphery_config()
.stack_dir
.join(to_komodo_name(&stack.name));
let mut args: CloneArgs = (&stack).into();
// Set the clone destination to the one created for this run
args.destination = Some(root.display().to_string());
let git_token = match git_token {
Some(token) => Some(token),
None => {
if !stack.config.git_account.is_empty() {
match crate::helpers::git_token(
&stack.config.git_provider,
&stack.config.git_account,
) {
Ok(token) => Some(token.to_string()),
Err(e) => {
return Err(
e.context("Failed to find required git token"),
);
}
}
} else {
None
}
}
};
State
.resolve(
PullOrCloneRepo {
args,
git_token,
environment: vec![],
env_file_path: stack.config.env_file_path.clone(),
skip_secret_interp: stack.config.skip_secret_interp,
// repo replacer only needed for on_clone / on_pull,
// which aren't available for stacks
replacers: Default::default(),
},
(),
)
.await?;
let file_path = stack
.config
.run_directory
.parse::<PathBuf>()
.context("Run directory is not a valid path")?
.join(&file_path);
let msg = if let Some(username) = username {
format!("{}: Write Compose File", username)
} else {
"Write Compose File".to_string()
};
let GitRes {
logs,
hash,
message,
..
} = write_commit_file(&msg, &root, &file_path, &contents).await?;
Ok(RepoActionResponse {
logs,
commit_hash: hash,
commit_message: message,
env_file_path: None,
})
}
}
//
impl Resolve<ComposeUp> for State {
#[instrument(
name = "ComposeUp",
@@ -292,6 +376,7 @@ impl Resolve<ComposeExecution> for State {
let docker_compose = docker_compose();
let log = run_komodo_command(
"compose command",
None,
format!("{docker_compose} -p {project} {command}"),
)
.await;

View File

@@ -42,11 +42,18 @@ impl Resolve<GetContainerLog> for State {
#[instrument(name = "GetContainerLog", level = "debug", skip(self))]
async fn resolve(
&self,
GetContainerLog { name, tail }: GetContainerLog,
GetContainerLog {
name,
tail,
timestamps,
}: GetContainerLog,
_: (),
) -> anyhow::Result<Log> {
let command = format!("docker logs {name} --tail {tail}");
Ok(run_komodo_command("get container log", command).await)
let timestamps =
timestamps.then_some(" --timestamps").unwrap_or_default();
let command =
format!("docker logs {name} --tail {tail}{timestamps}");
Ok(run_komodo_command("get container log", None, command).await)
}
}
@@ -65,13 +72,20 @@ impl Resolve<GetContainerLogSearch> for State {
terms,
combinator,
invert,
timestamps,
}: GetContainerLogSearch,
_: (),
) -> anyhow::Result<Log> {
let grep = log_grep(&terms, combinator, invert);
let command =
format!("docker logs {name} --tail 5000 2>&1 | {grep}");
Ok(run_komodo_command("get container log grep", command).await)
let timestamps =
timestamps.then_some(" --timestamps").unwrap_or_default();
let command = format!(
"docker logs {name} --tail 5000{timestamps} 2>&1 | {grep}"
);
Ok(
run_komodo_command("get container log grep", None, command)
.await,
)
}
}
@@ -126,6 +140,7 @@ impl Resolve<StartContainer> for State {
Ok(
run_komodo_command(
"docker start",
None,
format!("docker start {name}"),
)
.await,
@@ -145,6 +160,7 @@ impl Resolve<RestartContainer> for State {
Ok(
run_komodo_command(
"docker restart",
None,
format!("docker restart {name}"),
)
.await,
@@ -164,6 +180,7 @@ impl Resolve<PauseContainer> for State {
Ok(
run_komodo_command(
"docker pause",
None,
format!("docker pause {name}"),
)
.await,
@@ -181,6 +198,7 @@ impl Resolve<UnpauseContainer> for State {
Ok(
run_komodo_command(
"docker unpause",
None,
format!("docker unpause {name}"),
)
.await,
@@ -198,10 +216,11 @@ impl Resolve<StopContainer> for State {
_: (),
) -> anyhow::Result<Log> {
let command = stop_container_command(&name, signal, time);
let log = run_komodo_command("docker stop", command).await;
let log = run_komodo_command("docker stop", None, command).await;
if log.stderr.contains("unknown flag: --signal") {
let command = stop_container_command(&name, None, time);
let mut log = run_komodo_command("docker stop", command).await;
let mut log =
run_komodo_command("docker stop", None, command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
@@ -230,12 +249,14 @@ impl Resolve<RemoveContainer> for State {
let command =
format!("{stop_command} && docker container rm {name}");
let log =
run_komodo_command("docker stop and remove", command).await;
run_komodo_command("docker stop and remove", None, command)
.await;
if log.stderr.contains("unknown flag: --signal") {
let stop_command = stop_container_command(&name, None, time);
let command =
format!("{stop_command} && docker container rm {name}");
let mut log = run_komodo_command("docker stop", command).await;
let mut log =
run_komodo_command("docker stop", None, command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
@@ -265,7 +286,7 @@ impl Resolve<RenameContainer> for State {
) -> anyhow::Result<Log> {
let new = to_komodo_name(&new_name);
let command = format!("docker rename {curr_name} {new}");
Ok(run_komodo_command("docker rename", command).await)
Ok(run_komodo_command("docker rename", None, command).await)
}
}
@@ -279,7 +300,7 @@ impl Resolve<PruneContainers> for State {
_: (),
) -> anyhow::Result<Log> {
let command = String::from("docker container prune -f");
Ok(run_komodo_command("prune containers", command).await)
Ok(run_komodo_command("prune containers", None, command).await)
}
}
@@ -296,15 +317,17 @@ impl Resolve<StartAllContainers> for State {
.list_containers()
.await
.context("failed to list all containers on host")?;
let futures =
containers.iter().map(
|ContainerListItem { name, .. }| {
let command = format!("docker start {name}");
async move {
run_komodo_command(&command.clone(), command).await
}
},
);
let futures = containers.iter().filter_map(
|ContainerListItem { name, labels, .. }| {
if labels.contains_key("komodo.skip") {
return None;
}
let command = format!("docker start {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
})
},
);
Ok(join_all(futures).await)
}
}
@@ -322,12 +345,15 @@ impl Resolve<RestartAllContainers> for State {
.list_containers()
.await
.context("failed to list all containers on host")?;
let futures = containers.iter().map(
|ContainerListItem { name, .. }| {
let command = format!("docker restart {name}");
async move {
run_komodo_command(&command.clone(), command).await
let futures = containers.iter().filter_map(
|ContainerListItem { name, labels, .. }| {
if labels.contains_key("komodo.skip") {
return None;
}
let command = format!("docker restart {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
})
},
);
Ok(join_all(futures).await)
@@ -347,12 +373,15 @@ impl Resolve<PauseAllContainers> for State {
.list_containers()
.await
.context("failed to list all containers on host")?;
let futures = containers.iter().map(
|ContainerListItem { name, .. }| {
let command = format!("docker pause {name}");
async move {
run_komodo_command(&command.clone(), command).await
let futures = containers.iter().filter_map(
|ContainerListItem { name, labels, .. }| {
if labels.contains_key("komodo.skip") {
return None;
}
let command = format!("docker pause {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
})
},
);
Ok(join_all(futures).await)
@@ -372,12 +401,15 @@ impl Resolve<UnpauseAllContainers> for State {
.list_containers()
.await
.context("failed to list all containers on host")?;
let futures = containers.iter().map(
|ContainerListItem { name, .. }| {
let command = format!("docker unpause {name}");
async move {
run_komodo_command(&command.clone(), command).await
let futures = containers.iter().filter_map(
|ContainerListItem { name, labels, .. }| {
if labels.contains_key("komodo.skip") {
return None;
}
let command = format!("docker unpause {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
})
},
);
Ok(join_all(futures).await)
@@ -397,13 +429,19 @@ impl Resolve<StopAllContainers> for State {
.list_containers()
.await
.context("failed to list all containers on host")?;
let futures = containers.iter().map(
|ContainerListItem { name, .. }| async move {
run_komodo_command(
&format!("docker stop {name}"),
stop_container_command(name, None, None),
)
.await
let futures = containers.iter().filter_map(
|ContainerListItem { name, labels, .. }| {
if labels.contains_key("komodo.skip") {
return None;
}
Some(async move {
run_komodo_command(
&format!("docker stop {name}"),
None,
stop_container_command(name, None, None),
)
.await
})
},
);
Ok(join_all(futures).await)

View File

@@ -87,7 +87,7 @@ impl Resolve<Deploy> for State {
debug!("docker run command: {command}");
if deployment.config.skip_secret_interp {
Ok(run_komodo_command("docker run", command).await)
Ok(run_komodo_command("docker run", None, command).await)
} else {
let command = svi::interpolate_variables(
&command,
@@ -107,7 +107,8 @@ impl Resolve<Deploy> for State {
};
replacers.extend(core_replacers);
let mut log = run_komodo_command("docker run", command).await;
let mut log =
run_komodo_command("docker run", None, command).await;
log.command = svi::replace_in_string(&log.command, &replacers);
log.stdout = svi::replace_in_string(&log.stdout, &replacers);
log.stderr = svi::replace_in_string(&log.stderr, &replacers);

View File

@@ -44,7 +44,7 @@ impl Resolve<DeleteImage> for State {
_: (),
) -> anyhow::Result<Log> {
let command = format!("docker image rm {name}");
Ok(run_komodo_command("delete image", command).await)
Ok(run_komodo_command("delete image", None, command).await)
}
}
@@ -58,6 +58,6 @@ impl Resolve<PruneImages> for State {
_: (),
) -> anyhow::Result<Log> {
let command = String::from("docker image prune -a -f");
Ok(run_komodo_command("prune images", command).await)
Ok(run_komodo_command("prune images", None, command).await)
}
}

View File

@@ -1,5 +1,6 @@
use anyhow::Context;
use command::run_komodo_command;
use derive_variants::EnumVariants;
use futures::TryFutureExt;
use komodo_client::entities::{update::Log, SystemCommand};
use periphery_client::api::{
@@ -30,7 +31,10 @@ mod network;
mod stats;
mod volume;
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolver, EnumVariants,
)]
#[variant_derive(Debug)]
#[serde(tag = "type", content = "params")]
#[resolver_target(State)]
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
@@ -76,6 +80,7 @@ pub enum PeripheryRequest {
// Compose (Write)
WriteComposeContentsToHost(WriteComposeContentsToHost),
WriteCommitComposeContents(WriteCommitComposeContents),
ComposeUp(ComposeUp),
ComposeExecution(ComposeExecution),
@@ -206,7 +211,7 @@ impl ResolveToString<ListSecrets> for State {
}
impl Resolve<GetDockerLists> for State {
#[instrument(name = "GetDockerLists", skip(self))]
#[instrument(name = "GetDockerLists", level = "debug", skip(self))]
async fn resolve(
&self,
GetDockerLists {}: GetDockerLists,
@@ -251,7 +256,7 @@ impl Resolve<RunCommand> for State {
} else {
format!("cd {path} && {command}")
};
run_komodo_command("run command", command).await
run_komodo_command("run command", None, command).await
})
.await
.context("failure in spawned task")
@@ -266,6 +271,6 @@ impl Resolve<PruneSystem> for State {
_: (),
) -> anyhow::Result<Log> {
let command = String::from("docker system prune -a -f --volumes");
Ok(run_komodo_command("prune system", command).await)
Ok(run_komodo_command("prune system", None, command).await)
}
}

View File

@@ -34,7 +34,7 @@ impl Resolve<CreateNetwork> for State {
None => String::new(),
};
let command = format!("docker network create{driver} {name}");
Ok(run_komodo_command("create network", command).await)
Ok(run_komodo_command("create network", None, command).await)
}
}
@@ -48,7 +48,7 @@ impl Resolve<DeleteNetwork> for State {
_: (),
) -> anyhow::Result<Log> {
let command = format!("docker network rm {name}");
Ok(run_komodo_command("delete network", command).await)
Ok(run_komodo_command("delete network", None, command).await)
}
}
@@ -62,6 +62,6 @@ impl Resolve<PruneNetworks> for State {
_: (),
) -> anyhow::Result<Log> {
let command = String::from("docker network prune -f");
Ok(run_komodo_command("prune networks", command).await)
Ok(run_komodo_command("prune networks", None, command).await)
}
}

View File

@@ -28,7 +28,7 @@ impl Resolve<DeleteVolume> for State {
_: (),
) -> anyhow::Result<Log> {
let command = format!("docker volume rm {name}");
Ok(run_komodo_command("delete volume", command).await)
Ok(run_komodo_command("delete volume", None, command).await)
}
}
@@ -42,6 +42,6 @@ impl Resolve<PruneVolumes> for State {
_: (),
) -> anyhow::Result<Log> {
let command = String::from("docker volume prune -a -f");
Ok(run_komodo_command("prune volumes", command).await)
Ok(run_komodo_command("prune volumes", None, command).await)
}
}

View File

@@ -10,14 +10,16 @@ use komodo_client::entities::{
};
use periphery_client::api::{
compose::ComposeUpResponse,
git::{PullOrCloneRepo, RepoActionResponse},
git::{CloneRepo, PullOrCloneRepo, RepoActionResponse},
};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
config::periphery_config, docker::docker_login,
helpers::parse_extra_args, State,
config::periphery_config,
docker::docker_login,
helpers::{interpolate_variables, parse_extra_args},
State,
};
pub fn docker_compose() -> &'static str {
@@ -71,7 +73,7 @@ pub async fn compose_up(
return Err(anyhow!("A compose file doesn't exist after writing stack. Ensure the run_directory and file_paths are correct."));
}
for (_, full_path) in &file_paths {
for (path, full_path) in &file_paths {
let file_contents =
match fs::read_to_string(&full_path).await.with_context(|| {
format!(
@@ -86,7 +88,7 @@ pub async fn compose_up(
.push(Log::error("read compose file", error.clone()));
// This should only happen for repo stacks, ie remote error
res.remote_errors.push(FileContents {
path: full_path.display().to_string(),
path: path.to_string(),
contents: error,
});
return Err(anyhow!(
@@ -95,13 +97,12 @@ pub async fn compose_up(
}
};
res.file_contents.push(FileContents {
path: full_path.display().to_string(),
path: path.to_string(),
contents: file_contents,
});
}
let docker_compose = docker_compose();
let run_dir = run_directory.display();
let service_arg = service
.as_ref()
.map(|service| format!(" {service}"))
@@ -137,7 +138,7 @@ pub async fn compose_up(
}
let env_file = env_file_path
.map(|path| format!(" --env-file {}", path.display()))
.map(|path| format!(" --env-file {path}"))
.unwrap_or_default();
// Build images before destroying to minimize downtime.
@@ -146,10 +147,15 @@ pub async fn compose_up(
let build_extra_args =
parse_extra_args(&stack.config.build_extra_args);
let command = format!(
"cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} build{build_extra_args}{service_arg}",
"{docker_compose} -p {project_name} -f {file_args}{env_file} build{build_extra_args}{service_arg}",
);
if stack.config.skip_secret_interp {
let log = run_komodo_command("compose build", command).await;
let log = run_komodo_command(
"compose build",
run_directory.as_ref(),
command,
)
.await;
res.logs.push(log);
} else {
let (command, mut replacers) = svi::interpolate_variables(
@@ -160,8 +166,12 @@ pub async fn compose_up(
).context("failed to interpolate periphery secrets into stack build command")?;
replacers.extend(core_replacers.clone());
let mut log =
run_komodo_command("compose build", command).await;
let mut log = run_komodo_command(
"compose build",
run_directory.as_ref(),
command,
)
.await;
log.command = svi::replace_in_string(&log.command, &replacers);
log.stdout = svi::replace_in_string(&log.stdout, &replacers);
@@ -177,13 +187,15 @@ pub async fn compose_up(
}
}
//
if stack.config.auto_pull {
// Pull images before destroying to minimize downtime.
// If this fails, do not continue.
let log = run_komodo_command(
"compose pull",
run_directory.as_ref(),
format!(
"cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} pull{service_arg}",
"{docker_compose} -p {project_name} -f {file_args}{env_file} pull{service_arg}",
),
)
.await;
@@ -197,20 +209,78 @@ pub async fn compose_up(
}
}
// Take down the existing containers.
// This one tries to use the previously deployed service name, to ensure the right stack is taken down.
compose_down(&last_project_name, service, res)
.await
.context("failed to destroy existing containers")?;
if !stack.config.pre_deploy.command.is_empty() {
let pre_deploy_path =
run_directory.join(&stack.config.pre_deploy.path);
if !stack.config.skip_secret_interp {
let (full_command, mut replacers) =
interpolate_variables(&stack.config.pre_deploy.command)
.context(
"failed to interpolate secrets into pre_deploy command",
)?;
replacers.extend(core_replacers.to_owned());
let mut pre_deploy_log = run_komodo_command(
"pre deploy",
pre_deploy_path.as_ref(),
&full_command,
)
.await;
pre_deploy_log.command =
svi::replace_in_string(&pre_deploy_log.command, &replacers);
pre_deploy_log.stdout =
svi::replace_in_string(&pre_deploy_log.stdout, &replacers);
pre_deploy_log.stderr =
svi::replace_in_string(&pre_deploy_log.stderr, &replacers);
tracing::debug!(
"run Stack pre_deploy command | command: {} | cwd: {:?}",
pre_deploy_log.command,
pre_deploy_path
);
res.logs.push(pre_deploy_log);
} else {
let pre_deploy_log = run_komodo_command(
"pre deploy",
pre_deploy_path.as_ref(),
&stack.config.pre_deploy.command,
)
.await;
tracing::debug!(
"run Stack pre_deploy command | command: {} | cwd: {:?}",
&stack.config.pre_deploy.command,
pre_deploy_path
);
res.logs.push(pre_deploy_log);
}
if !all_logs_success(&res.logs) {
return Err(anyhow!(
"Failed at running pre_deploy command, stopping the run."
));
}
}
if stack.config.destroy_before_deploy
// Also check if project name changed, which also requires taking down.
|| last_project_name != project_name
{
// Take down the existing containers.
// This one tries to use the previously deployed service name, to ensure the right stack is taken down.
compose_down(&last_project_name, service, res)
.await
.context("failed to destroy existing containers")?;
}
// Run compose up
let extra_args = parse_extra_args(&stack.config.extra_args);
let command = format!(
"cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} up -d{extra_args}{service_arg}",
"{docker_compose} -p {project_name} -f {file_args}{env_file} up -d{extra_args}{service_arg}",
);
let log = if stack.config.skip_secret_interp {
run_komodo_command("compose up", command).await
run_komodo_command("compose up", run_directory.as_ref(), command)
.await
} else {
let (command, mut replacers) = svi::interpolate_variables(
&command,
@@ -220,7 +290,12 @@ pub async fn compose_up(
).context("failed to interpolate periphery secrets into stack run command")?;
replacers.extend(core_replacers);
let mut log = run_komodo_command("compose up", command).await;
let mut log = run_komodo_command(
"compose up",
run_directory.as_ref(),
command,
)
.await;
log.command = svi::replace_in_string(&log.command, &replacers);
log.stdout = svi::replace_in_string(&log.stdout, &replacers);
@@ -237,11 +312,11 @@ pub async fn compose_up(
/// Either writes the stack file_contents to a file, or clones the repo.
/// Returns (run_directory, env_file_path)
async fn write_stack(
stack: &Stack,
async fn write_stack<'a>(
stack: &'a Stack,
git_token: Option<String>,
res: &mut ComposeUpResponse,
) -> anyhow::Result<(PathBuf, Option<PathBuf>)> {
) -> anyhow::Result<(PathBuf, Option<&'a str>)> {
let root = periphery_config()
.stack_dir
.join(to_komodo_name(&stack.name));
@@ -265,7 +340,7 @@ async fn write_stack(
.config
.skip_secret_interp
.then_some(&periphery_config().secrets),
&run_directory,
run_directory.as_ref(),
&mut res.logs,
)
.await
@@ -275,7 +350,14 @@ async fn write_stack(
return Err(anyhow!("failed to write environment file"));
}
};
Ok((run_directory, env_file_path))
Ok((
run_directory,
// Env file paths are already relative to run directory,
// so need to pass original env_file_path here.
env_file_path
.is_some()
.then_some(&stack.config.env_file_path),
))
} else if stack.config.repo.is_empty() {
if stack.config.file_contents.trim().is_empty() {
return Err(anyhow!("Must either input compose file contents directly, or use file one host / git repo options."));
@@ -296,7 +378,7 @@ async fn write_stack(
.config
.skip_secret_interp
.then_some(&periphery_config().secrets),
&run_directory,
run_directory.as_ref(),
&mut res.logs,
)
.await
@@ -324,7 +406,12 @@ async fn write_stack(
format!("failed to write compose file to {file_path:?}")
})?;
Ok((run_directory, env_file_path))
Ok((
run_directory,
env_file_path
.is_some()
.then_some(&stack.config.env_file_path),
))
} else {
// ================
// REPO BASED FILES
@@ -362,27 +449,55 @@ async fn write_stack(
}
};
let env_file_path = stack
.config
.run_directory
.parse::<PathBuf>()
.context("Invalid run_directory")?
.join(&stack.config.env_file_path)
.display()
.to_string();
let clone_or_pull_res = if stack.config.reclone {
State
.resolve(
CloneRepo {
args,
git_token,
environment: env_vars,
env_file_path,
skip_secret_interp: stack.config.skip_secret_interp,
// repo replacer only needed for on_clone / on_pull,
// which aren't available for stacks
replacers: Default::default(),
},
(),
)
.await
} else {
State
.resolve(
PullOrCloneRepo {
args,
git_token,
environment: env_vars,
env_file_path,
skip_secret_interp: stack.config.skip_secret_interp,
// repo replacer only needed for on_clone / on_pull,
// which aren't available for stacks
replacers: Default::default(),
},
(),
)
.await
};
let RepoActionResponse {
logs,
commit_hash,
commit_message,
env_file_path,
} = match State
.resolve(
PullOrCloneRepo {
args,
git_token,
environment: env_vars,
env_file_path: stack.config.env_file_path.clone(),
skip_secret_interp: stack.config.skip_secret_interp,
// repo replacer only needed for on_clone / on_pull,
// which aren't available for stacks
replacers: Default::default(),
},
(),
)
.await
{
} = match clone_or_pull_res {
Ok(res) => res,
Err(e) => {
let error = format_serror(
@@ -407,7 +522,12 @@ async fn write_stack(
return Err(anyhow!("Stopped after repo pull failure"));
}
Ok((run_directory, env_file_path))
Ok((
run_directory,
env_file_path
.is_some()
.then_some(&stack.config.env_file_path),
))
}
}
@@ -422,7 +542,8 @@ async fn compose_down(
.map(|service| format!(" {service}"))
.unwrap_or_default();
let log = run_komodo_command(
"destroy container",
"compose down",
None,
format!("{docker_compose} -p {project} down{service_arg}"),
)
.await;

View File

@@ -50,6 +50,7 @@ impl DockerClient {
.into_iter()
.map(|container| {
Ok(ContainerListItem {
server_id: None,
name: container
.names
.context("no names on container")?
@@ -88,6 +89,7 @@ impl DockerClient {
.collect()
})
.unwrap_or_default(),
labels: container.labels.unwrap_or_default(),
})
})
.collect()
@@ -935,7 +937,7 @@ pub async fn docker_login(
#[instrument]
pub async fn pull_image(image: &str) -> Log {
let command = format!("docker pull {image}");
run_komodo_command("docker pull", command).await
run_komodo_command("docker pull", None, command).await
}
pub fn stop_container_command(

View File

@@ -66,3 +66,14 @@ pub fn log_grep(
}
}
}
/// Interpolate Periphery-configured secrets into `input`, using
/// double-bracket (`[[KEY]]`) syntax via the `svi` crate.
///
/// Returns the interpolated string together with a list of
/// (String, String) replacer pairs, which callers use with
/// `svi::replace_in_string` to redact secret values back out of
/// command / stdout / stderr before logging.
pub fn interpolate_variables(
  input: &str,
) -> svi::Result<(String, Vec<(String, String)>)> {
  svi::interpolate_variables(
    input,
    // Secrets are sourced from the periphery config file.
    &periphery_config().secrets,
    svi::Interpolator::DoubleBrackets,
    // NOTE(review): `true` presumably makes missing variables an error —
    // confirm against the svi crate docs.
    true,
  )
}

View File

@@ -1,4 +1,4 @@
use std::{net::SocketAddr, time::Instant};
use std::net::SocketAddr;
use anyhow::{anyhow, Context};
use axum::{
@@ -11,6 +11,7 @@ use axum::{
Router,
};
use axum_extra::{headers::ContentType, TypedHeader};
use derive_variants::ExtractVariant;
use resolver_api::Resolver;
use serror::{AddStatusCode, AddStatusCodeError, Json};
use uuid::Uuid;
@@ -40,13 +41,12 @@ async fn handler(
Ok((TypedHeader(ContentType::json()), res??))
}
#[instrument(name = "PeripheryHandler")]
async fn task(
req_id: Uuid,
request: crate::api::PeripheryRequest,
) -> anyhow::Result<String> {
let timer = Instant::now();
let variant = request.extract_variant();
let res =
State
.resolve_request(request, ())
@@ -59,16 +59,12 @@ async fn task(
});
if let Err(e) = &res {
warn!("request {req_id} error: {e:#}");
warn!("request {req_id} | type: {variant:?} | error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("request {req_id} | resolve time: {elapsed:?}");
res
}
#[instrument(level = "debug")]
async fn guard_request_by_passkey(
req: Request<Body>,
next: Next,
@@ -100,7 +96,6 @@ async fn guard_request_by_passkey(
}
}
#[instrument(level = "debug")]
async fn guard_request_by_ip(
req: Request<Body>,
next: Next,

View File

@@ -91,10 +91,13 @@ impl StatsClient {
}
pub fn get_system_stats(&self) -> SystemStats {
let total_mem = self.system.total_memory();
let available_mem = self.system.available_memory();
SystemStats {
cpu_perc: self.system.global_cpu_usage(),
mem_used_gb: self.system.used_memory() as f64 / BYTES_PER_GB,
mem_total_gb: self.system.total_memory() as f64 / BYTES_PER_GB,
mem_free_gb: self.system.free_memory() as f64 / BYTES_PER_GB,
mem_used_gb: (total_mem - available_mem) as f64 / BYTES_PER_GB,
mem_total_gb: total_mem as f64 / BYTES_PER_GB,
disks: self.get_disks(),
polling_rate: self.stats.polling_rate,
refresh_ts: self.stats.refresh_ts,

View File

@@ -1,109 +0,0 @@
# Changelog
## <ins>Komodo v1.14 (Sep 2024)</ins>
- Renamed the project to **Komodo**.
- Manage docker networks, volumes, and images.
- Manage Containers at the server level, without creating any Deployment.
- Add bulk Start / Restart / Pause actions for all containers on a server.
- Add **Secret** mode to Variables to hide the value in updates / logs
- Secret mode also prevents any non-admin users from retrieving the value from the API. Non admin users will still see the variable name.
- Interpolate Variables / Secrets into everything I could think of
- Deployment / Stack / Repo / Build **extra args**.
- Deployment **command**.
- Build **pre build**.
- Repo **on_clone / on_pull**.
- Added **Hetzner Singapore** datacenter for Hetzner ServerTemplates
- **Removed Google Font** - now just use system local font to avoid any third party calls.
## <ins>Monitor v1.13 - Komodo (Aug 2024)</ins>
- This is the first named release, as I think it is really big. The Komodo Dragon is the largest species of Monitor lizard.
- **Deploy docker compose** with the new **Stack** resource.
- Can define the compose file in the UI, or direct Monitor to clone a git repo containing compose files.
- Use webhooks to redeploy the stack on push to the repo
- Manage the environment variables passed to the compose command.
- **Builds** can now be configured with an alternate repository name to push the image under.
- An optional tag can also be configured to be postfixed onto the version, like image:1.13-aarch64.
This helps for pushing alternate build configurations under the same image repo, just under different tags.
- **Repos** can now be "built" using builders. The idea is, you spawn an AWS instance, clone a repo, execute a shell command
(like running a script in the repo), and terminating the instance. The script can build a binary, and push it to some binary repository.
Users will have to manage their own versioning though.
- **High level UI Updates** courtesy of @karamvirsingh98
## <ins>v1.12 (July 2024)</ins>
- Break free of GitHub dependence. Use other git providers, including self-hosted ones.
- Same for Docker registry. You can also now use any docker registry for your images.
## <ins>v1 (Spring 2024)</ins>
- **New resource types**:
- **Repo**: Clone / pull configured repositories on desired Server. Run shell commands in the repo on every clone / pull to achieve automation. Listen for pushes to a particular branch to automatically pull the repo and run the command.
- **Procedure**: Combine multiple *executions* (Deploy, RunBuild) and run them in sequence or in parallel. *RunProcedure* is an execution type, meaning procedures can run *other procedures*.
- **Builder**: Ephemeral builder configuration has moved to being an API / UI managed entity for greater observability and ease of management.
- **Alerter**: Define multiple alerting endpoints and manage them via the API / UI.
- Slack support continues with the *Slack* Alerter variant.
- Send JSON serialized alert data to any HTTP endpoint with the *Custom* Alerter variant.
- **Template**: Define a template for your cloud provider's VM configuration
- Launch VMs based on the template and automatically add them as Monitor servers.
- Supports AWS EC2 and Hetzner Cloud.
- **Sync**: Sync resources declared in toml files in Github repos.
- Manage resources declaratively, with git history for configuration rollbacks.
- See the actions which will be performed in the UI, and execute them upon manual confirmation.
- Use a Git webhook to automatically execute syncs on git push.
- **Resource Tagging**
- Attach multiple *tags* to resources, which can be used to group related resources together. These can be used to filter resources in the UI.
- For example, resources can be given tags for *environment*, like `Prod`, `Uat`, or `Dev`. This can be combined with tags for the larger system the resource is a part of, such as `Authentication`, `Logging`, or `Messaging`.
- Proper tagging will make it easy to find resources and visualize system components, even as the number of resources grows large.
- **Variables**
- Manage global, non-secret key-value pairs using the API / UI.
- These values can be interpolated into deployment `environments` and build `build_args`
- **Core Accounts and Secrets**
- Docker / Github accounts and Secrets can now be configured in the Core configuration file.
- They can still be added to the Periphery configuration as before. Accounts / Secrets defined in the Core configuration will be preferentially used over like ones defined in Periphery configuration.
- **User Groups**
- Admins can now create User Groups and assign permissions to them as if they were a user.
- Multiple users can then be added to the group, and a user can be added to multiple groups.
- Users in the group inherit the group's permissions.
- **Builds**
- Build log displays the **latest commit hash and message**.
- In-progress builds are able to be **cancelled before completion**.
- Specify a specific commit hash to build from.
- **Deployments**
- Filter log lines using multiple search terms.
- **Alerting**
- The alerting system has been redesigned with a stateful model.
- Alerts can be in an Open or a Resolved state, and alerts are only sent on state changes.
- For example, say a server has just crossed 80% memory usage, the configured memory threshold. An alert will be created in the Open state and the alert data will be sent out. Later, it has dropped down to 70%. The alert will be changed to the Resolved state and the alert data will again be sent.
- In addition to server usage alerts, Monitor now supports deployment state change alerts. These are sent when a deployment's state changes without being caused by a Monitor action. For example, if a deployment goes from the Running state to the Exited state unexpectedly, say from a crash, an alert will be sent.
- Current and past alerts can be retrieved using the API and viewed on the UI.
- **New UI**:
- The Monitor UI has been revamped to support the new features and improve the user experience.
## <ins>v0 (Winter 2022)</ins>
- Move Core and Periphery implementation to Rust.
- Add AWS Ec2 ephemeral build instance support.
- Configuration added to core configuration file.
- Automatic build versioning system, supporting image rollback.
- Realtime and historical system stats - CPU, memory, disk.
- Simple stats alerting based on out-of-bounds values for system stats.
- Support sending alerts to Slack.
## <ins>Pre-versioned releases</ins>
- Defined main resource types:
- Server
- Deployment
- Build
- Basics of Monitor:
- Build docker images from Github repos.
- Manage image deployment on connected servers, see container status, get container logs.
- Add account credentials in Periphery configuration.
- Core and Periphery implemented in Typescript.

View File

@@ -1,4 +1,4 @@
# Komodo
*A system to build and deploy software accross many servers*
*A system to build and deploy software across many servers*
Docs: [https://docs.rs/komodo_client/latest/komodo_client](https://docs.rs/komodo_client/latest/komodo_client)

View File

@@ -23,7 +23,10 @@ pub use server_template::*;
pub use stack::*;
pub use sync::*;
use crate::entities::{NoData, I64};
use crate::{
api::write::CommitSync,
entities::{NoData, I64},
};
pub trait KomodoExecuteRequest: HasResponse {}
@@ -101,9 +104,11 @@ pub enum Execution {
// SYNC
RunSync(RunSync),
CommitSync(CommitSync), // This is a special case, its actually a write operation.
// STACK
DeployStack(DeployStack),
DeployStackIfChanged(DeployStackIfChanged),
StartStack(StartStack),
RestartStack(RestartStack),
PauseStack(PauseStack),

View File

@@ -9,8 +9,6 @@ use crate::entities::update::Update;
use super::KomodoExecuteRequest;
/// Deploys the target stack. `docker compose up`. Response: [Update]
///
/// Note. If the stack is already deployed, it will be destroyed first.
#[typeshare]
#[derive(
Debug,
@@ -32,6 +30,30 @@ pub struct DeployStack {
pub stop_time: Option<i32>,
}
/// Checks deployed contents vs latest contents,
/// and only if any changes are found
/// will `docker compose up`. Response: [Update]
#[typeshare]
#[derive(
  Debug,
  Clone,
  PartialEq,
  Serialize,
  Deserialize,
  Request,
  EmptyTraits,
  Parser,
)]
#[empty_traits(KomodoExecuteRequest)]
#[response(Update)]
pub struct DeployStackIfChanged {
  /// Id or name of the target Stack.
  pub stack: String,
  /// Override the default termination max time.
  /// Only used if the stack needs to be taken down first.
  pub stop_time: Option<i32>,
}
//
/// Starts the target stack. `docker compose start`. Response: [Update]
@@ -128,7 +150,7 @@ pub struct UnpauseStack {
//
/// Starts the target stack. `docker compose stop`. Response: [Update]
/// Stops the target stack. `docker compose stop`. Response: [Update]
#[typeshare]
#[derive(
Debug,

View File

@@ -4,7 +4,7 @@ use resolver_api::derive::Request;
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use crate::entities::update::Update;
use crate::entities::{update::Update, ResourceTargetVariant};
use super::KomodoExecuteRequest;
@@ -25,4 +25,11 @@ use super::KomodoExecuteRequest;
pub struct RunSync {
/// Id or name
pub sync: String,
/// Only execute sync on a specific resource type.
/// Combine with `resource_id` to specify resource.
pub resource_type: Option<ResourceTargetVariant>,
/// Only execute sync on a specific resources.
/// Combine with `resource_type` to specify resources.
/// Supports name or id.
pub resources: Option<Vec<String>>,
}

View File

@@ -7,11 +7,14 @@
//! - Path: `/auth`, `/user`, `/read`, `/write`, `/execute`
//! - Headers:
//! - Content-Type: `application/json`
//! - Authorication: `your_jwt`
//! - Authorization: `your_jwt`
//! - X-Api-Key: `your_api_key`
//! - X-Api-Secret: `your_api_secret`
//! - Use either Authorization *or* X-Api-Key and X-Api-Secret to authenticate requests.
//! - Body: JSON specifying the request type (`type`) and the parameters (`params`).
//!
//! You can create API keys for your user, or for a Service User with limited permissions,
//! from the Komodo UI Settings page.
//!
//! To call the api, construct JSON bodies following
//! the schemas given in [read], [mod@write], [execute], and so on.
@@ -28,6 +31,18 @@
//!
//! The request's parent module (eg. [read], [mod@write]) determines the http path which
//! must be used for the requests. For example, requests under [read] are made using http path `/read`.
//!
//! ## Curl Example
//!
//! Putting it all together, here is an example `curl` for [write::UpdateBuild], to update the version:
//!
//! ```text
//! curl --header "Content-Type: application/json" \
//! --header "X-Api-Key: your_api_key" \
//! --header "X-Api-Secret: your_api_secret" \
//! --data '{ "type": "UpdateBuild", "params": { "id": "67076689ed600cfdd52ac637", "config": { "version": "1.15.9" } } }' \
//! https://komodo.example.com/write
//! ```
//!
//! ## Modules
//!

View File

@@ -120,6 +120,9 @@ pub struct GetDeploymentLog {
/// Max: 5000.
#[serde(default = "default_tail")]
pub tail: U64,
/// Enable `--timestamps`
#[serde(default)]
pub timestamps: bool,
}
fn default_tail() -> u64 {
@@ -156,6 +159,9 @@ pub struct SearchDeploymentLog {
/// Invert the results, ie return all lines that DON'T match the terms / combinator.
#[serde(default)]
pub invert: bool,
/// Enable `--timestamps`
#[serde(default)]
pub timestamps: bool,
}
#[typeshare]

View File

@@ -262,6 +262,25 @@ pub type ListDockerContainersResponse = Vec<ContainerListItem>;
//

/// List all docker containers across all servers.
/// Response: [ListAllDockerContainersResponse].
#[typeshare]
#[derive(
  Serialize, Deserialize, Debug, Clone, Request, EmptyTraits,
)]
#[empty_traits(KomodoReadRequest)]
#[response(ListAllDockerContainersResponse)]
pub struct ListAllDockerContainers {
  /// Filter by server id or name.
  #[serde(default)]
  pub servers: Vec<String>,
}

#[typeshare]
pub type ListAllDockerContainersResponse = Vec<ContainerListItem>;
//
/// Inspect a docker container on the server. Response: [Container].
#[typeshare]
#[derive(
@@ -303,6 +322,9 @@ pub struct GetContainerLog {
/// Max: 5000.
#[serde(default = "default_tail")]
pub tail: U64,
/// Enable `--timestamps`
#[serde(default)]
pub timestamps: bool,
}
fn default_tail() -> u64 {
@@ -341,6 +363,9 @@ pub struct SearchContainerLog {
/// Invert the results, ie return all lines that DON'T match the terms / combinator.
#[serde(default)]
pub invert: bool,
/// Enable `--timestamps`
#[serde(default)]
pub timestamps: bool,
}
#[typeshare]

View File

@@ -69,6 +69,9 @@ pub struct GetStackServiceLog {
/// Max: 5000.
#[serde(default = "default_tail")]
pub tail: U64,
/// Enable `--timestamps`
#[serde(default)]
pub timestamps: bool,
}
fn default_tail() -> u64 {
@@ -107,6 +110,9 @@ pub struct SearchStackServiceLog {
/// Invert the results, ie return all lines that DON'T match the terms / combinator.
#[serde(default)]
pub invert: bool,
/// Enable `--timestamps`
#[serde(default)]
pub timestamps: bool,
}
#[typeshare]

View File

@@ -103,7 +103,7 @@ pub struct RenameStack {
//
/// Rename the stack at id to the given name. Response: [Update].
/// Update file contents in Files on Server or Git Repo mode. Response: [Update].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Request, EmptyTraits,
@@ -111,7 +111,7 @@ pub struct RenameStack {
#[empty_traits(KomodoWriteRequest)]
#[response(Update)]
pub struct WriteStackFileContents {
/// The name or id of the Stack to write the contents to.
/// The name or id of the target Stack.
#[serde(alias = "id", alias = "name")]
pub stack: String,
/// The file path relative to the stack run directory,

View File

@@ -1,3 +1,4 @@
use clap::Parser;
use derive_empty_traits::EmptyTraits;
use resolver_api::derive::Request;
use serde::{Deserialize, Serialize};
@@ -5,6 +6,7 @@ use typeshare::typeshare;
use crate::entities::{
sync::{ResourceSync, _PartialResourceSyncConfig},
update::Update,
NoData,
};
@@ -98,15 +100,44 @@ pub struct RefreshResourceSyncPending {
//
/// Commits matching resources updated configuration to the target resource sync. Response: [Update]
///
/// Note. Will fail if the Sync is not `managed`.
/// Update file contents in Files on Server or Git Repo mode. Response: [Update].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Request, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(ResourceSync)]
#[response(Update)]
pub struct WriteSyncFileContents {
/// The name or id of the target Sync.
#[serde(alias = "id", alias = "name")]
pub sync: String,
/// If this file was under a resource folder, this will be the folder.
/// Otherwise, it should be empty string.
pub resource_path: String,
/// The file path relative to the resource path.
pub file_path: String,
/// The contents to write.
pub contents: String,
}
//
/// Exports matching resources, and writes to the target sync's resource file. Response: [Update]
///
/// Note. Will fail if the Sync is not `managed`.
#[typeshare]
#[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Request,
EmptyTraits,
Parser,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(Update)]
pub struct CommitSync {
/// Id or name
#[serde(alias = "id", alias = "name")]

Some files were not shown because too many files have changed in this diff Show More