diff --git a/.gitignore b/.gitignore index 6421117ee..0c0cf3d3c 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,6 @@ dist .env .env.development creds.toml -core.config.toml \ No newline at end of file +core.config.toml +.syncs +.stacks \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 8d19c5bc4..3e294dadd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,11 +41,11 @@ dependencies = [ [[package]] name = "alerter" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "axum 0.7.5", - "dotenv", + "dotenvy", "envy", "logger", "monitor_client", @@ -261,9 +261,9 @@ dependencies = [ [[package]] name = "aws-sdk-ec2" -version = "1.61.0" +version = "1.62.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31b40d3fc7f1f2165f4705c4293f74890bfd8a9bef37d481e7b45957a051f2" +checksum = "a1de56a6030ebc05c84d23b6ed37971244d63ac8eb23e13781dd6d5c550c8d2f" dependencies = [ "aws-credential-types", "aws-runtime", @@ -285,9 +285,9 @@ dependencies = [ [[package]] name = "aws-sdk-ecr" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa6847b27655b7f8c7ad505c3cc49493b3bbde0915e47bfe5071dd1d56bac760" +checksum = "8b744392dee153a7e3181cb61b894c6450bcb6a392fc85949dfc520c93d3d0fc" dependencies = [ "aws-credential-types", "aws-runtime", @@ -778,9 +778,9 @@ dependencies = [ [[package]] name = "bollard" -version = "0.16.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aed08d3adb6ebe0eff737115056652670ae290f177759aac19c30456135f94c" +checksum = "4a063d51a634c7137ecd9f6390ec78e1c512e84c9ded80198ec7df3339a16a33" dependencies = [ "base64 0.22.1", "bollard-stubs", @@ -793,7 +793,7 @@ dependencies = [ "hyper 1.4.1", "hyper-named-pipe", "hyper-util", - "hyperlocal-next", + "hyperlocal", "log", "pin-project-lite", "serde", @@ -811,9 +811,9 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.44.0-rc.2" 
+version = "1.45.0-rc.26.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "709d9aa1c37abb89d40f19f5d0ad6f0d88cb1581264e571c9350fc5bb89cf1c5" +checksum = "6d7c5415e3a6bc6d3e99eff6268e488fd4ee25e7b28c10f08fa6760bd9de16e4" dependencies = [ "serde", "serde_repr", @@ -911,9 +911,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.9" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462" +checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc" dependencies = [ "clap_builder", "clap_derive", @@ -921,9 +921,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.9" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942" +checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99" dependencies = [ "anstream", "anstyle", @@ -933,9 +933,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.8" +version = "4.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -967,7 +967,7 @@ dependencies = [ [[package]] name = "command" -version = "1.12.0" +version = "1.13.0" dependencies = [ "monitor_client", "run_command", @@ -1244,10 +1244,10 @@ dependencies = [ ] [[package]] -name = "dotenv" -version = "0.15.0" +name = "dotenvy" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" +checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] name = "dyn-clone" @@ -1351,7 +1351,7 @@ dependencies = [ 
[[package]] name = "formatting" -version = "1.12.0" +version = "1.13.0" dependencies = [ "serror", ] @@ -1482,13 +1482,15 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "git" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "command", "formatting", "monitor_client", "run_command", + "svi", + "tokio", "tracing", ] @@ -1854,10 +1856,10 @@ dependencies = [ ] [[package]] -name = "hyperlocal-next" -version = "0.9.0" +name = "hyperlocal" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf569d43fa9848e510358c07b80f4adf34084ddc28c6a4a651ee8474c070dcc" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" dependencies = [ "hex", "http-body-util", @@ -1879,7 +1881,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -2083,7 +2085,7 @@ dependencies = [ [[package]] name = "logger" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "monitor_client", @@ -2152,11 +2154,11 @@ dependencies = [ [[package]] name = "migrator" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "chrono", - "dotenv", + "dotenvy", "envy", "logger", "monitor_client", @@ -2193,13 +2195,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" dependencies = [ + "hermit-abi", "libc", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2286,7 +2289,7 @@ dependencies = [ [[package]] name = "monitor_cli" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "clap", @@ -2306,7 +2309,7 @@ dependencies = [ [[package]] name = "monitor_client" -version = "1.12.0" +version = "1.13.0" 
dependencies = [ "anyhow", "async_timing_util", @@ -2338,7 +2341,7 @@ dependencies = [ [[package]] name = "monitor_core" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "async_timing_util", @@ -2350,7 +2353,7 @@ dependencies = [ "base64 0.22.1", "bcrypt", "derive_variants", - "dotenv", + "dotenvy", "envy", "formatting", "futures", @@ -2376,6 +2379,7 @@ dependencies = [ "run_command", "serde", "serde_json", + "serde_yaml", "serror", "sha2", "slack_client_rs", @@ -2395,7 +2399,7 @@ dependencies = [ [[package]] name = "monitor_periphery" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "async_timing_util", @@ -2404,9 +2408,10 @@ dependencies = [ "bollard", "clap", "command", - "dotenv", + "dotenvy", "envy", "formatting", + "futures", "git", "logger", "merge_config_files", @@ -2417,6 +2422,7 @@ dependencies = [ "run_command", "serde", "serde_json", + "serde_yaml", "serror", "svi", "sysinfo", @@ -2529,16 +2535,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - [[package]] name = "object" version = "0.32.2" @@ -2699,9 +2695,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.2.0" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" +checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6" dependencies = [ "num-traits", ] @@ -2839,7 +2835,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "periphery_client" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "monitor_client", @@ -3024,9 +3020,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" +version = "1.10.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -3576,12 +3572,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.120" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ "indexmap 2.2.6", "itoa", + "memchr", "ryu", "serde", ] @@ -3609,9 +3606,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] @@ -3667,6 +3664,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.2.6", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + [[package]] name = "serror" version = "0.4.6" @@ -3909,15 +3919,14 @@ checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" [[package]] name = "sysinfo" -version = "0.30.13" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" +checksum = "d4115055da5f572fff541dd0c4e61b0262977f453cc9fe04be83aba25a89bdab" dependencies = [ - "cfg-if", "core-foundation-sys", "libc", + "memchr", "ntapi", - "once_cell", "rayon", "windows", ] @@ -3978,10 +3987,10 @@ dependencies = [ 
[[package]] name = "tests" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", - "dotenv", + "dotenvy", "envy", "logger", "monitor_client", @@ -4072,21 +4081,20 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.1" +version = "1.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb2caba9f80616f438e09748d5acda951967e1ea58508ef53d9c6402485a46df" +checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -4101,9 +4109,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", @@ -4192,9 +4200,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.15" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac2caab0bf757388c6c0ae23b3293fdb463fee59434529014f85e3263b995c28" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", @@ -4204,18 +4212,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.16" +version = "0.22.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "278f3d518e152219c994ce877758516bca5e118eaed6996192a774fb9fbf0788" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ "indexmap 2.2.6", "serde", @@ -4566,6 +4574,12 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.7.1" @@ -4580,7 +4594,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "update_logger" -version = "1.12.0" +version = "1.13.0" dependencies = [ "anyhow", "logger", @@ -4812,11 +4826,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.52.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" dependencies = [ - "windows-core", + "windows-core 0.57.0", "windows-targets 0.52.5", ] @@ -4829,6 +4843,49 @@ dependencies = [ "windows-targets 0.52.5", ] +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.64", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.64", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.5", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -4970,9 +5027,9 @@ checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" -version = "0.6.8" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 31e4723dc..a8ff8135c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,12 +3,12 @@ resolver = "2" members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"] [workspace.package] -version = "1.12.0" +version = "1.13.0" edition = "2021" authors = ["mbecker20 "] license = "GPL-3.0-or-later" repository = "https://github.com/mbecker20/monitor" -homepage = "https://docs.monitor.mogh.tech" +homepage = "https://docs.monitor.dev" [patch.crates-io] monitor_client = { path = "client/core/rs" } @@ -40,7 +40,7 @@ mungos = "1.0.0" svi = "1.0.1" # ASYNC -tokio = { version = "1.38.1", features = ["full"] } +tokio = { version = "1.39.2", features = ["full"] } reqwest = { version = "0.12.5", features = ["json"] } tokio-util = "0.7.11" futures = "0.3.30" @@ -57,8 +57,9 @@ tokio-tungstenite = "0.23.1" ordered_hash_map = { version = "0.4.0", features = ["serde"] } serde = { version = "1.0.204", features = ["derive"] } strum = { version = "0.26.3", features = ["derive"] } -serde_json = "1.0.120" -toml = "0.8.15" +serde_json = 
"1.0.122" +serde_yaml = "0.9.34" +toml = "0.8.19" # ERROR anyhow = "1.0.86" @@ -73,8 +74,8 @@ opentelemetry = "0.23.0" tracing = "0.1.40" # CONFIG -clap = { version = "4.5.9", features = ["derive"] } -dotenv = "0.15.0" +clap = { version = "4.5.13", features = ["derive"] } +dotenvy = "0.15.7" envy = "0.4.2" # CRYPTO @@ -90,19 +91,19 @@ jwt = "0.16.0" hex = "0.4.3" # SYSTEM -bollard = "0.16.1" -sysinfo = "0.30.13" +bollard = "0.17.0" +sysinfo = "0.31.2" # CLOUD aws-config = "1.5.4" -aws-sdk-ec2 = "1.60.0" -aws-sdk-ecr = "1.35.0" +aws-sdk-ec2 = "1.62.0" +aws-sdk-ecr = "1.37.0" # MISC derive_builder = "0.20.0" typeshare = "1.0.3" octorust = "0.7.0" colored = "2.1.0" -regex = "1.10.5" +regex = "1.10.6" bson = "2.11.0" diff --git a/bin/alerter/Cargo.toml b/bin/alerter/Cargo.toml index 67949e057..0f03a8c46 100644 --- a/bin/alerter/Cargo.toml +++ b/bin/alerter/Cargo.toml @@ -19,5 +19,5 @@ tracing.workspace = true axum.workspace = true anyhow.workspace = true serde.workspace = true -dotenv.workspace = true +dotenvy.workspace = true envy.workspace = true \ No newline at end of file diff --git a/bin/alerter/src/main.rs b/bin/alerter/src/main.rs index ccc3ba3f6..a28f4b1a2 100644 --- a/bin/alerter/src/main.rs +++ b/bin/alerter/src/main.rs @@ -21,7 +21,7 @@ fn default_port() -> u16 { } async fn app() -> anyhow::Result<()> { - dotenv::dotenv().ok(); + dotenvy::dotenv().ok(); logger::init(&Default::default())?; let Env { port } = diff --git a/bin/cli/README.md b/bin/cli/README.md index 9d3dd0443..c8533a46b 100644 --- a/bin/cli/README.md +++ b/bin/cli/README.md @@ -26,33 +26,6 @@ You can also bypass using any file and pass the information using `--url`, `--ke monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ... 
``` -### Run Syncs - -```sh -## Sync resources in a single file -monitor sync ./resources/deployments.toml - -## Sync resources gathered across multiple files in a directory -monitor sync ./resources - -## Path defaults to './resources', in this case you can just use: -monitor sync -``` - -#### Manual -```md -Runs syncs on resource files - -Usage: monitor sync [OPTIONS] [PATH] - -Arguments: - [PATH] The path of the resource folder / file Folder paths will recursively incorporate all the resources it finds under the folder [default: ./resources] - -Options: - --delete Will delete any resources that aren't included in the resource files - -h, --help Print help -``` - ### Run Executions ```sh @@ -70,16 +43,31 @@ Commands: none The "null" execution. Does nothing run-procedure Runs the target procedure. Response: [Update] run-build Runs the target build. Response: [Update] + cancel-build Cancels the target build. Only does anything if the build is `building` when called. Response: [Update] deploy Deploys the container for the target deployment. Response: [Update] start-container Starts the container for the target deployment. Response: [Update] + restart-container Restarts the container for the target deployment. Response: [Update] + pause-container Pauses the container for the target deployment. Response: [Update] + unpause-container Unpauses the container for the target deployment. Response: [Update] stop-container Stops the container for the target deployment. Response: [Update] - stop-all-containers Stops all deployments on the target server. Response: [Update] remove-container Stops and removes the container for the target deployment. Reponse: [Update] clone-repo Clones the target repo. Response: [Update] pull-repo Pulls the target repo. Response: [Update] + build-repo Builds the target repo, using the attached builder. Response: [Update] + cancel-repo-build Cancels the target repo build. Only does anything if the repo build is `building` when called. 
Response: [Update] + stop-all-containers Stops all containers on the target server. Response: [Update] prune-networks Prunes the docker networks on the target server. Response: [Update] prune-images Prunes the docker images on the target server. Response: [Update] prune-containers Prunes the docker containers on the target server. Response: [Update] + run-sync Runs the target resource sync. Response: [Update] + deploy-stack Deploys the target stack. `docker compose up`. Response: [Update] + start-stack Starts the target stack. `docker compose start`. Response: [Update] + restart-stack Restarts the target stack. `docker compose restart`. Response: [Update] + pause-stack Pauses the target stack. `docker compose pause`. Response: [Update] + unpause-stack Unpauses the target stack. `docker compose unpause`. Response: [Update] + stop-stack Starts the target stack. `docker compose stop`. Response: [Update] + destroy-stack Destoys the target stack. `docker compose down`. Response: [Update] + sleep help Print this message or the help of the given subcommand(s) Options: diff --git a/bin/cli/src/args.rs b/bin/cli/src/args.rs index 34bea9581..26da13e8a 100644 --- a/bin/cli/src/args.rs +++ b/bin/cli/src/args.rs @@ -39,23 +39,12 @@ fn default_creds() -> String { #[derive(Debug, Clone, Subcommand)] pub enum Command { - /// Runs syncs on resource files - Sync { - /// The path of the resource folder / file - /// Folder paths will recursively incorporate all the resources it finds under the folder - #[arg(default_value_t = String::from("./resources"))] - path: String, - - /// Will delete any resources that aren't included in the resource files. 
- #[arg(long, default_value_t = false)] - delete: bool, - }, - /// Runs an execution Execute { #[command(subcommand)] execution: Execution, }, + // Room for more } #[derive(Debug, Deserialize)] diff --git a/bin/cli/src/exec.rs b/bin/cli/src/exec.rs index 48e6ff8da..78faa08a3 100644 --- a/bin/cli/src/exec.rs +++ b/bin/cli/src/exec.rs @@ -27,12 +27,24 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::RunBuild(data) => { println!("{}: {data:?}", "Data".dimmed()) } + Execution::CancelBuild(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } Execution::Deploy(data) => { println!("{}: {data:?}", "Data".dimmed()) } Execution::StartContainer(data) => { println!("{}: {data:?}", "Data".dimmed()) } + Execution::RestartContainer(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::PauseContainer(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::UnpauseContainer(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } Execution::StopContainer(data) => { println!("{}: {data:?}", "Data".dimmed()) } @@ -48,6 +60,12 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::PullRepo(data) => { println!("{}: {data:?}", "Data".dimmed()) } + Execution::BuildRepo(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::CancelRepoBuild(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } Execution::PruneNetworks(data) => { println!("{}: {data:?}", "Data".dimmed()) } @@ -60,6 +78,27 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::RunSync(data) => { println!("{}: {data:?}", "Data".dimmed()) } + Execution::DeployStack(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::StartStack(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::RestartStack(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::PauseStack(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + 
Execution::UnpauseStack(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::StopStack(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } + Execution::DestroyStack(data) => { + println!("{}: {data:?}", "Data".dimmed()) + } Execution::Sleep(data) => { println!("{}: {data:?}", "Data".dimmed()) } @@ -78,12 +117,24 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::RunBuild(request) => { monitor_client().execute(request).await } + Execution::CancelBuild(request) => { + monitor_client().execute(request).await + } Execution::Deploy(request) => { monitor_client().execute(request).await } Execution::StartContainer(request) => { monitor_client().execute(request).await } + Execution::RestartContainer(request) => { + monitor_client().execute(request).await + } + Execution::PauseContainer(request) => { + monitor_client().execute(request).await + } + Execution::UnpauseContainer(request) => { + monitor_client().execute(request).await + } Execution::StopContainer(request) => { monitor_client().execute(request).await } @@ -99,6 +150,12 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::PullRepo(request) => { monitor_client().execute(request).await } + Execution::BuildRepo(request) => { + monitor_client().execute(request).await + } + Execution::CancelRepoBuild(request) => { + monitor_client().execute(request).await + } Execution::PruneNetworks(request) => { monitor_client().execute(request).await } @@ -111,6 +168,27 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> { Execution::RunSync(request) => { monitor_client().execute(request).await } + Execution::DeployStack(request) => { + monitor_client().execute(request).await + } + Execution::StartStack(request) => { + monitor_client().execute(request).await + } + Execution::RestartStack(request) => { + monitor_client().execute(request).await + } + Execution::PauseStack(request) => { + monitor_client().execute(request).await + } + 
Execution::UnpauseStack(request) => { + monitor_client().execute(request).await + } + Execution::StopStack(request) => { + monitor_client().execute(request).await + } + Execution::DestroyStack(request) => { + monitor_client().execute(request).await + } Execution::Sleep(request) => { let duration = Duration::from_millis(request.duration_ms as u64); diff --git a/bin/cli/src/main.rs b/bin/cli/src/main.rs index 4faaf2df9..62a5a3cbe 100644 --- a/bin/cli/src/main.rs +++ b/bin/cli/src/main.rs @@ -7,9 +7,7 @@ use monitor_client::api::read::GetVersion; mod args; mod exec; mod helpers; -mod maps; mod state; -mod sync; #[tokio::main] async fn main() -> anyhow::Result<()> { @@ -20,9 +18,6 @@ async fn main() -> anyhow::Result<()> { info!("monitor version: {}", version.to_string().blue().bold()); match &state::cli_args().command { - args::Command::Sync { path, delete } => { - sync::run(path, *delete).await? - } args::Command::Execute { execution } => { exec::run(execution.to_owned()).await? } diff --git a/bin/cli/src/maps.rs b/bin/cli/src/maps.rs deleted file mode 100644 index c8f8c7e4f..000000000 --- a/bin/cli/src/maps.rs +++ /dev/null @@ -1,327 +0,0 @@ -use std::{collections::HashMap, sync::OnceLock}; - -use monitor_client::{ - api::read, - entities::{ - alerter::Alerter, build::Build, builder::Builder, - deployment::Deployment, procedure::Procedure, repo::Repo, - server::Server, server_template::ServerTemplate, - sync::ResourceSync, tag::Tag, user::User, user_group::UserGroup, - variable::Variable, - }, -}; - -use crate::state::monitor_client; - -pub fn name_to_build() -> &'static HashMap { - static NAME_TO_BUILD: OnceLock> = - OnceLock::new(); - NAME_TO_BUILD.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullBuilds::default()), - ) - .expect("failed to get builds") - .into_iter() - .map(|build| (build.name.clone(), build)) - .collect() - }) -} - -pub fn id_to_build() -> &'static HashMap { - static ID_TO_BUILD: OnceLock> = - 
OnceLock::new(); - ID_TO_BUILD.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullBuilds::default()), - ) - .expect("failed to get builds") - .into_iter() - .map(|build| (build.id.clone(), build)) - .collect() - }) -} - -pub fn name_to_deployment() -> &'static HashMap { - static NAME_TO_DEPLOYMENT: OnceLock> = - OnceLock::new(); - NAME_TO_DEPLOYMENT.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullDeployments::default()), - ) - .expect("failed to get deployments") - .into_iter() - .map(|deployment| (deployment.name.clone(), deployment)) - .collect() - }) -} - -pub fn id_to_deployment() -> &'static HashMap { - static ID_TO_DEPLOYMENT: OnceLock> = - OnceLock::new(); - ID_TO_DEPLOYMENT.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullDeployments::default()), - ) - .expect("failed to get deployments") - .into_iter() - .map(|deployment| (deployment.id.clone(), deployment)) - .collect() - }) -} - -pub fn name_to_server() -> &'static HashMap { - static NAME_TO_SERVER: OnceLock> = - OnceLock::new(); - NAME_TO_SERVER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullServers::default()), - ) - .expect("failed to get servers") - .into_iter() - .map(|server| (server.name.clone(), server)) - .collect() - }) -} - -pub fn id_to_server() -> &'static HashMap { - static ID_TO_SERVER: OnceLock> = - OnceLock::new(); - ID_TO_SERVER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullServers::default()), - ) - .expect("failed to get servers") - .into_iter() - .map(|server| (server.id.clone(), server)) - .collect() - }) -} - -pub fn name_to_builder() -> &'static HashMap { - static NAME_TO_BUILDER: OnceLock> = - OnceLock::new(); - NAME_TO_BUILDER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullBuilders::default()), - ) - .expect("failed to get builders") - .into_iter() - 
.map(|builder| (builder.name.clone(), builder)) - .collect() - }) -} - -pub fn id_to_builder() -> &'static HashMap { - static ID_TO_BUILDER: OnceLock> = - OnceLock::new(); - ID_TO_BUILDER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullBuilders::default()), - ) - .expect("failed to get builders") - .into_iter() - .map(|builder| (builder.id.clone(), builder)) - .collect() - }) -} - -pub fn name_to_alerter() -> &'static HashMap { - static NAME_TO_ALERTER: OnceLock> = - OnceLock::new(); - NAME_TO_ALERTER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullAlerters::default()), - ) - .expect("failed to get alerters") - .into_iter() - .map(|alerter| (alerter.name.clone(), alerter)) - .collect() - }) -} - -pub fn id_to_alerter() -> &'static HashMap { - static ID_TO_ALERTER: OnceLock> = - OnceLock::new(); - ID_TO_ALERTER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullAlerters::default()), - ) - .expect("failed to get alerters") - .into_iter() - .map(|alerter| (alerter.id.clone(), alerter)) - .collect() - }) -} - -pub fn name_to_repo() -> &'static HashMap { - static NAME_TO_ALERTER: OnceLock> = - OnceLock::new(); - NAME_TO_ALERTER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullRepos::default()), - ) - .expect("failed to get repos") - .into_iter() - .map(|repo| (repo.name.clone(), repo)) - .collect() - }) -} - -pub fn id_to_repo() -> &'static HashMap { - static ID_TO_ALERTER: OnceLock> = - OnceLock::new(); - ID_TO_ALERTER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullRepos::default()), - ) - .expect("failed to get repos") - .into_iter() - .map(|repo| (repo.id.clone(), repo)) - .collect() - }) -} - -pub fn name_to_procedure() -> &'static HashMap { - static NAME_TO_PROCEDURE: OnceLock> = - OnceLock::new(); - NAME_TO_PROCEDURE.get_or_init(|| { - futures::executor::block_on( - 
monitor_client().read(read::ListFullProcedures::default()), - ) - .expect("failed to get procedures") - .into_iter() - .map(|procedure| (procedure.name.clone(), procedure)) - .collect() - }) -} - -pub fn id_to_procedure() -> &'static HashMap { - static ID_TO_PROCEDURE: OnceLock> = - OnceLock::new(); - ID_TO_PROCEDURE.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullProcedures::default()), - ) - .expect("failed to get procedures") - .into_iter() - .map(|procedure| (procedure.id.clone(), procedure)) - .collect() - }) -} - -pub fn name_to_server_template( -) -> &'static HashMap { - static NAME_TO_SERVER_TEMPLATE: OnceLock< - HashMap, - > = OnceLock::new(); - NAME_TO_SERVER_TEMPLATE.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullServerTemplates::default()), - ) - .expect("failed to get server templates") - .into_iter() - .map(|procedure| (procedure.name.clone(), procedure)) - .collect() - }) -} - -pub fn id_to_server_template( -) -> &'static HashMap { - static ID_TO_SERVER_TEMPLATE: OnceLock< - HashMap, - > = OnceLock::new(); - ID_TO_SERVER_TEMPLATE.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullServerTemplates::default()), - ) - .expect("failed to get server templates") - .into_iter() - .map(|procedure| (procedure.id.clone(), procedure)) - .collect() - }) -} - -pub fn name_to_resource_sync( -) -> &'static HashMap { - static NAME_TO_SYNC: OnceLock> = - OnceLock::new(); - NAME_TO_SYNC.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullResourceSyncs::default()), - ) - .expect("failed to get syncs") - .into_iter() - .map(|sync| (sync.name.clone(), sync)) - .collect() - }) -} - -pub fn id_to_resource_sync() -> &'static HashMap -{ - static ID_TO_SYNC: OnceLock> = - OnceLock::new(); - ID_TO_SYNC.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListFullResourceSyncs::default()), - ) - .expect("failed 
to get syncs") - .into_iter() - .map(|sync| (sync.id.clone(), sync)) - .collect() - }) -} - -pub fn name_to_user_group() -> &'static HashMap { - static NAME_TO_USER_GROUP: OnceLock> = - OnceLock::new(); - NAME_TO_USER_GROUP.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListUserGroups::default()), - ) - .expect("failed to get user groups") - .into_iter() - .map(|user_group| (user_group.name.clone(), user_group)) - .collect() - }) -} - -pub fn name_to_variable() -> &'static HashMap { - static NAME_TO_VARIABLE: OnceLock> = - OnceLock::new(); - NAME_TO_VARIABLE.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListVariables::default()), - ) - .expect("failed to get variables") - .into_iter() - .map(|variable| (variable.name.clone(), variable)) - .collect() - }) -} - -pub fn id_to_user() -> &'static HashMap { - static ID_TO_USER: OnceLock> = - OnceLock::new(); - ID_TO_USER.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListUsers::default()), - ) - .expect("failed to get users") - .into_iter() - .map(|user| (user.id.clone(), user)) - .collect() - }) -} - -pub fn id_to_tag() -> &'static HashMap { - static ID_TO_TAG: OnceLock> = OnceLock::new(); - ID_TO_TAG.get_or_init(|| { - futures::executor::block_on( - monitor_client().read(read::ListTags::default()), - ) - .expect("failed to get tags") - .into_iter() - .map(|tag| (tag.id.clone(), tag)) - .collect() - }) -} diff --git a/bin/cli/src/sync/file.rs b/bin/cli/src/sync/file.rs deleted file mode 100644 index 950e34eea..000000000 --- a/bin/cli/src/sync/file.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::{ - fs, - path::{Path, PathBuf}, - str::FromStr, -}; - -use anyhow::{anyhow, Context}; -use colored::Colorize; -use monitor_client::entities::toml::ResourcesToml; -use serde::de::DeserializeOwned; - -pub fn read_resources(path: &str) -> anyhow::Result { - let mut res = ResourcesToml::default(); - let path = - 
PathBuf::from_str(path).context("invalid resources path")?; - read_resources_recursive(&path, &mut res)?; - Ok(res) -} - -fn read_resources_recursive( - path: &Path, - resources: &mut ResourcesToml, -) -> anyhow::Result<()> { - let res = - fs::metadata(path).context("failed to get path metadata")?; - if res.is_file() { - if !path - .extension() - .map(|ext| ext == "toml") - .unwrap_or_default() - { - return Ok(()); - } - let more = match parse_toml_file::(path) { - Ok(res) => res, - Err(e) => { - warn!("failed to parse {:?}. skipping file | {e:#}", path); - return Ok(()); - } - }; - info!( - "{} from {}", - "adding resources".green().bold(), - path.display().to_string().blue().bold() - ); - resources.servers.extend(more.servers); - resources.deployments.extend(more.deployments); - resources.builds.extend(more.builds); - resources.repos.extend(more.repos); - resources.procedures.extend(more.procedures); - resources.builders.extend(more.builders); - resources.alerters.extend(more.alerters); - resources.server_templates.extend(more.server_templates); - resources.resource_syncs.extend(more.resource_syncs); - resources.user_groups.extend(more.user_groups); - resources.variables.extend(more.variables); - Ok(()) - } else if res.is_dir() { - let directory = fs::read_dir(path) - .context("failed to read directory contents")?; - for entry in directory.into_iter().flatten() { - if let Err(e) = - read_resources_recursive(&entry.path(), resources) - { - warn!("failed to read additional resources at path | {e:#}"); - } - } - Ok(()) - } else { - Err(anyhow!("resources path is neither file nor directory")) - } -} - -fn parse_toml_file( - path: impl AsRef, -) -> anyhow::Result { - let contents = std::fs::read_to_string(path) - .context("failed to read file contents")?; - toml::from_str(&contents).context("failed to parse toml contents") -} diff --git a/bin/cli/src/sync/mod.rs b/bin/cli/src/sync/mod.rs deleted file mode 100644 index 9a820cd03..000000000 --- a/bin/cli/src/sync/mod.rs 
+++ /dev/null @@ -1,174 +0,0 @@ -use colored::Colorize; -use monitor_client::entities::{ - self, alerter::Alerter, build::Build, builder::Builder, - deployment::Deployment, procedure::Procedure, repo::Repo, - server::Server, server_template::ServerTemplate, -}; - -use crate::{helpers::wait_for_enter, state::cli_args}; - -mod file; -mod resource; -mod resources; -mod user_group; -mod variables; - -use resource::ResourceSync; - -pub async fn run(path: &str, delete: bool) -> anyhow::Result<()> { - info!("resources path: {}", path.blue().bold()); - if delete { - warn!("Delete mode {}", "enabled".bold()); - } - - let resources = file::read_resources(path)?; - - info!("computing sync actions..."); - - let (server_creates, server_updates, server_deletes) = - resource::get_updates::(resources.servers, delete)?; - let (deployment_creates, deployment_updates, deployment_deletes) = - resource::get_updates::( - resources.deployments, - delete, - )?; - let (build_creates, build_updates, build_deletes) = - resource::get_updates::(resources.builds, delete)?; - let (repo_creates, repo_updates, repo_deletes) = - resource::get_updates::(resources.repos, delete)?; - let (procedure_creates, procedure_updates, procedure_deletes) = - resource::get_updates::(resources.procedures, delete)?; - let (builder_creates, builder_updates, builder_deletes) = - resource::get_updates::(resources.builders, delete)?; - let (alerter_creates, alerter_updates, alerter_deletes) = - resource::get_updates::(resources.alerters, delete)?; - let ( - server_template_creates, - server_template_updates, - server_template_deletes, - ) = resource::get_updates::( - resources.server_templates, - delete, - )?; - let ( - resource_sync_creates, - resource_sync_updates, - resource_sync_deletes, - ) = resource::get_updates::( - resources.resource_syncs, - delete, - )?; - - let (variable_creates, variable_updates, variable_deletes) = - variables::get_updates(resources.variables, delete)?; - - let (user_group_creates, 
user_group_updates, user_group_deletes) = - user_group::get_updates(resources.user_groups, delete).await?; - - if resource_sync_creates.is_empty() - && resource_sync_updates.is_empty() - && resource_sync_deletes.is_empty() - && server_template_creates.is_empty() - && server_template_updates.is_empty() - && server_template_deletes.is_empty() - && server_creates.is_empty() - && server_updates.is_empty() - && server_deletes.is_empty() - && deployment_creates.is_empty() - && deployment_updates.is_empty() - && deployment_deletes.is_empty() - && build_creates.is_empty() - && build_updates.is_empty() - && build_deletes.is_empty() - && builder_creates.is_empty() - && builder_updates.is_empty() - && builder_deletes.is_empty() - && alerter_creates.is_empty() - && alerter_updates.is_empty() - && alerter_deletes.is_empty() - && repo_creates.is_empty() - && repo_updates.is_empty() - && repo_deletes.is_empty() - && procedure_creates.is_empty() - && procedure_updates.is_empty() - && procedure_deletes.is_empty() - && user_group_creates.is_empty() - && user_group_updates.is_empty() - && user_group_deletes.is_empty() - && variable_creates.is_empty() - && variable_updates.is_empty() - && variable_deletes.is_empty() - { - info!("{}. 
exiting.", "nothing to do".green().bold()); - return Ok(()); - } - - if !cli_args().yes { - wait_for_enter("run sync")?; - } - - // No deps - entities::sync::ResourceSync::run_updates( - resource_sync_creates, - resource_sync_updates, - resource_sync_deletes, - ) - .await; - ServerTemplate::run_updates( - server_template_creates, - server_template_updates, - server_template_deletes, - ) - .await; - Server::run_updates(server_creates, server_updates, server_deletes) - .await; - Alerter::run_updates( - alerter_creates, - alerter_updates, - alerter_deletes, - ) - .await; - - // Dependant on server - Builder::run_updates( - builder_creates, - builder_updates, - builder_deletes, - ) - .await; - Repo::run_updates(repo_creates, repo_updates, repo_deletes).await; - - // Dependant on builder - Build::run_updates(build_creates, build_updates, build_deletes) - .await; - - // Dependant on server / build - Deployment::run_updates( - deployment_creates, - deployment_updates, - deployment_deletes, - ) - .await; - - // Dependant on everything - Procedure::run_updates( - procedure_creates, - procedure_updates, - procedure_deletes, - ) - .await; - variables::run_updates( - variable_creates, - variable_updates, - variable_deletes, - ) - .await; - user_group::run_updates( - user_group_creates, - user_group_updates, - user_group_deletes, - ) - .await; - - Ok(()) -} diff --git a/bin/cli/src/sync/resource.rs b/bin/cli/src/sync/resource.rs deleted file mode 100644 index f951bf59a..000000000 --- a/bin/cli/src/sync/resource.rs +++ /dev/null @@ -1,358 +0,0 @@ -use std::collections::HashMap; - -use colored::Colorize; -use monitor_client::{ - api::write::{UpdateDescription, UpdateTagsOnResource}, - entities::{ - resource::Resource, toml::ResourceToml, update::ResourceTarget, - }, -}; -use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff}; -use serde::Serialize; - -use crate::maps::id_to_tag; - -pub type ToUpdate = Vec>; -pub type ToCreate = Vec>; -/// Vec of resource names -pub type 
ToDelete = Vec; - -type UpdatesResult = (ToCreate, ToUpdate, ToDelete); - -pub struct ToUpdateItem { - pub id: String, - pub resource: ResourceToml, - pub update_description: bool, - pub update_tags: bool, -} - -pub trait ResourceSync: Sized { - type Config: Clone - + Default - + Send - + From - + PartialDiff - + 'static; - type Info: Default + 'static; - type PartialConfig: std::fmt::Debug - + Clone - + Send - + Default - + From - + From - + Serialize - + MaybeNone - + 'static; - type ConfigDiff: Diff + MaybeNone; - - fn display() -> &'static str; - - fn resource_target(id: String) -> ResourceTarget; - - fn name_to_resource( - ) -> &'static HashMap>; - - /// Creates the resource and returns created id. - async fn create( - resource: ResourceToml, - ) -> anyhow::Result; - - /// Updates the resource at id with the partial config. - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()>; - - /// Apply any changes to incoming toml partial config - /// before it is diffed against existing config - fn validate_partial_config(_config: &mut Self::PartialConfig) {} - - /// Diffs the declared toml (partial) against the full existing config. - /// Removes all fields from toml (partial) that haven't changed. 
- fn get_diff( - original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result; - - /// Apply any changes to computed config diff - /// before logging - fn validate_diff(_diff: &mut Self::ConfigDiff) {} - - /// Deletes the target resource - async fn delete(id_or_name: String) -> anyhow::Result<()>; - - async fn run_updates( - to_create: ToCreate, - to_update: ToUpdate, - to_delete: ToDelete, - ) { - for resource in to_create { - let name = resource.name.clone(); - let tags = resource.tags.clone(); - let description = resource.description.clone(); - let id = match Self::create(resource).await { - Ok(id) => id, - Err(e) => { - warn!( - "failed to create {} {name} | {e:#}", - Self::display(), - ); - continue; - } - }; - run_update_tags::(id.clone(), &name, tags).await; - run_update_description::(id, &name, description).await; - info!( - "{} {} '{}'", - "created".green().bold(), - Self::display(), - name.bold(), - ); - } - - for ToUpdateItem { - id, - resource, - update_description, - update_tags, - } in to_update - { - // Update resource - let name = resource.name.clone(); - let tags = resource.tags.clone(); - let description = resource.description.clone(); - - if update_description { - run_update_description::( - id.clone(), - &name, - description, - ) - .await; - } - - if update_tags { - run_update_tags::(id.clone(), &name, tags).await; - } - - if !resource.config.is_none() { - if let Err(e) = Self::update(id, resource).await { - warn!( - "failed to update config on {} {name} | {e:#}", - Self::display() - ); - } else { - info!( - "{} {} '{}' configuration", - "updated".blue().bold(), - Self::display(), - name.bold(), - ); - } - } - } - - for resource in to_delete { - if let Err(e) = Self::delete(resource.clone()).await { - warn!( - "failed to delete {} {resource} | {e:#}", - Self::display() - ); - } else { - info!( - "{} {} '{}'", - "deleted".red().bold(), - Self::display(), - resource.bold(), - ); - } - } - } -} - -/// Gets all the resources to 
update, logging along the way. -pub fn get_updates( - resources: Vec>, - delete: bool, -) -> anyhow::Result> { - let map = Resource::name_to_resource(); - - let mut to_create = ToCreate::::new(); - let mut to_update = ToUpdate::::new(); - let mut to_delete = ToDelete::new(); - - if delete { - for resource in map.values() { - if !resources.iter().any(|r| r.name == resource.name) { - to_delete.push(resource.name.clone()); - } - } - } - - for mut resource in resources { - match map.get(&resource.name) { - Some(original) => { - // First merge toml resource config (partial) onto default resource config. - // Makes sure things that aren't defined in toml (come through as None) actually get removed. - let config: Resource::Config = resource.config.into(); - resource.config = config.into(); - - Resource::validate_partial_config(&mut resource.config); - - let mut diff = Resource::get_diff( - original.config.clone(), - resource.config, - )?; - - Resource::validate_diff(&mut diff); - - let original_tags = original - .tags - .iter() - .filter_map(|id| { - id_to_tag().get(id).map(|t| t.name.clone()) - }) - .collect::>(); - - // Only proceed if there are any fields to update, - // or a change to tags / description - if diff.is_none() - && resource.description == original.description - && resource.tags == original_tags - { - continue; - } - - println!( - "\n{}: {}: '{}'\n-------------------", - "UPDATE".blue(), - Resource::display(), - resource.name.bold(), - ); - let mut lines = Vec::::new(); - if resource.description != original.description { - lines.push(format!( - "{}: 'description'\n{}: {}\n{}: {}", - "field".dimmed(), - "from".dimmed(), - original.description.red(), - "to".dimmed(), - resource.description.green() - )) - } - if resource.tags != original_tags { - let from = format!("{:?}", original_tags).red(); - let to = format!("{:?}", resource.tags).green(); - lines.push(format!( - "{}: 'tags'\n{}: {from}\n{}: {to}", - "field".dimmed(), - "from".dimmed(), - "to".dimmed(), 
- )); - } - lines.extend(diff.iter_field_diffs().map( - |FieldDiff { field, from, to }| { - format!( - "{}: '{field}'\n{}: {}\n{}: {}", - "field".dimmed(), - "from".dimmed(), - from.red(), - "to".dimmed(), - to.green() - ) - }, - )); - println!("{}", lines.join("\n-------------------\n")); - - // Minimizes updates through diffing. - resource.config = diff.into(); - - let update = ToUpdateItem { - id: original.id.clone(), - update_description: resource.description - != original.description, - update_tags: resource.tags != original_tags, - resource, - }; - - to_update.push(update); - } - None => { - println!( - "\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}", - "CREATE".green(), - Resource::display(), - resource.name.bold().green(), - "description".dimmed(), - resource.description, - "tags".dimmed(), - resource.tags, - "config".dimmed(), - serde_json::to_string_pretty(&resource.config)? - ); - to_create.push(resource); - } - } - } - - for name in &to_delete { - println!( - "\n{}: {}: '{}'\n-------------------", - "DELETE".red(), - Resource::display(), - name.bold(), - ); - } - - Ok((to_create, to_update, to_delete)) -} - -pub async fn run_update_tags( - id: String, - name: &str, - tags: Vec, -) { - // Update tags - if let Err(e) = crate::state::monitor_client() - .write(UpdateTagsOnResource { - target: Resource::resource_target(id), - tags, - }) - .await - { - warn!( - "failed to update tags on {} {name} | {e:#}", - Resource::display(), - ); - } else { - info!( - "{} {} '{}' tags", - "updated".blue().bold(), - Resource::display(), - name.bold(), - ); - } -} - -pub async fn run_update_description( - id: String, - name: &str, - description: String, -) { - if let Err(e) = crate::state::monitor_client() - .write(UpdateDescription { - target: Resource::resource_target(id.clone()), - description, - }) - .await - { - warn!("failed to update resource {id} description | {e:#}"); - } else { - info!( - "{} {} '{}' description", - "updated".blue().bold(), - Resource::display(), - 
name.bold(), - ); - } -} diff --git a/bin/cli/src/sync/resources/alerter.rs b/bin/cli/src/sync/resources/alerter.rs deleted file mode 100644 index c5321bd1f..000000000 --- a/bin/cli/src/sync/resources/alerter.rs +++ /dev/null @@ -1,77 +0,0 @@ -use partial_derive2::PartialDiff; -use std::collections::HashMap; - -use monitor_client::{ - api::write::{CreateAlerter, DeleteAlerter, UpdateAlerter}, - entities::{ - alerter::{ - Alerter, AlerterConfig, AlerterConfigDiff, PartialAlerterConfig, - }, - resource::Resource, - toml::ResourceToml, - update::ResourceTarget, - }, -}; - -use crate::{ - maps::name_to_alerter, state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for Alerter { - type Config = AlerterConfig; - type Info = (); - type PartialConfig = PartialAlerterConfig; - type ConfigDiff = AlerterConfigDiff; - - fn display() -> &'static str { - "alerter" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Alerter(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_alerter() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateAlerter { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(UpdateAlerter { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - Ok(original.partial_diff(update)) - } - - async fn delete(id: String) -> anyhow::Result<()> { - monitor_client().write(DeleteAlerter { id }).await?; - Ok(()) - } -} diff --git a/bin/cli/src/sync/resources/build.rs b/bin/cli/src/sync/resources/build.rs deleted file mode 100644 index 07ebd4ab0..000000000 --- a/bin/cli/src/sync/resources/build.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::collections::HashMap; - -use monitor_client::{ - 
api::write::{CreateBuild, DeleteBuild, UpdateBuild}, - entities::{ - build::{ - Build, BuildConfig, BuildConfigDiff, BuildInfo, - PartialBuildConfig, - }, - resource::Resource, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::PartialDiff; - -use crate::{ - maps::{id_to_builder, name_to_build}, - state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for Build { - type Config = BuildConfig; - type Info = BuildInfo; - type PartialConfig = PartialBuildConfig; - type ConfigDiff = BuildConfigDiff; - - fn display() -> &'static str { - "build" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Build(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_build() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateBuild { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(UpdateBuild { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - mut original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - // need to replace the builder id with name - original.builder_id = id_to_builder() - .get(&original.builder_id) - .map(|b| b.name.clone()) - .unwrap_or_default(); - - Ok(original.partial_diff(update)) - } - - fn validate_diff(diff: &mut Self::ConfigDiff) { - if let Some((_, to)) = &diff.version { - if to.is_none() { - diff.version = None; - } - } - } - - async fn delete(id: String) -> anyhow::Result<()> { - monitor_client().write(DeleteBuild { id }).await?; - Ok(()) - } -} diff --git a/bin/cli/src/sync/resources/builder.rs b/bin/cli/src/sync/resources/builder.rs deleted file mode 100644 index f3a5ea4b2..000000000 --- a/bin/cli/src/sync/resources/builder.rs +++ /dev/null @@ -1,86 +0,0 @@ -use std::collections::HashMap; - -use 
monitor_client::{ - api::write::{CreateBuilder, DeleteBuilder, UpdateBuilder}, - entities::{ - builder::{ - Builder, BuilderConfig, BuilderConfigDiff, PartialBuilderConfig, - }, - resource::Resource, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::PartialDiff; - -use crate::{ - maps::{id_to_server, name_to_builder}, - state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for Builder { - type Config = BuilderConfig; - type Info = (); - type PartialConfig = PartialBuilderConfig; - type ConfigDiff = BuilderConfigDiff; - - fn display() -> &'static str { - "builder" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Builder(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_builder() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateBuilder { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(UpdateBuilder { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - mut original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - // need to replace server builder id with name - if let BuilderConfig::Server(config) = &mut original { - config.server_id = id_to_server() - .get(&config.server_id) - .map(|s| s.name.clone()) - .unwrap_or_default(); - } - - Ok(original.partial_diff(update)) - } - - async fn delete(id: String) -> anyhow::Result<()> { - monitor_client().write(DeleteBuilder { id }).await?; - Ok(()) - } -} diff --git a/bin/cli/src/sync/resources/deployment.rs b/bin/cli/src/sync/resources/deployment.rs deleted file mode 100644 index 73fbaa107..000000000 --- a/bin/cli/src/sync/resources/deployment.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::collections::HashMap; - -use monitor_client::{ - api::write::{self, 
DeleteDeployment}, - entities::{ - deployment::{ - Deployment, DeploymentConfig, DeploymentConfigDiff, - DeploymentImage, PartialDeploymentConfig, - }, - resource::Resource, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::PartialDiff; - -use crate::{ - maps::{id_to_build, id_to_server, name_to_deployment}, - state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for Deployment { - type Config = DeploymentConfig; - type Info = (); - type PartialConfig = PartialDeploymentConfig; - type ConfigDiff = DeploymentConfigDiff; - - fn display() -> &'static str { - "deployment" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Deployment(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_deployment() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(write::CreateDeployment { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(write::UpdateDeployment { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - mut original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - // need to replace the server id with name - original.server_id = id_to_server() - .get(&original.server_id) - .map(|s| s.name.clone()) - .unwrap_or_default(); - - // need to replace the build id with name - if let DeploymentImage::Build { build_id, version } = - &original.image - { - original.image = DeploymentImage::Build { - build_id: id_to_build() - .get(build_id) - .map(|b| b.name.clone()) - .unwrap_or_default(), - version: *version, - }; - } - - Ok(original.partial_diff(update)) - } - - async fn delete(id: String) -> anyhow::Result<()> { - monitor_client().write(DeleteDeployment { id }).await?; - Ok(()) - } -} diff --git 
a/bin/cli/src/sync/resources/mod.rs b/bin/cli/src/sync/resources/mod.rs deleted file mode 100644 index 8a4cb1259..000000000 --- a/bin/cli/src/sync/resources/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod alerter; -mod build; -mod builder; -mod deployment; -mod procedure; -mod repo; -mod server; -mod server_template; -mod sync; diff --git a/bin/cli/src/sync/resources/procedure.rs b/bin/cli/src/sync/resources/procedure.rs deleted file mode 100644 index d14399aad..000000000 --- a/bin/cli/src/sync/resources/procedure.rs +++ /dev/null @@ -1,275 +0,0 @@ -use std::collections::HashMap; - -use colored::Colorize; -use monitor_client::{ - api::{ - execute::Execution, - write::{CreateProcedure, DeleteProcedure, UpdateProcedure}, - }, - entities::{ - procedure::{ - PartialProcedureConfig, Procedure, ProcedureConfig, - ProcedureConfigDiff, - }, - resource::Resource, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::{MaybeNone, PartialDiff}; - -use crate::{ - maps::{ - id_to_build, id_to_deployment, id_to_procedure, id_to_repo, - id_to_resource_sync, id_to_server, name_to_procedure, - }, - state::monitor_client, - sync::resource::{ - run_update_description, run_update_tags, ResourceSync, ToCreate, - ToDelete, ToUpdate, ToUpdateItem, - }, -}; - -impl ResourceSync for Procedure { - type Config = ProcedureConfig; - type Info = (); - type PartialConfig = PartialProcedureConfig; - type ConfigDiff = ProcedureConfigDiff; - - fn display() -> &'static str { - "procedure" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Procedure(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_procedure() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateProcedure { - name: resource.name, - config: resource.config, - }) - .await - .map(|p| p.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - 
.write(UpdateProcedure { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - async fn run_updates( - mut to_create: ToCreate, - mut to_update: ToUpdate, - to_delete: ToDelete, - ) { - for name in to_delete { - if let Err(e) = crate::state::monitor_client() - .write(DeleteProcedure { id: name.clone() }) - .await - { - warn!("failed to delete procedure {name} | {e:#}",); - } else { - info!( - "{} procedure '{}'", - "deleted".red().bold(), - name.bold(), - ); - } - } - - if to_update.is_empty() && to_create.is_empty() { - return; - } - - for i in 0..10 { - let mut to_pull = Vec::new(); - for ToUpdateItem { - id, - resource, - update_description, - update_tags, - } in &to_update - { - // Update resource - let name = resource.name.clone(); - let tags = resource.tags.clone(); - let description = resource.description.clone(); - if *update_description { - run_update_description::( - id.clone(), - &name, - description, - ) - .await; - } - if *update_tags { - run_update_tags::(id.clone(), &name, tags).await; - } - if !resource.config.is_none() { - if let Err(e) = - Self::update(id.clone(), resource.clone()).await - { - if i == 9 { - warn!( - "failed to update {} {name} | {e:#}", - Self::display() - ); - } - continue; - } - } - - info!("{} {name} updated", Self::display()); - // have to clone out so to_update is mutable - to_pull.push(id.clone()); - } - // - to_update.retain(|resource| !to_pull.contains(&resource.id)); - - let mut to_pull = Vec::new(); - for resource in &to_create { - let name = resource.name.clone(); - let tags = resource.tags.clone(); - let description = resource.description.clone(); - let id = match Self::create(resource.clone()).await { - Ok(id) => id, - Err(e) => { - if i == 9 { - warn!( - "failed to create {} {name} | {e:#}", - Self::display(), - ); - } - continue; - } - }; - run_update_tags::(id.clone(), &name, tags).await; - run_update_description::(id, &name, description) - .await; - info!("{} {name} created", Self::display()); - 
to_pull.push(name); - } - to_create.retain(|resource| !to_pull.contains(&resource.name)); - - if to_update.is_empty() && to_create.is_empty() { - // info!("all procedures synced"); - return; - } - } - warn!("procedure sync loop exited after max iterations"); - } - - fn get_diff( - mut original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - for stage in &mut original.stages { - for execution in &mut stage.executions { - match &mut execution.execution { - Execution::None(_) | Execution::Sleep(_) => {} - Execution::RunProcedure(config) => { - config.procedure = id_to_procedure() - .get(&config.procedure) - .map(|p| p.name.clone()) - .unwrap_or_default(); - } - Execution::RunBuild(config) => { - config.build = id_to_build() - .get(&config.build) - .map(|b| b.name.clone()) - .unwrap_or_default(); - } - Execution::Deploy(config) => { - config.deployment = id_to_deployment() - .get(&config.deployment) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::StartContainer(config) => { - config.deployment = id_to_deployment() - .get(&config.deployment) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::StopContainer(config) => { - config.deployment = id_to_deployment() - .get(&config.deployment) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::RemoveContainer(config) => { - config.deployment = id_to_deployment() - .get(&config.deployment) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::CloneRepo(config) => { - config.repo = id_to_repo() - .get(&config.repo) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::PullRepo(config) => { - config.repo = id_to_repo() - .get(&config.repo) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::StopAllContainers(config) => { - config.server = id_to_server() - .get(&config.server) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::PruneNetworks(config) => { - config.server = id_to_server() - 
.get(&config.server) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::PruneImages(config) => { - config.server = id_to_server() - .get(&config.server) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::PruneContainers(config) => { - config.server = id_to_server() - .get(&config.server) - .map(|d| d.name.clone()) - .unwrap_or_default(); - } - Execution::RunSync(config) => { - config.sync = id_to_resource_sync() - .get(&config.sync) - .map(|s| s.name.clone()) - .unwrap_or_default(); - } - } - } - } - Ok(original.partial_diff(update)) - } - - async fn delete(_: String) -> anyhow::Result<()> { - unreachable!() - } -} diff --git a/bin/cli/src/sync/resources/repo.rs b/bin/cli/src/sync/resources/repo.rs deleted file mode 100644 index bd795256f..000000000 --- a/bin/cli/src/sync/resources/repo.rs +++ /dev/null @@ -1,84 +0,0 @@ -use std::collections::HashMap; - -use monitor_client::{ - api::write::{CreateRepo, DeleteRepo, UpdateRepo}, - entities::{ - repo::{ - PartialRepoConfig, Repo, RepoConfig, RepoConfigDiff, RepoInfo, - }, - resource::Resource, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::PartialDiff; - -use crate::{ - maps::{id_to_server, name_to_repo}, - state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for Repo { - type Config = RepoConfig; - type Info = RepoInfo; - type PartialConfig = PartialRepoConfig; - type ConfigDiff = RepoConfigDiff; - - fn display() -> &'static str { - "repo" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Repo(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_repo() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateRepo { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(UpdateRepo { 
- id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - mut original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - // Need to replace server id with name - original.server_id = id_to_server() - .get(&original.server_id) - .map(|s| s.name.clone()) - .unwrap_or_default(); - - Ok(original.partial_diff(update)) - } - - async fn delete(id: String) -> anyhow::Result<()> { - monitor_client().write(DeleteRepo { id }).await?; - Ok(()) - } -} diff --git a/bin/cli/src/sync/resources/server.rs b/bin/cli/src/sync/resources/server.rs deleted file mode 100644 index aa7a18b87..000000000 --- a/bin/cli/src/sync/resources/server.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::collections::HashMap; - -use monitor_client::{ - api::write::{CreateServer, DeleteServer, UpdateServer}, - entities::{ - resource::Resource, - server::{ - PartialServerConfig, Server, ServerConfig, ServerConfigDiff, - }, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::PartialDiff; - -use crate::{ - maps::name_to_server, state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for Server { - type Config = ServerConfig; - type Info = (); - type PartialConfig = PartialServerConfig; - type ConfigDiff = ServerConfigDiff; - - fn display() -> &'static str { - "server" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Server(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_server() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateServer { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(UpdateServer { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { 
- Ok(original.partial_diff(update)) - } - - async fn delete(id: String) -> anyhow::Result<()> { - monitor_client().write(DeleteServer { id }).await?; - Ok(()) - } -} diff --git a/bin/cli/src/sync/resources/server_template.rs b/bin/cli/src/sync/resources/server_template.rs deleted file mode 100644 index 667fc3ea4..000000000 --- a/bin/cli/src/sync/resources/server_template.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::collections::HashMap; - -use monitor_client::{ - api::write::{ - CreateServerTemplate, DeleteServerTemplate, UpdateServerTemplate, - }, - entities::{ - resource::Resource, - server_template::{ - PartialServerTemplateConfig, ServerTemplate, - ServerTemplateConfig, ServerTemplateConfigDiff, - }, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::PartialDiff; - -use crate::{ - maps::name_to_server_template, state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for ServerTemplate { - type Config = ServerTemplateConfig; - type Info = (); - type PartialConfig = PartialServerTemplateConfig; - type ConfigDiff = ServerTemplateConfigDiff; - - fn display() -> &'static str { - "server template" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::ServerTemplate(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_server_template() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateServerTemplate { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(UpdateServerTemplate { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - Ok(original.partial_diff(update)) - } - - async fn delete(id: String) -> anyhow::Result<()> { - 
monitor_client().write(DeleteServerTemplate { id }).await?; - Ok(()) - } -} diff --git a/bin/cli/src/sync/resources/sync.rs b/bin/cli/src/sync/resources/sync.rs deleted file mode 100644 index 89762cae7..000000000 --- a/bin/cli/src/sync/resources/sync.rs +++ /dev/null @@ -1,81 +0,0 @@ -use std::collections::HashMap; - -use monitor_client::{ - api::write::{ - CreateResourceSync, DeleteResourceSync, UpdateResourceSync, - }, - entities::{ - self, - resource::Resource, - sync::{ - PartialResourceSyncConfig, ResourceSyncConfig, - ResourceSyncConfigDiff, ResourceSyncInfo, - }, - toml::ResourceToml, - update::ResourceTarget, - }, -}; -use partial_derive2::PartialDiff; - -use crate::{ - maps::name_to_resource_sync, state::monitor_client, - sync::resource::ResourceSync, -}; - -impl ResourceSync for entities::sync::ResourceSync { - type Config = ResourceSyncConfig; - type Info = ResourceSyncInfo; - type PartialConfig = PartialResourceSyncConfig; - type ConfigDiff = ResourceSyncConfigDiff; - - fn display() -> &'static str { - "resource sync" - } - - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::ResourceSync(id) - } - - fn name_to_resource( - ) -> &'static HashMap> - { - name_to_resource_sync() - } - - async fn create( - resource: ResourceToml, - ) -> anyhow::Result { - monitor_client() - .write(CreateResourceSync { - name: resource.name, - config: resource.config, - }) - .await - .map(|res| res.id) - } - - async fn update( - id: String, - resource: ResourceToml, - ) -> anyhow::Result<()> { - monitor_client() - .write(UpdateResourceSync { - id, - config: resource.config, - }) - .await?; - Ok(()) - } - - fn get_diff( - original: Self::Config, - update: Self::PartialConfig, - ) -> anyhow::Result { - Ok(original.partial_diff(update)) - } - - async fn delete(id: String) -> anyhow::Result<()> { - monitor_client().write(DeleteResourceSync { id }).await?; - Ok(()) - } -} diff --git a/bin/cli/src/sync/user_group.rs b/bin/cli/src/sync/user_group.rs deleted file 
mode 100644 index d1c7ee922..000000000 --- a/bin/cli/src/sync/user_group.rs +++ /dev/null @@ -1,388 +0,0 @@ -use std::cmp::Ordering; - -use anyhow::Context; -use colored::Colorize; -use monitor_client::{ - api::{ - read::ListUserTargetPermissions, - write::{ - CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup, - UpdatePermissionOnTarget, - }, - }, - entities::{ - permission::UserTarget, - toml::{PermissionToml, UserGroupToml}, - update::ResourceTarget, - }, -}; - -use crate::maps::{ - id_to_alerter, id_to_build, id_to_builder, id_to_deployment, - id_to_procedure, id_to_repo, id_to_resource_sync, id_to_server, - id_to_server_template, id_to_user, name_to_user_group, -}; - -pub struct UpdateItem { - user_group: UserGroupToml, - update_users: bool, - update_permissions: bool, -} - -pub struct DeleteItem { - id: String, - name: String, -} - -pub async fn get_updates( - user_groups: Vec, - delete: bool, -) -> anyhow::Result<( - Vec, - Vec, - Vec, -)> { - let map = name_to_user_group(); - - let mut to_create = Vec::::new(); - let mut to_update = Vec::::new(); - let mut to_delete = Vec::::new(); - - if delete { - for user_group in map.values() { - if !user_groups.iter().any(|ug| ug.name == user_group.name) { - to_delete.push(DeleteItem { - id: user_group.id.clone(), - name: user_group.name.clone(), - }); - } - } - } - - let id_to_user = id_to_user(); - - for mut user_group in user_groups { - let original = match map.get(&user_group.name).cloned() { - Some(original) => original, - None => { - println!( - "\n{}: user group: {}\n{}: {:?}\n{}: {:?}", - "CREATE".green(), - user_group.name.bold().green(), - "users".dimmed(), - user_group.users, - "permissions".dimmed(), - user_group.permissions, - ); - to_create.push(user_group); - continue; - } - }; - - let mut original_users = original - .users - .into_iter() - .filter_map(|user_id| { - id_to_user.get(&user_id).map(|u| u.username.clone()) - }) - .collect::>(); - - let mut original_permissions = 
crate::state::monitor_client() - .read(ListUserTargetPermissions { - user_target: UserTarget::UserGroup(original.id), - }) - .await - .context("failed to query for existing UserGroup permissions")? - .into_iter() - .map(|mut p| { - // replace the ids with names - match &mut p.resource_target { - ResourceTarget::System(_) => {} - ResourceTarget::Build(id) => { - *id = id_to_build() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::Builder(id) => { - *id = id_to_builder() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::Deployment(id) => { - *id = id_to_deployment() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::Server(id) => { - *id = id_to_server() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::Repo(id) => { - *id = id_to_repo() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::Alerter(id) => { - *id = id_to_alerter() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::Procedure(id) => { - *id = id_to_procedure() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::ServerTemplate(id) => { - *id = id_to_server_template() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - ResourceTarget::ResourceSync(id) => { - *id = id_to_resource_sync() - .get(id) - .map(|b| b.name.clone()) - .unwrap_or_default() - } - } - PermissionToml { - target: p.resource_target, - level: p.level, - } - }) - .collect::>(); - - original_users.sort(); - user_group.users.sort(); - - user_group.permissions.sort_by(sort_permissions); - original_permissions.sort_by(sort_permissions); - - let update_users = user_group.users != original_users; - let update_permissions = - user_group.permissions != original_permissions; - - // only push update after failed diff - if update_users || update_permissions { - println!( - "\n{}: user group: 
'{}'\n-------------------", - "UPDATE".blue(), - user_group.name.bold(), - ); - let mut lines = Vec::::new(); - if update_users { - let adding = user_group - .users - .iter() - .filter(|user| !original_users.contains(user)) - .map(|user| user.as_str()) - .collect::>(); - let adding = if adding.is_empty() { - String::from("None").into() - } else { - adding.join(", ").green() - }; - let removing = original_users - .iter() - .filter(|user| !user_group.users.contains(user)) - .map(|user| user.as_str()) - .collect::>(); - let removing = if removing.is_empty() { - String::from("None").into() - } else { - removing.join(", ").red() - }; - lines.push(format!( - "{}: 'users'\n{}: {removing}\n{}: {adding}", - "field".dimmed(), - "removing".dimmed(), - "adding".dimmed(), - )) - } - if update_permissions { - let adding = user_group - .permissions - .iter() - .filter(|permission| { - !original_permissions.contains(permission) - }) - .map(|permission| format!("{permission:?}")) - .collect::>(); - let adding = if adding.is_empty() { - String::from("None").into() - } else { - adding.join(", ").green() - }; - let removing = original_permissions - .iter() - .filter(|permission| { - !user_group.permissions.contains(permission) - }) - .map(|permission| format!("{permission:?}")) - .collect::>(); - let removing = if removing.is_empty() { - String::from("None").into() - } else { - removing.join(", ").red() - }; - lines.push(format!( - "{}: 'permissions'\n{}: {removing}\n{}: {adding}", - "field".dimmed(), - "removing".dimmed(), - "adding".dimmed() - )) - } - println!("{}", lines.join("\n-------------------\n")); - to_update.push(UpdateItem { - user_group, - update_users, - update_permissions, - }); - } - } - - for d in &to_delete { - println!( - "\n{}: user group: '{}'\n-------------------", - "DELETE".red(), - d.name.bold(), - ); - } - - Ok((to_create, to_update, to_delete)) -} - -/// order permissions in deterministic way -fn sort_permissions( - a: &PermissionToml, - b: &PermissionToml, 
-) -> Ordering { - let (a_t, a_id) = a.target.extract_variant_id(); - let (b_t, b_id) = b.target.extract_variant_id(); - match (a_t.cmp(&b_t), a_id.cmp(b_id)) { - (Ordering::Greater, _) => Ordering::Greater, - (Ordering::Less, _) => Ordering::Less, - (_, Ordering::Greater) => Ordering::Greater, - (_, Ordering::Less) => Ordering::Less, - _ => Ordering::Equal, - } -} - -pub async fn run_updates( - to_create: Vec, - to_update: Vec, - to_delete: Vec, -) { - // Create the non-existant user groups - for user_group in to_create { - // Create the user group - if let Err(e) = crate::state::monitor_client() - .write(CreateUserGroup { - name: user_group.name.clone(), - }) - .await - { - warn!( - "failed to create user group {} | {e:#}", - user_group.name - ); - continue; - } else { - info!( - "{} user group '{}'", - "created".green().bold(), - user_group.name.bold(), - ); - }; - - set_users(user_group.name.clone(), user_group.users).await; - run_update_permissions(user_group.name, user_group.permissions) - .await; - } - - // Update the existing user groups - for UpdateItem { - user_group, - update_users, - update_permissions, - } in to_update - { - if update_users { - set_users(user_group.name.clone(), user_group.users).await; - } - if update_permissions { - run_update_permissions(user_group.name, user_group.permissions) - .await; - } - } - - for user_group in to_delete { - if let Err(e) = crate::state::monitor_client() - .write(DeleteUserGroup { id: user_group.id }) - .await - { - warn!( - "failed to delete user group {} | {e:#}", - user_group.name - ); - } else { - info!( - "{} user group '{}'", - "deleted".red().bold(), - user_group.name.bold(), - ); - } - } -} - -async fn set_users(user_group: String, users: Vec) { - if let Err(e) = crate::state::monitor_client() - .write(SetUsersInUserGroup { - user_group: user_group.clone(), - users, - }) - .await - { - warn!("failed to set users in group {user_group} | {e:#}"); - } else { - info!( - "{} user group '{}' users", - 
"updated".blue().bold(), - user_group.bold(), - ); - } -} - -async fn run_update_permissions( - user_group: String, - permissions: Vec, -) { - for PermissionToml { target, level } in permissions { - if let Err(e) = crate::state::monitor_client() - .write(UpdatePermissionOnTarget { - user_target: UserTarget::UserGroup(user_group.clone()), - resource_target: target.clone(), - permission: level, - }) - .await - { - warn!( - "failed to set permssion in group {user_group} | target: {target:?} | {e:#}", - ); - } else { - info!( - "{} user group '{}' permissions", - "updated".blue().bold(), - user_group.bold(), - ); - } - } -} diff --git a/bin/cli/src/sync/variables.rs b/bin/cli/src/sync/variables.rs deleted file mode 100644 index e9ba6da51..000000000 --- a/bin/cli/src/sync/variables.rs +++ /dev/null @@ -1,206 +0,0 @@ -use colored::Colorize; -use monitor_client::{ - api::write::{ - CreateVariable, DeleteVariable, UpdateVariableDescription, - UpdateVariableValue, - }, - entities::variable::Variable, -}; - -use crate::{maps::name_to_variable, state::monitor_client}; - -pub struct ToUpdateItem { - pub variable: Variable, - pub update_value: bool, - pub update_description: bool, -} - -pub fn get_updates( - variables: Vec, - delete: bool, -) -> anyhow::Result<(Vec, Vec, Vec)> { - let map = name_to_variable(); - - let mut to_create = Vec::::new(); - let mut to_update = Vec::::new(); - let mut to_delete = Vec::::new(); - - if delete { - for variable in map.values() { - if !variables.iter().any(|v| v.name == variable.name) { - to_delete.push(variable.name.clone()); - } - } - } - - for variable in variables { - match map.get(&variable.name) { - Some(original) => { - let item = ToUpdateItem { - update_value: original.value != variable.value, - update_description: original.description - != variable.description, - variable, - }; - if !item.update_value && !item.update_description { - continue; - } - println!( - "\n{}: variable: '{}'\n-------------------", - "UPDATE".blue(), - 
item.variable.name.bold(), - ); - - let mut lines = Vec::::new(); - - if item.update_value { - lines.push(format!( - "{}: 'value'\n{}: {}\n{}: {}", - "field".dimmed(), - "from".dimmed(), - original.value.red(), - "to".dimmed(), - item.variable.value.green() - )) - } - - if item.update_description { - lines.push(format!( - "{}: 'description'\n{}: {}\n{}: {}", - "field".dimmed(), - "from".dimmed(), - original.description.red(), - "to".dimmed(), - item.variable.description.green() - )) - } - - println!("{}", lines.join("\n-------------------\n")); - - to_update.push(item); - } - None => { - if variable.description.is_empty() { - println!( - "\n{}: variable: {}\n{}: {}", - "CREATE".green(), - variable.name.bold().green(), - "value".dimmed(), - variable.value, - ); - } else { - println!( - "\n{}: variable: {}\n{}: {}\n{}: {}", - "CREATE".green(), - variable.name.bold().green(), - "description".dimmed(), - variable.description, - "value".dimmed(), - variable.value, - ); - } - to_create.push(variable) - } - } - } - - for name in &to_delete { - println!( - "\n{}: variable: '{}'\n-------------------", - "DELETE".red(), - name.bold(), - ); - } - - Ok((to_create, to_update, to_delete)) -} - -pub async fn run_updates( - to_create: Vec, - to_update: Vec, - to_delete: Vec, -) { - for variable in to_create { - if let Err(e) = monitor_client() - .write(CreateVariable { - name: variable.name.clone(), - value: variable.value, - description: variable.description, - }) - .await - { - warn!("failed to create variable {} | {e:#}", variable.name); - } else { - info!( - "{} variable '{}'", - "created".green().bold(), - variable.name.bold(), - ); - }; - } - - for ToUpdateItem { - variable, - update_value, - update_description, - } in to_update - { - if update_value { - if let Err(e) = monitor_client() - .write(UpdateVariableValue { - name: variable.name.clone(), - value: variable.value, - }) - .await - { - warn!( - "failed to update variable value for {} | {e:#}", - variable.name - ); - } 
else { - info!( - "{} variable '{}' value", - "updated".blue().bold(), - variable.name.bold(), - ); - }; - } - if update_description { - if let Err(e) = monitor_client() - .write(UpdateVariableDescription { - name: variable.name.clone(), - description: variable.description, - }) - .await - { - warn!( - "failed to update variable description for {} | {e:#}", - variable.name - ); - } else { - info!( - "{} variable '{}' description", - "updated".blue().bold(), - variable.name.bold(), - ); - }; - } - } - - for variable in to_delete { - if let Err(e) = crate::state::monitor_client() - .write(DeleteVariable { - name: variable.clone(), - }) - .await - { - warn!("failed to delete variable {variable} | {e:#}",); - } else { - info!( - "{} variable '{}'", - "deleted".red().bold(), - variable.bold(), - ); - } - } -} diff --git a/bin/core/Cargo.toml b/bin/core/Cargo.toml index 4646a4b9c..217a92d9e 100644 --- a/bin/core/Cargo.toml +++ b/bin/core/Cargo.toml @@ -44,6 +44,7 @@ tokio-util.workspace = true axum-extra.workspace = true tower-http.workspace = true serde_json.workspace = true +serde_yaml.workspace = true typeshare.workspace = true octorust.workspace = true tracing.workspace = true @@ -51,7 +52,7 @@ reqwest.workspace = true futures.workspace = true nom_pem.workspace = true anyhow.workspace = true -dotenv.workspace = true +dotenvy.workspace = true bcrypt.workspace = true base64.workspace = true tokio.workspace = true diff --git a/bin/core/Dockerfile b/bin/core/Dockerfile index 9b1e93301..2426fa65d 100644 --- a/bin/core/Dockerfile +++ b/bin/core/Dockerfile @@ -1,5 +1,5 @@ # Build Core -FROM rust:1.79.0-bookworm AS core-builder +FROM rust:1.80.0-bookworm AS core-builder WORKDIR /builder COPY . . 
RUN cargo build -p monitor_core --release @@ -17,6 +17,8 @@ FROM debian:bookworm-slim # Install Deps RUN apt update && apt install -y git curl unzip ca-certificates && \ + curl -SL https://github.com/docker/compose/releases/download/v2.29.1/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose && \ + chmod +x /usr/local/bin/docker-compose && \ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ unzip awscliv2.zip && \ ./aws/install diff --git a/bin/core/src/api/execute/build.rs b/bin/core/src/api/execute/build.rs index 076ff1557..27ca6b391 100644 --- a/bin/core/src/api/execute/build.rs +++ b/bin/core/src/api/execute/build.rs @@ -1,23 +1,20 @@ use std::{collections::HashSet, future::IntoFuture, time::Duration}; use anyhow::{anyhow, Context}; -use formatting::{format_serror, muted}; +use formatting::format_serror; use futures::future::join_all; use monitor_client::{ - api::execute::{ - CancelBuild, CancelBuildResponse, Deploy, RunBuild, - }, + api::execute::{CancelBuild, Deploy, RunBuild}, entities::{ alert::{Alert, AlertData}, all_logs_success, build::{Build, ImageRegistry, StandardRegistryConfig}, - builder::{AwsBuilderConfig, Builder, BuilderConfig}, + builder::{Builder, BuilderConfig}, config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials}, deployment::DeploymentState, monitor_timestamp, permission::PermissionLevel, - server::{stats::SeverityLevel, Server}, - server_template::aws::AwsServerTemplateConfig, + server::stats::SeverityLevel, to_monitor_name, update::{Log, Update}, user::{auto_redeploy_user, User}, @@ -31,30 +28,20 @@ use mungos::{ options::FindOneOptions, }, }; -use periphery_client::{ - api::{self, GetVersionResponse}, - PeripheryClient, -}; +use periphery_client::api::{self, git::RepoActionResponseV1_13}; use resolver_api::Resolve; use tokio_util::sync::CancellationToken; use crate::{ - cloud::{ - aws::{ - ec2::{ - launch_ec2_instance, terminate_ec2_instance_with_retry, - Ec2Instance, - }, - ecr, - }, - 
BuildCleanupData, - }, + cloud::aws::ecr, config::core_config, helpers::{ alert::send_alerts, + builder::{cleanup_builder_instance, get_builder_periphery}, channel::build_cancel_channel, - periphery_client, + git_token, query::{get_deployment_state, get_global_variables}, + registry_token, update::update_update, }, resource::{self, refresh_build_state_cache}, @@ -79,6 +66,20 @@ impl Resolve for State { ) .await?; + if build.config.builder_id.is_empty() { + return Err(anyhow!("Must attach builder to RunBuild")); + } + + let git_token = git_token( + &build.config.git_provider, + &build.config.git_account, + |https| build.config.git_https = https, + ) + .await + .with_context( + || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", build.config.git_provider, build.config.git_account), + )?; + // get the action state for the build (or insert default). let action_state = action_states().build.get_or_insert_default(&build.id).await; @@ -101,6 +102,12 @@ impl Resolve for State { build_cancel_channel().receiver.resubscribe(); let build_id = build.id.clone(); + let builder = + resource::get::(&build.config.builder_id).await?; + + let is_server_builder = + matches!(&builder.config, BuilderConfig::Server(_)); + tokio::spawn(async move { let poll = async { loop { @@ -109,16 +116,19 @@ impl Resolve for State { id = cancel_recv.recv() => id? }; if incoming_build_id == build_id { - update.push_simple_log( - "cancel acknowledged", - "the build cancellation has been queued, it may still take some time", - ); + if is_server_builder { + update.push_error_log("Cancel acknowledged", "Build cancellation is not possible on server builders at this time. 
Use an AWS builder to enable this feature."); + } else { + update.push_simple_log("Cancel acknowledged", "The build cancellation has been queued, it may still take some time."); + } update.finalize(); let id = update.id.clone(); if let Err(e) = update_update(update).await { - warn!("failed to update Update {id} | {e:#}"); + warn!("failed to modify Update {id} on db | {e:#}"); + } + if !is_server_builder { + cancel_clone.cancel(); } - cancel_clone.cancel(); return Ok(()); } } @@ -133,52 +143,44 @@ impl Resolve for State { // GET BUILDER PERIPHERY - let (periphery, cleanup_data) = - match get_build_builder(&build, &mut update).await { - Ok(builder) => { - info!("got builder for build"); - builder - } - Err(e) => { - warn!("failed to get builder | {e:#}"); - update.logs.push(Log::error( - "get builder", - format_serror(&e.context("failed to get builder").into()), - )); - return handle_early_return( - update, build.id, build.name, false, - ) - .await; - } - }; - - let core_config = core_config(); - let variables = get_global_variables().await?; + let (periphery, cleanup_data) = match get_builder_periphery( + build.name.clone(), + Some(build.config.version), + builder, + &mut update, + ) + .await + { + Ok(builder) => builder, + Err(e) => { + warn!( + "failed to get builder for build {} | {e:#}", + build.name + ); + update.logs.push(Log::error( + "get builder", + format_serror(&e.context("failed to get builder").into()), + )); + return handle_early_return( + update, build.id, build.name, false, + ) + .await; + } + }; // CLONE REPO - let git_token = core_config - .git_providers - .iter() - .find(|provider| provider.domain == build.config.git_provider) - .and_then(|provider| { - build.config.git_https = provider.https; - provider - .accounts - .iter() - .find(|account| { - account.username == build.config.git_account - }) - .map(|account| account.token.clone()) - }); let res = tokio::select! 
{ res = periphery .request(api::git::CloneRepo { args: (&build).into(), git_token, + environment: Default::default(), + env_file_path: Default::default(), + skip_secret_interp: Default::default(), }) => res, _ = cancel.cancelled() => { - info!("build cancelled during clone, cleaning up builder"); + debug!("build cancelled during clone, cleaning up builder"); update.push_error_log("build cancelled", String::from("user cancelled build during repo clone")); cleanup_builder_instance(periphery, cleanup_data, &mut update) .await; @@ -187,10 +189,14 @@ impl Resolve for State { }, }; - match res { - Ok(clone_logs) => { - info!("finished repo clone"); - update.logs.extend(clone_logs); + let commit_message = match res { + Ok(res) => { + debug!("finished repo clone"); + let res: RepoActionResponseV1_13 = res.into(); + update.logs.extend(res.logs); + update.commit_hash = + res.commit_hash.unwrap_or_default().to_string(); + res.commit_message.unwrap_or_default() } Err(e) => { warn!("failed build at clone repo | {e:#}"); @@ -198,13 +204,16 @@ impl Resolve for State { "clone repo", format_serror(&e.context("failed to clone repo").into()), ); + Default::default() } - } + }; update_update(update.clone()).await?; if all_logs_success(&update.logs) { let secret_replacers = if !build.config.skip_secret_interp { + let core_config = core_config(); + let variables = get_global_variables().await?; // Interpolate variables / secrets into build args let mut global_replacers = HashSet::new(); let mut secret_replacers = HashSet::new(); @@ -300,6 +309,12 @@ impl Resolve for State { registry_token, aws_ecr, replacers: secret_replacers.into_iter().collect(), + // Push a commit hash tagged image + additional_tags: if update.commit_hash.is_empty() { + Default::default() + } else { + vec![update.commit_hash.clone()] + }, }) => res.context("failed at call to periphery to build"), _ = cancel.cancelled() => { info!("build cancelled during build, cleaning up builder"); @@ -312,7 +327,7 @@ impl Resolve 
for State { match res { Ok(logs) => { - info!("finished build"); + debug!("finished build"); update.logs.extend(logs); } Err(e) => { @@ -334,13 +349,13 @@ impl Resolve for State { .builds .update_one( doc! { "name": &build.name }, - doc! { - "$set": { - "config.version": to_bson(&build.config.version) - .context("failed at converting version to bson")?, - "info.last_built_at": monitor_timestamp(), - } - }, + doc! { "$set": { + "config.version": to_bson(&build.config.version) + .context("failed at converting version to bson")?, + "info.last_built_at": monitor_timestamp(), + "info.built_hash": &update.commit_hash, + "info.built_message": commit_message + }}, ) .await; } @@ -499,7 +514,7 @@ impl Resolve for State { &self, CancelBuild { build }: CancelBuild, (user, mut update): (User, Update), - ) -> anyhow::Result { + ) -> anyhow::Result { let build = resource::get_check_permissions::( &build, &user, @@ -524,16 +539,15 @@ impl Resolve for State { ); update_update(update.clone()).await?; - let update_id = update.id.clone(); - build_cancel_channel() .sender .lock() .await - .send((build.id, update))?; + .send((build.id, update.clone()))?; // Make sure cancel is set to complete after some time in case // no reciever is there to do it. Prevents update stuck in InProgress. 
+ let update_id = update.id.clone(); tokio::spawn(async move { tokio::time::sleep(Duration::from_secs(60)).await; if let Err(e) = update_one_by_id( @@ -544,160 +558,11 @@ impl Resolve for State { ) .await { - warn!("failed to set BuildCancel Update status Complete after timeout | {e:#}") + warn!("failed to set CancelBuild Update status Complete after timeout | {e:#}") } }); - Ok(CancelBuildResponse {}) - } -} - -const BUILDER_POLL_RATE_SECS: u64 = 2; -const BUILDER_POLL_MAX_TRIES: usize = 30; - -#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))] -async fn get_build_builder( - build: &Build, - update: &mut Update, -) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> { - if build.config.builder_id.is_empty() { - return Err(anyhow!("build has not configured a builder")); - } - let builder = - resource::get::(&build.config.builder_id).await?; - match builder.config { - BuilderConfig::Server(config) => { - if config.server_id.is_empty() { - return Err(anyhow!("builder has not configured a server")); - } - let server = resource::get::(&config.server_id).await?; - let periphery = periphery_client(&server)?; - Ok(( - periphery, - BuildCleanupData::Server { - repo_name: build.name.clone(), - }, - )) - } - BuilderConfig::Aws(config) => { - get_aws_builder(build, config, update).await - } - } -} - -#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))] -async fn get_aws_builder( - build: &Build, - config: AwsBuilderConfig, - update: &mut Update, -) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> { - let start_create_ts = monitor_timestamp(); - - let instance_name = - format!("BUILDER-{}-v{}", build.name, build.config.version); - let Ec2Instance { instance_id, ip } = launch_ec2_instance( - &instance_name, - AwsServerTemplateConfig::from_builder_config(&config), - ) - .await?; - - info!("ec2 instance launched"); - - let log = Log { - stage: "start build instance".to_string(), - success: true, - stdout: 
start_aws_builder_log(&instance_id, &ip, &config), - start_ts: start_create_ts, - end_ts: monitor_timestamp(), - ..Default::default() - }; - - update.logs.push(log); - - update_update(update.clone()).await?; - - let periphery_address = format!("http://{ip}:{}", config.port); - let periphery = - PeripheryClient::new(&periphery_address, &core_config().passkey); - - let start_connect_ts = monitor_timestamp(); - let mut res = Ok(GetVersionResponse { - version: String::new(), - }); - for _ in 0..BUILDER_POLL_MAX_TRIES { - let version = periphery - .request(api::GetVersion {}) - .await - .context("failed to reach periphery client on builder"); - if let Ok(GetVersionResponse { version }) = &version { - let connect_log = Log { - stage: "build instance connected".to_string(), - success: true, - stdout: format!( - "established contact with periphery on builder\nperiphery version: v{}", - version - ), - start_ts: start_connect_ts, - end_ts: monitor_timestamp(), - ..Default::default() - }; - update.logs.push(connect_log); - update_update(update.clone()).await?; - return Ok(( - periphery, - BuildCleanupData::Aws { - instance_id, - region: config.region, - }, - )); - } - res = version; - tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS)) - .await; - } - - // Spawn terminate task in failure case (if loop is passed without return) - tokio::spawn(async move { - let _ = - terminate_ec2_instance_with_retry(config.region, &instance_id) - .await; - }); - - // Unwrap is safe, only way to get here is after check Ok / early return, so it must be err - Err( - res.err().unwrap().context( - "failed to start usable builder. 
terminating instance.", - ), - ) -} - -#[instrument(skip(periphery, update))] -async fn cleanup_builder_instance( - periphery: PeripheryClient, - cleanup_data: BuildCleanupData, - update: &mut Update, -) { - match cleanup_data { - BuildCleanupData::Server { repo_name } => { - let _ = periphery - .request(api::git::DeleteRepo { name: repo_name }) - .await; - } - BuildCleanupData::Aws { - instance_id, - region, - } => { - let _instance_id = instance_id.clone(); - tokio::spawn(async move { - let _ = - terminate_ec2_instance_with_retry(region, &_instance_id) - .await; - }); - update.push_simple_log( - "terminate instance", - format!("termination queued for instance id {instance_id}"), - ); - } + Ok(update) } } @@ -759,38 +624,6 @@ async fn handle_post_build_redeploy(build_id: &str) { } } -fn start_aws_builder_log( - instance_id: &str, - ip: &str, - config: &AwsBuilderConfig, -) -> String { - let AwsBuilderConfig { - ami_id, - instance_type, - volume_gb, - subnet_id, - assign_public_ip, - security_group_ids, - use_public_ip, - .. - } = config; - - let readable_sec_group_ids = security_group_ids.join(", "); - - [ - format!("{}: {instance_id}", muted("instance id")), - format!("{}: {ip}", muted("ip")), - format!("{}: {ami_id}", muted("ami id")), - format!("{}: {instance_type}", muted("instance type")), - format!("{}: {volume_gb} GB", muted("volume size")), - format!("{}: {subnet_id}", muted("subnet id")), - format!("{}: {readable_sec_group_ids}", muted("security groups")), - format!("{}: {assign_public_ip}", muted("assign public ip")), - format!("{}: {use_public_ip}", muted("use public ip")), - ] - .join("\n") -} - /// This will make sure that a build with non-none image registry has an account attached, /// and will check the core config for a token / aws ecr config matching requirements. /// Otherwise it is left to periphery. 
@@ -802,6 +635,7 @@ async fn validate_account_extract_registry_token_aws_ecr( ImageRegistry::None(_) => return Ok((None, None)), // Early return for AwsEcr ImageRegistry::AwsEcr(label) => { + // Note that aws ecr config still only lives in config file let config = core_config() .aws_ecr_registries .iter() @@ -847,18 +681,9 @@ async fn validate_account_extract_registry_token_aws_ecr( )); } - Ok(( - core_config() - .docker_registries - .iter() - .find(|provider| provider.domain == domain) - .and_then(|provider| { - provider - .accounts - .iter() - .find(|_account| &_account.username == account) - .map(|account| account.token.clone()) - }), - None, - )) + let registry_token = registry_token(domain, account).await.with_context( + || format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"), + )?; + + Ok((registry_token, None)) } diff --git a/bin/core/src/api/execute/deployment.rs b/bin/core/src/api/execute/deployment.rs index 32cd50f8c..079674e3c 100644 --- a/bin/core/src/api/execute/deployment.rs +++ b/bin/core/src/api/execute/deployment.rs @@ -1,25 +1,22 @@ -use std::collections::HashSet; - use anyhow::{anyhow, Context}; use formatting::format_serror; -use futures::future::join_all; use monitor_client::{ api::execute::*, entities::{ build::{Build, ImageRegistry}, config::core::AwsEcrConfig, deployment::{ - extract_registry_domain, Deployment, DeploymentImage, + extract_registry_domain, Deployment, DeploymentActionState, + DeploymentImage, }, get_image_name, permission::PermissionLevel, - server::ServerState, + server::{Server, ServerState}, update::{Log, Update}, user::User, Version, }, }; -use mungos::{find::find_collect, mongodb::bson::doc}; use periphery_client::api; use resolver_api::Resolve; @@ -27,16 +24,51 @@ use crate::{ cloud::aws::ecr, config::core_config, helpers::{ - periphery_client, - query::{get_global_variables, get_server_with_status}, + interpolate_variables_secrets_into_environment, periphery_client, + 
query::get_server_with_status, registry_token, update::update_update, }, monitor::update_cache_for_server, resource, - state::{action_states, db_client, State}, + state::{action_states, State}, }; -use crate::helpers::update::init_execution_update; +async fn setup_deployment_execution( + deployment: &str, + user: &User, + set_in_progress: impl Fn(&mut DeploymentActionState), +) -> anyhow::Result<(Deployment, Server)> { + let deployment = resource::get_check_permissions::( + deployment, + user, + PermissionLevel::Execute, + ) + .await?; + + if deployment.config.server_id.is_empty() { + return Err(anyhow!("deployment has no server configured")); + } + + // get the action state for the deployment (or insert default). + let action_state = action_states() + .deployment + .get_or_insert_default(&deployment.id) + .await; + + // Will check to ensure deployment not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state.update(set_in_progress)?; + + let (server, status) = + get_server_with_status(&deployment.config.server_id).await?; + if status != ServerState::Ok { + return Err(anyhow!( + "cannot send action when server is unreachable or disabled" + )); + } + + Ok((deployment, server)) +} impl Resolve for State { #[instrument(name = "Deploy", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] @@ -49,191 +81,113 @@ impl Resolve for State { }: Deploy, (user, mut update): (User, Update), ) -> anyhow::Result { - let mut deployment = - resource::get_check_permissions::( - &deployment, - &user, - PermissionLevel::Execute, - ) + let (mut deployment, server) = + setup_deployment_execution(&deployment, &user, |state| { + state.deploying = true + }) .await?; - if deployment.config.server_id.is_empty() { - return Err(anyhow!("deployment has no server configured")); - } - - // get the action state for the deployment (or insert default). 
- let action_state = action_states() - .deployment - .get_or_insert_default(&deployment.id) - .await; - - // Will check to ensure deployment not already busy before updating, and return Err if so. - // The returned guard will set the action state back to default when dropped. - let _action_guard = - action_state.update(|state| state.deploying = true)?; - - let (server, status) = - get_server_with_status(&deployment.config.server_id).await?; - if status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } - let periphery = periphery_client(&server)?; - let (version, registry_token, aws_ecr) = - match &deployment.config.image { - DeploymentImage::Build { build_id, version } => { - let build = resource::get::(build_id).await?; - let image_name = get_image_name(&build, |label| { - core_config() + let (version, registry_token, aws_ecr) = match &deployment + .config + .image + { + DeploymentImage::Build { build_id, version } => { + let build = resource::get::(build_id).await?; + let image_name = get_image_name(&build, |label| { + core_config() + .aws_ecr_registries + .iter() + .find(|reg| ®.label == label) + .map(AwsEcrConfig::from) + }) + .context("failed to create image name")?; + let version = if version.is_none() { + build.config.version + } else { + *version + }; + // Remove ending patch if it is 0, this means use latest patch. + let version_str = if version.patch == 0 { + format!("{}.{}", version.major, version.minor) + } else { + version.to_string() + }; + // Potentially add the build image_tag postfix + let version_str = if build.config.image_tag.is_empty() { + version_str + } else { + format!("{version_str}-{}", build.config.image_tag) + }; + // replace image with corresponding build image. 
+ deployment.config.image = DeploymentImage::Image { + image: format!("{image_name}:{version_str}"), + }; + match build.config.image_registry { + ImageRegistry::None(_) => (version, None, None), + ImageRegistry::AwsEcr(label) => { + let config = core_config() .aws_ecr_registries .iter() - .find(|reg| ®.label == label) - .map(AwsEcrConfig::from) - }) - .context("failed to create image name")?; - let version = if version.is_none() { - build.config.version - } else { - *version - }; - // Remove ending patch if it is 0, this means use latest patch. - let version_str = if version.patch == 0 { - format!("{}.{}", version.major, version.minor) - } else { - version.to_string() - }; - // replace image with corresponding build image. - deployment.config.image = DeploymentImage::Image { - image: format!("{image_name}:{version_str}"), - }; - match build.config.image_registry { - ImageRegistry::None(_) => (version, None, None), - ImageRegistry::AwsEcr(label) => { - let config = core_config() - .aws_ecr_registries - .iter() - .find(|reg| reg.label == label) - .with_context(|| { - format!( + .find(|reg| reg.label == label) + .with_context(|| { + format!( "did not find config for aws ecr registry {label}" ) - })?; - let token = ecr::get_ecr_token( - &config.region, - &config.access_key_id, - &config.secret_access_key, - ) - .await - .context("failed to create aws ecr login token")?; - (version, Some(token), Some(AwsEcrConfig::from(config))) - } - ImageRegistry::Standard(params) => { - if deployment.config.image_registry_account.is_empty() { - deployment.config.image_registry_account = - params.account - } - let token = core_config() - .docker_registries - .iter() - .find(|registry| registry.domain == params.domain) - .and_then(|provider| { - provider - .accounts - .iter() - .find(|account| { - account.username - == deployment.config.image_registry_account - }) - .map(|account| account.token.clone()) - }); - (version, token, None) + })?; + let token = ecr::get_ecr_token( + 
&config.region, + &config.access_key_id, + &config.secret_access_key, + ) + .await + .context("failed to create aws ecr login token")?; + (version, Some(token), Some(AwsEcrConfig::from(config))) + } + ImageRegistry::Standard(params) => { + if deployment.config.image_registry_account.is_empty() { + deployment.config.image_registry_account = + params.account } + let token = if !deployment + .config + .image_registry_account + .is_empty() + { + registry_token(¶ms.domain, &deployment.config.image_registry_account).await.with_context( + || format!("Failed to get git token in call to db. Stopping run. | {} | {}", params.domain, deployment.config.image_registry_account), + )? + } else { + None + }; + (version, token, None) } } - DeploymentImage::Image { image } => { - let domain = extract_registry_domain(image)?; - let token = - (!deployment.config.image_registry_account.is_empty()) - .then(|| { - core_config() - .docker_registries - .iter() - .find(|registry| registry.domain == domain) - .and_then(|provider| { - provider - .accounts - .iter() - .find(|account| { - account.username - == deployment.config.image_registry_account - }) - .map(|account| account.token.clone()) - }) - }) - .flatten(); - (Version::default(), token, None) - } - }; + } + DeploymentImage::Image { image } => { + let domain = extract_registry_domain(image)?; + let token = if !deployment + .config + .image_registry_account + .is_empty() + { + registry_token(&domain, &deployment.config.image_registry_account).await.with_context( + || format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account), + )? 
+ } else { + None + }; + (Version::default(), token, None) + } + }; let secret_replacers = if !deployment.config.skip_secret_interp { - // Interpolate variables into environment - let variables = get_global_variables().await?; - let core_config = core_config(); - - let mut global_replacers = HashSet::new(); - let mut secret_replacers = HashSet::new(); - - for env in &mut deployment.config.environment { - // first pass - global variables - let (res, more_replacers) = svi::interpolate_variables( - &env.value, - &variables, - svi::Interpolator::DoubleBrackets, - false, - ) - .context("failed to interpolate global variables")?; - global_replacers.extend(more_replacers); - // second pass - core secrets - let (res, more_replacers) = svi::interpolate_variables( - &res, - &core_config.secrets, - svi::Interpolator::DoubleBrackets, - false, - ) - .context("failed to interpolate core secrets")?; - secret_replacers.extend(more_replacers); - - // set env value with the result - env.value = res; - } - - // Show which variables were interpolated - if !global_replacers.is_empty() { - update.push_simple_log( - "interpolate global variables", - global_replacers - .into_iter() - .map(|(value, variable)| format!("{variable} => {value}")) - .collect::>() - .join("\n"), - ); - } - - if !secret_replacers.is_empty() { - update.push_simple_log( - "interpolate core secrets", - secret_replacers - .iter() - .map(|(_, variable)| format!("replaced: {variable}")) - .collect::>() - .join("\n"), - ); - } - - secret_replacers + interpolate_variables_secrets_into_environment( + &mut deployment.config.environment, + &mut update, + ) + .await? 
} else { Default::default() }; @@ -279,35 +233,11 @@ impl Resolve for State { StartContainer { deployment }: StartContainer, (user, mut update): (User, Update), ) -> anyhow::Result { - let deployment = resource::get_check_permissions::( - &deployment, - &user, - PermissionLevel::Execute, - ) - .await?; - - // get the action state for the deployment (or insert default). - let action_state = action_states() - .deployment - .get_or_insert_default(&deployment.id) - .await; - - // Will check to ensure deployment not already busy before updating, and return Err if so. - // The returned guard will set the action state back to default when dropped. - let _action_guard = - action_state.update(|state| state.starting = true)?; - - if deployment.config.server_id.is_empty() { - return Err(anyhow!("deployment has no server configured")); - } - - let (server, status) = - get_server_with_status(&deployment.config.server_id).await?; - if status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } + let (deployment, server) = + setup_deployment_execution(&deployment, &user, |state| { + state.starting = true + }) + .await?; let periphery = periphery_client(&server)?; @@ -333,6 +263,121 @@ impl Resolve for State { } } +impl Resolve for State { + #[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + RestartContainer { deployment }: RestartContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let (deployment, server) = + setup_deployment_execution(&deployment, &user, |state| { + state.restarting = true + }) + .await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::RestartContainer { + name: deployment.name.clone(), + }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "restart container", + format_serror( + &e.context("failed to restart 
container").into(), + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "PauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + PauseContainer { deployment }: PauseContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let (deployment, server) = + setup_deployment_execution(&deployment, &user, |state| { + state.pausing = true + }) + .await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::PauseContainer { + name: deployment.name.clone(), + }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "pause container", + format_serror(&e.context("failed to pause container").into()), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "UnpauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + UnpauseContainer { deployment }: UnpauseContainer, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let (deployment, server) = + setup_deployment_execution(&deployment, &user, |state| { + state.unpausing = true + }) + .await?; + + let periphery = periphery_client(&server)?; + + let log = match periphery + .request(api::container::UnpauseContainer { + name: deployment.name.clone(), + }) + .await + { + Ok(log) => log, + Err(e) => Log::error( + "unpause container", + format_serror( + &e.context("failed to unpause container").into(), + ), + ), + }; + + update.logs.push(log); + update_cache_for_server(&server).await; + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + impl Resolve for State { 
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( @@ -344,35 +389,11 @@ impl Resolve for State { }: StopContainer, (user, mut update): (User, Update), ) -> anyhow::Result { - let deployment = resource::get_check_permissions::( - &deployment, - &user, - PermissionLevel::Execute, - ) - .await?; - - // get the action state for the deployment (or insert default). - let action_state = action_states() - .deployment - .get_or_insert_default(&deployment.id) - .await; - - // Will check to ensure deployment not already busy before updating, and return Err if so. - // The returned guard will set the action state back to default when dropped. - let _action_guard = - action_state.update(|state| state.stopping = true)?; - - if deployment.config.server_id.is_empty() { - return Err(anyhow!("deployment has no server configured")); - } - - let (server, status) = - get_server_with_status(&deployment.config.server_id).await?; - if status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } + let (deployment, server) = + setup_deployment_execution(&deployment, &user, |state| { + state.stopping = true + }) + .await?; let periphery = periphery_client(&server)?; @@ -404,94 +425,6 @@ impl Resolve for State { } } -impl Resolve for State { - #[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] - async fn resolve( - &self, - StopAllContainers { server }: StopAllContainers, - (user, mut update): (User, Update), - ) -> anyhow::Result { - let (server, status) = get_server_with_status(&server).await?; - if status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } - - // get the action state for the server (or insert default). 
- let action_state = action_states() - .server - .get_or_insert_default(&server.id) - .await; - - // Will check to ensure server not already busy before updating, and return Err if so. - // The returned guard will set the action state back to default when dropped. - let _action_guard = action_state - .update(|state| state.stopping_containers = true)?; - - let deployments = find_collect( - &db_client().await.deployments, - doc! { - "config.server_id": &server.id - }, - None, - ) - .await - .context("failed to find deployments on server")?; - - let futures = deployments.iter().map(|deployment| async { - let req = super::ExecuteRequest::StopContainer(StopContainer { - deployment: deployment.id.clone(), - signal: None, - time: None, - }); - ( - async { - let update = init_execution_update(&req, &user).await?; - State - .resolve( - StopContainer { - deployment: deployment.id.clone(), - signal: None, - time: None, - }, - (user.clone(), update), - ) - .await - } - .await, - deployment.name.clone(), - deployment.id.clone(), - ) - }); - let results = join_all(futures).await; - let deployment_names = deployments - .iter() - .map(|d| format!("{} ({})", d.name, d.id)) - .collect::>() - .join("\n"); - update.push_simple_log("stopping containers", deployment_names); - for (res, name, id) in results { - if let Err(e) = res { - update.push_error_log( - "stop container failure", - format_serror( - &e.context(format!( - "failed to stop container {name} ({id})" - )) - .into(), - ), - ); - } - } - - update.finalize(); - update_update(update.clone()).await?; - - Ok(update) - } -} - impl Resolve for State { #[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( @@ -503,35 +436,11 @@ impl Resolve for State { }: RemoveContainer, (user, mut update): (User, Update), ) -> anyhow::Result { - let deployment = resource::get_check_permissions::( - &deployment, - &user, - PermissionLevel::Execute, - ) - .await?; - - // 
get the action state for the deployment (or insert default). - let action_state = action_states() - .deployment - .get_or_insert_default(&deployment.id) - .await; - - // Will check to ensure deployment not already busy before updating, and return Err if so. - // The returned guard will set the action state back to default when dropped. - let _action_guard = - action_state.update(|state| state.removing = true)?; - - if deployment.config.server_id.is_empty() { - return Err(anyhow!("deployment has no server configured")); - } - - let (server, status) = - get_server_with_status(&deployment.config.server_id).await?; - if status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } + let (deployment, server) = + setup_deployment_execution(&deployment, &user, |state| { + state.removing = true + }) + .await?; let periphery = periphery_client(&server)?; diff --git a/bin/core/src/api/execute/mod.rs b/bin/core/src/api/execute/mod.rs index 20ad2769a..8401536fc 100644 --- a/bin/core/src/api/execute/mod.rs +++ b/bin/core/src/api/execute/mod.rs @@ -29,6 +29,7 @@ mod procedure; mod repo; mod server; mod server_template; +mod stack; mod sync; #[typeshare] @@ -38,6 +39,7 @@ mod sync; #[serde(tag = "type", content = "params")] pub enum ExecuteRequest { // ==== SERVER ==== + StopAllContainers(StopAllContainers), PruneContainers(PruneContainers), PruneImages(PruneImages), PruneNetworks(PruneNetworks), @@ -45,10 +47,21 @@ pub enum ExecuteRequest { // ==== DEPLOYMENT ==== Deploy(Deploy), StartContainer(StartContainer), + RestartContainer(RestartContainer), + PauseContainer(PauseContainer), + UnpauseContainer(UnpauseContainer), StopContainer(StopContainer), - StopAllContainers(StopAllContainers), RemoveContainer(RemoveContainer), + // ==== STACK ==== + DeployStack(DeployStack), + StartStack(StartStack), + RestartStack(RestartStack), + StopStack(StopStack), + PauseStack(PauseStack), + UnpauseStack(UnpauseStack), + 
DestroyStack(DestroyStack), + // ==== BUILD ==== RunBuild(RunBuild), CancelBuild(CancelBuild), @@ -56,6 +69,8 @@ pub enum ExecuteRequest { // ==== REPO ==== CloneRepo(CloneRepo), PullRepo(PullRepo), + BuildRepo(BuildRepo), + CancelRepoBuild(CancelRepoBuild), // ==== PROCEDURE ==== RunProcedure(RunProcedure), diff --git a/bin/core/src/api/execute/repo.rs b/bin/core/src/api/execute/repo.rs index 481ba7fa1..ce00be504 100644 --- a/bin/core/src/api/execute/repo.rs +++ b/bin/core/src/api/execute/repo.rs @@ -1,30 +1,45 @@ -use anyhow::anyhow; +use std::{future::IntoFuture, time::Duration}; + +use anyhow::{anyhow, Context}; use formatting::format_serror; use monitor_client::{ api::execute::*, entities::{ + alert::{Alert, AlertData}, + builder::{Builder, BuilderConfig}, monitor_timestamp, optional_string, permission::PermissionLevel, repo::Repo, - server::Server, + server::{stats::SeverityLevel, Server}, update::{Log, Update}, user::User, }, }; use mungos::{ by_id::update_one_by_id, - mongodb::bson::{doc, to_document}, + mongodb::{ + bson::{doc, to_document}, + options::FindOneOptions, + }, }; -use periphery_client::api; +use periphery_client::api::{self, git::RepoActionResponseV1_13}; use resolver_api::Resolve; +use tokio_util::sync::CancellationToken; use crate::{ - config::core_config, - helpers::{periphery_client, update::update_update}, + helpers::{ + alert::send_alerts, + builder::{cleanup_builder_instance, get_builder_periphery}, + channel::repo_cancel_channel, + git_token, periphery_client, + update::update_update, + }, resource::{self, refresh_repo_state_cache}, state::{action_states, db_client, State}, }; +use super::ExecuteRequest; + impl Resolve for State { #[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( @@ -39,6 +54,16 @@ impl Resolve for State { ) .await?; + let git_token = git_token( + &repo.config.git_provider, + &repo.config.git_account, + |https| repo.config.git_https = https, 
+ ) + .await + .with_context( + || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account), + )?; + // get the action state for the repo (or insert default). let action_state = action_states().repo.get_or_insert_default(&repo.id).await; @@ -57,27 +82,20 @@ impl Resolve for State { let periphery = periphery_client(&server)?; - let git_token = core_config() - .git_providers - .iter() - .find(|provider| provider.domain == repo.config.git_provider) - .and_then(|provider| { - repo.config.git_https = provider.https; - provider - .accounts - .iter() - .find(|account| account.username == repo.config.git_account) - .map(|account| account.token.clone()) - }); - let logs = match periphery .request(api::git::CloneRepo { args: (&repo).into(), git_token, + environment: repo.config.environment, + env_file_path: repo.config.env_file_path, + skip_secret_interp: repo.config.skip_secret_interp, }) .await { - Ok(logs) => logs, + Ok(res) => { + let res: RepoActionResponseV1_13 = res.into(); + res.logs + } Err(e) => { vec![Log::error( "clone repo", @@ -93,7 +111,7 @@ impl Resolve for State { update_last_pulled_time(&repo.name).await; } - handle_update_return(update).await + handle_server_update_return(update).await } } @@ -135,10 +153,17 @@ impl Resolve for State { branch: optional_string(&repo.config.branch), commit: optional_string(&repo.config.commit), on_pull: repo.config.on_pull.into_option(), + environment: repo.config.environment, + env_file_path: repo.config.env_file_path, + skip_secret_interp: repo.config.skip_secret_interp, }) .await { - Ok(logs) => logs, + Ok(res) => { + let res: RepoActionResponseV1_13 = res.into(); + update.commit_hash = res.commit_hash.unwrap_or_default(); + res.logs + } Err(e) => { vec![Log::error( "pull repo", @@ -155,12 +180,12 @@ impl Resolve for State { update_last_pulled_time(&repo.name).await; } - 
handle_update_return(update).await + handle_server_update_return(update).await } } #[instrument(skip_all, fields(update_id = update.id))] -async fn handle_update_return( +async fn handle_server_update_return( update: Update, ) -> anyhow::Result { // Need to manually update the update before cache refresh, @@ -197,3 +222,364 @@ async fn update_last_pulled_time(repo_name: &str) { ); } } + +impl Resolve for State { + #[instrument(name = "BuildRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + BuildRepo { repo }: BuildRepo, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let mut repo = resource::get_check_permissions::( + &repo, + &user, + PermissionLevel::Execute, + ) + .await?; + + if repo.config.builder_id.is_empty() { + return Err(anyhow!("Must attach builder to BuildRepo")); + } + + let git_token = git_token( + &repo.config.git_provider, + &repo.config.git_account, + |https| repo.config.git_https = https, + ) + .await + .with_context( + || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account), + )?; + + // get the action state for the repo (or insert default). + let action_state = + action_states().repo.get_or_insert_default(&repo.id).await; + + // This will set action state back to default when dropped. + // Will also check to ensure repo not already busy before updating. 
+ let _action_guard = + action_state.update(|state| state.building = true)?; + + let cancel = CancellationToken::new(); + let cancel_clone = cancel.clone(); + let mut cancel_recv = + repo_cancel_channel().receiver.resubscribe(); + let repo_id = repo.id.clone(); + + let builder = + resource::get::(&repo.config.builder_id).await?; + + let is_server_builder = + matches!(&builder.config, BuilderConfig::Server(_)); + + tokio::spawn(async move { + let poll = async { + loop { + let (incoming_repo_id, mut update) = tokio::select! { + _ = cancel_clone.cancelled() => return Ok(()), + id = cancel_recv.recv() => id? + }; + if incoming_repo_id == repo_id { + if is_server_builder { + update.push_error_log("Cancel acknowledged", "Repo Build cancellation is not possible on server builders at this time. Use an AWS builder to enable this feature."); + } else { + update.push_simple_log("Cancel acknowledged", "The repo build cancellation has been queued, it may still take some time."); + } + update.finalize(); + let id = update.id.clone(); + if let Err(e) = update_update(update).await { + warn!("failed to modify Update {id} on db | {e:#}"); + } + if !is_server_builder { + cancel_clone.cancel(); + } + return Ok(()); + } + } + #[allow(unreachable_code)] + anyhow::Ok(()) + }; + tokio::select! { + _ = cancel_clone.cancelled() => {} + _ = poll => {} + } + }); + + // GET BUILDER PERIPHERY + + let (periphery, cleanup_data) = match get_builder_periphery( + repo.name.clone(), + None, + builder, + &mut update, + ) + .await + { + Ok(builder) => builder, + Err(e) => { + warn!("failed to get builder for repo {} | {e:#}", repo.name); + update.logs.push(Log::error( + "get builder", + format_serror(&e.context("failed to get builder").into()), + )); + return handle_builder_early_return( + update, repo.id, repo.name, false, + ) + .await; + } + }; + + // CLONE REPO + + let res = tokio::select! 
{ + res = periphery + .request(api::git::CloneRepo { + args: (&repo).into(), + git_token, + environment: Default::default(), + env_file_path: Default::default(), + skip_secret_interp: Default::default(), + }) => res, + _ = cancel.cancelled() => { + debug!("build cancelled during clone, cleaning up builder"); + update.push_error_log("build cancelled", String::from("user cancelled build during repo clone")); + cleanup_builder_instance(periphery, cleanup_data, &mut update) + .await; + info!("builder cleaned up"); + return handle_builder_early_return(update, repo.id, repo.name, true).await + }, + }; + + let commit_message = match res { + Ok(res) => { + debug!("finished repo clone"); + let res: RepoActionResponseV1_13 = res.into(); + update.logs.extend(res.logs); + update.commit_hash = res.commit_hash.unwrap_or_default(); + res.commit_message.unwrap_or_default() + } + Err(e) => { + update.push_error_log( + "clone repo", + format_serror(&e.context("failed to clone repo").into()), + ); + Default::default() + } + }; + + update.finalize(); + + let db = db_client().await; + + if update.success { + let _ = db + .repos + .update_one( + doc! { "name": &repo.name }, + doc! { "$set": { + "info.last_built_at": monitor_timestamp(), + "info.built_hash": &update.commit_hash, + "info.built_message": commit_message + }}, + ) + .await; + } + + // stop the cancel listening task from going forever + cancel.cancel(); + + cleanup_builder_instance(periphery, cleanup_data, &mut update) + .await; + + // Need to manually update the update before cache refresh, + // and before broadcast with add_update. + // The Err case of to_document should be unreachable, + // but will fail to update cache in that case. 
+ if let Ok(update_doc) = to_document(&update) { + let _ = update_one_by_id( + &db.updates, + &update.id, + mungos::update::Update::Set(update_doc), + None, + ) + .await; + refresh_repo_state_cache().await; + } + + update_update(update.clone()).await?; + + if !update.success { + warn!("repo build unsuccessful, alerting..."); + let target = update.target.clone(); + tokio::spawn(async move { + let alert = Alert { + id: Default::default(), + target, + ts: monitor_timestamp(), + resolved_ts: Some(monitor_timestamp()), + resolved: true, + level: SeverityLevel::Warning, + data: AlertData::RepoBuildFailed { + id: repo.id, + name: repo.name, + }, + }; + send_alerts(&[alert]).await + }); + } + + Ok(update) + } +} + +#[instrument(skip(update))] +async fn handle_builder_early_return( + mut update: Update, + repo_id: String, + repo_name: String, + is_cancel: bool, +) -> anyhow::Result { + update.finalize(); + // Need to manually update the update before cache refresh, + // and before broadcast with add_update. + // The Err case of to_document should be unreachable, + // but will fail to update cache in that case. 
+ if let Ok(update_doc) = to_document(&update) { + let _ = update_one_by_id( + &db_client().await.updates, + &update.id, + mungos::update::Update::Set(update_doc), + None, + ) + .await; + refresh_repo_state_cache().await; + } + update_update(update.clone()).await?; + if !update.success && !is_cancel { + warn!("repo build unsuccessful, alerting..."); + let target = update.target.clone(); + tokio::spawn(async move { + let alert = Alert { + id: Default::default(), + target, + ts: monitor_timestamp(), + resolved_ts: Some(monitor_timestamp()), + resolved: true, + level: SeverityLevel::Warning, + data: AlertData::RepoBuildFailed { + id: repo_id, + name: repo_name, + }, + }; + send_alerts(&[alert]).await + }); + } + Ok(update) +} + +#[instrument(skip_all)] +pub async fn validate_cancel_repo_build( + request: &ExecuteRequest, +) -> anyhow::Result<()> { + if let ExecuteRequest::CancelRepoBuild(req) = request { + let repo = resource::get::(&req.repo).await?; + + let db = db_client().await; + + let (latest_build, latest_cancel) = tokio::try_join!( + db.updates + .find_one(doc! { + "operation": "BuildRepo", + "target.id": &repo.id, + },) + .with_options( + FindOneOptions::builder() + .sort(doc! { "start_ts": -1 }) + .build() + ) + .into_future(), + db.updates + .find_one(doc! { + "operation": "CancelRepoBuild", + "target.id": &repo.id, + },) + .with_options( + FindOneOptions::builder() + .sort(doc! 
{ "start_ts": -1 }) + .build() + ) + .into_future() + )?; + + match (latest_build, latest_cancel) { + (Some(build), Some(cancel)) => { + if cancel.start_ts > build.start_ts { + return Err(anyhow!( + "Repo build has already been cancelled" + )); + } + } + (None, _) => return Err(anyhow!("No repo build in progress")), + _ => {} + }; + } + Ok(()) +} + +impl Resolve for State { + #[instrument(name = "CancelRepoBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + CancelRepoBuild { repo }: CancelRepoBuild, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let repo = resource::get_check_permissions::( + &repo, + &user, + PermissionLevel::Execute, + ) + .await?; + + // make sure the build is building + if !action_states() + .repo + .get(&repo.id) + .await + .and_then(|s| s.get().ok().map(|s| s.building)) + .unwrap_or_default() + { + return Err(anyhow!("Repo is not building.")); + } + + update.push_simple_log( + "cancel triggered", + "the repo build cancel has been triggered", + ); + update_update(update.clone()).await?; + + repo_cancel_channel() + .sender + .lock() + .await + .send((repo.id, update.clone()))?; + + // Make sure cancel is set to complete after some time in case + // no receiver is there to do it. Prevents update stuck in InProgress. + let update_id = update.id.clone(); + tokio::spawn(async move { + tokio::time::sleep(Duration::from_secs(60)).await; + if let Err(e) = update_one_by_id( + &db_client().await.updates, + &update_id, + doc!
{ "$set": { "status": "Complete" } }, + None, + ) + .await + { + warn!("failed to set CancelRepoBuild Update status Complete after timeout | {e:#}") + } + }); + + Ok(update) + } +} diff --git a/bin/core/src/api/execute/server.rs b/bin/core/src/api/execute/server.rs index 00fe0a356..4a73dce7c 100644 --- a/bin/core/src/api/execute/server.rs +++ b/bin/core/src/api/execute/server.rs @@ -1,11 +1,11 @@ -use anyhow::Context; +use anyhow::{anyhow, Context}; use formatting::format_serror; use monitor_client::{ api::execute::*, entities::{ - monitor_timestamp, + all_logs_success, monitor_timestamp, permission::PermissionLevel, - server::Server, + server::{Server, ServerState}, update::{Log, Update, UpdateStatus}, user::User, }, @@ -14,11 +14,57 @@ use periphery_client::api; use resolver_api::Resolve; use crate::{ - helpers::{periphery_client, update::update_update}, + helpers::{ + periphery_client, query::get_server_with_status, + update::update_update, + }, resource, state::{action_states, State}, }; +impl Resolve for State { + #[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + StopAllContainers { server }: StopAllContainers, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let (server, status) = get_server_with_status(&server).await?; + if status != ServerState::Ok { + return Err(anyhow!( + "cannot send action when server is unreachable or disabled" + )); + } + + // get the action state for the server (or insert default). + let action_state = action_states() + .server + .get_or_insert_default(&server.id) + .await; + + // Will check to ensure server not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = action_state + .update(|state| state.stopping_containers = true)?; + + let logs = periphery_client(&server)? 
+ .request(api::container::StopAllContainers {}) + .await + .context("failed to stop all container on host")?; + + update.logs.extend(logs); + + if all_logs_success(&update.logs) { + update.push_simple_log("stop all containers", String::from("All containers have successfully been stopped on the host.")); + } + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + impl Resolve for State { #[instrument(name = "PruneContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] async fn resolve( diff --git a/bin/core/src/api/execute/stack.rs b/bin/core/src/api/execute/stack.rs new file mode 100644 index 000000000..8eabe4ab2 --- /dev/null +++ b/bin/core/src/api/execute/stack.rs @@ -0,0 +1,353 @@ +use anyhow::Context; +use formatting::format_serror; +use monitor_client::{ + api::execute::*, + entities::{ + permission::PermissionLevel, stack::StackInfo, update::Update, + user::User, + }, +}; +use mungos::mongodb::bson::{doc, to_document}; +use periphery_client::api::compose::*; +use resolver_api::Resolve; + +use crate::{ + helpers::{ + interpolate_variables_secrets_into_environment, periphery_client, + stack::{ + execute::execute_compose, get_stack_and_server, + json::get_config_jsons, services::extract_services_into_res, + }, + update::update_update, + }, + monitor::update_cache_for_server, + state::{action_states, db_client, State}, +}; + +impl Resolve for State { + #[instrument(name = "DeployStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + DeployStack { stack, stop_time }: DeployStack, + (user, mut update): (User, Update), + ) -> anyhow::Result { + let (mut stack, server) = get_stack_and_server( + &stack, + &user, + PermissionLevel::Execute, + true, + ) + .await?; + + // get the action state for the stack (or insert default). 
+ let action_state = + action_states().stack.get_or_insert_default(&stack.id).await; + + // Will check to ensure stack not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. + let _action_guard = + action_state.update(|state| state.deploying = true)?; + + let git_token = crate::helpers::git_token( + &stack.config.git_provider, + &stack.config.git_account, + |https| stack.config.git_https = https, + ).await.with_context( + || format!("Failed to get git token in call to db. Stopping run. | {} | {}", stack.config.git_provider, stack.config.git_account), + )?; + + let registry_token = crate::helpers::registry_token( + &stack.config.registry_provider, + &stack.config.registry_account, + ).await.with_context( + || format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account), + )?; + + if !stack.config.skip_secret_interp { + interpolate_variables_secrets_into_environment( + &mut stack.config.environment, + &mut update, + ) + .await?; + } + + let ComposeUpResponse { + logs, + deployed, + file_contents, + missing_files, + remote_errors, + commit_hash, + commit_message, + } = periphery_client(&server)? + .request(ComposeUp { + stack: stack.clone(), + service: None, + git_token, + registry_token, + }) + .await?; + + update.logs.extend(logs); + + let update_info = async { + let (latest_services, json, json_errors) = if !file_contents + .is_empty() + { + let (jsons, json_errors) = + get_config_jsons(&file_contents).await; + let mut services = Vec::new(); + for contents in &file_contents { + if let Err(e) = extract_services_into_res( + &stack.project_name(true), + &contents.contents, + &mut services, + ) { + update.push_error_log( + "extract services", + format_serror(&e.context(format!("Failed to extract stack services for compose file path {}. 
Things probably won't work correctly", contents.path)).into()) + ); + } + } + (services, jsons, json_errors) + } else { + // maybe better to do something else here for services. + (stack.info.latest_services.clone(), Vec::new(), Vec::new()) + }; + + let project_name = stack.project_name(true); + + let ( + deployed_services, + deployed_contents, + deployed_json, + deployed_json_errors, + deployed_hash, + deployed_message, + ) = if deployed { + ( + Some(latest_services.clone()), + Some(file_contents.clone()), + Some(json.clone()), + Some(json_errors.clone()), + commit_hash.clone(), + commit_message.clone(), + ) + } else { + ( + stack.info.deployed_services, + stack.info.deployed_contents, + stack.info.deployed_json, + stack.info.deployed_json_errors, + stack.info.deployed_hash, + stack.info.deployed_message, + ) + }; + + let info = StackInfo { + missing_files, + deployed_project_name: project_name.into(), + deployed_services, + deployed_contents, + deployed_hash, + deployed_message, + deployed_json, + deployed_json_errors, + latest_services, + latest_json: json, + latest_json_errors: json_errors, + remote_contents: stack + .config + .file_contents + .is_empty() + .then_some(file_contents), + remote_errors: stack + .config + .file_contents + .is_empty() + .then_some(remote_errors), + latest_hash: commit_hash, + latest_message: commit_message, + }; + + let info = to_document(&info) + .context("failed to serialize stack info to bson")?; + + db_client() + .await + .stacks + .update_one( + doc! { "name": &stack.name }, + doc! { "$set": { "info": info } }, + ) + .await + .context("failed to update stack info on db")?; + anyhow::Ok(()) + }; + + // This will be weird with single service deploys. Come back to it. 
+ if let Err(e) = update_info.await { + update.push_error_log( + "refresh stack info", + format_serror( + &e.context("failed to refresh stack info on db").into(), + ), + ) + } + + // Ensure cached stack state up to date by updating server cache + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "StartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + StartStack { stack, service }: StartStack, + (user, update): (User, Update), + ) -> anyhow::Result { + let no_service = service.is_none(); + execute_compose::( + &stack, + service, + &user, + |state| { + if no_service { + state.starting = true + } + }, + update, + (), + ) + .await + } +} + +impl Resolve for State { + #[instrument(name = "RestartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + RestartStack { stack, service }: RestartStack, + (user, update): (User, Update), + ) -> anyhow::Result { + let no_service = service.is_none(); + execute_compose::( + &stack, + service, + &user, + |state| { + if no_service { + state.restarting = true; + } + }, + update, + (), + ) + .await + } +} + +impl Resolve for State { + #[instrument(name = "PauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + PauseStack { stack, service }: PauseStack, + (user, update): (User, Update), + ) -> anyhow::Result { + let no_service = service.is_none(); + execute_compose::( + &stack, + service, + &user, + |state| { + if no_service { + state.pausing = true + } + }, + update, + (), + ) + .await + } +} + +impl Resolve for State { + #[instrument(name = "UnpauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + UnpauseStack { stack, service }: UnpauseStack, + (user, 
update): (User, Update), + ) -> anyhow::Result { + let no_service = service.is_none(); + execute_compose::( + &stack, + service, + &user, + |state| { + if no_service { + state.unpausing = true + } + }, + update, + (), + ) + .await + } +} + +impl Resolve for State { + #[instrument(name = "StopStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + StopStack { + stack, + stop_time, + service, + }: StopStack, + (user, update): (User, Update), + ) -> anyhow::Result { + let no_service = service.is_none(); + execute_compose::( + &stack, + service, + &user, + |state| { + if no_service { + state.stopping = true + } + }, + update, + stop_time, + ) + .await + } +} + +impl Resolve for State { + #[instrument(name = "DestroyStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))] + async fn resolve( + &self, + DestroyStack { + stack, + remove_orphans, + stop_time, + }: DestroyStack, + (user, update): (User, Update), + ) -> anyhow::Result { + execute_compose::( + &stack, + None, + &user, + |state| state.destroying = true, + update, + (stop_time, remove_orphans), + ) + .await + } +} diff --git a/bin/core/src/api/execute/sync.rs b/bin/core/src/api/execute/sync.rs index 3e47f070c..a0b03ce9d 100644 --- a/bin/core/src/api/execute/sync.rs +++ b/bin/core/src/api/execute/sync.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use anyhow::{anyhow, Context}; use formatting::{colored, format_serror, Color}; use mongo_indexed::doc; @@ -8,12 +10,14 @@ use monitor_client::{ alerter::Alerter, build::Build, builder::Builder, + deployment::Deployment, monitor_timestamp, permission::PermissionLevel, procedure::Procedure, repo::Repo, server::Server, server_template::ServerTemplate, + stack::Stack, update::{Log, Update}, user::{sync_user, User}, }, @@ -25,7 +29,9 @@ use crate::{ helpers::{ query::get_id_to_tags, sync::{ - deployment, + deploy::{ + build_deploy_cache, deploy_from_cache, SyncDeployParams, + }, 
resource::{ get_updates_for_execution, AllResourcesById, ResourceSync, }, @@ -62,8 +68,28 @@ impl Resolve for State { let resources = res?; - let all_resources = AllResourcesById::load().await?; let id_to_tags = get_id_to_tags(None).await?; + let all_resources = AllResourcesById::load().await?; + + let deployments_by_name = all_resources + .deployments + .values() + .map(|deployment| (deployment.name.clone(), deployment.clone())) + .collect::>(); + let stacks_by_name = all_resources + .stacks + .values() + .map(|stack| (stack.name.clone(), stack.clone())) + .collect::>(); + + let deploy_cache = build_deploy_cache(SyncDeployParams { + deployments: &resources.deployments, + deployment_map: &deployments_by_name, + stacks: &resources.stacks, + stack_map: &stacks_by_name, + all_resources: &all_resources, + }) + .await?; let (servers_to_create, servers_to_update, servers_to_delete) = get_updates_for_execution::( @@ -77,13 +103,21 @@ impl Resolve for State { deployments_to_create, deployments_to_update, deployments_to_delete, - ) = deployment::get_updates_for_execution( + ) = get_updates_for_execution::( resources.deployments, sync.config.delete, &all_resources, &id_to_tags, ) .await?; + let (stacks_to_create, stacks_to_update, stacks_to_delete) = + get_updates_for_execution::( + resources.stacks, + sync.config.delete, + &all_resources, + &id_to_tags, + ) + .await?; let (builds_to_create, builds_to_update, builds_to_delete) = get_updates_for_execution::( resources.builds, @@ -169,7 +203,8 @@ impl Resolve for State { ) .await?; - if resource_syncs_to_create.is_empty() + if deploy_cache.is_empty() + && resource_syncs_to_create.is_empty() && resource_syncs_to_update.is_empty() && resource_syncs_to_delete.is_empty() && server_templates_to_create.is_empty() @@ -181,6 +216,9 @@ impl Resolve for State { && deployments_to_create.is_empty() && deployments_to_update.is_empty() && deployments_to_delete.is_empty() + && stacks_to_create.is_empty() + && stacks_to_update.is_empty() + && 
stacks_to_delete.is_empty() && builds_to_create.is_empty() && builds_to_update.is_empty() && builds_to_delete.is_empty() @@ -305,15 +343,25 @@ impl Resolve for State { ); // Dependant on server / build - if let Some(res) = deployment::run_updates( - deployments_to_create, - deployments_to_update, - deployments_to_delete, - ) - .await - { - update.logs.extend(res); - } + maybe_extend( + &mut update.logs, + Deployment::run_updates( + deployments_to_create, + deployments_to_update, + deployments_to_delete, + ) + .await, + ); + // stack only depends on server, but maybe will depend on build later. + maybe_extend( + &mut update.logs, + Stack::run_updates( + stacks_to_create, + stacks_to_update, + stacks_to_delete, + ) + .await, + ); // Dependant on everything maybe_extend( @@ -326,6 +374,9 @@ impl Resolve for State { .await, ); + // Execute the deploy cache + deploy_from_cache(deploy_cache, &mut update.logs).await; + let db = db_client().await; if let Err(e) = update_one_by_id( diff --git a/bin/core/src/api/read/alert.rs b/bin/core/src/api/read/alert.rs index fd5952d9c..28f2cbdab 100644 --- a/bin/core/src/api/read/alert.rs +++ b/bin/core/src/api/read/alert.rs @@ -3,7 +3,7 @@ use monitor_client::{ api::read::{ GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse, }, - entities::{update::ResourceTargetVariant, user::User}, + entities::{deployment::Deployment, server::Server, user::User}, }; use mungos::{ by_id::find_one_by_id, @@ -14,7 +14,7 @@ use resolver_api::Resolve; use crate::{ config::core_config, - helpers::query::get_resource_ids_for_user, + resource::get_resource_ids_for_user, state::{db_client, State}, }; @@ -28,16 +28,10 @@ impl Resolve for State { ) -> anyhow::Result { let mut query = query.unwrap_or_default(); if !user.admin && !core_config().transparent_mode { - let server_ids = get_resource_ids_for_user( - &user, - ResourceTargetVariant::Server, - ) - .await?; - let deployment_ids = get_resource_ids_for_user( - &user, - 
ResourceTargetVariant::Deployment, - ) - .await?; + let server_ids = + get_resource_ids_for_user::(&user).await?; + let deployment_ids = + get_resource_ids_for_user::(&user).await?; query.extend(doc! { "$or": [ { "target.type": "Server", "target.id": { "$in": &server_ids } }, diff --git a/bin/core/src/api/read/alerter.rs b/bin/core/src/api/read/alerter.rs index 2d35b474f..9a825ac8e 100644 --- a/bin/core/src/api/read/alerter.rs +++ b/bin/core/src/api/read/alerter.rs @@ -5,7 +5,6 @@ use monitor_client::{ entities::{ alerter::{Alerter, AlerterListItem}, permission::PermissionLevel, - update::ResourceTargetVariant, user::User, }, }; @@ -13,7 +12,6 @@ use mungos::mongodb::bson::doc; use resolver_api::Resolve; use crate::{ - helpers::query::get_resource_ids_for_user, resource, state::{db_client, State}, }; @@ -59,17 +57,15 @@ impl Resolve for State { GetAlertersSummary {}: GetAlertersSummary, user: User, ) -> anyhow::Result { - let query = match get_resource_ids_for_user( - &user, - ResourceTargetVariant::Alerter, - ) - .await? - { - Some(ids) => doc! { - "_id": { "$in": ids } - }, - None => Document::new(), - }; + let query = + match resource::get_resource_ids_for_user::(&user) + .await? + { + Some(ids) => doc! 
{ + "_id": { "$in": ids } + }, + None => Document::new(), + }; let total = db_client() .await .alerters diff --git a/bin/core/src/api/read/builder.rs b/bin/core/src/api/read/builder.rs index aa269486e..4dad0db6f 100644 --- a/bin/core/src/api/read/builder.rs +++ b/bin/core/src/api/read/builder.rs @@ -5,7 +5,6 @@ use monitor_client::{ entities::{ builder::{Builder, BuilderListItem}, permission::PermissionLevel, - update::ResourceTargetVariant, user::User, }, }; @@ -13,7 +12,6 @@ use mungos::mongodb::bson::doc; use resolver_api::Resolve; use crate::{ - helpers::query::get_resource_ids_for_user, resource, state::{db_client, State}, }; @@ -59,17 +57,15 @@ impl Resolve for State { GetBuildersSummary {}: GetBuildersSummary, user: User, ) -> anyhow::Result { - let query = match get_resource_ids_for_user( - &user, - ResourceTargetVariant::Builder, - ) - .await? - { - Some(ids) => doc! { - "_id": { "$in": ids } - }, - None => Document::new(), - }; + let query = + match resource::get_resource_ids_for_user::(&user) + .await? + { + Some(ids) => doc! 
{ + "_id": { "$in": ids } + }, + None => Document::new(), + }; let total = db_client() .await .builders diff --git a/bin/core/src/api/read/mod.rs b/bin/core/src/api/read/mod.rs index d0fb9aa63..9c40dad80 100644 --- a/bin/core/src/api/read/mod.rs +++ b/bin/core/src/api/read/mod.rs @@ -36,10 +36,12 @@ mod builder; mod deployment; mod permission; mod procedure; +mod provider; mod repo; mod search; mod server; mod server_template; +mod stack; mod sync; mod tag; mod toml; @@ -61,8 +63,8 @@ enum ReadRequest { #[to_string_resolver] ListAwsEcrLabels(ListAwsEcrLabels), ListSecrets(ListSecrets), - ListGitProviders(ListGitProviders), - ListDockerRegistries(ListDockerRegistries), + ListGitProvidersFromConfig(ListGitProvidersFromConfig), + ListDockerRegistriesFromConfig(ListDockerRegistriesFromConfig), // ==== USER ==== GetUsername(GetUsername), @@ -99,13 +101,18 @@ enum ReadRequest { GetServer(GetServer), GetServerState(GetServerState), GetPeripheryVersion(GetPeripheryVersion), - GetDockerContainers(GetDockerContainers), - GetDockerImages(GetDockerImages), - GetDockerNetworks(GetDockerNetworks), GetServerActionState(GetServerActionState), GetHistoricalServerStats(GetHistoricalServerStats), ListServers(ListServers), ListFullServers(ListFullServers), + #[to_string_resolver] + ListDockerContainers(ListDockerContainers), + #[to_string_resolver] + ListDockerNetworks(ListDockerNetworks), + #[to_string_resolver] + ListDockerImages(ListDockerImages), + #[to_string_resolver] + ListComposeProjects(ListComposeProjects), // ==== DEPLOYMENT ==== GetDeploymentsSummary(GetDeploymentsSummary), @@ -146,6 +153,18 @@ enum ReadRequest { ListResourceSyncs(ListResourceSyncs), ListFullResourceSyncs(ListFullResourceSyncs), + // ==== STACK ==== + GetStacksSummary(GetStacksSummary), + GetStack(GetStack), + GetStackActionState(GetStackActionState), + GetStackWebhooksEnabled(GetStackWebhooksEnabled), + GetStackServiceLog(GetStackServiceLog), + SearchStackServiceLog(SearchStackServiceLog), + 
ListStacks(ListStacks), + ListFullStacks(ListFullStacks), + ListStackServices(ListStackServices), + ListCommonStackExtraArgs(ListCommonStackExtraArgs), + // ==== BUILDER ==== GetBuildersSummary(GetBuildersSummary), GetBuilder(GetBuilder), @@ -180,11 +199,17 @@ enum ReadRequest { #[to_string_resolver] GetSystemStats(GetSystemStats), #[to_string_resolver] - GetSystemProcesses(GetSystemProcesses), + ListSystemProcesses(ListSystemProcesses), // ==== VARIABLE ==== GetVariable(GetVariable), ListVariables(ListVariables), + + // ==== PROVIDER ==== + GetGitProviderAccount(GetGitProviderAccount), + ListGitProviderAccounts(ListGitProviderAccounts), + GetDockerRegistryAccount(GetDockerRegistryAccount), + ListDockerRegistryAccounts(ListDockerRegistryAccounts), } pub fn router() -> Router { @@ -351,12 +376,12 @@ impl Resolve for State { } } -impl Resolve for State { +impl Resolve for State { async fn resolve( &self, - ListGitProviders { target }: ListGitProviders, + ListGitProvidersFromConfig { target }: ListGitProvidersFromConfig, user: User, - ) -> anyhow::Result { + ) -> anyhow::Result { let mut providers = core_config().git_providers.clone(); if let Some(target) = target { @@ -442,12 +467,12 @@ impl Resolve for State { } } -impl Resolve for State { +impl Resolve for State { async fn resolve( &self, - ListDockerRegistries { target }: ListDockerRegistries, + ListDockerRegistriesFromConfig { target }: ListDockerRegistriesFromConfig, _: User, - ) -> anyhow::Result { + ) -> anyhow::Result { let mut registries = core_config().docker_registries.clone(); if let Some(target) = target { diff --git a/bin/core/src/api/read/permission.rs b/bin/core/src/api/read/permission.rs index 3baf00593..0369c2e98 100644 --- a/bin/core/src/api/read/permission.rs +++ b/bin/core/src/api/read/permission.rs @@ -11,7 +11,7 @@ use mungos::{find::find_collect, mongodb::bson::doc}; use resolver_api::Resolve; use crate::{ - helpers::query::get_user_permission_on_resource, + 
helpers::query::get_user_permission_on_target, state::{db_client, State}, }; @@ -43,8 +43,7 @@ impl Resolve for State { if user.admin { return Ok(PermissionLevel::Write); } - let (variant, id) = target.extract_variant_id(); - get_user_permission_on_resource(&user, variant, id).await + get_user_permission_on_target(&user, &target).await } } diff --git a/bin/core/src/api/read/provider.rs b/bin/core/src/api/read/provider.rs new file mode 100644 index 000000000..5fcfc2e99 --- /dev/null +++ b/bin/core/src/api/read/provider.rs @@ -0,0 +1,116 @@ +use anyhow::{anyhow, Context}; +use mongo_indexed::{doc, Document}; +use monitor_client::{ + api::read::{ + GetDockerRegistryAccount, GetDockerRegistryAccountResponse, + GetGitProviderAccount, GetGitProviderAccountResponse, + ListDockerRegistryAccounts, ListDockerRegistryAccountsResponse, + ListGitProviderAccounts, ListGitProviderAccountsResponse, + }, + entities::user::User, +}; +use mungos::{ + by_id::find_one_by_id, find::find_collect, + mongodb::options::FindOptions, +}; +use resolver_api::Resolve; + +use crate::state::{db_client, State}; + +impl Resolve for State { + async fn resolve( + &self, + GetGitProviderAccount { id }: GetGitProviderAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "Only admins can read git provider accounts" + )); + } + find_one_by_id(&db_client().await.git_accounts, &id) + .await + .context("failed to query db for git provider accounts")? 
+ .context("did not find git provider account with the given id") + } +} + +impl Resolve for State { + async fn resolve( + &self, + ListGitProviderAccounts { domain, username }: ListGitProviderAccounts, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "Only admins can read git provider accounts" + )); + } + let mut filter = Document::new(); + if let Some(domain) = domain { + filter.insert("domain", domain); + } + if let Some(username) = username { + filter.insert("username", username); + } + find_collect( + &db_client().await.git_accounts, + filter, + FindOptions::builder() + .sort(doc! { "domain": 1, "username": 1 }) + .build(), + ) + .await + .context("failed to query db for git provider accounts") + } +} + +impl Resolve for State { + async fn resolve( + &self, + GetDockerRegistryAccount { id }: GetDockerRegistryAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "Only admins can read docker registry accounts" + )); + } + find_one_by_id(&db_client().await.registry_accounts, &id) + .await + .context("failed to query db for docker registry accounts")? + .context( + "did not find docker registry account with the given id", + ) + } +} + +impl Resolve for State { + async fn resolve( + &self, + ListDockerRegistryAccounts { domain, username }: ListDockerRegistryAccounts, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "Only admins can read docker registry accounts" + )); + } + let mut filter = Document::new(); + if let Some(domain) = domain { + filter.insert("domain", domain); + } + if let Some(username) = username { + filter.insert("username", username); + } + find_collect( + &db_client().await.registry_accounts, + filter, + FindOptions::builder() + .sort(doc! 
{ "domain": 1, "username": 1 }) + .build(), + ) + .await + .context("failed to query db for docker registry accounts") + } +} diff --git a/bin/core/src/api/read/server.rs b/bin/core/src/api/read/server.rs index 4cb040cbb..424aa319e 100644 --- a/bin/core/src/api/read/server.rs +++ b/bin/core/src/api/read/server.rs @@ -10,10 +10,8 @@ use async_timing_util::{ use monitor_client::{ api::read::*, entities::{ - deployment::ContainerSummary, permission::PermissionLevel, server::{ - docker_image::ImageSummary, docker_network::DockerNetwork, Server, ServerActionState, ServerListItem, ServerState, }, user::User, @@ -239,10 +237,10 @@ fn processes_cache() -> &'static ProcessesCache { PROCESSES_CACHE.get_or_init(Default::default) } -impl ResolveToString for State { +impl ResolveToString for State { async fn resolve_to_string( &self, - GetSystemProcesses { server }: GetSystemProcesses, + ListSystemProcesses { server }: ListSystemProcesses, user: User, ) -> anyhow::Result { let server = resource::get_check_permissions::( @@ -328,56 +326,98 @@ impl Resolve for State { } } -impl Resolve for State { - async fn resolve( +impl ResolveToString for State { + async fn resolve_to_string( &self, - GetDockerImages { server }: GetDockerImages, + ListDockerImages { server }: ListDockerImages, user: User, - ) -> anyhow::Result> { + ) -> anyhow::Result { let server = resource::get_check_permissions::( &server, &user, PermissionLevel::Read, ) .await?; - periphery_client(&server)? 
- .request(periphery::build::GetImageList {}) - .await + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if let Some(images) = &cache.images { + serde_json::to_string(images) + .context("failed to serialize response") + } else { + Ok(String::from("[]")) + } } } -impl Resolve for State { - async fn resolve( +impl ResolveToString for State { + async fn resolve_to_string( &self, - GetDockerNetworks { server }: GetDockerNetworks, + ListDockerNetworks { server }: ListDockerNetworks, user: User, - ) -> anyhow::Result> { + ) -> anyhow::Result { let server = resource::get_check_permissions::( &server, &user, PermissionLevel::Read, ) .await?; - periphery_client(&server)? - .request(periphery::network::GetNetworkList {}) - .await + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if let Some(networks) = &cache.networks { + serde_json::to_string(networks) + .context("failed to serialize response") + } else { + Ok(String::from("[]")) + } } } -impl Resolve for State { - async fn resolve( +impl ResolveToString for State { + async fn resolve_to_string( &self, - GetDockerContainers { server }: GetDockerContainers, + ListDockerContainers { server }: ListDockerContainers, user: User, - ) -> anyhow::Result> { + ) -> anyhow::Result { let server = resource::get_check_permissions::( &server, &user, PermissionLevel::Read, ) .await?; - periphery_client(&server)? 
- .request(periphery::container::GetContainerList {}) - .await + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if let Some(containers) = &cache.containers { + serde_json::to_string(containers) + .context("failed to serialize response") + } else { + Ok(String::from("[]")) + } + } +} + +impl ResolveToString for State { + async fn resolve_to_string( + &self, + ListComposeProjects { server }: ListComposeProjects, + user: User, + ) -> anyhow::Result { + let server = resource::get_check_permissions::( + &server, + &user, + PermissionLevel::Read, + ) + .await?; + let cache = server_status_cache() + .get_or_insert_default(&server.id) + .await; + if let Some(projects) = &cache.projects { + serde_json::to_string(projects) + .context("failed to serialize response") + } else { + Ok(String::from("[]")) + } } } diff --git a/bin/core/src/api/read/server_template.rs b/bin/core/src/api/read/server_template.rs index 9f9a6417b..7efe470aa 100644 --- a/bin/core/src/api/read/server_template.rs +++ b/bin/core/src/api/read/server_template.rs @@ -4,14 +4,13 @@ use monitor_client::{ api::read::*, entities::{ permission::PermissionLevel, server_template::ServerTemplate, - update::ResourceTargetVariant, user::User, + user::User, }, }; use mungos::mongodb::bson::doc; use resolver_api::Resolve; use crate::{ - helpers::query::get_resource_ids_for_user, resource, state::{db_client, State}, }; @@ -57,10 +56,9 @@ impl Resolve for State { GetServerTemplatesSummary {}: GetServerTemplatesSummary, user: User, ) -> anyhow::Result { - let query = match get_resource_ids_for_user( - &user, - ResourceTargetVariant::ServerTemplate, - ) + let query = match resource::get_resource_ids_for_user::< + ServerTemplate, + >(&user) .await? { Some(ids) => doc! 
{ diff --git a/bin/core/src/api/read/stack.rs b/bin/core/src/api/read/stack.rs new file mode 100644 index 000000000..a749e2cf2 --- /dev/null +++ b/bin/core/src/api/read/stack.rs @@ -0,0 +1,311 @@ +use std::collections::HashSet; + +use anyhow::Context; +use monitor_client::{ + api::read::*, + entities::{ + config::core::CoreConfig, + permission::PermissionLevel, + stack::{Stack, StackActionState, StackListItem, StackState}, + user::User, + }, +}; +use periphery_client::api::compose::{ + GetComposeServiceLog, GetComposeServiceLogSearch, +}; +use resolver_api::Resolve; + +use crate::{ + config::core_config, + helpers::{periphery_client, stack::get_stack_and_server}, + resource, + state::{action_states, github_client, stack_status_cache, State}, +}; + +impl Resolve for State { + async fn resolve( + &self, + GetStack { stack }: GetStack, + user: User, + ) -> anyhow::Result { + resource::get_check_permissions::( + &stack, + &user, + PermissionLevel::Read, + ) + .await + } +} + +impl Resolve for State { + async fn resolve( + &self, + ListStackServices { stack }: ListStackServices, + user: User, + ) -> anyhow::Result { + let stack = resource::get_check_permissions::( + &stack, + &user, + PermissionLevel::Read, + ) + .await?; + + let services = stack_status_cache() + .get(&stack.id) + .await + .unwrap_or_default() + .curr + .services + .clone(); + + Ok(services) + } +} + +impl Resolve for State { + async fn resolve( + &self, + GetStackServiceLog { + stack, + service, + tail, + }: GetStackServiceLog, + user: User, + ) -> anyhow::Result { + let (stack, server) = get_stack_and_server( + &stack, + &user, + PermissionLevel::Read, + true, + ) + .await?; + periphery_client(&server)? 
+ .request(GetComposeServiceLog { + project: stack.project_name(false), + service, + tail, + }) + .await + .context("failed to get stack service log from periphery") + } +} + +impl Resolve for State { + async fn resolve( + &self, + SearchStackServiceLog { + stack, + service, + terms, + combinator, + invert, + }: SearchStackServiceLog, + user: User, + ) -> anyhow::Result { + let (stack, server) = get_stack_and_server( + &stack, + &user, + PermissionLevel::Read, + true, + ) + .await?; + periphery_client(&server)? + .request(GetComposeServiceLogSearch { + project: stack.project_name(false), + service, + terms, + combinator, + invert, + }) + .await + .context("failed to get stack service log from periphery") + } +} + +impl Resolve for State { + async fn resolve( + &self, + ListCommonStackExtraArgs { query }: ListCommonStackExtraArgs, + user: User, + ) -> anyhow::Result { + let stacks = resource::list_full_for_user::(query, &user) + .await + .context("failed to get resources matching query")?; + + // first collect with guaranteed uniqueness + let mut res = HashSet::::new(); + + for stack in stacks { + for extra_arg in stack.config.extra_args { + res.insert(extra_arg); + } + } + + let mut res = res.into_iter().collect::>(); + res.sort(); + Ok(res) + } +} + +impl Resolve for State { + async fn resolve( + &self, + ListStacks { query }: ListStacks, + user: User, + ) -> anyhow::Result> { + resource::list_for_user::(query, &user).await + } +} + +impl Resolve for State { + async fn resolve( + &self, + ListFullStacks { query }: ListFullStacks, + user: User, + ) -> anyhow::Result { + resource::list_full_for_user::(query, &user).await + } +} + +impl Resolve for State { + async fn resolve( + &self, + GetStackActionState { stack }: GetStackActionState, + user: User, + ) -> anyhow::Result { + let stack = resource::get_check_permissions::( + &stack, + &user, + PermissionLevel::Read, + ) + .await?; + let action_state = action_states() + .stack + .get(&stack.id) + .await + 
.unwrap_or_default() + .get()?; + Ok(action_state) + } +} + +impl Resolve for State { + async fn resolve( + &self, + GetStacksSummary {}: GetStacksSummary, + user: User, + ) -> anyhow::Result { + let stacks = resource::list_full_for_user::( + Default::default(), + &user, + ) + .await + .context("failed to get stacks from db")?; + + let mut res = GetStacksSummaryResponse::default(); + + let cache = stack_status_cache(); + + for stack in stacks { + res.total += 1; + match cache.get(&stack.id).await.unwrap_or_default().curr.state + { + StackState::Running => res.running += 1, + StackState::Paused => res.paused += 1, + StackState::Stopped => res.stopped += 1, + StackState::Restarting => res.restarting += 1, + StackState::Dead => res.dead += 1, + StackState::Unhealthy => res.unhealthy += 1, + StackState::Down => res.down += 1, + StackState::Unknown => res.unknown += 1, + } + } + + Ok(res) + } +} + +impl Resolve for State { + async fn resolve( + &self, + GetStackWebhooksEnabled { stack }: GetStackWebhooksEnabled, + user: User, + ) -> anyhow::Result { + let Some(github) = github_client() else { + return Ok(GetStackWebhooksEnabledResponse { + managed: false, + refresh_enabled: false, + deploy_enabled: false, + }); + }; + + let stack = resource::get_check_permissions::( + &stack, + &user, + PermissionLevel::Read, + ) + .await?; + + if stack.config.git_provider != "github.com" + || stack.config.repo.is_empty() + { + return Ok(GetStackWebhooksEnabledResponse { + managed: false, + refresh_enabled: false, + deploy_enabled: false, + }); + } + + let mut split = stack.config.repo.split('/'); + let owner = split.next().context("Sync repo has no owner")?; + + let Some(github) = github.get(owner) else { + return Ok(GetStackWebhooksEnabledResponse { + managed: false, + refresh_enabled: false, + deploy_enabled: false, + }); + }; + + let repo_name = + split.next().context("Repo repo has no repo after the /")?; + + let github_repos = github.repos(); + + let webhooks = github_repos + 
.list_all_webhooks(owner, repo_name) + .await + .context("failed to list all webhooks on repo")? + .body; + + let CoreConfig { + host, + webhook_base_url, + .. + } = core_config(); + + let host = webhook_base_url.as_ref().unwrap_or(host); + let refresh_url = + format!("{host}/listener/github/stack/{}/refresh", stack.id); + let deploy_url = + format!("{host}/listener/github/stack/{}/deploy", stack.id); + + let mut refresh_enabled = false; + let mut deploy_enabled = false; + + for webhook in webhooks { + if webhook.active && webhook.config.url == refresh_url { + refresh_enabled = true + } + if webhook.active && webhook.config.url == deploy_url { + deploy_enabled = true + } + } + + Ok(GetStackWebhooksEnabledResponse { + managed: true, + refresh_enabled, + deploy_enabled, + }) + } +} diff --git a/bin/core/src/api/read/toml.rs b/bin/core/src/api/read/toml.rs index 7755c51c3..c25ee2db6 100644 --- a/bin/core/src/api/read/toml.rs +++ b/bin/core/src/api/read/toml.rs @@ -25,6 +25,7 @@ use monitor_client::{ resource::{Resource, ResourceQuery}, server::Server, server_template::ServerTemplate, + stack::Stack, sync::ResourceSync, toml::{ PermissionToml, ResourceToml, ResourcesToml, UserGroupToml, @@ -89,6 +90,15 @@ impl Resolve for State { .into_iter() .map(|resource| ResourceTarget::Deployment(resource.id)), ); + targets.extend( + resource::list_for_user::( + ResourceQuery::builder().tags(tags.clone()).build(), + &user, + ) + .await? 
+ .into_iter() + .map(|resource| ResourceTarget::Stack(resource.id)), + ); targets.extend( resource::list_for_user::( ResourceQuery::builder().tags(tags.clone()).build(), @@ -301,6 +311,24 @@ impl Resolve for State { ); res.repos.push(convert_resource::(repo, &names.tags)) } + ResourceTarget::Stack(id) => { + let mut stack = resource::get_check_permissions::( + &id, + &user, + PermissionLevel::Read, + ) + .await?; + // replace stack server with name + stack.config.server_id.clone_from( + names + .servers + .get(&stack.config.server_id) + .unwrap_or(&String::new()), + ); + res + .stacks + .push(convert_resource::(stack, &names.tags)) + } ResourceTarget::Procedure(id) => { add_procedure(&id, &mut res, &user, &names) .await @@ -355,6 +383,9 @@ async fn add_procedure( Execution::RunBuild(exec) => exec.build.clone_from( names.builds.get(&exec.build).unwrap_or(&String::new()), ), + Execution::CancelBuild(exec) => exec.build.clone_from( + names.builds.get(&exec.build).unwrap_or(&String::new()), + ), Execution::Deploy(exec) => exec.deployment.clone_from( names .deployments @@ -369,6 +400,30 @@ async fn add_procedure( .unwrap_or(&String::new()), ) } + Execution::RestartContainer(exec) => { + exec.deployment.clone_from( + names + .deployments + .get(&exec.deployment) + .unwrap_or(&String::new()), + ) + } + Execution::PauseContainer(exec) => { + exec.deployment.clone_from( + names + .deployments + .get(&exec.deployment) + .unwrap_or(&String::new()), + ) + } + Execution::UnpauseContainer(exec) => { + exec.deployment.clone_from( + names + .deployments + .get(&exec.deployment) + .unwrap_or(&String::new()), + ) + } Execution::StopContainer(exec) => exec.deployment.clone_from( names .deployments @@ -389,6 +444,12 @@ async fn add_procedure( Execution::PullRepo(exec) => exec.repo.clone_from( names.repos.get(&exec.repo).unwrap_or(&String::new()), ), + Execution::BuildRepo(exec) => exec.repo.clone_from( + names.repos.get(&exec.repo).unwrap_or(&String::new()), + ), + 
Execution::CancelRepoBuild(exec) => exec.repo.clone_from( + names.repos.get(&exec.repo).unwrap_or(&String::new()), + ), Execution::StopAllContainers(exec) => exec.server.clone_from( names.servers.get(&exec.server).unwrap_or(&String::new()), ), @@ -404,6 +465,27 @@ async fn add_procedure( Execution::RunSync(exec) => exec.sync.clone_from( names.syncs.get(&exec.sync).unwrap_or(&String::new()), ), + Execution::DeployStack(exec) => exec.stack.clone_from( + names.stacks.get(&exec.stack).unwrap_or(&String::new()), + ), + Execution::StartStack(exec) => exec.stack.clone_from( + names.stacks.get(&exec.stack).unwrap_or(&String::new()), + ), + Execution::RestartStack(exec) => exec.stack.clone_from( + names.stacks.get(&exec.stack).unwrap_or(&String::new()), + ), + Execution::PauseStack(exec) => exec.stack.clone_from( + names.stacks.get(&exec.stack).unwrap_or(&String::new()), + ), + Execution::UnpauseStack(exec) => exec.stack.clone_from( + names.stacks.get(&exec.stack).unwrap_or(&String::new()), + ), + Execution::StopStack(exec) => exec.stack.clone_from( + names.stacks.get(&exec.stack).unwrap_or(&String::new()), + ), + Execution::DestroyStack(exec) => exec.stack.clone_from( + names.stacks.get(&exec.stack).unwrap_or(&String::new()), + ), Execution::Sleep(_) | Execution::None(_) => {} } } @@ -424,6 +506,7 @@ struct ResourceNames { deployments: HashMap, procedures: HashMap, syncs: HashMap, + stacks: HashMap, alerters: HashMap, templates: HashMap, } @@ -480,6 +563,12 @@ impl ResourceNames { .into_iter() .map(|t| (t.id, t.name)) .collect::>(), + stacks: find_collect(&db.stacks, None, None) + .await + .context("failed to get all stacks")? + .into_iter() + .map(|t| (t.id, t.name)) + .collect::>(), alerters: find_collect(&db.alerters, None, None) .await .context("failed to get all alerters")? 
@@ -558,6 +647,9 @@ async fn add_user_groups( ResourceTarget::ResourceSync(id) => { *id = names.syncs.get(id).cloned().unwrap_or_default() } + ResourceTarget::Stack(id) => { + *id = names.stacks.get(id).cloned().unwrap_or_default() + } ResourceTarget::System(_) => {} } PermissionToml { @@ -597,6 +689,7 @@ fn convert_resource( description: resource.description, deploy: false, after: Default::default(), + latest_hash: false, config, } } @@ -693,6 +786,30 @@ fn serialize_resources_toml( ); } + for stack in &resources.stacks { + if !res.is_empty() { + res.push_str("\n\n##\n\n"); + } + res.push_str("[[stack]]\n"); + let mut parsed: OrderedHashMap = + serde_json::from_str(&serde_json::to_string(&stack)?)?; + let config = parsed + .get_mut("config") + .context("stack has no config?")? + .as_object_mut() + .context("config is not object?")?; + if let Some(environment) = &stack.config.environment { + config.insert( + "environment".to_string(), + Value::String(environment_vars_to_string(environment)), + ); + } + res.push_str( + &toml_pretty::to_string(&parsed, options) + .context("failed to serialize stacks to toml")?, + ); + } + for build in &resources.builds { if !res.is_empty() { res.push_str("\n\n##\n\n"); diff --git a/bin/core/src/api/read/update.rs b/bin/core/src/api/read/update.rs index 019eb66e0..ad8ccc4b6 100644 --- a/bin/core/src/api/read/update.rs +++ b/bin/core/src/api/read/update.rs @@ -13,10 +13,9 @@ use monitor_client::{ repo::Repo, server::Server, server_template::ServerTemplate, + stack::Stack, sync::ResourceSync, - update::{ - ResourceTarget, ResourceTargetVariant, Update, UpdateListItem, - }, + update::{ResourceTarget, Update, UpdateListItem}, user::User, }, }; @@ -29,7 +28,6 @@ use resolver_api::Resolve; use crate::{ config::core_config, - helpers::query::get_resource_ids_for_user, resource, state::{db_client, State}, }; @@ -45,44 +43,48 @@ impl Resolve for State { let query = if user.admin || core_config().transparent_mode { query } else { - let 
server_query = get_resource_ids_for_user( - &user, - ResourceTargetVariant::Server, - ) - .await? - .map(|ids| { - doc! { - "target.type": "Server", "target.id": { "$in": ids } - } - }) - .unwrap_or_else(|| doc! { "target.type": "Server" }); + let server_query = + resource::get_resource_ids_for_user::(&user) + .await? + .map(|ids| { + doc! { + "target.type": "Server", "target.id": { "$in": ids } + } + }) + .unwrap_or_else(|| doc! { "target.type": "Server" }); - let deployment_query = get_resource_ids_for_user( - &user, - ResourceTargetVariant::Deployment, - ) - .await? - .map(|ids| { - doc! { - "target.type": "Deployment", "target.id": { "$in": ids } - } - }) - .unwrap_or_else(|| doc! { "target.type": "Deployment" }); + let deployment_query = + resource::get_resource_ids_for_user::(&user) + .await? + .map(|ids| { + doc! { + "target.type": "Deployment", "target.id": { "$in": ids } + } + }) + .unwrap_or_else(|| doc! { "target.type": "Deployment" }); - let build_query = get_resource_ids_for_user( - &user, - ResourceTargetVariant::Build, - ) - .await? - .map(|ids| { - doc! { - "target.type": "Build", "target.id": { "$in": ids } - } - }) - .unwrap_or_else(|| doc! { "target.type": "Build" }); + let stack_query = + resource::get_resource_ids_for_user::(&user) + .await? + .map(|ids| { + doc! { + "target.type": "Stack", "target.id": { "$in": ids } + } + }) + .unwrap_or_else(|| doc! { "target.type": "Stack" }); + + let build_query = + resource::get_resource_ids_for_user::(&user) + .await? + .map(|ids| { + doc! { + "target.type": "Build", "target.id": { "$in": ids } + } + }) + .unwrap_or_else(|| doc! { "target.type": "Build" }); let repo_query = - get_resource_ids_for_user(&user, ResourceTargetVariant::Repo) + resource::get_resource_ids_for_user::(&user) .await? .map(|ids| { doc! { @@ -91,45 +93,38 @@ impl Resolve for State { }) .unwrap_or_else(|| doc! 
{ "target.type": "Repo" }); - let procedure_query = get_resource_ids_for_user( - &user, - ResourceTargetVariant::Procedure, - ) - .await? - .map(|ids| { - doc! { - "target.type": "Procedure", "target.id": { "$in": ids } - } - }) - .unwrap_or_else(|| doc! { "target.type": "Procedure" }); + let procedure_query = + resource::get_resource_ids_for_user::(&user) + .await? + .map(|ids| { + doc! { + "target.type": "Procedure", "target.id": { "$in": ids } + } + }) + .unwrap_or_else(|| doc! { "target.type": "Procedure" }); - let builder_query = get_resource_ids_for_user( - &user, - ResourceTargetVariant::Builder, - ) - .await? - .map(|ids| { - doc! { - "target.type": "Builder", "target.id": { "$in": ids } - } - }) - .unwrap_or_else(|| doc! { "target.type": "Builder" }); + let builder_query = + resource::get_resource_ids_for_user::(&user) + .await? + .map(|ids| { + doc! { + "target.type": "Builder", "target.id": { "$in": ids } + } + }) + .unwrap_or_else(|| doc! { "target.type": "Builder" }); - let alerter_query = get_resource_ids_for_user( - &user, - ResourceTargetVariant::Alerter, - ) - .await? - .map(|ids| { - doc! { - "target.type": "Alerter", "target.id": { "$in": ids } - } - }) - .unwrap_or_else(|| doc! { "target.type": "Alerter" }); + let alerter_query = + resource::get_resource_ids_for_user::(&user) + .await? + .map(|ids| { + doc! { + "target.type": "Alerter", "target.id": { "$in": ids } + } + }) + .unwrap_or_else(|| doc! { "target.type": "Alerter" }); - let server_template_query = get_resource_ids_for_user( + let server_template_query = resource::get_resource_ids_for_user::( &user, - ResourceTargetVariant::ServerTemplate, ) .await? .map(|ids| { @@ -139,9 +134,8 @@ impl Resolve for State { }) .unwrap_or_else(|| doc! { "target.type": "ServerTemplate" }); - let resource_sync_query = get_resource_ids_for_user( + let resource_sync_query = resource::get_resource_ids_for_user::( &user, - ResourceTargetVariant::ResourceSync, ) .await? 
.map(|ids| { @@ -155,8 +149,9 @@ impl Resolve for State { query.extend(doc! { "$or": [ server_query, - build_query, deployment_query, + stack_query, + build_query, repo_query, procedure_query, alerter_query, @@ -313,6 +308,14 @@ impl Resolve for State { ) .await?; } + ResourceTarget::Stack(id) => { + resource::get_check_permissions::( + id, + &user, + PermissionLevel::Read, + ) + .await?; + } } Ok(update) } diff --git a/bin/core/src/api/user.rs b/bin/core/src/api/user.rs index de68f679f..af76ec0ce 100644 --- a/bin/core/src/api/user.rs +++ b/bin/core/src/api/user.rs @@ -20,8 +20,8 @@ use typeshare::typeshare; use uuid::Uuid; use crate::{ - auth::{auth_request, random_string}, - helpers::query::get_user, + auth::auth_request, + helpers::{query::get_user, random_string}, state::{db_client, State}, }; diff --git a/bin/core/src/api/write/build.rs b/bin/core/src/api/write/build.rs index 561ed9b56..25178f040 100644 --- a/bin/core/src/api/write/build.rs +++ b/bin/core/src/api/write/build.rs @@ -1,14 +1,16 @@ use anyhow::{anyhow, Context}; +use mongo_indexed::doc; use monitor_client::{ api::write::*, entities::{ - build::{Build, PartialBuildConfig}, + build::{Build, BuildInfo, PartialBuildConfig}, config::core::CoreConfig, permission::PermissionLevel, user::User, - NoData, + CloneArgs, NoData, }, }; +use mungos::mongodb::bson::to_document; use octorust::types::{ ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig, }; @@ -16,8 +18,9 @@ use resolver_api::Resolve; use crate::{ config::core_config, + helpers::{git_token, random_string}, resource, - state::{github_client, State}, + state::{db_client, github_client, State}, }; impl Resolve for State { @@ -73,6 +76,89 @@ impl Resolve for State { } } +impl Resolve for State { + #[instrument(name = "RefreshBuildCache", skip(self, user))] + async fn resolve( + &self, + RefreshBuildCache { build }: RefreshBuildCache, + user: User, + ) -> anyhow::Result { + // Even though this is a write request, this doesn't change any config. 
Anyone that can execute the + // build should be able to do this. + let build = resource::get_check_permissions::( + &build, + &user, + PermissionLevel::Execute, + ) + .await?; + + let config = core_config(); + + let repo_dir = config.repo_directory.join(random_string(10)); + let mut clone_args: CloneArgs = (&build).into(); + clone_args.destination = Some(repo_dir.display().to_string()); + + let access_token = match (&clone_args.account, &clone_args.provider) + { + (None, _) => None, + (Some(_), None) => { + return Err(anyhow!( + "Account is configured, but provider is empty" + )) + } + (Some(username), Some(provider)) => { + git_token(provider, username, |https| { + clone_args.https = https + }) + .await + .with_context( + || format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"), + )? + } + }; + + let (_, latest_hash, latest_message, _) = git::clone( + clone_args, + &config.repo_directory, + access_token, + &[], + "", + None, + ) + .await + .context("failed to clone build repo")?; + + let info = BuildInfo { + last_built_at: build.info.last_built_at, + built_hash: build.info.built_hash, + built_message: build.info.built_message, + latest_hash, + latest_message, + }; + + let info = to_document(&info) + .context("failed to serialize build info to bson")?; + + db_client() + .await + .builds + .update_one( + doc! { "name": &build.name }, + doc! 
{ "$set": { "info": info } }, + ) + .await + .context("failed to update build info on db")?; + + if repo_dir.exists() { + if let Err(e) = std::fs::remove_dir_all(&repo_dir) { + warn!("failed to remove build cache update repo directory | {e:?}") + } + } + + Ok(NoData {}) + } +} + impl Resolve for State { #[instrument(name = "CreateBuildWebhook", skip(self, user))] async fn resolve( diff --git a/bin/core/src/api/write/description.rs b/bin/core/src/api/write/description.rs index 642149a5f..74c06ca1d 100644 --- a/bin/core/src/api/write/description.rs +++ b/bin/core/src/api/write/description.rs @@ -4,7 +4,7 @@ use monitor_client::{ entities::{ alerter::Alerter, build::Build, builder::Builder, deployment::Deployment, procedure::Procedure, repo::Repo, - server::Server, server_template::ServerTemplate, + server::Server, server_template::ServerTemplate, stack::Stack, sync::ResourceSync, update::ResourceTarget, user::User, }, }; @@ -100,6 +100,14 @@ impl Resolve for State { ) .await?; } + ResourceTarget::Stack(id) => { + resource::update_description::( + &id, + &description, + &user, + ) + .await?; + } } Ok(UpdateDescriptionResponse {}) } diff --git a/bin/core/src/api/write/mod.rs b/bin/core/src/api/write/mod.rs index 2a752c028..4646ddff2 100644 --- a/bin/core/src/api/write/mod.rs +++ b/bin/core/src/api/write/mod.rs @@ -19,10 +19,12 @@ mod deployment; mod description; mod permissions; mod procedure; +mod provider; mod repo; mod server; mod server_template; mod service_user; +mod stack; mod sync; mod tag; mod user_group; @@ -76,6 +78,7 @@ pub enum WriteRequest { CopyBuild(CopyBuild), DeleteBuild(DeleteBuild), UpdateBuild(UpdateBuild), + RefreshBuildCache(RefreshBuildCache), CreateBuildWebhook(CreateBuildWebhook), DeleteBuildWebhook(DeleteBuildWebhook), @@ -96,6 +99,7 @@ pub enum WriteRequest { CopyRepo(CopyRepo), DeleteRepo(DeleteRepo), UpdateRepo(UpdateRepo), + RefreshRepoCache(RefreshRepoCache), CreateRepoWebhook(CreateRepoWebhook), DeleteRepoWebhook(DeleteRepoWebhook), @@ 
-120,6 +124,16 @@ pub enum WriteRequest { CreateSyncWebhook(CreateSyncWebhook), DeleteSyncWebhook(DeleteSyncWebhook), + // ==== STACK ==== + CreateStack(CreateStack), + CopyStack(CopyStack), + DeleteStack(DeleteStack), + UpdateStack(UpdateStack), + RenameStack(RenameStack), + RefreshStackCache(RefreshStackCache), + CreateStackWebhook(CreateStackWebhook), + DeleteStackWebhook(DeleteStackWebhook), + // ==== TAG ==== CreateTag(CreateTag), DeleteTag(DeleteTag), @@ -131,6 +145,14 @@ pub enum WriteRequest { UpdateVariableValue(UpdateVariableValue), UpdateVariableDescription(UpdateVariableDescription), DeleteVariable(DeleteVariable), + + // ==== PROVIDERS ==== + CreateGitProviderAccount(CreateGitProviderAccount), + UpdateGitProviderAccount(UpdateGitProviderAccount), + DeleteGitProviderAccount(DeleteGitProviderAccount), + CreateDockerRegistryAccount(CreateDockerRegistryAccount), + UpdateDockerRegistryAccount(UpdateDockerRegistryAccount), + DeleteDockerRegistryAccount(DeleteDockerRegistryAccount), } pub fn router() -> Router { diff --git a/bin/core/src/api/write/permissions.rs b/bin/core/src/api/write/permissions.rs index aee407c36..0f9e86467 100644 --- a/bin/core/src/api/write/permissions.rs +++ b/bin/core/src/api/write/permissions.rs @@ -389,5 +389,20 @@ async fn extract_resource_target_with_validation( .id; Ok((ResourceTargetVariant::ResourceSync, id)) } + ResourceTarget::Stack(ident) => { + let filter = match ObjectId::from_str(ident) { + Ok(id) => doc! { "_id": id }, + Err(_) => doc! { "name": ident }, + }; + let id = db_client() + .await + .stacks + .find_one(filter) + .await + .context("failed to query db for stacks")? + .context("no matching stack found")? 
+ .id; + Ok((ResourceTargetVariant::Stack, id)) + } } } diff --git a/bin/core/src/api/write/provider.rs b/bin/core/src/api/write/provider.rs new file mode 100644 index 000000000..246e13217 --- /dev/null +++ b/bin/core/src/api/write/provider.rs @@ -0,0 +1,402 @@ +use anyhow::{anyhow, Context}; +use monitor_client::{ + api::write::*, + entities::{ + provider::{DockerRegistryAccount, GitProviderAccount}, + update::ResourceTarget, + user::User, + Operation, + }, +}; +use mungos::{ + by_id::{delete_one_by_id, find_one_by_id, update_one_by_id}, + mongodb::bson::{doc, to_document}, +}; +use resolver_api::Resolve; + +use crate::{ + helpers::update::{add_update, make_update}, + state::{db_client, State}, +}; + +impl Resolve for State { + async fn resolve( + &self, + CreateGitProviderAccount { account }: CreateGitProviderAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "only admins can create git provider accounts" + )); + } + + let mut account: GitProviderAccount = account.into(); + + if account.domain.is_empty() { + return Err(anyhow!("domain cannot be empty string.")); + } + + if account.username.is_empty() { + return Err(anyhow!("username cannot be empty string.")); + } + + let mut update = make_update( + ResourceTarget::system(), + Operation::CreateGitProviderAccount, + &user, + ); + + account.id = db_client() + .await + .git_accounts + .insert_one(&account) + .await + .context("failed to create git provider account on db")? + .inserted_id + .as_object_id() + .context("inserted id is not ObjectId")? 
+ .to_string(); + + update.push_simple_log( + "create git provider account", + format!( + "Created git provider account for {} with username {}", + account.domain, account.username + ), + ); + + update.finalize(); + + add_update(update) + .await + .inspect_err(|e| { + error!("failed to add update for create git provider account | {e:#}") + }) + .ok(); + + Ok(account) + } +} + +impl Resolve for State { + async fn resolve( + &self, + UpdateGitProviderAccount { id, mut account }: UpdateGitProviderAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "only admins can update git provider accounts" + )); + } + + if let Some(domain) = &account.domain { + if domain.is_empty() { + return Err(anyhow!( + "cannot update git provider with empty domain" + )); + } + } + + if let Some(username) = &account.username { + if username.is_empty() { + return Err(anyhow!( + "cannot update git provider with empty username" + )); + } + } + + // Ensure update does not change id + account.id = None; + + let mut update = make_update( + ResourceTarget::system(), + Operation::UpdateGitProviderAccount, + &user, + ); + + let account = to_document(&account).context( + "failed to serialize partial git provider account to bson", + )?; + let db = db_client().await; + update_one_by_id( + &db.git_accounts, + &id, + doc! { "$set": account }, + None, + ) + .await + .context("failed to update git provider account on db")?; + + let Some(account) = + find_one_by_id(&db.git_accounts, &id) + .await + .context("failed to query db for git accounts")? 
+ else { + return Err(anyhow!("no account found with given id")); + }; + + update.push_simple_log( + "update git provider account", + format!( + "Updated git provider account for {} with username {}", + account.domain, account.username + ), + ); + + update.finalize(); + + add_update(update) + .await + .inspect_err(|e| { + error!("failed to add update for update git provider account | {e:#}") + }) + .ok(); + + Ok(account) + } +} + +impl Resolve for State { + async fn resolve( + &self, + DeleteGitProviderAccount { id }: DeleteGitProviderAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "only admins can delete git provider accounts" + )); + } + + let mut update = make_update( + ResourceTarget::system(), + Operation::UpdateGitProviderAccount, + &user, + ); + + let db = db_client().await; + let Some(account) = + find_one_by_id(&db.git_accounts, &id) + .await + .context("failed to query db for git accounts")? + else { + return Err(anyhow!("no account found with given id")); + }; + delete_one_by_id(&db.git_accounts, &id, None) + .await + .context("failed to delete git account on db")?; + + update.push_simple_log( + "delete git provider account", + format!( + "Deleted git provider account for {} with username {}", + account.domain, account.username + ), + ); + + update.finalize(); + + add_update(update) + .await + .inspect_err(|e| { + error!("failed to add update for delete git provider account | {e:#}") + }) + .ok(); + + Ok(account) + } +} + +impl Resolve for State { + async fn resolve( + &self, + CreateDockerRegistryAccount { account }: CreateDockerRegistryAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "only admins can create docker registry account accounts" + )); + } + + let mut account: DockerRegistryAccount = account.into(); + + if account.domain.is_empty() { + return Err(anyhow!("domain cannot be empty string.")); + } + + if account.username.is_empty() { + return Err(anyhow!("username 
cannot be empty string.")); + } + + let mut update = make_update( + ResourceTarget::system(), + Operation::CreateDockerRegistryAccount, + &user, + ); + + account.id = db_client() + .await + .registry_accounts + .insert_one(&account) + .await + .context( + "failed to create docker registry account account on db", + )? + .inserted_id + .as_object_id() + .context("inserted id is not ObjectId")? + .to_string(); + + update.push_simple_log( + "create docker registry account", + format!( + "Created docker registry account account for {} with username {}", + account.domain, account.username + ), + ); + + update.finalize(); + + add_update(update) + .await + .inspect_err(|e| { + error!("failed to add update for create docker registry account | {e:#}") + }) + .ok(); + + Ok(account) + } +} + +impl Resolve for State { + async fn resolve( + &self, + UpdateDockerRegistryAccount { id, mut account }: UpdateDockerRegistryAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "only admins can update docker registry accounts" + )); + } + + if let Some(domain) = &account.domain { + if domain.is_empty() { + return Err(anyhow!( + "cannot update docker registry account with empty domain" + )); + } + } + + if let Some(username) = &account.username { + if username.is_empty() { + return Err(anyhow!( + "cannot update docker registry account with empty username" + )); + } + } + + account.id = None; + + let mut update = make_update( + ResourceTarget::system(), + Operation::UpdateDockerRegistryAccount, + &user, + ); + + let account = to_document(&account).context( + "failed to serialize partial docker registry account account to bson", + )?; + + let db = db_client().await; + update_one_by_id( + &db.registry_accounts, + &id, + doc! 
{ "$set": account }, + None, + ) + .await + .context( + "failed to update docker registry account account on db", + )?; + + let Some(account) = find_one_by_id(&db.registry_accounts, &id) + .await + .context("failed to query db for registry accounts")? + else { + return Err(anyhow!("no account found with given id")); + }; + + update.push_simple_log( + "update docker registry account", + format!( + "Updated docker registry account account for {} with username {}", + account.domain, account.username + ), + ); + + update.finalize(); + + add_update(update) + .await + .inspect_err(|e| { + error!("failed to add update for update docker registry account | {e:#}") + }) + .ok(); + + Ok(account) + } +} + +impl Resolve for State { + async fn resolve( + &self, + DeleteDockerRegistryAccount { id }: DeleteDockerRegistryAccount, + user: User, + ) -> anyhow::Result { + if !user.admin { + return Err(anyhow!( + "only admins can delete docker registry accounts" + )); + } + + let mut update = make_update( + ResourceTarget::system(), + Operation::UpdateDockerRegistryAccount, + &user, + ); + + let db = db_client().await; + let Some(account) = find_one_by_id(&db.registry_accounts, &id) + .await + .context("failed to query db for git accounts")? 
+ else { + return Err(anyhow!("no account found with given id")); + }; + delete_one_by_id(&db.registry_accounts, &id, None) + .await + .context("failed to delete registry account on db")?; + + update.push_simple_log( + "delete registry account", + format!( + "Deleted registry account for {} with username {}", + account.domain, account.username + ), + ); + + update.finalize(); + + add_update(update) + .await + .inspect_err(|e| { + error!("failed to add update for delete docker registry account | {e:#}") + }) + .ok(); + + Ok(account) + } +} diff --git a/bin/core/src/api/write/repo.rs b/bin/core/src/api/write/repo.rs index 938f987e3..eca9b1c25 100644 --- a/bin/core/src/api/write/repo.rs +++ b/bin/core/src/api/write/repo.rs @@ -1,14 +1,16 @@ use anyhow::{anyhow, Context}; +use mongo_indexed::doc; use monitor_client::{ api::write::*, entities::{ config::core::CoreConfig, permission::PermissionLevel, - repo::{PartialRepoConfig, Repo}, + repo::{PartialRepoConfig, Repo, RepoInfo}, user::User, - NoData, + CloneArgs, NoData, }, }; +use mungos::mongodb::bson::to_document; use octorust::types::{ ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig, }; @@ -16,8 +18,9 @@ use resolver_api::Resolve; use crate::{ config::core_config, + helpers::{git_token, random_string}, resource, - state::{github_client, State}, + state::{db_client, github_client, State}, }; impl Resolve for State { @@ -71,6 +74,92 @@ impl Resolve for State { } } +impl Resolve for State { + #[instrument(name = "RefreshRepoCache", skip(self, user))] + async fn resolve( + &self, + RefreshRepoCache { repo }: RefreshRepoCache, + user: User, + ) -> anyhow::Result { + // Even though this is a write request, this doesn't change any config. Anyone that can execute the + // repo should be able to do this. 
+ let repo = resource::get_check_permissions::( + &repo, + &user, + PermissionLevel::Execute, + ) + .await?; + + let config = core_config(); + + let repo_dir = config.repo_directory.join(random_string(10)); + let mut clone_args: CloneArgs = (&repo).into(); + clone_args.destination = Some(repo_dir.display().to_string()); + + let access_token = match (&clone_args.account, &clone_args.provider) + { + (None, _) => None, + (Some(_), None) => { + return Err(anyhow!( + "Account is configured, but provider is empty" + )) + } + (Some(username), Some(provider)) => { + git_token(provider, username, |https| { + clone_args.https = https + }) + .await + .with_context( + || format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"), + )? + } + }; + + let (_, latest_hash, latest_message, _) = git::clone( + clone_args, + &config.repo_directory, + access_token, + &[], + "", + None, + ) + .await + .context("failed to clone repo (the resource) repo")?; + + let info = RepoInfo { + last_pulled_at: repo.info.last_pulled_at, + last_built_at: repo.info.last_built_at, + built_hash: repo.info.built_hash, + built_message: repo.info.built_message, + latest_hash, + latest_message, + }; + + let info = to_document(&info) + .context("failed to serialize repo info to bson")?; + + db_client() + .await + .repos + .update_one( + doc! { "name": &repo.name }, + doc! 
{ "$set": { "info": info } }, + ) + .await + .context("failed to update repo info on db")?; + + if repo_dir.exists() { + if let Err(e) = std::fs::remove_dir_all(&repo_dir) { + warn!( + "failed to remove repo (resource) cache update repo directory | {e:?}" + ) + } + } + + Ok(NoData {}) + } +} + impl Resolve for State { #[instrument(name = "CreateRepoWebhook", skip(self, user))] async fn resolve( diff --git a/bin/core/src/api/write/stack.rs b/bin/core/src/api/write/stack.rs new file mode 100644 index 000000000..8a97082fa --- /dev/null +++ b/bin/core/src/api/write/stack.rs @@ -0,0 +1,507 @@ +use anyhow::{anyhow, Context}; +use formatting::format_serror; +use monitor_client::{ + api::write::*, + entities::{ + config::core::CoreConfig, + monitor_timestamp, + permission::PermissionLevel, + stack::{ComposeContents, PartialStackConfig, Stack, StackInfo}, + update::Update, + user::User, + NoData, Operation, + }, +}; +use mungos::{ + by_id::update_one_by_id, + mongodb::bson::{doc, to_document}, +}; +use octorust::types::{ + ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig, +}; +use resolver_api::Resolve; + +use crate::{ + config::core_config, + helpers::{ + stack::{ + json::get_config_jsons, remote::get_remote_compose_contents, + services::extract_services_into_res, + }, + update::{add_update, make_update}, + }, + monitor::update_cache_for_stack, + resource, + state::{db_client, github_client, State}, +}; + +impl Resolve for State { + #[instrument(name = "CreateStack", skip(self, user))] + async fn resolve( + &self, + CreateStack { name, config }: CreateStack, + user: User, + ) -> anyhow::Result { + let res = resource::create::(&name, config, &user).await; + if let Ok(stack) = &res { + if let Err(e) = self + .resolve(RefreshStackCache { stack: name }, user.clone()) + .await + { + let mut update = + make_update(stack, Operation::RefreshStackCache, &user); + update.push_error_log( + "refresh stack cache", + format_serror(&e.context("The stack cache has failed to 
refresh. This is likely due to a misconfiguration of the Stack").into()) + ); + add_update(update).await.ok(); + }; + update_cache_for_stack(stack).await; + } + res + } +} + +impl Resolve for State { + #[instrument(name = "CopyStack", skip(self, user))] + async fn resolve( + &self, + CopyStack { name, id }: CopyStack, + user: User, + ) -> anyhow::Result { + let Stack { config, .. } = + resource::get_check_permissions::( + &id, + &user, + PermissionLevel::Write, + ) + .await?; + let res = + resource::create::(&name, config.into(), &user).await; + if let Ok(stack) = &res { + if let Err(e) = self + .resolve(RefreshStackCache { stack: name }, user.clone()) + .await + { + let mut update = + make_update(stack, Operation::RefreshStackCache, &user); + update.push_error_log( + "refresh stack cache", + format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into()) + ); + add_update(update).await.ok(); + }; + update_cache_for_stack(stack).await; + } + res + } +} + +impl Resolve for State { + #[instrument(name = "DeleteStack", skip(self, user))] + async fn resolve( + &self, + DeleteStack { id }: DeleteStack, + user: User, + ) -> anyhow::Result { + resource::delete::(&id, &user).await + } +} + +impl Resolve for State { + #[instrument(name = "UpdateStack", skip(self, user))] + async fn resolve( + &self, + UpdateStack { id, config }: UpdateStack, + user: User, + ) -> anyhow::Result { + let res = resource::update::(&id, config, &user).await; + if let Ok(stack) = &res { + if let Err(e) = self + .resolve(RefreshStackCache { stack: id }, user.clone()) + .await + { + let mut update = + make_update(stack, Operation::RefreshStackCache, &user); + update.push_error_log( + "refresh stack cache", + format_serror(&e.context("The stack cache has failed to refresh. 
This is likely due to a misconfiguration of the Stack").into()) + ); + add_update(update).await.ok(); + }; + update_cache_for_stack(stack).await; + } + res + } +} + +impl Resolve for State { + #[instrument(name = "RenameStack", skip(self, user))] + async fn resolve( + &self, + RenameStack { id, name }: RenameStack, + user: User, + ) -> anyhow::Result { + let stack = resource::get_check_permissions::( + &id, + &user, + PermissionLevel::Write, + ) + .await?; + + let mut update = + make_update(&stack, Operation::RenameStack, &user); + + update_one_by_id( + &db_client().await.stacks, + &stack.id, + mungos::update::Update::Set( + doc! { "name": &name, "updated_at": monitor_timestamp() }, + ), + None, + ) + .await + .context("failed to update stack name on db")?; + + update.push_simple_log( + "rename stack", + format!("renamed stack from {} to {}", stack.name, name), + ); + update.finalize(); + + add_update(update.clone()).await?; + + Ok(update) + } +} + +impl Resolve for State { + #[instrument(name = "RefreshStackCache", level = "debug", skip(self, user))] + async fn resolve( + &self, + RefreshStackCache { stack }: RefreshStackCache, + user: User, + ) -> anyhow::Result { + // Even though this is a write request, this doesn't change any config. Anyone that can execute the + // stack should be able to do this. 
+ let stack = resource::get_check_permissions::( + &stack, + &user, + PermissionLevel::Execute, + ) + .await?; + + let file_contents_empty = stack.config.file_contents.is_empty(); + + if file_contents_empty && stack.config.repo.is_empty() { + // Nothing to do without one of these + return Ok(NoData {}); + } + + let mut missing_files = Vec::new(); + + let ( + latest_services, + latest_json, + latest_json_errors, + remote_contents, + remote_errors, + latest_hash, + latest_message, + ) = if file_contents_empty { + // REPO BASED STACK + let ( + remote_contents, + remote_errors, + _, + latest_hash, + latest_message, + ) = + get_remote_compose_contents(&stack, Some(&mut missing_files)) + .await + .context("failed to clone remote compose file")?; + let project_name = stack.project_name(true); + + let mut services = Vec::new(); + + for contents in &remote_contents { + if let Err(e) = extract_services_into_res( + &project_name, + &contents.contents, + &mut services, + ) { + warn!( + "failed to extract stack services, things won't works correctly. stack: {} | {e:#}", + stack.name + ); + } + } + + let (jsons, json_errors) = + get_config_jsons(&remote_contents).await; + + ( + services, + jsons, + json_errors, + Some(remote_contents), + Some(remote_errors), + latest_hash, + latest_message, + ) + } else { + let mut services = Vec::new(); + if let Err(e) = extract_services_into_res( + // this should latest (not deployed), so make the project name fresh. + &stack.project_name(true), + &stack.config.file_contents, + &mut services, + ) { + warn!( + "failed to extract stack services, things won't works correctly. 
stack: {} | {e:#}", + stack.name + ); + services.extend(stack.info.latest_services); + }; + let (json, json_errors) = + get_config_jsons(&[ComposeContents { + path: stack + .config + .file_paths + .first() + .map(String::as_str) + .unwrap_or("compose.yaml") + .to_string(), + contents: stack.config.file_contents, + }]) + .await; + (services, json, json_errors, None, None, None, None) + }; + + let info = StackInfo { + missing_files, + deployed_services: stack.info.deployed_services, + deployed_project_name: stack.info.deployed_project_name, + deployed_contents: stack.info.deployed_contents, + deployed_hash: stack.info.deployed_hash, + deployed_message: stack.info.deployed_message, + deployed_json: stack.info.deployed_json, + deployed_json_errors: stack.info.deployed_json_errors, + latest_services, + latest_json, + latest_json_errors, + remote_contents, + remote_errors, + latest_hash, + latest_message, + }; + + let info = to_document(&info) + .context("failed to serialize stack info to bson")?; + + db_client() + .await + .stacks + .update_one( + doc! { "name": &stack.name }, + doc! 
{ "$set": { "info": info } }, + ) + .await + .context("failed to update stack info on db")?; + + Ok(NoData {}) + } +} + +impl Resolve for State { + #[instrument(name = "CreateStackWebhook", skip(self, user))] + async fn resolve( + &self, + CreateStackWebhook { stack, action }: CreateStackWebhook, + user: User, + ) -> anyhow::Result { + let Some(github) = github_client() else { + return Err(anyhow!( + "github_webhook_app is not configured in core config toml" + )); + }; + + let stack = resource::get_check_permissions::( + &stack, + &user, + PermissionLevel::Write, + ) + .await?; + + if stack.config.repo.is_empty() { + return Err(anyhow!( + "No repo configured, can't create webhook" + )); + } + + let mut split = stack.config.repo.split('/'); + let owner = split.next().context("Stack repo has no owner")?; + + let Some(github) = github.get(owner) else { + return Err(anyhow!( + "Cannot manage repo webhooks under owner {owner}" + )); + }; + + let repo = + split.next().context("Stack repo has no repo after the /")?; + + let github_repos = github.repos(); + + // First make sure the webhook isn't already created (inactive ones are ignored) + let webhooks = github_repos + .list_all_webhooks(owner, repo) + .await + .context("failed to list all webhooks on repo")? + .body; + + let CoreConfig { + host, + webhook_base_url, + webhook_secret, + .. 
+ } = core_config(); + + let host = webhook_base_url.as_ref().unwrap_or(host); + let url = match action { + StackWebhookAction::Refresh => { + format!("{host}/listener/github/stack/{}/refresh", stack.id) + } + StackWebhookAction::Deploy => { + format!("{host}/listener/github/stack/{}/deploy", stack.id) + } + }; + + for webhook in webhooks { + if webhook.active && webhook.config.url == url { + return Ok(NoData {}); + } + } + + // Now good to create the webhook + let request = ReposCreateWebhookRequest { + active: Some(true), + config: Some(ReposCreateWebhookRequestConfig { + url, + secret: webhook_secret.to_string(), + content_type: String::from("json"), + insecure_ssl: None, + digest: Default::default(), + token: Default::default(), + }), + events: vec![String::from("push")], + name: String::from("web"), + }; + github_repos + .create_webhook(owner, repo, &request) + .await + .context("failed to create webhook")?; + + if !stack.config.webhook_enabled { + self + .resolve( + UpdateStack { + id: stack.id, + config: PartialStackConfig { + webhook_enabled: Some(true), + ..Default::default() + }, + }, + user, + ) + .await + .context("failed to update stack to enable webhook")?; + } + + Ok(NoData {}) + } +} + +impl Resolve for State { + #[instrument(name = "DeleteStackWebhook", skip(self, user))] + async fn resolve( + &self, + DeleteStackWebhook { stack, action }: DeleteStackWebhook, + user: User, + ) -> anyhow::Result { + let Some(github) = github_client() else { + return Err(anyhow!( + "github_webhook_app is not configured in core config toml" + )); + }; + + let stack = resource::get_check_permissions::( + &stack, + &user, + PermissionLevel::Write, + ) + .await?; + + if stack.config.git_provider != "github.com" { + return Err(anyhow!( + "Can only manage github.com repo webhooks" + )); + } + + if stack.config.repo.is_empty() { + return Err(anyhow!( + "No repo configured, can't create webhook" + )); + } + + let mut split = stack.config.repo.split('/'); + let owner = 
split.next().context("Stack repo has no owner")?; + + let Some(github) = github.get(owner) else { + return Err(anyhow!( + "Cannot manage repo webhooks under owner {owner}" + )); + }; + + let repo = + split.next().context("Sync repo has no repo after the /")?; + + let github_repos = github.repos(); + + // First make sure the webhook isn't already created (inactive ones are ignored) + let webhooks = github_repos + .list_all_webhooks(owner, repo) + .await + .context("failed to list all webhooks on repo")? + .body; + + let CoreConfig { + host, + webhook_base_url, + .. + } = core_config(); + + let host = webhook_base_url.as_ref().unwrap_or(host); + let url = match action { + StackWebhookAction::Refresh => { + format!("{host}/listener/github/stack/{}/refresh", stack.id) + } + StackWebhookAction::Deploy => { + format!("{host}/listener/github/stack/{}/deploy", stack.id) + } + }; + + for webhook in webhooks { + if webhook.active && webhook.config.url == url { + github_repos + .delete_webhook(owner, repo, webhook.id) + .await + .context("failed to delete webhook")?; + return Ok(NoData {}); + } + } + + // No webhook to delete, all good + Ok(NoData {}) + } +} diff --git a/bin/core/src/api/write/sync.rs b/bin/core/src/api/write/sync.rs index 768b90478..a6c4a82a0 100644 --- a/bin/core/src/api/write/sync.rs +++ b/bin/core/src/api/write/sync.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use anyhow::{anyhow, Context}; use formatting::format_serror; use monitor_client::{ @@ -9,12 +11,14 @@ use monitor_client::{ build::Build, builder::Builder, config::core::CoreConfig, + deployment::Deployment, monitor_timestamp, permission::PermissionLevel, procedure::Procedure, repo::Repo, server::{stats::SeverityLevel, Server}, server_template::ServerTemplate, + stack::Stack, sync::{ PartialResourceSyncConfig, PendingSyncUpdates, PendingSyncUpdatesData, PendingSyncUpdatesDataErr, @@ -40,7 +44,7 @@ use crate::{ alert::send_alerts, query::get_id_to_tags, sync::{ - deployment, + 
deploy::SyncDeployParams, resource::{get_updates_for_view, AllResourcesById}, }, }, @@ -101,6 +105,7 @@ impl Resolve for State { } impl Resolve for State { + #[instrument(name = "RefreshResourceSyncPending", level = "debug", skip(self, user))] async fn resolve( &self, RefreshResourceSyncPending { sync }: RefreshResourceSyncPending, @@ -124,8 +129,33 @@ impl Resolve for State { .context("failed to get remote resources")?; let resources = res?; - let all_resources = AllResourcesById::load().await?; let id_to_tags = get_id_to_tags(None).await?; + let all_resources = AllResourcesById::load().await?; + + let deployments_by_name = all_resources + .deployments + .values() + .map(|deployment| { + (deployment.name.clone(), deployment.clone()) + }) + .collect::>(); + let stacks_by_name = all_resources + .stacks + .values() + .map(|stack| (stack.name.clone(), stack.clone())) + .collect::>(); + + let deploy_updates = + crate::helpers::sync::deploy::get_updates_for_view( + SyncDeployParams { + deployments: &resources.deployments, + deployment_map: &deployments_by_name, + stacks: &resources.stacks, + stack_map: &stacks_by_name, + all_resources: &all_resources, + }, + ) + .await; let data = PendingSyncUpdatesDataOk { server_updates: get_updates_for_view::( @@ -136,7 +166,7 @@ impl Resolve for State { ) .await .context("failed to get server updates")?, - deployment_updates: deployment::get_updates_for_view( + deployment_updates: get_updates_for_view::( resources.deployments, sync.config.delete, &all_resources, @@ -144,6 +174,14 @@ impl Resolve for State { ) .await .context("failed to get deployment updates")?, + stack_updates: get_updates_for_view::( + resources.stacks, + sync.config.delete, + &all_resources, + &id_to_tags, + ) + .await + .context("failed to get stack updates")?, build_updates: get_updates_for_view::( resources.builds, sync.config.delete, @@ -218,6 +256,7 @@ impl Resolve for State { ) .await .context("failed to get user group updates")?, + deploy_updates, }; 
anyhow::Ok((hash, message, data)) } diff --git a/bin/core/src/api/write/tag.rs b/bin/core/src/api/write/tag.rs index 520441f69..a7e0d8593 100644 --- a/bin/core/src/api/write/tag.rs +++ b/bin/core/src/api/write/tag.rs @@ -10,8 +10,8 @@ use monitor_client::{ alerter::Alerter, build::Build, builder::Builder, deployment::Deployment, permission::PermissionLevel, procedure::Procedure, repo::Repo, server::Server, - server_template::ServerTemplate, sync::ResourceSync, tag::Tag, - update::ResourceTarget, user::User, + server_template::ServerTemplate, stack::Stack, + sync::ResourceSync, tag::Tag, update::ResourceTarget, user::User, }, }; use mungos::{ @@ -200,6 +200,15 @@ impl Resolve for State { .await?; resource::update_tags::(&id, tags, user).await? } + ResourceTarget::Stack(id) => { + resource::get_check_permissions::( + &id, + &user, + PermissionLevel::Write, + ) + .await?; + resource::update_tags::(&id, tags, user).await? + } }; Ok(UpdateTagsOnResourceResponse {}) } diff --git a/bin/core/src/api/write/variable.rs b/bin/core/src/api/write/variable.rs index b78dd5e55..388e93b77 100644 --- a/bin/core/src/api/write/variable.rs +++ b/bin/core/src/api/write/variable.rs @@ -71,7 +71,7 @@ impl Resolve for State { user: User, ) -> anyhow::Result { if !user.admin { - return Err(anyhow!("only admins can create variables")); + return Err(anyhow!("only admins can update variables")); } let variable = get_variable(&name).await?; @@ -118,7 +118,7 @@ impl Resolve for State { user: User, ) -> anyhow::Result { if !user.admin { - return Err(anyhow!("only admins can create variables")); + return Err(anyhow!("only admins can update variables")); } db_client() .await @@ -140,7 +140,7 @@ impl Resolve for State { user: User, ) -> anyhow::Result { if !user.admin { - return Err(anyhow!("only admins can create variables")); + return Err(anyhow!("only admins can delete variables")); } let variable = get_variable(&name).await?; db_client() diff --git a/bin/core/src/auth/github/client.rs 
b/bin/core/src/auth/github/client.rs index 2fde0093e..0afb90c39 100644 --- a/bin/core/src/auth/github/client.rs +++ b/bin/core/src/auth/github/client.rs @@ -9,8 +9,8 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tokio::sync::Mutex; use crate::{ - auth::{random_string, STATE_PREFIX_LENGTH}, - config::core_config, + auth::STATE_PREFIX_LENGTH, config::core_config, + helpers::random_string, }; pub fn github_oauth_client() -> &'static Option { diff --git a/bin/core/src/auth/github/mod.rs b/bin/core/src/auth/github/mod.rs index 1f33e59c2..4688c8f64 100644 --- a/bin/core/src/auth/github/mod.rs +++ b/bin/core/src/auth/github/mod.rs @@ -81,7 +81,7 @@ async fn callback( let user = User { id: Default::default(), username: github_user.login, - enabled: no_users_exist, + enabled: no_users_exist || core_config().enable_new_users, admin: no_users_exist, create_server_permissions: no_users_exist, create_build_permissions: no_users_exist, diff --git a/bin/core/src/auth/google/client.rs b/bin/core/src/auth/google/client.rs index d753deaa6..6dc2eb560 100644 --- a/bin/core/src/auth/google/client.rs +++ b/bin/core/src/auth/google/client.rs @@ -11,8 +11,8 @@ use serde_json::Value; use tokio::sync::Mutex; use crate::{ - auth::{random_string, STATE_PREFIX_LENGTH}, - config::core_config, + auth::STATE_PREFIX_LENGTH, config::core_config, + helpers::random_string, }; pub fn google_oauth_client() -> &'static Option { diff --git a/bin/core/src/auth/google/mod.rs b/bin/core/src/auth/google/mod.rs index 0336c5d10..36bf323f5 100644 --- a/bin/core/src/auth/google/mod.rs +++ b/bin/core/src/auth/google/mod.rs @@ -96,7 +96,7 @@ async fn callback( .first() .unwrap() .to_string(), - enabled: no_users_exist, + enabled: no_users_exist || core_config().enable_new_users, admin: no_users_exist, create_server_permissions: no_users_exist, create_build_permissions: no_users_exist, diff --git a/bin/core/src/auth/jwt.rs b/bin/core/src/auth/jwt.rs index 2e2a7b937..b9b5a05a1 100644 --- 
a/bin/core/src/auth/jwt.rs +++ b/bin/core/src/auth/jwt.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use sha2::Sha256; use tokio::sync::Mutex; -use super::random_string; +use crate::helpers::random_string; type ExchangeTokenMap = Mutex>; @@ -25,26 +25,31 @@ pub struct JwtClaims { pub struct JwtClient { pub key: Hmac, - valid_for_ms: u128, + ttl_ms: u128, exchange_tokens: ExchangeTokenMap, } impl JwtClient { - pub fn new(config: &CoreConfig) -> JwtClient { - let key = Hmac::new_from_slice(random_string(40).as_bytes()) - .expect("failed at taking HmacSha256 of jwt secret"); - JwtClient { + pub fn new(config: &CoreConfig) -> anyhow::Result { + let secret = if config.jwt_secret.is_empty() { + random_string(40) + } else { + config.jwt_secret.clone() + }; + let key = Hmac::new_from_slice(secret.as_bytes()) + .context("failed at taking HmacSha256 of jwt secret")?; + Ok(JwtClient { key, - valid_for_ms: get_timelength_in_ms( - config.jwt_valid_for.to_string().parse().unwrap(), + ttl_ms: get_timelength_in_ms( + config.jwt_ttl.to_string().parse()?, ), exchange_tokens: Default::default(), - } + }) } pub fn generate(&self, user_id: String) -> anyhow::Result { let iat = unix_timestamp_ms(); - let exp = iat + self.valid_for_ms; + let exp = iat + self.ttl_ms; let claims = JwtClaims { id: user_id, iat, diff --git a/bin/core/src/auth/local.rs b/bin/core/src/auth/local.rs index 43bd551d4..0969ba5f3 100644 --- a/bin/core/src/auth/local.rs +++ b/bin/core/src/auth/local.rs @@ -29,7 +29,9 @@ impl Resolve for State { CreateLocalUser { username, password }: CreateLocalUser, _: HeaderMap, ) -> anyhow::Result { - if !core_config().local_auth { + let core_config = core_config(); + + if !core_config.local_auth { return Err(anyhow!("local auth is not enabled")); } @@ -41,6 +43,10 @@ impl Resolve for State { return Err(anyhow!("username cannot be valid ObjectId")); } + if password.is_empty() { + return Err(anyhow!("password cannot be empty string")); + } + let password = 
bcrypt::hash(password, BCRYPT_COST) .context("failed to hash password")?; @@ -56,7 +62,7 @@ impl Resolve for State { let user = User { id: Default::default(), username, - enabled: no_users_exist, + enabled: no_users_exist || core_config.enable_new_users, admin: no_users_exist, create_server_permissions: no_users_exist, create_build_permissions: no_users_exist, diff --git a/bin/core/src/auth/mod.rs b/bin/core/src/auth/mod.rs index e62886318..9cfe5f35c 100644 --- a/bin/core/src/auth/mod.rs +++ b/bin/core/src/auth/mod.rs @@ -7,7 +7,6 @@ use axum::{ }; use monitor_client::entities::{monitor_timestamp, user::User}; use mungos::mongodb::bson::doc; -use rand::{distributions::Alphanumeric, thread_rng, Rng}; use reqwest::StatusCode; use serde::Deserialize; use serror::AddStatusCode; @@ -45,14 +44,6 @@ pub async fn auth_request( Ok(next.run(req).await) } -pub fn random_string(length: usize) -> String { - thread_rng() - .sample_iter(&Alphanumeric) - .take(length) - .map(char::from) - .collect() -} - #[instrument(level = "debug")] pub async fn get_user_id_from_headers( headers: &HeaderMap, diff --git a/bin/core/src/config.rs b/bin/core/src/config.rs index d7714ba8e..202752ae3 100644 --- a/bin/core/src/config.rs +++ b/bin/core/src/config.rs @@ -77,15 +77,28 @@ pub fn core_config() -> &'static CoreConfig { host: env.monitor_host.unwrap_or(config.host), port: env.monitor_port.unwrap_or(config.port), passkey: env.monitor_passkey.unwrap_or(config.passkey), - jwt_valid_for: env - .monitor_jwt_valid_for - .unwrap_or(config.jwt_valid_for), - sync_directory: env - .monitor_sync_directory + jwt_secret: env.monitor_jwt_secret.unwrap_or(config.jwt_secret), + jwt_ttl: env + .monitor_jwt_ttl + .unwrap_or(config.jwt_ttl), + repo_directory: env + .monitor_repo_directory .map(|dir| dir.parse() - .context("failed to parse env MONITOR_SYNC_DIRECTORY as valid path").unwrap()) - .unwrap_or(config.sync_directory), + .context("failed to parse env MONITOR_REPO_DIRECTORY as valid path").unwrap()) + 
.unwrap_or(config.repo_directory), + stack_poll_interval: env + .monitor_stack_poll_interval + .unwrap_or(config.stack_poll_interval), + sync_poll_interval: env + .monitor_sync_poll_interval + .unwrap_or(config.sync_poll_interval), + build_poll_interval: env + .monitor_build_poll_interval + .unwrap_or(config.build_poll_interval), + repo_poll_interval: env + .monitor_repo_poll_interval + .unwrap_or(config.repo_poll_interval), monitoring_interval: env .monitor_monitoring_interval .unwrap_or(config.monitoring_interval), @@ -107,6 +120,8 @@ pub fn core_config() -> &'static CoreConfig { ui_write_disabled: env .monitor_ui_write_disabled .unwrap_or(config.ui_write_disabled), + enable_new_users: env.monitor_enable_new_users + .unwrap_or(config.enable_new_users), local_auth: env.monitor_local_auth.unwrap_or(config.local_auth), google_oauth: OauthCredentials { enabled: env diff --git a/bin/core/src/db.rs b/bin/core/src/db.rs index a585ae3b3..93abfd540 100644 --- a/bin/core/src/db.rs +++ b/bin/core/src/db.rs @@ -9,9 +9,11 @@ use monitor_client::entities::{ deployment::Deployment, permission::Permission, procedure::Procedure, + provider::{DockerRegistryAccount, GitProviderAccount}, repo::Repo, server::{stats::SystemStatsRecord, Server}, server_template::ServerTemplate, + stack::Stack, sync::ResourceSync, tag::Tag, update::Update, @@ -31,6 +33,8 @@ pub struct DbClient { pub api_keys: Collection, pub tags: Collection, pub variables: Collection, + pub git_accounts: Collection, + pub registry_accounts: Collection, pub updates: Collection, pub alerts: Collection, pub stats: Collection, @@ -44,6 +48,7 @@ pub struct DbClient { pub alerters: Collection, pub server_templates: Collection, pub resource_syncs: Collection, + pub stacks: Collection, // pub db: Database, } @@ -90,6 +95,8 @@ impl DbClient { api_keys: mongo_indexed::collection(&db, true).await?, tags: mongo_indexed::collection(&db, true).await?, variables: mongo_indexed::collection(&db, true).await?, + git_accounts: 
mongo_indexed::collection(&db, true).await?, + registry_accounts: mongo_indexed::collection(&db, true).await?, updates: mongo_indexed::collection(&db, true).await?, alerts: mongo_indexed::collection(&db, true).await?, stats: mongo_indexed::collection(&db, true).await?, @@ -105,6 +112,7 @@ impl DbClient { .await?, resource_syncs: resource_collection(&db, "ResourceSync") .await?, + stacks: resource_collection(&db, "Stack").await?, // db, }; diff --git a/bin/core/src/helpers/action_state.rs b/bin/core/src/helpers/action_state.rs index 58702a147..2d4ae3148 100644 --- a/bin/core/src/helpers/action_state.rs +++ b/bin/core/src/helpers/action_state.rs @@ -6,7 +6,8 @@ use monitor_client::{ entities::{ build::BuildActionState, deployment::DeploymentActionState, procedure::ProcedureActionState, repo::RepoActionState, - server::ServerActionState, sync::ResourceSyncActionState, + server::ServerActionState, stack::StackActionState, + sync::ResourceSyncActionState, }, }; @@ -23,6 +24,7 @@ pub struct ActionStates { Cache>>, pub resource_sync: Cache>>, + pub stack: Cache>>, } /// Need to be able to check "busy" with write lock acquired. diff --git a/bin/core/src/helpers/alert.rs b/bin/core/src/helpers/alert.rs index cdb370b3b..3f232c95f 100644 --- a/bin/core/src/helpers/alert.rs +++ b/bin/core/src/helpers/alert.rs @@ -6,6 +6,7 @@ use monitor_client::entities::{ alerter::*, deployment::DeploymentState, server::stats::SeverityLevel, + stack::StackState, update::ResourceTargetVariant, }; use mungos::{find::find_collect, mongodb::bson::doc}; @@ -305,7 +306,7 @@ async fn send_slack_alert( .. } => { let to = fmt_docker_container_state(to); - let text = format!("📦 container *{name}* is now {to}"); + let text = format!("📦 Container *{name}* is now {to}"); let blocks = vec![ Block::header(text.clone()), Block::section(format!( @@ -318,6 +319,28 @@ async fn send_slack_alert( ]; (text, blocks.into()) } + AlertData::StackStateChange { + name, + server_name, + from, + to, + id, + .. 
+ } => { + let to = fmt_stack_state(to); + let text = format!("🥞 Stack *{name}* is now {to}"); + let blocks = vec![ + Block::header(text.clone()), + Block::section(format!( + "server: {server_name}\nprevious: {from}", + )), + Block::section(resource_link( + ResourceTargetVariant::Stack, + id, + )), + ]; + (text, blocks.into()) + } AlertData::AwsBuilderTerminationFailed { instance_id, message, @@ -359,6 +382,21 @@ async fn send_slack_alert( ]; (text, blocks.into()) } + AlertData::RepoBuildFailed { id, name } => { + let text = + format!("{level} | Repo build for {name} has failed"); + let blocks = vec![ + Block::header(text.clone()), + Block::section(format!( + "repo id: *{id}*\nrepo name: *{name}*", + )), + Block::section(resource_link( + ResourceTargetVariant::Repo, + id, + )), + ]; + (text, blocks.into()) + } AlertData::None {} => Default::default(), }; if !text.is_empty() { @@ -385,6 +423,16 @@ fn fmt_docker_container_state(state: &DeploymentState) -> String { } } +fn fmt_stack_state(state: &StackState) -> String { + match state { + StackState::Running => String::from("Running ▶️"), + StackState::Stopped => String::from("Stopped 🛑"), + StackState::Restarting => String::from("Restarting 🔄"), + StackState::Down => String::from("Down ⬇️"), + _ => state.to_string(), + } +} + fn fmt_level(level: SeverityLevel) -> &'static str { match level { SeverityLevel::Critical => "CRITICAL 🚨", @@ -406,6 +454,9 @@ fn resource_link( ResourceTargetVariant::Deployment => { format!("/deployments/{id}") } + ResourceTargetVariant::Stack => { + format!("/stacks/{id}") + } ResourceTargetVariant::Server => { format!("/servers/{id}") } diff --git a/bin/core/src/helpers/build.rs b/bin/core/src/helpers/build.rs new file mode 100644 index 000000000..e52cea32b --- /dev/null +++ b/bin/core/src/helpers/build.rs @@ -0,0 +1,49 @@ +use async_timing_util::{wait_until_timelength, Timelength}; +use monitor_client::{ + api::write::RefreshBuildCache, entities::user::build_user, +}; +use 
mungos::find::find_collect; +use resolver_api::Resolve; + +use crate::{ + config::core_config, + state::{db_client, State}, +}; + +pub fn spawn_build_refresh_loop() { + let interval: Timelength = core_config() + .build_poll_interval + .try_into() + .expect("Invalid build poll interval"); + tokio::spawn(async move { + refresh_builds().await; + loop { + wait_until_timelength(interval, 2000).await; + refresh_builds().await; + } + }); +} + +async fn refresh_builds() { + let Ok(builds) = + find_collect(&db_client().await.builds, None, None) + .await + .inspect_err(|e| { + warn!("failed to get builds from db in refresh task | {e:#}") + }) + else { + return; + }; + for build in builds { + State + .resolve( + RefreshBuildCache { build: build.id }, + build_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("failed to refresh build cache in refresh task | build: {} | {e:#}", build.name) + }) + .ok(); + } +} diff --git a/bin/core/src/helpers/builder.rs b/bin/core/src/helpers/builder.rs new file mode 100644 index 000000000..8543b577b --- /dev/null +++ b/bin/core/src/helpers/builder.rs @@ -0,0 +1,211 @@ +use std::time::Duration; + +use anyhow::{anyhow, Context}; +use formatting::muted; +use monitor_client::entities::{ + builder::{AwsBuilderConfig, Builder, BuilderConfig}, + monitor_timestamp, + server::Server, + server_template::aws::AwsServerTemplateConfig, + update::{Log, Update}, + Version, +}; +use periphery_client::{ + api::{self, GetVersionResponse}, + PeripheryClient, +}; + +use crate::{ + cloud::{ + aws::ec2::{ + launch_ec2_instance, terminate_ec2_instance_with_retry, + Ec2Instance, + }, + BuildCleanupData, + }, + config::core_config, + helpers::update::update_update, + resource, +}; + +use super::periphery_client; + +const BUILDER_POLL_RATE_SECS: u64 = 2; +const BUILDER_POLL_MAX_TRIES: usize = 30; + +#[instrument(skip_all, fields(builder_id = builder.id, update_id = update.id))] +pub async fn get_builder_periphery( + // build: &Build, + resource_name: String, + 
version: Option, + builder: Builder, + update: &mut Update, +) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> { + match builder.config { + BuilderConfig::Server(config) => { + if config.server_id.is_empty() { + return Err(anyhow!("builder has not configured a server")); + } + let server = resource::get::(&config.server_id).await?; + let periphery = periphery_client(&server)?; + Ok(( + periphery, + BuildCleanupData::Server { + repo_name: resource_name, + }, + )) + } + BuilderConfig::Aws(config) => { + get_aws_builder(&resource_name, version, config, update).await + } + } +} + +#[instrument(skip_all, fields(resource_name, update_id = update.id))] +async fn get_aws_builder( + resource_name: &str, + version: Option, + config: AwsBuilderConfig, + update: &mut Update, +) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> { + let start_create_ts = monitor_timestamp(); + + let version = version.map(|v| format!("-v{v}")).unwrap_or_default(); + let instance_name = format!("BUILDER-{resource_name}{version}"); + let Ec2Instance { instance_id, ip } = launch_ec2_instance( + &instance_name, + AwsServerTemplateConfig::from_builder_config(&config), + ) + .await?; + + info!("ec2 instance launched"); + + let log = Log { + stage: "start build instance".to_string(), + success: true, + stdout: start_aws_builder_log(&instance_id, &ip, &config), + start_ts: start_create_ts, + end_ts: monitor_timestamp(), + ..Default::default() + }; + + update.logs.push(log); + + update_update(update.clone()).await?; + + let periphery_address = format!("http://{ip}:{}", config.port); + let periphery = + PeripheryClient::new(&periphery_address, &core_config().passkey); + + let start_connect_ts = monitor_timestamp(); + let mut res = Ok(GetVersionResponse { + version: String::new(), + }); + for _ in 0..BUILDER_POLL_MAX_TRIES { + let version = periphery + .request(api::GetVersion {}) + .await + .context("failed to reach periphery client on builder"); + if let Ok(GetVersionResponse { version }) = 
&version { + let connect_log = Log { + stage: "build instance connected".to_string(), + success: true, + stdout: format!( + "established contact with periphery on builder\nperiphery version: v{}", + version + ), + start_ts: start_connect_ts, + end_ts: monitor_timestamp(), + ..Default::default() + }; + update.logs.push(connect_log); + update_update(update.clone()).await?; + return Ok(( + periphery, + BuildCleanupData::Aws { + instance_id, + region: config.region, + }, + )); + } + res = version; + tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS)) + .await; + } + + // Spawn terminate task in failure case (if loop is passed without return) + tokio::spawn(async move { + let _ = + terminate_ec2_instance_with_retry(config.region, &instance_id) + .await; + }); + + // Unwrap is safe, only way to get here is after check Ok / early return, so it must be err + Err( + res.err().unwrap().context( + "failed to start usable builder. terminating instance.", + ), + ) +} + +#[instrument(skip(periphery, update))] +pub async fn cleanup_builder_instance( + periphery: PeripheryClient, + cleanup_data: BuildCleanupData, + update: &mut Update, +) { + match cleanup_data { + BuildCleanupData::Server { repo_name } => { + let _ = periphery + .request(api::git::DeleteRepo { name: repo_name }) + .await; + } + BuildCleanupData::Aws { + instance_id, + region, + } => { + let _instance_id = instance_id.clone(); + tokio::spawn(async move { + let _ = + terminate_ec2_instance_with_retry(region, &_instance_id) + .await; + }); + update.push_simple_log( + "terminate instance", + format!("termination queued for instance id {instance_id}"), + ); + } + } +} + +pub fn start_aws_builder_log( + instance_id: &str, + ip: &str, + config: &AwsBuilderConfig, +) -> String { + let AwsBuilderConfig { + ami_id, + instance_type, + volume_gb, + subnet_id, + assign_public_ip, + security_group_ids, + use_public_ip, + .. 
+ } = config; + + let readable_sec_group_ids = security_group_ids.join(", "); + + [ + format!("{}: {instance_id}", muted("instance id")), + format!("{}: {ip}", muted("ip")), + format!("{}: {ami_id}", muted("ami id")), + format!("{}: {instance_type}", muted("instance type")), + format!("{}: {volume_gb} GB", muted("volume size")), + format!("{}: {subnet_id}", muted("subnet id")), + format!("{}: {readable_sec_group_ids}", muted("security groups")), + format!("{}: {assign_public_ip}", muted("assign public ip")), + format!("{}: {use_public_ip}", muted("use public ip")), + ] + .join("\n") +} diff --git a/bin/core/src/helpers/channel.rs b/bin/core/src/helpers/channel.rs index 467ee5d2a..e7e7b8fa0 100644 --- a/bin/core/src/helpers/channel.rs +++ b/bin/core/src/helpers/channel.rs @@ -12,6 +12,15 @@ pub fn build_cancel_channel( BUILD_CANCEL_CHANNEL.get_or_init(|| BroadcastChannel::new(100)) } +/// A channel sending (repo_id, update_id) +pub fn repo_cancel_channel( +) -> &'static BroadcastChannel<(String, Update)> { + static REPO_CANCEL_CHANNEL: OnceLock< + BroadcastChannel<(String, Update)>, + > = OnceLock::new(); + REPO_CANCEL_CHANNEL.get_or_init(|| BroadcastChannel::new(100)) +} + pub fn update_channel() -> &'static BroadcastChannel { static UPDATE_CHANNEL: OnceLock> = OnceLock::new(); diff --git a/bin/core/src/helpers/mod.rs b/bin/core/src/helpers/mod.rs index cf3c5db08..2159472ff 100644 --- a/bin/core/src/helpers/mod.rs +++ b/bin/core/src/helpers/mod.rs @@ -1,26 +1,32 @@ -use std::time::Duration; +use std::{collections::HashSet, time::Duration}; use anyhow::{anyhow, Context}; use mongo_indexed::Document; use monitor_client::entities::{ permission::{Permission, PermissionLevel, UserTarget}, server::Server, - update::ResourceTarget, + update::{Log, ResourceTarget, Update}, user::User, + EnvironmentVar, }; -use mungos::mongodb::bson::{doc, Bson}; +use mungos::mongodb::bson::{doc, to_document, Bson}; use periphery_client::PeripheryClient; -use rand::{thread_rng, Rng}; +use 
query::get_global_variables; +use rand::{distributions::Alphanumeric, thread_rng, Rng}; use crate::{config::core_config, state::db_client}; pub mod action_state; pub mod alert; +pub mod build; +pub mod builder; pub mod cache; pub mod channel; pub mod procedure; pub mod prune; pub mod query; +pub mod repo; +pub mod stack; pub mod sync; pub mod update; @@ -42,6 +48,78 @@ pub fn random_duration(min_ms: u64, max_ms: u64) -> Duration { Duration::from_millis(thread_rng().gen_range(min_ms..max_ms)) } +pub fn random_string(length: usize) -> String { + thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect() +} + +/// First checks db for token, then checks core config. +/// Only errors if db call errors. +/// Returns (token, use_https) +pub async fn git_token( + provider_domain: &str, + account_username: &str, + mut on_https_found: impl FnMut(bool), +) -> anyhow::Result> { + let db_provider = db_client() + .await + .git_accounts + .find_one(doc! { "domain": provider_domain, "username": account_username }) + .await + .context("failed to query db for git provider accounts")?; + if let Some(provider) = db_provider { + on_https_found(provider.https); + return Ok(Some(provider.token)); + } + Ok( + core_config() + .git_providers + .iter() + .find(|provider| provider.domain == provider_domain) + .and_then(|provider| { + on_https_found(provider.https); + provider + .accounts + .iter() + .find(|account| account.username == account_username) + .map(|account| account.token.clone()) + }), + ) +} + +/// First checks db for token, then checks core config. +/// Only errors if db call errors. +pub async fn registry_token( + provider_domain: &str, + account_username: &str, +) -> anyhow::Result> { + let provider = db_client() + .await + .registry_accounts + .find_one(doc! 
{ "domain": provider_domain, "username": account_username }) + .await + .context("failed to query db for docker registry accounts")?; + if let Some(provider) = provider { + return Ok(Some(provider.token)); + } + Ok( + core_config() + .docker_registries + .iter() + .find(|provider| provider.domain == provider_domain) + .and_then(|provider| { + provider + .accounts + .iter() + .find(|account| account.username == account_username) + .map(|account| account.token.clone()) + }), + ) +} + #[instrument] pub async fn remove_from_recently_viewed(resource: T) where @@ -134,3 +212,99 @@ pub fn flatten_document(doc: Document) -> Document { target } + +/// Returns the secret replacers +pub async fn interpolate_variables_secrets_into_environment( + environment: &mut Vec, + update: &mut Update, +) -> anyhow::Result> { + // Interpolate variables into environment + let variables = get_global_variables().await?; + let core_config = core_config(); + + let mut global_replacers = HashSet::new(); + let mut secret_replacers = HashSet::new(); + + for env in environment { + // first pass - global variables + let (res, more_replacers) = svi::interpolate_variables( + &env.value, + &variables, + svi::Interpolator::DoubleBrackets, + false, + ) + .with_context(|| { + format!( + "failed to interpolate global variables - {}", + env.variable + ) + })?; + global_replacers.extend(more_replacers); + // second pass - core secrets + let (res, more_replacers) = svi::interpolate_variables( + &res, + &core_config.secrets, + svi::Interpolator::DoubleBrackets, + false, + ) + .context("failed to interpolate core secrets")?; + secret_replacers.extend(more_replacers); + + // set env value with the result + env.value = res; + } + + // Show which variables were interpolated + if !global_replacers.is_empty() { + update.push_simple_log( + "interpolate global variables", + global_replacers + .into_iter() + .map(|(value, variable)| format!("{variable} => {value}")) + .collect::>() + .join("\n"), + ); + } + + if 
!secret_replacers.is_empty() { + update.push_simple_log( + "interpolate core secrets", + secret_replacers + .iter() + .map(|(_, variable)| format!("replaced: {variable}")) + .collect::>() + .join("\n"), + ); + } + + Ok(secret_replacers) +} + +/// Run on startup, as no updates should be in progress on startup +pub async fn startup_in_progress_update_cleanup() { + let log = Log::error( + "monitor shutdown", + String::from("Monitor shutdown during execution. If this is a build, the builder may not have been terminated.") + ); + // This static log won't fail to serialize, unwrap ok. + let log = to_document(&log).unwrap(); + if let Err(e) = db_client() + .await + .updates + .update_many( + doc! { "status": "InProgress" }, + doc! { + "$set": { + "status": "Complete", + "success": false, + }, + "$push": { + "logs": log + } + }, + ) + .await + { + error!("failed to cleanup in progress updates on startup | {e:#}") + } +} diff --git a/bin/core/src/helpers/procedure.rs b/bin/core/src/helpers/procedure.rs index 8541970d9..10f4af115 100644 --- a/bin/core/src/helpers/procedure.rs +++ b/bin/core/src/helpers/procedure.rs @@ -161,6 +161,22 @@ async fn execute_execution( ) .await? } + Execution::CancelBuild(req) => { + let req = ExecuteRequest::CancelBuild(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::CancelBuild(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at CancelBuild"), + &update_id, + ) + .await? + } Execution::Deploy(req) => { let req = ExecuteRequest::Deploy(req); let update = init_execution_update(&req, &user).await?; @@ -193,6 +209,54 @@ async fn execute_execution( ) .await? 
} + Execution::RestartContainer(req) => { + let req = ExecuteRequest::RestartContainer(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::RestartContainer(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at RestartContainer"), + &update_id, + ) + .await? + } + Execution::PauseContainer(req) => { + let req = ExecuteRequest::PauseContainer(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::PauseContainer(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at PauseContainer"), + &update_id, + ) + .await? + } + Execution::UnpauseContainer(req) => { + let req = ExecuteRequest::UnpauseContainer(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::UnpauseContainer(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at UnpauseContainer"), + &update_id, + ) + .await? + } Execution::StopContainer(req) => { let req = ExecuteRequest::StopContainer(req); let update = init_execution_update(&req, &user).await?; @@ -273,6 +337,38 @@ async fn execute_execution( ) .await? } + Execution::BuildRepo(req) => { + let req = ExecuteRequest::BuildRepo(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::BuildRepo(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at BuildRepo"), + &update_id, + ) + .await? 
+ } + Execution::CancelRepoBuild(req) => { + let req = ExecuteRequest::CancelRepoBuild(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::CancelRepoBuild(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at CancelRepoBuild"), + &update_id, + ) + .await? + } Execution::PruneNetworks(req) => { let req = ExecuteRequest::PruneNetworks(req); let update = init_execution_update(&req, &user).await?; @@ -337,6 +433,118 @@ async fn execute_execution( ) .await? } + Execution::DeployStack(req) => { + let req = ExecuteRequest::DeployStack(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::DeployStack(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at DeployStack"), + &update_id, + ) + .await? + } + Execution::StartStack(req) => { + let req = ExecuteRequest::StartStack(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::StartStack(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at StartStack"), + &update_id, + ) + .await? + } + Execution::RestartStack(req) => { + let req = ExecuteRequest::RestartStack(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::RestartStack(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at RestartStack"), + &update_id, + ) + .await? 
+ } + Execution::PauseStack(req) => { + let req = ExecuteRequest::PauseStack(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::PauseStack(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at PauseStack"), + &update_id, + ) + .await? + } + Execution::UnpauseStack(req) => { + let req = ExecuteRequest::UnpauseStack(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::UnpauseStack(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at UnpauseStack"), + &update_id, + ) + .await? + } + Execution::StopStack(req) => { + let req = ExecuteRequest::StopStack(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::StopStack(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at StopStack"), + &update_id, + ) + .await? + } + Execution::DestroyStack(req) => { + let req = ExecuteRequest::DestroyStack(req); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::DestroyStack(req) = req else { + unreachable!() + }; + let update_id = update.id.clone(); + handle_resolve_result( + State + .resolve(req, (user, update)) + .await + .context("failed at DestroyStack"), + &update_id, + ) + .await? 
+ } Execution::Sleep(req) => { tokio::time::sleep(Duration::from_millis( req.duration_ms as u64, diff --git a/bin/core/src/helpers/query.rs b/bin/core/src/helpers/query.rs index 7f2218299..78df498d5 100644 --- a/bin/core/src/helpers/query.rs +++ b/bin/core/src/helpers/query.rs @@ -1,15 +1,20 @@ -use std::{ - collections::{HashMap, HashSet}, - str::FromStr, -}; +use std::{collections::HashMap, str::FromStr}; use anyhow::{anyhow, Context}; use monitor_client::entities::{ - deployment::{Deployment, DeploymentState}, + alerter::Alerter, + build::Build, + builder::Builder, + deployment::{ContainerSummary, Deployment, DeploymentState}, permission::PermissionLevel, + procedure::Procedure, + repo::Repo, server::{Server, ServerState}, + server_template::ServerTemplate, + stack::{Stack, StackServiceNames, StackState}, + sync::ResourceSync, tag::Tag, - update::{ResourceTargetVariant, Update}, + update::{ResourceTarget, ResourceTargetVariant, Update}, user::{admin_service_user, User}, user_group::UserGroup, variable::Variable, @@ -23,7 +28,15 @@ use mungos::{ }, }; -use crate::{config::core_config, resource, state::db_client}; +use crate::{ + resource::{self, get_user_permission_on_resource}, + state::db_client, +}; + +use super::stack::{ + compose_container_match_regex, + services::extract_services_from_stack, +}; #[instrument(level = "debug")] // user: Id or username @@ -84,6 +97,85 @@ pub async fn get_deployment_state( Ok(state) } +/// Can pass all the containers from the same server +pub fn get_stack_state_from_containers( + services: &[StackServiceNames], + containers: &[ContainerSummary], +) -> StackState { + // first filter the containers to only ones which match the service + let containers = containers.iter().filter(|container| { + services.iter().any(|StackServiceNames { service_name, container_name }| { + match compose_container_match_regex(container_name) + .with_context(|| format!("failed to construct container name matching regex for service {service_name}")) + { 
+ Ok(regex) => regex, + Err(e) => { + warn!("{e:#}"); + return false + } + }.is_match(&container.name) + }) + }).collect::>(); + if containers.is_empty() { + return StackState::Down; + } + if services.len() != containers.len() { + return StackState::Unhealthy; + } + let running = containers + .iter() + .all(|container| container.state == DeploymentState::Running); + if running { + return StackState::Running; + } + let paused = containers + .iter() + .all(|container| container.state == DeploymentState::Paused); + if paused { + return StackState::Paused; + } + let stopped = containers + .iter() + .all(|container| container.state == DeploymentState::Exited); + if stopped { + return StackState::Stopped; + } + let restarting = containers + .iter() + .all(|container| container.state == DeploymentState::Restarting); + if restarting { + return StackState::Restarting; + } + let dead = containers + .iter() + .all(|container| container.state == DeploymentState::Dead); + if dead { + return StackState::Dead; + } + StackState::Unhealthy +} + +#[instrument(level = "debug")] +pub async fn get_stack_state( + stack: &Stack, +) -> anyhow::Result { + if stack.config.server_id.is_empty() { + return Ok(StackState::Down); + } + let (server, status) = + get_server_with_status(&stack.config.server_id).await?; + if status != ServerState::Ok { + return Ok(StackState::Unknown); + } + let containers = super::periphery_client(&server)? 
+ .request(periphery_client::api::container::GetContainerList {}) + .await?; + + let services = extract_services_from_stack(stack, false).await?; + + Ok(get_stack_state_from_containers(&services, &containers)) +} + #[instrument(level = "debug")] pub async fn get_tag(id_or_name: &str) -> anyhow::Result { let query = match ObjectId::from_str(id_or_name) { @@ -166,121 +258,44 @@ pub fn user_target_query( Ok(user_target_query) } -#[instrument(level = "debug")] -pub async fn get_user_permission_on_resource( +pub async fn get_user_permission_on_target( user: &User, - resource_variant: ResourceTargetVariant, - resource_id: &str, + target: &ResourceTarget, ) -> anyhow::Result { - if user.admin { - return Ok(PermissionLevel::Write); - } - - // Start with base of Read or None - let mut base = if core_config().transparent_mode { - PermissionLevel::Read - } else { - PermissionLevel::None - }; - - // Overlay users base on resource variant - if let Some(level) = user.all.get(&resource_variant).cloned() { - if level > base { - base = level; + match target { + ResourceTarget::System(_) => Ok(PermissionLevel::None), + ResourceTarget::Build(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::Builder(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::Deployment(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::Server(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::Repo(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::Alerter(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::Procedure(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::ServerTemplate(id) => { + get_user_permission_on_resource::(user, id) + .await + } + ResourceTarget::ResourceSync(id) => { + get_user_permission_on_resource::(user, id).await + } + ResourceTarget::Stack(id) => { + 
get_user_permission_on_resource::(user, id).await } } - if base == PermissionLevel::Write { - // No reason to keep going if already Write at this point. - return Ok(PermissionLevel::Write); - } - - // Overlay any user groups base on resource variant - let groups = get_user_user_groups(&user.id).await?; - for group in &groups { - if let Some(level) = group.all.get(&resource_variant).cloned() { - if level > base { - base = level; - } - } - } - if base == PermissionLevel::Write { - // No reason to keep going if already Write at this point. - return Ok(PermissionLevel::Write); - } - - // Overlay any specific permissions - let permission = find_collect( - &db_client().await.permissions, - doc! { - "$or": user_target_query(&user.id, &groups)?, - "resource_target.type": resource_variant.as_ref(), - "resource_target.id": resource_id - }, - None, - ) - .await - .context("failed to query db for permissions")? - .into_iter() - // get the max permission user has between personal / any user groups - .fold(base, |level, permission| { - if permission.level > level { - permission.level - } else { - level - } - }); - Ok(permission) -} - -/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access). 
-#[instrument(level = "debug")] -pub async fn get_resource_ids_for_user( - user: &User, - resource_type: ResourceTargetVariant, -) -> anyhow::Result>> { - // Check admin or transparent mode - if user.admin || core_config().transparent_mode { - return Ok(None); - } - - // Check user 'all' on variant - if let Some(level) = user.all.get(&resource_type).cloned() { - if level > PermissionLevel::None { - return Ok(None); - } - } - - // Check user groups 'all' on variant - let groups = get_user_user_groups(&user.id).await?; - for group in &groups { - if let Some(level) = group.all.get(&resource_type).cloned() { - if level > PermissionLevel::None { - return Ok(None); - } - } - } - - // Get specific ids - let ids = find_collect( - &db_client().await.permissions, - doc! { - "$or": user_target_query(&user.id, &groups)?, - "resource_target.type": resource_type.as_ref(), - "level": { "$in": ["Read", "Execute", "Write"] } - }, - None, - ) - .await - .context("failed to query permissions on db")? - .into_iter() - .map(|p| p.resource_target.extract_variant_id().1.to_string()) - // collect into hashset first to remove any duplicates - .collect::>() - .into_iter() - .flat_map(|id| ObjectId::from_str(&id)) - .collect::>(); - - Ok(Some(ids)) } pub fn id_or_name_filter(id_or_name: &str) -> Document { diff --git a/bin/core/src/helpers/repo.rs b/bin/core/src/helpers/repo.rs new file mode 100644 index 000000000..13b440e75 --- /dev/null +++ b/bin/core/src/helpers/repo.rs @@ -0,0 +1,48 @@ +use async_timing_util::{wait_until_timelength, Timelength}; +use monitor_client::{ + api::write::RefreshRepoCache, entities::user::repo_user, +}; +use mungos::find::find_collect; +use resolver_api::Resolve; + +use crate::{ + config::core_config, + state::{db_client, State}, +}; + +pub fn spawn_repo_refresh_loop() { + let interval: Timelength = core_config() + .repo_poll_interval + .try_into() + .expect("Invalid repo poll interval"); + tokio::spawn(async move { + refresh_repos().await; + loop { + 
wait_until_timelength(interval, 1000).await; + refresh_repos().await; + } + }); +} + +async fn refresh_repos() { + let Ok(repos) = find_collect(&db_client().await.repos, None, None) + .await + .inspect_err(|e| { + warn!("failed to get repos from db in refresh task | {e:#}") + }) + else { + return; + }; + for repo in repos { + State + .resolve( + RefreshRepoCache { repo: repo.id }, + repo_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("failed to refresh repo cache in refresh task | repo: {} | {e:#}", repo.name) + }) + .ok(); + } +} diff --git a/bin/core/src/helpers/stack/execute.rs b/bin/core/src/helpers/stack/execute.rs new file mode 100644 index 000000000..2ec955544 --- /dev/null +++ b/bin/core/src/helpers/stack/execute.rs @@ -0,0 +1,208 @@ +use monitor_client::{ + api::execute::*, + entities::{ + permission::PermissionLevel, + stack::{Stack, StackActionState}, + update::{Log, Update}, + user::User, + }, +}; +use periphery_client::{api::compose::*, PeripheryClient}; + +use crate::{ + helpers::{periphery_client, update::update_update}, + monitor::update_cache_for_server, + state::action_states, +}; + +use super::get_stack_and_server; + +pub trait ExecuteCompose { + type Extras; + + async fn execute( + periphery: PeripheryClient, + stack: Stack, + service: Option, + extras: Self::Extras, + ) -> anyhow::Result; +} + +pub async fn execute_compose( + stack: &str, + service: Option, + user: &User, + set_in_progress: impl Fn(&mut StackActionState), + mut update: Update, + extras: T::Extras, +) -> anyhow::Result { + let (stack, server) = + get_stack_and_server(stack, user, PermissionLevel::Execute, true) + .await?; + + // get the action state for the stack (or insert default). + let action_state = + action_states().stack.get_or_insert_default(&stack.id).await; + + // Will check to ensure stack not already busy before updating, and return Err if so. + // The returned guard will set the action state back to default when dropped. 
+ let _action_guard = action_state.update(set_in_progress)?; + + let periphery = periphery_client(&server)?; + + if let Some(service) = &service { + update.logs.push(Log::simple( + &format!("Service: {service}"), + format!("Execution requested for service stack {service}"), + )) + } + + update + .logs + .push(T::execute(periphery, stack, service, extras).await?); + + // Ensure cached stack state up to date by updating server cache + update_cache_for_server(&server).await; + + update.finalize(); + update_update(update.clone()).await?; + + Ok(update) +} + +impl ExecuteCompose for StartStack { + type Extras = (); + async fn execute( + periphery: PeripheryClient, + stack: Stack, + service: Option, + _: Self::Extras, + ) -> anyhow::Result { + let service = service + .map(|service| format!(" {service}")) + .unwrap_or_default(); + periphery + .request(ComposeExecution { + project: stack.project_name(false), + command: format!("start{service}"), + }) + .await + } +} + +impl ExecuteCompose for RestartStack { + type Extras = (); + async fn execute( + periphery: PeripheryClient, + stack: Stack, + service: Option, + _: Self::Extras, + ) -> anyhow::Result { + let service = service + .map(|service| format!(" {service}")) + .unwrap_or_default(); + periphery + .request(ComposeExecution { + project: stack.project_name(false), + command: format!("restart{service}"), + }) + .await + } +} + +impl ExecuteCompose for PauseStack { + type Extras = (); + async fn execute( + periphery: PeripheryClient, + stack: Stack, + service: Option, + _: Self::Extras, + ) -> anyhow::Result { + let service = service + .map(|service| format!(" {service}")) + .unwrap_or_default(); + periphery + .request(ComposeExecution { + project: stack.project_name(false), + command: format!("pause{service}"), + }) + .await + } +} + +impl ExecuteCompose for UnpauseStack { + type Extras = (); + async fn execute( + periphery: PeripheryClient, + stack: Stack, + service: Option, + _: Self::Extras, + ) -> anyhow::Result { + 
let service = service + .map(|service| format!(" {service}")) + .unwrap_or_default(); + periphery + .request(ComposeExecution { + project: stack.project_name(false), + command: format!("unpause{service}"), + }) + .await + } +} + +impl ExecuteCompose for StopStack { + type Extras = Option; + async fn execute( + periphery: PeripheryClient, + stack: Stack, + service: Option, + timeout: Self::Extras, + ) -> anyhow::Result { + let service = service + .map(|service| format!(" {service}")) + .unwrap_or_default(); + let maybe_timeout = maybe_timeout(timeout); + periphery + .request(ComposeExecution { + project: stack.project_name(false), + command: format!("stop{maybe_timeout}{service}"), + }) + .await + } +} + +impl ExecuteCompose for DestroyStack { + type Extras = (Option, bool); + async fn execute( + periphery: PeripheryClient, + stack: Stack, + service: Option, + (timeout, remove_orphans): Self::Extras, + ) -> anyhow::Result { + let service = service + .map(|service| format!(" {service}")) + .unwrap_or_default(); + let maybe_timeout = maybe_timeout(timeout); + let maybe_remove_orphans = if remove_orphans { + " --remove-orphans" + } else { + "" + }; + periphery + .request(ComposeExecution { + project: stack.project_name(false), + command: format!( + "down{maybe_timeout}{maybe_remove_orphans}{service}" + ), + }) + .await + } +} + +pub fn maybe_timeout(timeout: Option) -> String { + if let Some(timeout) = timeout { + format!(" --timeout {timeout}") + } else { + String::new() + } +} diff --git a/bin/core/src/helpers/stack/json.rs b/bin/core/src/helpers/stack/json.rs new file mode 100644 index 000000000..e755209be --- /dev/null +++ b/bin/core/src/helpers/stack/json.rs @@ -0,0 +1,80 @@ +use anyhow::Context; +use formatting::format_serror; +use monitor_client::entities::stack::ComposeContents; +use run_command::async_run_command; +use tokio::fs; + +use crate::{config::core_config, helpers::random_string}; + +// Returns (Jsons, Errors) +pub async fn get_config_jsons( + 
contents: &[ComposeContents], +) -> (Vec, Vec) { + let mut oks = Vec::new(); + let mut errs = Vec::new(); + for contents in contents { + match get_config_json(&contents.contents).await { + (Some(json), _) => oks.push(ComposeContents { + path: contents.path.to_string(), + contents: json, + }), + (_, Some(err)) => errs.push(ComposeContents { + path: contents.path.to_string(), + contents: err, + }), + _ => unreachable!(), + } + } + (oks, errs) +} + +pub async fn get_config_json( + compose_contents: &str, +) -> (Option, Option) { + match get_config_json_inner(compose_contents).await { + Ok(res) => (Some(res), None), + Err(e) => ( + None, + Some(format_serror( + &e.context("failed to get config json").into(), + )), + ), + } +} + +async fn get_config_json_inner( + compose_contents: &str, +) -> anyhow::Result { + // create a new folder to prevent collisions + let dir = core_config().repo_directory.join(random_string(10)); + + fs::create_dir_all(&dir) + .await + .context("failed to create compose file directory")?; + let file = dir.join("compose.yaml"); + + fs::write(&file, compose_contents).await.with_context(|| { + format!("failed to write compose contents to file file: {file:?}") + })?; + + let res = async_run_command(&format!( + "cd {} && docker-compose config --format json", + dir.display() + )) + .await; + + // Don't fail the function call here, just log on this maintenance related information. 
+ fs::remove_dir_all(&dir) + .await + .with_context(|| { + format!("failed to clean up compose directory: {dir:?}") + }) + .inspect_err(|e| error!("{e:#}")) + .ok(); + + if res.success() { + Ok(res.stdout) + } else { + Err(anyhow::Error::msg(res.stderr)) + } +} diff --git a/bin/core/src/helpers/stack/mod.rs b/bin/core/src/helpers/stack/mod.rs new file mode 100644 index 000000000..cf322b768 --- /dev/null +++ b/bin/core/src/helpers/stack/mod.rs @@ -0,0 +1,98 @@ +use anyhow::{anyhow, Context}; +use async_timing_util::{wait_until_timelength, Timelength}; +use monitor_client::{ + api::write::RefreshStackCache, + entities::{ + permission::PermissionLevel, + server::{Server, ServerState}, + stack::Stack, + user::{stack_user, User}, + }, +}; +use mungos::find::find_collect; +use regex::Regex; +use resolver_api::Resolve; + +use crate::{ + config::core_config, + resource, + state::{db_client, State}, +}; + +use super::query::get_server_with_status; + +pub mod execute; +pub mod json; +pub mod remote; +pub mod services; + +pub fn spawn_stack_refresh_loop() { + let interval: Timelength = core_config() + .stack_poll_interval + .try_into() + .expect("Invalid stack poll interval"); + tokio::spawn(async move { + refresh_stacks().await; + loop { + wait_until_timelength(interval, 3000).await; + refresh_stacks().await; + } + }); +} + +async fn refresh_stacks() { + let Ok(stacks) = + find_collect(&db_client().await.stacks, None, None) + .await + .inspect_err(|e| { + warn!("failed to get stacks from db in refresh task | {e:#}") + }) + else { + return; + }; + for stack in stacks { + State + .resolve( + RefreshStackCache { stack: stack.id }, + stack_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("failed to refresh stack cache in refresh task | stack: {} | {e:#}", stack.name) + }) + .ok(); + } +} + +pub async fn get_stack_and_server( + stack: &str, + user: &User, + permission_level: PermissionLevel, + block_if_server_unreachable: bool, +) -> anyhow::Result<(Stack, Server)> { + 
let stack = resource::get_check_permissions::( + stack, + user, + permission_level, + ) + .await?; + + if stack.config.server_id.is_empty() { + return Err(anyhow!("Stack has no server configured")); + } + + let (server, status) = + get_server_with_status(&stack.config.server_id).await?; + if block_if_server_unreachable && status != ServerState::Ok { + return Err(anyhow!( + "cannot send action when server is unreachable or disabled" + )); + } + + Ok((stack, server)) +} + +pub fn compose_container_match_regex(container_name: &str) -> anyhow::Result { + let regex = format!("^{container_name}-?[0-9]*$"); + Regex::new(®ex).with_context(|| format!("failed to construct valid regex from {regex}")) +} \ No newline at end of file diff --git a/bin/core/src/helpers/stack/remote.rs b/bin/core/src/helpers/stack/remote.rs new file mode 100644 index 000000000..25a6cff51 --- /dev/null +++ b/bin/core/src/helpers/stack/remote.rs @@ -0,0 +1,117 @@ +use std::{fs, path::Path}; + +use anyhow::{anyhow, Context}; +use formatting::format_serror; +use monitor_client::entities::{ + stack::{ComposeContents, Stack}, + update::Log, + CloneArgs, +}; + +use crate::{ + config::core_config, + helpers::{git_token, random_string}, +}; + +/// Returns Result<(read paths, error paths, logs, short hash, commit message)> +pub async fn get_remote_compose_contents( + stack: &Stack, + // Collect any files which are missing in the repo. 
+ mut missing_files: Option<&mut Vec>, +) -> anyhow::Result<( + // Successful contents + Vec, + // error contents + Vec, + // logs + Vec, + // commit short hash + Option, + // commit message + Option, +)> { + let repo_path = + core_config().repo_directory.join(random_string(10)); + + let (logs, hash, message) = clone_remote_repo(&repo_path, stack) + .await + .context("failed to clone stack repo")?; + + let run_directory = repo_path.join(&stack.config.run_directory); + + let mut oks = Vec::new(); + let mut errs = Vec::new(); + + for path in stack.file_paths() { + let file_path = run_directory.join(path); + if !file_path.exists() { + if let Some(missing_files) = &mut missing_files { + missing_files.push(path.to_string()); + } + } + // If file does not exist, will show up in err case so the log is handled + match fs::read_to_string(&file_path).with_context(|| { + format!("failed to read file contents from {file_path:?}") + }) { + Ok(contents) => oks.push(ComposeContents { + path: path.to_string(), + contents, + }), + Err(e) => errs.push(ComposeContents { + path: path.to_string(), + contents: format_serror(&e.into()), + }), + } + } + + if repo_path.exists() { + if let Err(e) = std::fs::remove_dir_all(&repo_path) { + warn!("failed to remove stack repo directory | {e:?}") + } + } + + Ok((oks, errs, logs, hash, message)) +} + +/// Returns (logs, hash, message) +pub async fn clone_remote_repo( + repo_path: &Path, + stack: &Stack, +) -> anyhow::Result<(Vec, Option, Option)> { + let mut clone_args: CloneArgs = stack.into(); + + let config = core_config(); + + let access_token = match (&clone_args.account, &clone_args.provider) + { + (None, _) => None, + (Some(_), None) => { + return Err(anyhow!( + "Account is configured, but provider is empty" + )) + } + (Some(username), Some(provider)) => { + git_token(provider, username, |https| { + clone_args.https = https + }) + .await + .with_context( + || format!("Failed to get git token in call to db. Stopping run. 
| {provider} | {username}"), + )? + } + }; + + clone_args.destination = Some(repo_path.display().to_string()); + + git::clone( + clone_args, + &config.repo_directory, + access_token, + &[], + "", + None, + ) + .await + .context("failed to clone stack repo") + .map(|(a, b, c, _)| (a, b, c)) +} diff --git a/bin/core/src/helpers/stack/services.rs b/bin/core/src/helpers/stack/services.rs new file mode 100644 index 000000000..13d161796 --- /dev/null +++ b/bin/core/src/helpers/stack/services.rs @@ -0,0 +1,81 @@ +use anyhow::Context; +use monitor_client::entities::stack::{ + ComposeContents, ComposeFile, ComposeService, Stack, + StackServiceNames, +}; + +use crate::helpers::stack::remote::get_remote_compose_contents; + +/// Passing fresh will re-extract services from compose file, whether local or remote (repo) +pub async fn extract_services_from_stack( + stack: &Stack, + fresh: bool, +) -> anyhow::Result> { + if !fresh { + if let Some(services) = &stack.info.deployed_services { + return Ok(services.clone()); + } else { + return Ok(stack.info.latest_services.clone()); + } + } + + let compose_contents = if stack.config.file_contents.is_empty() { + let (contents, errors, _, _, _) = + get_remote_compose_contents(stack, None).await.context( + "failed to get remote compose files to extract services", + )?; + if !errors.is_empty() { + let mut e = anyhow::Error::msg("Trace root"); + for err in errors { + e = e.context(format!("{}: {}", err.path, err.contents)); + } + return Err( + e.context("Failed to read one or more remote compose files"), + ); + } + contents + } else { + vec![ComposeContents { + path: String::from("compose.yaml"), + contents: stack.config.file_contents.clone(), + }] + }; + + let mut res = Vec::new(); + for ComposeContents { path, contents } in &compose_contents { + extract_services_into_res( + &stack.project_name(true), + contents, + &mut res, + ) + .with_context(|| { + format!("failed to extract services from file at path: {path}") + })?; + } + + Ok(res) +} 
+ +pub fn extract_services_into_res( + project_name: &str, + compose_contents: &str, + res: &mut Vec, +) -> anyhow::Result<()> { + let compose = serde_yaml::from_str::(compose_contents) + .context("failed to parse service names from compose contents")?; + + let services = compose.services.into_iter().map( + |(service_name, ComposeService { container_name, .. })| { + StackServiceNames { + container_name: container_name.unwrap_or_else(|| { + format!("{project_name}-{service_name}") + }), + service_name, + } + }, + ); + + res.extend(services); + + Ok(()) +} diff --git a/bin/core/src/helpers/sync/deploy.rs b/bin/core/src/helpers/sync/deploy.rs new file mode 100644 index 000000000..60fc5c03e --- /dev/null +++ b/bin/core/src/helpers/sync/deploy.rs @@ -0,0 +1,814 @@ +use std::{collections::HashMap, time::Duration}; + +use anyhow::{anyhow, Context}; +use formatting::{bold, colored, format_serror, muted, Color}; +use futures::future::join_all; +use monitor_client::{ + api::{ + execute::{Deploy, DeployStack}, + read::ListBuildVersions, + }, + entities::{ + deployment::{ + Deployment, DeploymentConfig, DeploymentImage, DeploymentState, + PartialDeploymentConfig, + }, + stack::{PartialStackConfig, Stack, StackConfig, StackState}, + sync::SyncDeployUpdate, + toml::ResourceToml, + update::{Log, ResourceTarget}, + user::sync_user, + }, +}; +use resolver_api::Resolve; + +use crate::{ + api::execute::ExecuteRequest, + config::core_config, + helpers::{ + random_string, stack::remote::clone_remote_repo, + update::init_execution_update, + }, + state::{deployment_status_cache, stack_status_cache, State}, +}; + +use super::resource::{AllResourcesById, ResourceSync}; + +/// All entries in here are due to be deployed, +/// after the given dependencies, +/// with the given reason. 
+pub type ToDeployCache = + Vec<(ResourceTarget, String, Vec)>; + +#[derive(Clone, Copy)] +pub struct SyncDeployParams<'a> { + pub deployments: &'a [ResourceToml], + // Names to deployments + pub deployment_map: &'a HashMap, + pub stacks: &'a [ResourceToml], + // Names to stacks + pub stack_map: &'a HashMap, + pub all_resources: &'a AllResourcesById, +} + +pub async fn deploy_from_cache( + mut to_deploy: ToDeployCache, + logs: &mut Vec, +) { + let mut log = format!( + "{}: running executions to sync deployment / stack state", + muted("INFO") + ); + let mut round = 1; + let user = sync_user(); + + while !to_deploy.is_empty() { + // Collect all waiting deployments without waiting dependencies. + let good_to_deploy = to_deploy + .iter() + .filter(|(_, _, after)| { + to_deploy + .iter() + .all(|(target, _, _)| !after.contains(target)) + }) + // The target / reason need the be cloned out to to_deploy is not borrowed from. + // to_deploy will be mutably accessed later. + .map(|(target, reason, _)| (target.clone(), reason.clone())) + .collect::>(); + + // Deploy the ones ready for deployment + let res = join_all(good_to_deploy.iter().map( + |(target, reason)| async move { + let res = async { + match &target { + ResourceTarget::Deployment(name) => { + let req = ExecuteRequest::Deploy(Deploy { + deployment: name.to_string(), + stop_signal: None, + stop_time: None, + }); + + let update = init_execution_update(&req, user).await?; + let ExecuteRequest::Deploy(req) = req else { + unreachable!() + }; + State.resolve(req, (user.to_owned(), update)).await + } + ResourceTarget::Stack(name) => { + let req = ExecuteRequest::DeployStack(DeployStack { + stack: name.to_string(), + stop_time: None, + }); + + let update = init_execution_update(&req, user).await?; + let ExecuteRequest::DeployStack(req) = req else { + unreachable!() + }; + State.resolve(req, (user.to_owned(), update)).await + } + _ => unreachable!(), + } + } + .await; + (target, reason, res) + }, + )) + .await; + + let mut 
has_error = false; + + // Log results of deploy + for (target, reason, res) in res { + let (resource, name) = target.extract_variant_id(); + if let Err(e) = res { + has_error = true; + log.push_str(&format!( + "\n{}: failed to deploy {resource} '{}' in round {} | {e:#}", + colored("ERROR", Color::Red), + bold(name), + bold(round) + )); + } else { + log.push_str(&format!( + "\n{}: deployed {resource} '{}' in round {} with reason: {reason}", + muted("INFO"), + bold(name), + bold(round) + )); + } + } + + // Early exit if any deploy has errors + if has_error { + log.push_str(&format!( + "\n{}: exited in round {} {}", + muted("INFO"), + bold(round), + colored("with errors", Color::Red) + )); + logs.push(Log::error("Sync Deploy", log)); + return; + } + + // Remove the deployed ones from 'to_deploy' + to_deploy + .retain(|(target, _, _)| !good_to_deploy.contains_key(target)); + + // If there must be another round, these are dependent on the first round. + // Sleep for 1s to allow for first round to startup + if !to_deploy.is_empty() { + // Increment the round + round += 1; + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + + log.push_str(&format!( + "\n{}: finished after {} round{}", + muted("INFO"), + bold(round), + (round > 1).then_some("s").unwrap_or_default() + )); + + logs.push(Log::simple("Sync Deploy", log)); +} + +pub async fn get_updates_for_view( + params: SyncDeployParams<'_>, +) -> Option { + let inner = async { + let mut update = SyncDeployUpdate { + to_deploy: 0, + log: String::from("Deploy Updates\n-------------------\n"), + }; + let mut lines = Vec::::new(); + for (target, reason, after) in build_deploy_cache(params).await? { + update.to_deploy += 1; + let mut line = format!( + "{}: {}. 
reason: {reason}", + colored("Deploy", Color::Green), + bold(format!("{target:?}")), + ); + if !after.is_empty() { + line.push_str(&format!( + "\n{}: {}", + colored("After", Color::Blue), + after + .iter() + .map(|target| format!("{target:?}")) + .collect::>() + .join(", ") + )) + } + lines.push(line); + } + + update.log.push_str(&lines.join("\n-------------------\n")); + + anyhow::Ok((update.to_deploy > 0).then_some(update)) + }; + match inner.await { + Ok(res) => res, + Err(e) => Some(SyncDeployUpdate { + to_deploy: 0, + log: format_serror( + &e.context("failed to get deploy updates for view").into(), + ), + }), + } +} + +/// Entries are keyed by ResourceTargets wrapping "name" instead of "id". +/// If entry is None, it is confirmed no-deploy. +/// If it is Some, it is confirmed deploy with provided reason and dependencies. +/// +/// Used to build up resources to deploy earlier in the sync. +type ToDeployCacheInner = + HashMap)>>; + +/// Maps build ids to latest versions as string. +type BuildVersionCache = HashMap; + +pub async fn build_deploy_cache( + params: SyncDeployParams<'_>, +) -> anyhow::Result { + let mut cache = ToDeployCacheInner::new(); + let mut build_version_cache = BuildVersionCache::new(); + + // Just ensure they are all in the cache by looping through them all + for deployment in params.deployments { + build_cache_for_deployment( + deployment, + params, + &mut cache, + &mut build_version_cache, + ) + .await?; + } + for stack in params.stacks { + build_cache_for_stack( + stack, + params, + &mut cache, + &mut build_version_cache, + ) + .await?; + } + + let cache = cache + .into_iter() + .filter_map(|(target, entry)| { + let (reason, after) = entry?; + Some((target, (reason, after))) + }) + .collect::>(); + + // Have to clone here to use it after 'into_iter' below. + // All entries in cache at this point are deploying. 
+ let clone = cache.clone(); + + Ok( + cache + .into_iter() + .map(|(target, (reason, mut after))| { + // Only keep targets which are deploying. + after.retain(|target| clone.contains_key(target)); + (target, reason, after) + }) + .collect(), + ) +} + +type BuildRes<'a> = std::pin::Pin< + Box< + dyn std::future::Future> + Send + 'a, + >, +>; + +fn build_cache_for_deployment<'a>( + deployment: &'a ResourceToml, + SyncDeployParams { + deployments, + deployment_map, + stacks, + stack_map, + all_resources, + }: SyncDeployParams<'a>, + cache: &'a mut ToDeployCacheInner, + build_version_cache: &'a mut BuildVersionCache, +) -> BuildRes<'a> { + Box::pin(async move { + let target = ResourceTarget::Deployment(deployment.name.clone()); + + // First check existing, and continue if already handled. + if cache.contains_key(&target) { + return Ok(()); + } + + // Check if deployment doesn't have "deploy" enabled. + if !deployment.deploy { + cache.insert(target, None); + return Ok(()); + } + + let after = get_after_as_resource_targets( + &deployment.name, + &deployment.after, + deployment_map, + deployments, + stack_map, + stacks, + )?; + + let Some(original) = deployment_map.get(&deployment.name) else { + // This block is the None case, deployment is not created, should definitely deploy + cache.insert( + target, + Some((String::from("deploy on creation"), after)), + ); + return Ok(()); + }; + + let status = &deployment_status_cache() + .get_or_insert_default(&original.id) + .await + .curr; + let state = status.state; + + match state { + DeploymentState::Unknown => { + // Can't do anything with unknown state + cache.insert(target, None); + return Ok(()); + } + DeploymentState::Running => { + // Here can diff the changes, to see if they merit a redeploy. + + // First merge toml resource config (partial) onto default resource config. + // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
+ let config: DeploymentConfig = + deployment.config.clone().into(); + let mut config: PartialDeploymentConfig = config.into(); + + Deployment::validate_partial_config(&mut config); + + let mut diff = Deployment::get_diff( + original.config.clone(), + config, + all_resources, + )?; + + Deployment::validate_diff(&mut diff); + // Needs to only check config fields that affect docker run + let changed = diff.server_id.is_some() + || diff.image.is_some() + || diff.image_registry_account.is_some() + || diff.skip_secret_interp.is_some() + || diff.network.is_some() + || diff.restart.is_some() + || diff.command.is_some() + || diff.extra_args.is_some() + || diff.ports.is_some() + || diff.volumes.is_some() + || diff.environment.is_some() + || diff.labels.is_some(); + if changed { + cache.insert( + target, + Some(( + String::from("deployment config has changed"), + after, + )), + ); + return Ok(()); + } + } + // All other cases will require Deploy to enter Running state. + _ => { + cache.insert( + target, + Some(( + format!( + "deployment has {} state", + colored(state, Color::Red) + ), + after, + )), + ); + return Ok(()); + } + }; + + // We know the config hasn't changed at this point, but still need + // to check if attached build has updated. 
Can check original for this (know it hasn't changed) + if let DeploymentImage::Build { build_id, version } = + &original.config.image + { + // check if version is none, ie use latest build + if !version.is_none() { + let deployed_version = status + .container + .as_ref() + .and_then(|c| c.image.split(':').last()) + .unwrap_or("0.0.0"); + match build_version_cache.get(build_id) { + Some(version) if deployed_version != version => { + cache.insert( + target, + Some(( + format!("build has new version: {version}"), + after, + )), + ); + return Ok(()); + } + // Build version is the same, still need to check 'after' + Some(_) => {} + None => { + let Some(version) = State + .resolve( + ListBuildVersions { + build: build_id.to_string(), + limit: Some(1), + ..Default::default() + }, + sync_user().to_owned(), + ) + .await + .context("failed to get build versions")? + .pop() + else { + // The build has never been built. + // Skip deploy regardless of 'after' (it can't be deployed) + // Not sure how this would be reached on Running deployment... + cache.insert(target, None); + return Ok(()); + }; + let version = version.version.to_string(); + build_version_cache + .insert(build_id.to_string(), version.clone()); + if deployed_version != version { + // Same as 'Some' case out of the cache + cache.insert( + target, + Some(( + format!("build has new version: {version}"), + after, + )), + ); + return Ok(()); + } + } + } + } + }; + + // Check 'after' to see if they deploy. 
+ insert_target_using_after_list( + target, + after, + SyncDeployParams { + deployments, + deployment_map, + stacks, + stack_map, + all_resources, + }, + cache, + build_version_cache, + ) + .await + }) +} + +fn build_cache_for_stack<'a>( + stack: &'a ResourceToml, + SyncDeployParams { + deployments, + deployment_map, + stacks, + stack_map, + all_resources, + }: SyncDeployParams<'a>, + cache: &'a mut ToDeployCacheInner, + build_version_cache: &'a mut BuildVersionCache, +) -> BuildRes<'a> { + Box::pin(async move { + let target = ResourceTarget::Stack(stack.name.clone()); + + // First check existing, and continue if already handled. + if cache.contains_key(&target) { + return Ok(()); + } + + // Check if stack doesn't have "deploy" enabled. + if !stack.deploy { + cache.insert(target, None); + return Ok(()); + } + + let after = get_after_as_resource_targets( + &stack.name, + &stack.after, + deployment_map, + deployments, + stack_map, + stacks, + )?; + + let Some(original) = stack_map.get(&stack.name) else { + // This block is the None case, deployment is not created, should definitely deploy + cache.insert( + target, + Some((String::from("deploy on creation"), after)), + ); + return Ok(()); + }; + + let status = &stack_status_cache() + .get_or_insert_default(&original.id) + .await + .curr; + let state = status.state; + + match state { + StackState::Unknown => { + // Can't do anything with unknown state + cache.insert(target, None); + return Ok(()); + } + StackState::Running => { + // Here can diff the changes, to see if they merit a redeploy. + + // First merge toml resource config (partial) onto default resource config. + // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
+ let config: StackConfig = stack.config.clone().into(); + let mut config: PartialStackConfig = config.into(); + + Stack::validate_partial_config(&mut config); + + let mut diff = Stack::get_diff( + original.config.clone(), + config, + all_resources, + )?; + + Stack::validate_diff(&mut diff); + // Needs to only check config fields that affect docker compose command + let changed = diff.server_id.is_some() + || diff.project_name.is_some() + || diff.run_directory.is_some() + || diff.file_paths.is_some() + || diff.file_contents.is_some() + || diff.skip_secret_interp.is_some() + || diff.extra_args.is_some() + || diff.environment.is_some() + || diff.env_file_path.is_some() + || diff.repo.is_some() + || diff.branch.is_some() + || diff.commit.is_some(); + if changed { + cache.insert( + target, + Some((String::from("stack config has changed"), after)), + ); + return Ok(()); + } + } + // All other cases will require Deploy to enter Running state. + _ => { + cache.insert( + target, + Some(( + format!("stack has {} state", colored(state, Color::Red)), + after, + )), + ); + return Ok(()); + } + }; + + // We know the config hasn't changed at this point, but still need + // to check if its a repo based stack, and the hash has updated. + // Can use 'original' for this (config hasn't changed) + if stack.latest_hash { + if let Some(deployed_hash) = &original.info.deployed_hash { + let repo_path = + core_config().repo_directory.join(random_string(10)); + let (_, hash, _) = clone_remote_repo(&repo_path, original) + .await + .context("failed to get latest hash for repo based stack") + .with_context(|| { + format!( + "Stack {} {}", + bold(&stack.name), + colored("has errors", Color::Red) + ) + })?; + if let Some(hash) = hash { + if &hash != deployed_hash { + cache.insert( + target, + Some(( + format!( + "outdated hash. 
deployed: {} -> latest: {}", + colored(deployed_hash, Color::Red), + colored(hash, Color::Green) + ), + after, + )), + ); + return Ok(()); + } + } + } + } + + // Check 'after' to see if they deploy. + insert_target_using_after_list( + target, + after, + SyncDeployParams { + deployments, + deployment_map, + stacks, + stack_map, + all_resources, + }, + cache, + build_version_cache, + ) + .await + }) +} + +async fn insert_target_using_after_list<'a>( + target: ResourceTarget, + after: Vec, + SyncDeployParams { + deployments, + deployment_map, + stacks, + stack_map, + all_resources, + }: SyncDeployParams<'a>, + cache: &'a mut ToDeployCacheInner, + build_version_cache: &'a mut BuildVersionCache, +) -> anyhow::Result<()> { + for parent in &after { + match cache.get(parent) { + Some(Some(_)) => { + // a parent will deploy + let (variant, name) = parent.extract_variant_id(); + cache.insert( + target.to_owned(), + Some(( + format!( + "{variant} parent dependency '{}' is deploying", + bold(name) + ), + after, + )), + ); + return Ok(()); + } + // The parent will not deploy, do nothing here. + Some(None) => {} + None => { + match parent { + ResourceTarget::Deployment(name) => { + let Some(parent_deployment) = + deployments.iter().find(|d| &d.name == name) + else { + // The parent is not in the sync, so won't be deploying + // Note that cross-sync deploy dependencies are not currently supported. + continue; + }; + // Recurse to add the parent to cache, then check again. 
+ build_cache_for_deployment( + parent_deployment, + SyncDeployParams { + deployments, + deployment_map, + stacks, + stack_map, + all_resources, + }, + cache, + build_version_cache, + ) + .await?; + match cache.get(parent) { + Some(Some(_)) => { + // Same as the 'Some' case above + let (variant, name) = parent.extract_variant_id(); + cache.insert( + target.to_owned(), + Some(( + format!( + "{variant} parent dependency '{}' is deploying", + bold(name) + ), + after, + )), + ); + return Ok(()); + }, + // The parent will not deploy, do nothing here. + Some(None) => {}, + None => return Err(anyhow!("Did not find parent in cache after build recursion. This should not happen.")) + } + } + ResourceTarget::Stack(name) => { + let Some(parent_stack) = + stacks.iter().find(|d| &d.name == name) + else { + // The parent is not in the sync, so won't be deploying + // Note that cross-sync deploy dependencies are not currently supported. + continue; + }; + // Recurse to add the parent to cache, then check again. + build_cache_for_stack( + parent_stack, + SyncDeployParams { + deployments, + deployment_map, + stacks, + stack_map, + all_resources, + }, + cache, + build_version_cache, + ) + .await?; + match cache.get(parent) { + Some(Some(_)) => { + // Same as the 'Some' case above + let (variant, name) = parent.extract_variant_id(); + cache.insert( + target.to_owned(), + Some(( + format!( + "{variant} parent dependency '{}' is deploying", + bold(name) + ), + after, + )), + ); + return Ok(()); + }, + // The parent will not deploy, do nothing here. + Some(None) => {}, + None => return Err(anyhow!("Did not find parent in cache after build recursion. 
This should not happen.")) + } + } + _ => unreachable!(), + } + } + } + } + + // If it has reached here, its not deploying + cache.insert(target, None); + Ok(()) +} + +fn get_after_as_resource_targets( + resource_name: &str, + after: &[String], + // Names to deployments + deployment_map: &HashMap, + deployments: &[ResourceToml], + // Names to stacks + stack_map: &HashMap, + stacks: &[ResourceToml], +) -> anyhow::Result> { + after + .iter() + .map(|name| match deployment_map.get(name) { + Some(_) => Ok(ResourceTarget::Deployment(name.clone())), + None => { + if deployments + .iter() + .any(|deployment| deployment.name.as_str() == resource_name) + { + Ok(ResourceTarget::Deployment(name.clone())) + } else { + match stack_map.get(name) { + Some(_) => Ok(ResourceTarget::Stack(name.clone())), + None => { + if stacks + .iter() + .any(|stack| stack.name.as_str() == resource_name) + { + Ok(ResourceTarget::Stack(name.clone())) + } else { + Err(anyhow!("failed to match deploy dependency in 'after' list | resource: {resource_name} | dependency: {name}")) + } + } + } + } + } + }) + .collect() +} diff --git a/bin/core/src/helpers/sync/deployment.rs b/bin/core/src/helpers/sync/deployment.rs deleted file mode 100644 index a4ece2a19..000000000 --- a/bin/core/src/helpers/sync/deployment.rs +++ /dev/null @@ -1,858 +0,0 @@ -use std::{collections::HashMap, time::Duration}; - -use anyhow::Context; -use formatting::{bold, colored, muted, Color}; -use futures::future::join_all; -use monitor_client::{ - api::{execute::Deploy, read::ListBuildVersions}, - entities::{ - deployment::{ - Deployment, DeploymentConfig, DeploymentImage, DeploymentState, - PartialDeploymentConfig, - }, - sync::SyncUpdate, - tag::Tag, - toml::ResourceToml, - update::{Log, ResourceTarget}, - user::sync_user, - }, -}; -use mungos::find::find_collect; -use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff}; -use resolver_api::Resolve; - -use crate::{ - api::execute::ExecuteRequest, - 
helpers::update::init_execution_update, - resource::MonitorResource, - state::{deployment_status_cache, State}, -}; - -use super::resource::{ - run_update_description, run_update_tags, AllResourcesById, - ResourceSync, -}; - -pub type ToUpdate = Vec; -pub type ToCreate = Vec>; -/// Vec of resource names -pub type ToDelete = Vec; - -type UpdatesResult = (ToCreate, ToUpdate, ToDelete); - -pub struct ToUpdateItem { - pub id: String, - pub resource: ResourceToml, - pub update_description: bool, - pub update_tags: bool, - pub deploy: bool, -} - -/// Turns all the diffs into a readable string -pub async fn get_updates_for_view( - resources: Vec>, - delete: bool, - all_resources: &AllResourcesById, - id_to_tags: &HashMap, -) -> anyhow::Result> { - let map = find_collect(Deployment::coll().await, None, None) - .await - .context("failed to get deployments from db")? - .into_iter() - .map(|r| (r.name.clone(), r)) - .collect::>(); - - let mut update = SyncUpdate { - log: format!("{} Updates", Deployment::resource_type()), - ..Default::default() - }; - - let mut to_delete = Vec::::new(); - if delete { - for resource in map.values() { - if !resources.iter().any(|r| r.name == resource.name) { - update.to_delete += 1; - to_delete.push(resource.name.clone()) - } - } - } - - let mut to_deploy_cache = HashMap::::new(); - let mut to_deploy_build_cache = HashMap::::new(); - - for mut resource in resources.clone() { - match map.get(&resource.name) { - Some(original) => { - // First merge toml resource config (partial) onto default resource config. - // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
- let config: DeploymentConfig = resource.config.into(); - resource.config = config.into(); - - Deployment::validate_partial_config(&mut resource.config); - - let mut diff = Deployment::get_diff( - original.config.clone(), - resource.config, - all_resources, - )?; - - Deployment::validate_diff(&mut diff); - - let original_tags = original - .tags - .iter() - .filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone())) - .collect::>(); - - let (to_deploy, state, reason) = extract_to_deploy_and_state( - all_resources, - &map, - &resources, - resource.name.clone(), - &mut to_deploy_cache, - &mut to_deploy_build_cache, - ) - .await?; - - // Only proceed if there are any fields to update, - // or a change to tags / description - if diff.is_none() - && !to_deploy - && resource.description == original.description - && resource.tags == original_tags - { - if state == DeploymentState::Unknown { - update.log.push_str(&format!( - "\n\n{}: {}: '{}'\nDeployment sync actions could not be computed due to Unknown deployment state\n-------------------", - colored("ERROR", Color::Red), - Deployment::resource_type(), - bold(&resource.name) - )); - } - continue; - } - - update.to_update += 1; - - update.log.push_str(&format!( - "\n\n{}: {}: '{}'\n-------------------", - colored("UPDATE", Color::Blue), - Deployment::resource_type(), - bold(&resource.name) - )); - - let mut lines = Vec::::new(); - if resource.description != original.description { - lines.push(format!( - "{}: 'description'\n{}: {}\n{}: {}", - muted("field"), - muted("from"), - colored(&original.description, Color::Red), - muted("to"), - colored(&resource.description, Color::Green) - )); - } - if resource.tags != original_tags { - let from = - colored(&format!("{:?}", original_tags), Color::Red); - let to = - colored(&format!("{:?}", resource.tags), Color::Green); - lines.push(format!( - "{}: 'tags'\n{}: {from}\n{}: {to}", - muted("field"), - muted("from"), - muted("to"), - )); - } - 
lines.extend(diff.iter_field_diffs().map( - |FieldDiff { field, from, to }| { - format!( - "{}: '{field}'\n{}: {}\n{}: {}", - muted("field"), - muted("from"), - colored(from, Color::Red), - muted("to"), - colored(to, Color::Green) - ) - }, - )); - if state == DeploymentState::Unknown { - lines.push(format!( - "{}: Deployment sync actions {} due to Unknown deployment state", - colored("ERROR", Color::Red), - bold("could not be computed") - )); - } else if to_deploy { - let mut line = if state == DeploymentState::Running { - format!( - "{}: {reason}, {}", - muted("deploy"), - bold("sync will trigger deploy") - ) - } else { - format!( - "{}: deployment is currently in {} state, {}", - muted("deploy"), - colored(&state.to_string(), Color::Red), - bold("sync will trigger deploy") - ) - }; - if !resource.after.is_empty() { - line.push_str(&format!( - "\n{}: {:?}", - muted("deploy after"), - resource.after - )); - } - lines.push(line); - } - update.log.push('\n'); - update.log.push_str(&lines.join("\n-------------------\n")); - } - None => { - update.to_create += 1; - let mut lines = vec![ - format!( - "{}: {}", - muted("description"), - resource.description, - ), - format!("{}: {:?}", muted("tags"), resource.tags,), - format!( - "{}: {}", - muted("config"), - serde_json::to_string_pretty(&resource.config) - .context("failed to serialize config to json")? 
- ), - ]; - if resource.deploy { - lines.push(format!( - "{}: {}", - muted("will deploy"), - colored("true", Color::Green) - )); - if !resource.after.is_empty() { - lines.push(format!( - "{}: {:?}", - muted("deploy after"), - resource.after - )); - } - } - update.log.push_str(&format!( - "\n\n{}: {}: {}\n{}", - colored("CREATE", Color::Green), - Deployment::resource_type(), - bold(&resource.name), - lines.join("\n") - )) - } - } - } - - for name in to_delete { - update.log.push_str(&format!( - "\n\n{}: {}: '{}'\n-------------------", - colored("DELETE", Color::Red), - Deployment::resource_type(), - bold(&name) - )); - } - - let any_change = update.to_create > 0 - || update.to_update > 0 - || update.to_delete > 0; - - Ok(any_change.then_some(update)) -} - -/// Gets all the resources to update. For use in sync execution. -pub async fn get_updates_for_execution( - resources: Vec>, - delete: bool, - all_resources: &AllResourcesById, - id_to_tags: &HashMap, -) -> anyhow::Result { - let map = find_collect(Deployment::coll().await, None, None) - .await - .context("failed to get deployments from db")? - .into_iter() - .map(|r| (r.name.clone(), r)) - .collect::>(); - - let mut to_create = ToCreate::new(); - let mut to_update = ToUpdate::new(); - let mut to_delete = ToDelete::new(); - - if delete { - for resource in map.values() { - if !resources.iter().any(|r| r.name == resource.name) { - to_delete.push(resource.name.clone()); - } - } - } - - let mut to_deploy_cache = HashMap::::new(); - let mut to_deploy_build_cache = HashMap::::new(); - - for mut resource in resources.clone() { - match map.get(&resource.name) { - Some(original) => { - // First merge toml resource config (partial) onto default resource config. - // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
- let config: DeploymentConfig = resource.config.into(); - resource.config = config.into(); - - Deployment::validate_partial_config(&mut resource.config); - - let mut diff = Deployment::get_diff( - original.config.clone(), - resource.config, - all_resources, - )?; - - Deployment::validate_diff(&mut diff); - - let original_tags = original - .tags - .iter() - .filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone())) - .collect::>(); - - let (to_deploy, _state, _reason) = - extract_to_deploy_and_state( - all_resources, - &map, - &resources, - resource.name.clone(), - &mut to_deploy_cache, - &mut to_deploy_build_cache, - ) - .await?; - - // Only proceed if there are any fields to update, - // or a change to tags / description - if diff.is_none() - && !to_deploy - && resource.description == original.description - && resource.tags == original_tags - { - continue; - } - - // Minimizes updates through diffing. - resource.config = diff.into(); - - let update = ToUpdateItem { - id: original.id.clone(), - update_description: resource.description - != original.description, - update_tags: resource.tags != original_tags, - resource, - deploy: to_deploy, - }; - - to_update.push(update); - } - None => to_create.push(resource), - } - } - - Ok((to_create, to_update, to_delete)) -} - -type Res<'a> = std::pin::Pin< - Box< - dyn std::future::Future< - Output = anyhow::Result<(bool, DeploymentState, String)>, - > + Send - + 'a, - >, ->; - -fn extract_to_deploy_and_state<'a>( - all_resources: &'a AllResourcesById, - map: &'a HashMap, - resources: &'a [ResourceToml], - name: String, - // name to 'to_deploy' - cache: &'a mut HashMap, - // build id to latest built version string - build_cache: &'a mut HashMap, -) -> Res<'a> { - Box::pin(async move { - let mut reason = String::new(); - let Some(deployment) = resources.iter().find(|r| r.name == name) - else { - // this case should be unreachable, the names come off of a loop over resources - cache.insert(name, false); - return Ok((false, 
DeploymentState::Unknown, reason)); - }; - if deployment.deploy { - let Some(original) = map.get(&name) else { - // not created, definitely deploy - cache.insert(name, true); - // Don't need reason here, will be populated automatically - return Ok((true, DeploymentState::NotDeployed, reason)); - }; - - // First merge toml resource config (partial) onto default resource config. - // Makes sure things that aren't defined in toml (come through as None) actually get removed. - let config: DeploymentConfig = deployment.config.clone().into(); - let mut config: PartialDeploymentConfig = config.into(); - - Deployment::validate_partial_config(&mut config); - - let mut diff = Deployment::get_diff( - original.config.clone(), - config, - all_resources, - )?; - - Deployment::validate_diff(&mut diff); - - let status = &deployment_status_cache() - .get_or_insert_default(&original.id) - .await - .curr; - let state = status.state; - - let mut to_deploy = match state { - DeploymentState::Unknown => false, - DeploymentState::Running => { - // Needs to only check config fields that affect docker run - let changed = diff.server_id.is_some() - || diff.image.is_some() - || diff.image_registry_account.is_some() - || diff.skip_secret_interp.is_some() - || diff.network.is_some() - || diff.restart.is_some() - || diff.command.is_some() - || diff.extra_args.is_some() - || diff.ports.is_some() - || diff.volumes.is_some() - || diff.environment.is_some() - || diff.labels.is_some(); - if changed { - reason = String::from("deployment config has changed") - } - changed - } - // All other cases will require Deploy to enter Running state. - // Don't need reason here as this case is handled outside, using returned state. - _ => true, - }; - - // Check if build attached, version latest, and there is a new build. - if !to_deploy { - // only need to check original, if diff.image was Some, to_deploy would be true. 
- if let DeploymentImage::Build { build_id, version } = - &original.config.image - { - // check if version is none, ie use latest build - if version.is_none() { - let deployed_version = status - .container - .as_ref() - .and_then(|c| c.image.split(':').last()) - .unwrap_or("0.0.0"); - match build_cache.get(build_id) { - Some(version) if deployed_version != version => { - to_deploy = true; - reason = format!( - "attached build has new version ({version})" - ); - } - Some(_) => {} - None => { - let Some(version) = State - .resolve( - ListBuildVersions { - build: build_id.to_string(), - limit: Some(1), - ..Default::default() - }, - sync_user().to_owned(), - ) - .await - .context("failed to get build versions")? - .pop() - else { - // this case shouldn't ever happen, how would deployment be deployed if build was never built? - return Ok(( - false, - DeploymentState::NotDeployed, - reason, - )); - }; - let version = version.version.to_string(); - build_cache - .insert(build_id.to_string(), version.clone()); - if deployed_version != version { - to_deploy = true; - reason = format!( - "attached build has new version ({version})" - ); - } - } - }; - } - } - } - - // Still need to check 'after' if they need deploy - if !to_deploy { - for name in &deployment.after { - match cache.get(name) { - Some(will_deploy) if *will_deploy => { - to_deploy = true; - reason = format!( - "parent dependency '{}' is deploying", - bold(name) - ); - break; - } - Some(_) => {} - None => { - let (will_deploy, _, _) = extract_to_deploy_and_state( - all_resources, - map, - resources, - name.to_string(), - cache, - build_cache, - ) - .await?; - if will_deploy { - to_deploy = true; - reason = format!( - "parent dependency '{}' is deploying", - bold(name) - ); - break; - } - } - } - } - } - - cache.insert(name, to_deploy); - Ok((to_deploy, state, reason)) - } else { - // The state in this case doesn't matter and won't be read (as long as it isn't 'Unknown' which will log in all cases) - 
cache.insert(name, false); - Ok((false, DeploymentState::NotDeployed, reason)) - } - }) -} - -pub async fn run_updates( - to_create: ToCreate, - to_update: ToUpdate, - to_delete: ToDelete, -) -> Option> { - if to_create.is_empty() - && to_update.is_empty() - && to_delete.is_empty() - { - return None; - } - - let mut has_error = false; - let mut log = String::new(); - - // Collect all the deployment names that need to be deployed - // and their 'after' dependencies - let mut to_deploy = Vec::<(String, Vec)>::new(); - - for resource in to_create { - let name = resource.name.clone(); - let tags = resource.tags.clone(); - let description = resource.description.clone(); - let id = match crate::resource::create::( - &resource.name, - resource.config, - sync_user(), - ) - .await - { - Ok(resource) => resource.id, - Err(e) => { - has_error = true; - log.push_str(&format!( - "\n{}: failed to create {} '{}' | {e:#}", - colored("ERROR", Color::Red), - Deployment::resource_type(), - bold(&name) - )); - continue; - } - }; - run_update_tags::( - id.clone(), - &name, - tags, - &mut log, - &mut has_error, - ) - .await; - run_update_description::( - id, - &name, - description, - &mut log, - &mut has_error, - ) - .await; - log.push_str(&format!( - "\n{}: {} {} '{}'", - muted("INFO"), - colored("created", Color::Green), - Deployment::resource_type(), - bold(&name) - )); - if resource.deploy { - to_deploy.push((resource.name, resource.after)); - } - } - - for ToUpdateItem { - id, - resource, - update_description, - update_tags, - deploy, - } in to_update - { - // Update resource - let name = resource.name.clone(); - let tags = resource.tags.clone(); - let description = resource.description.clone(); - - if update_description { - run_update_description::( - id.clone(), - &name, - description, - &mut log, - &mut has_error, - ) - .await; - } - - if update_tags { - run_update_tags::( - id.clone(), - &name, - tags, - &mut log, - &mut has_error, - ) - .await; - } - - let mut 
config_update_error = false; - if !resource.config.is_none() { - if let Err(e) = crate::resource::update::( - &id, - resource.config, - sync_user(), - ) - .await - { - has_error = true; - config_update_error = true; - log.push_str(&format!( - "\n{}: failed to update config on {} '{}' | {e:#}", - colored("ERROR", Color::Red), - Deployment::resource_type(), - bold(&name), - )) - } else { - log.push_str(&format!( - "\n{}: {} {} '{}' configuration", - muted("INFO"), - colored("updated", Color::Blue), - Deployment::resource_type(), - bold(&name) - )); - } - } - - if !config_update_error && deploy { - to_deploy.push((resource.name, resource.after)); - } - } - - for resource in to_delete { - if let Err(e) = - crate::resource::delete::(&resource, sync_user()) - .await - { - has_error = true; - log.push_str(&format!( - "\n{}: failed to delete {} '{}' | {e:#}", - colored("ERROR", Color::Red), - Deployment::resource_type(), - bold(&resource), - )) - } else { - log.push_str(&format!( - "\n{}: {} {} '{}'", - muted("INFO"), - colored("deleted", Color::Red), - Deployment::resource_type(), - bold(&resource) - )); - } - } - - let mut logs = Vec::with_capacity(1); - - let stage = format!("Update {}s", Deployment::resource_type()); - if has_error { - let log = format!( - "running updates on {}s{log}", - Deployment::resource_type() - ); - logs.push(Log::error(&stage, log)); - return Some(logs); - } else if !log.is_empty() { - let log = format!( - "running updates on {}s{log}", - Deployment::resource_type() - ); - logs.push(Log::simple(&stage, log)); - } - - if to_deploy.is_empty() { - return Some(logs); - } - - let mut log = format!( - "{}: running executions to sync deployment state", - muted("INFO") - ); - let mut round = 1; - - while !to_deploy.is_empty() { - // Collect all waiting deployments without waiting dependencies. 
- let good_to_deploy = to_deploy - .iter() - .filter(|(_, after)| { - to_deploy.iter().all(|(name, _)| !after.contains(name)) - }) - .map(|(name, _)| name.clone()) - .collect::>(); - - // Deploy the ones ready for deployment - let res = - join_all(good_to_deploy.iter().map(|name| async move { - let res = async { - let req = ExecuteRequest::Deploy(Deploy { - deployment: name.to_string(), - stop_signal: None, - stop_time: None, - }); - let user = sync_user(); - let update = init_execution_update(&req, user).await?; - let ExecuteRequest::Deploy(req) = req else { - unreachable!() - }; - State.resolve(req, (user.to_owned(), update)).await - } - .await; - (name, res) - })) - .await; - - // Log results of deploy - for (name, res) in res { - if let Err(e) = res { - has_error = true; - log.push_str(&format!( - "\n{}: failed to deploy '{}' in round {} | {e:#}", - colored("ERROR", Color::Red), - bold(name), - bold(round) - )); - } else { - log.push_str(&format!( - "\n{}: deployed '{}' in round {}", - muted("INFO"), - bold(name), - bold(round) - )); - } - } - - // Early exit if any deploy has errors - if has_error { - log.push_str(&format!( - "\n{}: exited in round {} {}", - muted("INFO"), - bold(round), - colored("with errors", Color::Red) - )); - logs.push(Log::error("Sync Deployment State", log)); - return Some(logs); - } - - // Remove the deployed ones from 'to_deploy' - to_deploy.retain(|(name, _)| !good_to_deploy.contains(name)); - - // If there must be another round, these are dependent on the first round. 
- // Sleep for 1s to allow for first round to startup - if !to_deploy.is_empty() { - // Increment the round - round += 1; - tokio::time::sleep(Duration::from_secs(1)).await; - } - } - - log.push_str(&format!( - "\n{}: finished after {} round{}", - muted("INFO"), - bold(round), - (round > 1).then_some("s").unwrap_or_default() - )); - - logs.push(Log::simple("Sync Deployment State", log)); - - Some(logs) -} - -impl ResourceSync for Deployment { - fn resource_target(id: String) -> ResourceTarget { - ResourceTarget::Deployment(id) - } - - fn get_diff( - mut original: Self::Config, - update: Self::PartialConfig, - resources: &AllResourcesById, - ) -> anyhow::Result { - // need to replace the server id with name - original.server_id = resources - .servers - .get(&original.server_id) - .map(|s| s.name.clone()) - .unwrap_or_default(); - - // need to replace the build id with name - if let DeploymentImage::Build { build_id, version } = - &original.image - { - original.image = DeploymentImage::Build { - build_id: resources - .builds - .get(build_id) - .map(|b| b.name.clone()) - .unwrap_or_default(), - version: *version, - }; - } - - Ok(original.partial_diff(update)) - } -} diff --git a/bin/core/src/helpers/sync/file.rs b/bin/core/src/helpers/sync/file.rs index 7e761044c..3ad9b13e7 100644 --- a/bin/core/src/helpers/sync/file.rs +++ b/bin/core/src/helpers/sync/file.rs @@ -45,15 +45,17 @@ fn read_resources_recursive( resources.servers.extend(more.servers); resources.deployments.extend(more.deployments); + resources.stacks.extend(more.stacks); resources.builds.extend(more.builds); resources.repos.extend(more.repos); resources.procedures.extend(more.procedures); - resources.builders.extend(more.builders); resources.alerters.extend(more.alerters); + resources.builders.extend(more.builders); resources.server_templates.extend(more.server_templates); resources.resource_syncs.extend(more.resource_syncs); resources.user_groups.extend(more.user_groups); 
resources.variables.extend(more.variables); + Ok(()) } else if res.is_dir() { let directory = fs::read_dir(path) diff --git a/bin/core/src/helpers/sync/mod.rs b/bin/core/src/helpers/sync/mod.rs index 4c7360216..284c7db54 100644 --- a/bin/core/src/helpers/sync/mod.rs +++ b/bin/core/src/helpers/sync/mod.rs @@ -5,40 +5,57 @@ use monitor_client::{ use mungos::find::find_collect; use resolver_api::Resolve; -use crate::state::{db_client, State}; +use crate::{ + config::core_config, + state::{db_client, State}, +}; -pub mod deployment; +// pub mod deployment; pub mod remote; pub mod resource; pub mod user_groups; pub mod variables; +pub mod deploy; mod file; mod resources; pub fn spawn_sync_refresh_loop() { + let interval: Timelength = core_config() + .sync_poll_interval + .try_into() + .expect("Invalid sync poll interval"); tokio::spawn(async move { - let db = db_client().await; - let user = sync_user(); + refresh_syncs().await; loop { - wait_until_timelength(Timelength::FiveMinutes, 0).await; - let Ok(syncs) = find_collect(&db.resource_syncs, None, None) - .await - .inspect_err(|e| warn!("failed to get resource syncs from db in refresh task | {e:#}")) else { - continue; - }; - for sync in syncs { - State - .resolve( - RefreshResourceSyncPending { sync: sync.id }, - user.clone(), - ) - .await - .inspect_err(|e| { - warn!("failed to refresh resource sync in refresh task | sync: {} | {e:#}", sync.name) - }) - .ok(); - } + wait_until_timelength(interval, 0).await; + refresh_syncs().await; } }); } + +async fn refresh_syncs() { + let Ok(syncs) = + find_collect(&db_client().await.resource_syncs, None, None) + .await + .inspect_err(|e| { + warn!( + "failed to get resource syncs from db in refresh task | {e:#}" + ) + }) + else { + return; + }; + for sync in syncs { + State + .resolve( + RefreshResourceSyncPending { sync: sync.id }, + sync_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("failed to refresh resource sync in refresh task | sync: {} | {e:#}", sync.name) + 
}) + .ok(); + } +} diff --git a/bin/core/src/helpers/sync/remote.rs b/bin/core/src/helpers/sync/remote.rs index 093ddca22..f9b459321 100644 --- a/bin/core/src/helpers/sync/remote.rs +++ b/bin/core/src/helpers/sync/remote.rs @@ -2,11 +2,14 @@ use std::fs; use anyhow::{anyhow, Context}; use monitor_client::entities::{ - sync::ResourceSync, to_monitor_name, toml::ResourcesToml, - update::Log, CloneArgs, LatestCommit, + sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs, }; -use crate::{config::core_config, state::resource_sync_lock_cache}; +use crate::{ + config::core_config, + helpers::{git_token, random_string}, + state::resource_sync_lock_cache, +}; pub async fn get_remote_resources( sync: &ResourceSync, @@ -18,30 +21,28 @@ pub async fn get_remote_resources( // commit message String, )> { - let name = to_monitor_name(&sync.name); let mut clone_args: CloneArgs = sync.into(); let config = core_config(); - let access_token = match (&clone_args.account, &clone_args.provider) { + let access_token = match (&clone_args.account, &clone_args.provider) + { (None, _) => None, - (Some(_), None) => return Err(anyhow!("Account is configured, but provider is empty")), - (Some(username), Some(provider)) => config - .git_providers - .iter() - .find(|_provider| { - &_provider.domain == provider - }) - .and_then(|provider| { - clone_args.https = provider.https; - provider.accounts.iter().find(|account| &account.username == username).map(|account| &account.token) - }) - .with_context(|| format!("did not find git token for account {username} | provider: {provider}"))? - .to_owned() - .into(), + (Some(_), None) => { + return Err(anyhow!( + "Account is configured, but provider is empty" + )) + } + (Some(username), Some(provider)) => { + git_token(provider, username, |https| clone_args.https = https) + .await + .with_context( + || format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"), + )? 
+ } }; - fs::create_dir_all(&config.sync_directory) + fs::create_dir_all(&config.repo_directory) .context("failed to create sync directory")?; // lock simultaneous access to same directory @@ -50,18 +51,25 @@ pub async fn get_remote_resources( .await; let _lock = lock.lock().await; - let mut logs = - git::clone(clone_args, &config.sync_directory, access_token) - .await - .context("failed to clone resource repo")?; + let repo_path = config.repo_directory.join(random_string(10)); + // This overrides any other method of determining clone path. + clone_args.destination = Some(repo_path.display().to_string()); - let repo_dir = config.sync_directory.join(&name); - let LatestCommit { hash, message } = - git::get_commit_hash_info(&repo_dir) - .await - .context("failed to get commit hash info")?; + let (mut logs, hash, message, _) = git::clone( + clone_args, + &config.repo_directory, + access_token, + &[], + "", + None, + ) + .await + .context("failed to clone resource repo")?; + + let hash = hash.context("failed to get commit hash")?; + let message = + message.context("failed to get commit hash message")?; - let repo_path = config.sync_directory.join(&sync.name); let resource_path = repo_path.join(&sync.config.resource_path); let res = super::file::read_resources(&resource_path).map( @@ -71,8 +79,10 @@ pub async fn get_remote_resources( }, ); - if let Err(e) = std::fs::remove_dir_all(&repo_path) { - warn!("failed to remove sync repo directory | {e:?}") + if repo_path.exists() { + if let Err(e) = std::fs::remove_dir_all(&repo_path) { + warn!("failed to remove sync repo directory | {e:?}") + } } Ok((res, logs, hash, message)) diff --git a/bin/core/src/helpers/sync/resource.rs b/bin/core/src/helpers/sync/resource.rs index e1aa67a91..c872cef66 100644 --- a/bin/core/src/helpers/sync/resource.rs +++ b/bin/core/src/helpers/sync/resource.rs @@ -14,6 +14,7 @@ use monitor_client::{ repo::Repo, server::Server, server_template::ServerTemplate, + stack::Stack, sync::SyncUpdate, 
tag::Tag, toml::ResourceToml, @@ -513,6 +514,7 @@ pub async fn run_update_description( pub struct AllResourcesById { pub servers: HashMap, pub deployments: HashMap, + pub stacks: HashMap, pub builds: HashMap, pub repos: HashMap, pub procedures: HashMap, @@ -549,6 +551,10 @@ impl AllResourcesById { entities::sync::ResourceSync, >() .await?, + stacks: crate::resource::get_id_to_resource_map::< + entities::stack::Stack, + >() + .await?, }) } } diff --git a/bin/core/src/helpers/sync/resources.rs b/bin/core/src/helpers/sync/resources.rs index 6177edc93..30998d697 100644 --- a/bin/core/src/helpers/sync/resources.rs +++ b/bin/core/src/helpers/sync/resources.rs @@ -6,10 +6,12 @@ use monitor_client::{ alerter::Alerter, build::Build, builder::{Builder, BuilderConfig}, + deployment::{Deployment, DeploymentImage}, procedure::Procedure, repo::Repo, server::Server, server_template::ServerTemplate, + stack::Stack, update::{Log, ResourceTarget}, user::sync_user, }, @@ -42,6 +44,62 @@ impl ResourceSync for Server { } } +impl ResourceSync for Deployment { + fn resource_target(id: String) -> ResourceTarget { + ResourceTarget::Deployment(id) + } + + fn get_diff( + mut original: Self::Config, + update: Self::PartialConfig, + resources: &AllResourcesById, + ) -> anyhow::Result { + // need to replace the server id with name + original.server_id = resources + .servers + .get(&original.server_id) + .map(|s| s.name.clone()) + .unwrap_or_default(); + + // need to replace the build id with name + if let DeploymentImage::Build { build_id, version } = + &original.image + { + original.image = DeploymentImage::Build { + build_id: resources + .builds + .get(build_id) + .map(|b| b.name.clone()) + .unwrap_or_default(), + version: *version, + }; + } + + Ok(original.partial_diff(update)) + } +} + +impl ResourceSync for Stack { + fn resource_target(id: String) -> ResourceTarget { + ResourceTarget::Stack(id) + } + + fn get_diff( + mut original: Self::Config, + update: Self::PartialConfig, + resources: 
&AllResourcesById, + ) -> anyhow::Result { + // Need to replace server id with name + original.server_id = resources + .servers + .get(&original.server_id) + .map(|s| s.name.clone()) + .unwrap_or_default(); + + Ok(original.partial_diff(update)) + } +} + impl ResourceSync for Build { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Build(id) @@ -63,6 +121,9 @@ impl ResourceSync for Build { fn validate_diff(diff: &mut Self::ConfigDiff) { if let Some((_, to)) = &diff.version { + // When setting a build back to "latest" version, + // Don't actually set version to None. + // You can do this on the db, or set it to 0.0.1 if to.is_none() { diff.version = None; } @@ -184,6 +245,13 @@ impl ResourceSync for Procedure { .map(|b| b.name.clone()) .unwrap_or_default(); } + Execution::CancelBuild(config) => { + config.build = resources + .builds + .get(&config.build) + .map(|b| b.name.clone()) + .unwrap_or_default(); + } Execution::Deploy(config) => { config.deployment = resources .deployments @@ -198,6 +266,27 @@ impl ResourceSync for Procedure { .map(|d| d.name.clone()) .unwrap_or_default(); } + Execution::RestartContainer(config) => { + config.deployment = resources + .deployments + .get(&config.deployment) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::PauseContainer(config) => { + config.deployment = resources + .deployments + .get(&config.deployment) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::UnpauseContainer(config) => { + config.deployment = resources + .deployments + .get(&config.deployment) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } Execution::StopContainer(config) => { config.deployment = resources .deployments @@ -226,6 +315,20 @@ impl ResourceSync for Procedure { .map(|d| d.name.clone()) .unwrap_or_default(); } + Execution::BuildRepo(config) => { + config.repo = resources + .repos + .get(&config.repo) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } + Execution::CancelRepoBuild(config) 
=> { + config.repo = resources + .repos + .get(&config.repo) + .map(|d| d.name.clone()) + .unwrap_or_default(); + } Execution::StopAllContainers(config) => { config.server = resources .servers @@ -261,6 +364,55 @@ impl ResourceSync for Procedure { .map(|s| s.name.clone()) .unwrap_or_default(); } + Execution::DeployStack(config) => { + config.stack = resources + .stacks + .get(&config.stack) + .map(|s| s.name.clone()) + .unwrap_or_default(); + } + Execution::StartStack(config) => { + config.stack = resources + .stacks + .get(&config.stack) + .map(|s| s.name.clone()) + .unwrap_or_default(); + } + Execution::RestartStack(config) => { + config.stack = resources + .stacks + .get(&config.stack) + .map(|s| s.name.clone()) + .unwrap_or_default(); + } + Execution::PauseStack(config) => { + config.stack = resources + .stacks + .get(&config.stack) + .map(|s| s.name.clone()) + .unwrap_or_default(); + } + Execution::UnpauseStack(config) => { + config.stack = resources + .stacks + .get(&config.stack) + .map(|s| s.name.clone()) + .unwrap_or_default(); + } + Execution::StopStack(config) => { + config.stack = resources + .stacks + .get(&config.stack) + .map(|s| s.name.clone()) + .unwrap_or_default(); + } + Execution::DestroyStack(config) => { + config.stack = resources + .stacks + .get(&config.stack) + .map(|s| s.name.clone()) + .unwrap_or_default(); + } Execution::Sleep(_) => {} } } @@ -445,6 +597,9 @@ impl ResourceSync for Procedure { } warn!("procedure sync loop exited after max iterations"); - todo!() + Some(Log::error( + "run procedure", + String::from("procedure sync loop exited after max iterations"), + )) } } diff --git a/bin/core/src/helpers/sync/user_groups.rs b/bin/core/src/helpers/sync/user_groups.rs index 679fcd086..552955686 100644 --- a/bin/core/src/helpers/sync/user_groups.rs +++ b/bin/core/src/helpers/sync/user_groups.rs @@ -206,6 +206,13 @@ pub async fn get_updates_for_view( .map(|b| b.name.clone()) .unwrap_or_default() } + ResourceTarget::Stack(id) => { + *id = 
all_resources + .stacks + .get(id) + .map(|b| b.name.clone()) + .unwrap_or_default() + } } PermissionToml { target: p.resource_target, @@ -248,7 +255,7 @@ pub async fn get_updates_for_view( let adding = if adding.is_empty() { String::from("None") } else { - colored(&adding.join(", "), Color::Green) + colored(adding.join(", "), Color::Green) }; let removing = original_users .iter() @@ -258,7 +265,7 @@ pub async fn get_updates_for_view( let removing = if removing.is_empty() { String::from("None") } else { - colored(&removing.join(", "), Color::Red) + colored(removing.join(", "), Color::Red) }; lines.push(format!( "{}: 'users'\n{}: {removing}\n{}: {adding}", @@ -303,7 +310,7 @@ pub async fn get_updates_for_view( let adding = if adding.is_empty() { String::from("None") } else { - colored(&adding.join(", "), Color::Green) + colored(adding.join(", "), Color::Green) }; let updating = user_group .permissions @@ -324,7 +331,7 @@ pub async fn get_updates_for_view( let updating = if updating.is_empty() { String::from("None") } else { - colored(&updating.join(", "), Color::Blue) + colored(updating.join(", "), Color::Blue) }; let removing = original_permissions .iter() @@ -340,7 +347,7 @@ pub async fn get_updates_for_view( let removing = if removing.is_empty() { String::from("None") } else { - colored(&removing.join(", "), Color::Red) + colored(removing.join(", "), Color::Red) }; lines.push(format!( "{}: 'permissions'\n{}: {removing}\n{}: {updating}\n{}: {adding}", @@ -524,6 +531,13 @@ pub async fn get_updates_for_execution( .map(|b| b.name.clone()) .unwrap_or_default() } + ResourceTarget::Stack(id) => { + *id = all_resources + .stacks + .get(id) + .map(|b| b.name.clone()) + .unwrap_or_default() + } } PermissionToml { target: p.resource_target, @@ -970,6 +984,17 @@ async fn expand_user_group_permissions( }); expanded.extend(permissions); } + ResourceTargetVariant::Stack => { + let permissions = all_resources + .stacks + .values() + .filter(|resource| 
regex.is_match(&resource.name)) + .map(|resource| PermissionToml { + target: ResourceTarget::Stack(resource.name.clone()), + level: permission.level, + }); + expanded.extend(permissions); + } ResourceTargetVariant::System => {} } } else { diff --git a/bin/core/src/helpers/update.rs b/bin/core/src/helpers/update.rs index 636504ae9..2539476f7 100644 --- a/bin/core/src/helpers/update.rs +++ b/bin/core/src/helpers/update.rs @@ -7,6 +7,7 @@ use monitor_client::entities::{ repo::Repo, server::Server, server_template::ServerTemplate, + stack::Stack, sync::ResourceSync, update::{ResourceTarget, Update, UpdateListItem}, user::User, @@ -61,8 +62,8 @@ pub async fn add_update( #[instrument(level = "debug")] pub async fn update_update(update: Update) -> anyhow::Result<()> { update_one_by_id(&db_client().await.updates, &update.id, mungos::update::Update::Set(to_document(&update)?), None) - .await - .context("failed to update the update on db. the update build process was deleted")?; + .await + .context("failed to update the update on db. 
the update build process was deleted")?; let update = update_list_item(update).await?; let _ = send_update(update).await; Ok(()) @@ -110,6 +111,12 @@ pub async fn init_execution_update( ) -> anyhow::Result { let (operation, target) = match &request { // Server + ExecuteRequest::StopAllContainers(data) => ( + Operation::StopAllContainers, + ResourceTarget::Server( + resource::get::(&data.server).await?.id, + ), + ), ExecuteRequest::PruneContainers(data) => ( Operation::PruneImages, ResourceTarget::Server( @@ -128,12 +135,6 @@ pub async fn init_execution_update( resource::get::(&data.server).await?.id, ), ), - ExecuteRequest::StopAllContainers(data) => ( - Operation::StopAllContainers, - ResourceTarget::Server( - resource::get::(&data.server).await?.id, - ), - ), // Deployment ExecuteRequest::Deploy(data) => ( @@ -148,6 +149,24 @@ pub async fn init_execution_update( resource::get::(&data.deployment).await?.id, ), ), + ExecuteRequest::RestartContainer(data) => ( + Operation::RestartContainer, + ResourceTarget::Deployment( + resource::get::(&data.deployment).await?.id, + ), + ), + ExecuteRequest::PauseContainer(data) => ( + Operation::PauseContainer, + ResourceTarget::Deployment( + resource::get::(&data.deployment).await?.id, + ), + ), + ExecuteRequest::UnpauseContainer(data) => ( + Operation::UnpauseContainer, + ResourceTarget::Deployment( + resource::get::(&data.deployment).await?.id, + ), + ), ExecuteRequest::StopContainer(data) => ( Operation::StopContainer, ResourceTarget::Deployment( @@ -188,6 +207,18 @@ pub async fn init_execution_update( resource::get::(&data.repo).await?.id, ), ), + ExecuteRequest::BuildRepo(data) => ( + Operation::BuildRepo, + ResourceTarget::Repo( + resource::get::(&data.repo).await?.id, + ), + ), + ExecuteRequest::CancelRepoBuild(data) => ( + Operation::CancelRepoBuild, + ResourceTarget::Repo( + resource::get::(&data.repo).await?.id, + ), + ), // Procedure ExecuteRequest::RunProcedure(data) => ( @@ -214,6 +245,70 @@ pub async fn 
init_execution_update( resource::get::(&data.sync).await?.id, ), ), + + // Stack + ExecuteRequest::DeployStack(data) => ( + Operation::DeployStack, + ResourceTarget::Stack( + resource::get::(&data.stack).await?.id, + ), + ), + ExecuteRequest::StartStack(data) => ( + if data.service.is_some() { + Operation::StartStackService + } else { + Operation::StartStack + }, + ResourceTarget::Stack( + resource::get::(&data.stack).await?.id, + ), + ), + ExecuteRequest::RestartStack(data) => ( + if data.service.is_some() { + Operation::RestartStackService + } else { + Operation::RestartStack + }, + ResourceTarget::Stack( + resource::get::(&data.stack).await?.id, + ), + ), + ExecuteRequest::PauseStack(data) => ( + if data.service.is_some() { + Operation::PauseStackService + } else { + Operation::PauseStack + }, + ResourceTarget::Stack( + resource::get::(&data.stack).await?.id, + ), + ), + ExecuteRequest::UnpauseStack(data) => ( + if data.service.is_some() { + Operation::UnpauseStackService + } else { + Operation::UnpauseStack + }, + ResourceTarget::Stack( + resource::get::(&data.stack).await?.id, + ), + ), + ExecuteRequest::StopStack(data) => ( + if data.service.is_some() { + Operation::StopStackService + } else { + Operation::StopStack + }, + ResourceTarget::Stack( + resource::get::(&data.stack).await?.id, + ), + ), + ExecuteRequest::DestroyStack(data) => ( + Operation::DestroyStack, + ResourceTarget::Stack( + resource::get::(&data.stack).await?.id, + ), + ), }; let mut update = make_update(target, operation, user); update.in_progress(); diff --git a/bin/core/src/listener/github/build.rs b/bin/core/src/listener/github/build.rs index 12e020353..b215a4764 100644 --- a/bin/core/src/listener/github/build.rs +++ b/bin/core/src/listener/github/build.rs @@ -4,7 +4,7 @@ use anyhow::anyhow; use axum::http::HeaderMap; use monitor_client::{ api::execute::RunBuild, - entities::{build::Build, user::github_user}, + entities::{build::Build, user::git_webhook_user}, }; use 
resolver_api::Resolve; @@ -40,7 +40,7 @@ pub async fn handle_build_webhook( if request_branch != build.config.branch { return Err(anyhow!("request branch does not match expected")); } - let user = github_user().to_owned(); + let user = git_webhook_user().to_owned(); let req = ExecuteRequest::RunBuild(RunBuild { build: build_id }); let update = init_execution_update(&req, &user).await?; let ExecuteRequest::RunBuild(req) = req else { diff --git a/bin/core/src/listener/github/mod.rs b/bin/core/src/listener/github/mod.rs index 12b119fe3..d028450c4 100644 --- a/bin/core/src/listener/github/mod.rs +++ b/bin/core/src/listener/github/mod.rs @@ -17,6 +17,7 @@ use crate::{ mod build; mod procedure; mod repo; +mod stack; mod sync; type HmacSha256 = Hmac; @@ -88,6 +89,42 @@ pub fn router() -> Router { }, ) ) + .route( + "/stack/:id/refresh", + post( + |Path(Id { id }), headers: HeaderMap, body: String| async move { + tokio::spawn(async move { + let span = info_span!("stack_clone_webhook", id); + async { + let res = stack::handle_stack_refresh_webhook(id.clone(), headers, body).await; + if let Err(e) = res { + warn!("failed to run stack clone webook for stack {id} | {e:#}"); + } + } + .instrument(span) + .await + }); + }, + ) + ) + .route( + "/stack/:id/deploy", + post( + |Path(Id { id }), headers: HeaderMap, body: String| async move { + tokio::spawn(async move { + let span = info_span!("stack_pull_webhook", id); + async { + let res = stack::handle_stack_deploy_webhook(id.clone(), headers, body).await; + if let Err(e) = res { + warn!("failed to run stack pull webook for stack {id} | {e:#}"); + } + } + .instrument(span) + .await + }); + }, + ) + ) .route( "/procedure/:id/:branch", post( diff --git a/bin/core/src/listener/github/procedure.rs b/bin/core/src/listener/github/procedure.rs index d851aa95a..705183716 100644 --- a/bin/core/src/listener/github/procedure.rs +++ b/bin/core/src/listener/github/procedure.rs @@ -4,7 +4,7 @@ use anyhow::anyhow; use axum::http::HeaderMap; use 
monitor_client::{ api::execute::RunProcedure, - entities::{procedure::Procedure, user::github_user}, + entities::{procedure::Procedure, user::git_webhook_user}, }; use resolver_api::Resolve; @@ -42,7 +42,7 @@ pub async fn handle_procedure_webhook( if !procedure.config.webhook_enabled { return Err(anyhow!("procedure does not have webhook enabled")); } - let user = github_user().to_owned(); + let user = git_webhook_user().to_owned(); let req = ExecuteRequest::RunProcedure(RunProcedure { procedure: procedure_id, }); diff --git a/bin/core/src/listener/github/repo.rs b/bin/core/src/listener/github/repo.rs index 6684356f3..df6676787 100644 --- a/bin/core/src/listener/github/repo.rs +++ b/bin/core/src/listener/github/repo.rs @@ -4,7 +4,7 @@ use anyhow::anyhow; use axum::http::HeaderMap; use monitor_client::{ api::execute::{CloneRepo, PullRepo}, - entities::{repo::Repo, user::github_user}, + entities::{repo::Repo, user::git_webhook_user}, }; use resolver_api::Resolve; @@ -39,7 +39,7 @@ pub async fn handle_repo_clone_webhook( if request_branch != repo.config.branch { return Err(anyhow!("request branch does not match expected")); } - let user = github_user().to_owned(); + let user = git_webhook_user().to_owned(); let req = crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo { repo: repo_id, @@ -73,7 +73,7 @@ pub async fn handle_repo_pull_webhook( if request_branch != repo.config.branch { return Err(anyhow!("request branch does not match expected")); } - let user = github_user().to_owned(); + let user = git_webhook_user().to_owned(); let req = crate::api::execute::ExecuteRequest::PullRepo(PullRepo { repo: repo_id, }); diff --git a/bin/core/src/listener/github/stack.rs b/bin/core/src/listener/github/stack.rs new file mode 100644 index 000000000..59c818570 --- /dev/null +++ b/bin/core/src/listener/github/stack.rs @@ -0,0 +1,81 @@ +use std::sync::OnceLock; + +use anyhow::anyhow; +use axum::http::HeaderMap; +use monitor_client::{ + api::{execute::DeployStack, 
write::RefreshStackCache}, + entities::{stack::Stack, user::git_webhook_user}, +}; +use resolver_api::Resolve; + +use crate::{ + api::execute::ExecuteRequest, + helpers::update::init_execution_update, resource, state::State, +}; + +use super::{extract_branch, verify_gh_signature, ListenerLockCache}; + +fn stack_locks() -> &'static ListenerLockCache { + static STACK_LOCKS: OnceLock = OnceLock::new(); + STACK_LOCKS.get_or_init(Default::default) +} + +pub async fn handle_stack_refresh_webhook( + stack_id: String, + headers: HeaderMap, + body: String, +) -> anyhow::Result<()> { + // Acquire and hold lock to make a task queue for + // subsequent listener calls on same resource. + // It would fail if we let it go through, from "action state busy". + let lock = stack_locks().get_or_insert_default(&stack_id).await; + let _lock = lock.lock().await; + + verify_gh_signature(headers, &body).await?; + let request_branch = extract_branch(&body)?; + let stack = resource::get::(&stack_id).await?; + if !stack.config.webhook_enabled { + return Err(anyhow!("stack does not have webhook enabled")); + } + if request_branch != stack.config.branch { + return Err(anyhow!("request branch does not match expected")); + } + let user = git_webhook_user().to_owned(); + State + .resolve(RefreshStackCache { stack: stack.id }, user) + .await?; + Ok(()) +} + +pub async fn handle_stack_deploy_webhook( + stack_id: String, + headers: HeaderMap, + body: String, +) -> anyhow::Result<()> { + // Acquire and hold lock to make a task queue for + // subsequent listener calls on same resource. + // It would fail if we let it go through from action state busy. 
+ let lock = stack_locks().get_or_insert_default(&stack_id).await; + let _lock = lock.lock().await; + + verify_gh_signature(headers, &body).await?; + let request_branch = extract_branch(&body)?; + let stack = resource::get::(&stack_id).await?; + if !stack.config.webhook_enabled { + return Err(anyhow!("stack does not have webhook enabled")); + } + if request_branch != stack.config.branch { + return Err(anyhow!("request branch does not match expected")); + } + let user = git_webhook_user().to_owned(); + let req = ExecuteRequest::DeployStack(DeployStack { + stack: stack_id, + stop_time: None, + }); + let update = init_execution_update(&req, &user).await?; + let ExecuteRequest::DeployStack(req) = req else { + unreachable!() + }; + State.resolve(req, (user, update)).await?; + Ok(()) +} diff --git a/bin/core/src/listener/github/sync.rs b/bin/core/src/listener/github/sync.rs index d7bc39628..d6693b533 100644 --- a/bin/core/src/listener/github/sync.rs +++ b/bin/core/src/listener/github/sync.rs @@ -4,7 +4,7 @@ use anyhow::anyhow; use axum::http::HeaderMap; use monitor_client::{ api::{execute::RunSync, write::RefreshResourceSyncPending}, - entities::{sync::ResourceSync, user::github_user}, + entities::{sync::ResourceSync, user::git_webhook_user}, }; use resolver_api::Resolve; @@ -40,7 +40,7 @@ pub async fn handle_sync_refresh_webhook( if request_branch != sync.config.branch { return Err(anyhow!("request branch does not match expected")); } - let user = github_user().to_owned(); + let user = git_webhook_user().to_owned(); State .resolve(RefreshResourceSyncPending { sync: sync_id }, user) .await?; @@ -67,7 +67,7 @@ pub async fn handle_sync_execute_webhook( if request_branch != sync.config.branch { return Err(anyhow!("request branch does not match expected")); } - let user = github_user().to_owned(); + let user = git_webhook_user().to_owned(); let req = ExecuteRequest::RunSync(RunSync { sync: sync_id }); let update = init_execution_update(&req, &user).await?; let 
ExecuteRequest::RunSync(req) = req else { diff --git a/bin/core/src/main.rs b/bin/core/src/main.rs index e8a794ade..a5edf5d6f 100644 --- a/bin/core/src/main.rs +++ b/bin/core/src/main.rs @@ -5,6 +5,8 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::Context; use axum::Router; +use helpers::startup_in_progress_update_cleanup; +use state::jwt_client; use tower_http::{ cors::{Any, CorsLayer}, services::{ServeDir, ServeFile}, @@ -25,16 +27,24 @@ mod state; mod ws; async fn app() -> anyhow::Result<()> { - dotenv::dotenv().ok(); + dotenvy::dotenv().ok(); let config = core_config(); logger::init(&config.logging)?; info!("monitor core version: v{}", env!("CARGO_PKG_VERSION")); info!("config: {:?}", config.sanitized()); - // Spawn monitoring loops - monitor::spawn_monitor_loop()?; + // includes init db_client check to crash on db init failure + startup_in_progress_update_cleanup().await; + // init jwt client to crash on failure + jwt_client(); + + // Spawn tasks + monitor::spawn_monitor_loop(); helpers::prune::spawn_prune_loop(); + helpers::stack::spawn_stack_refresh_loop(); helpers::sync::spawn_sync_refresh_loop(); + helpers::build::spawn_build_refresh_loop(); + helpers::repo::spawn_repo_refresh_loop(); resource::spawn_build_state_refresh_loop(); resource::spawn_repo_state_refresh_loop(); resource::spawn_procedure_state_refresh_loop(); diff --git a/bin/core/src/monitor/alert/deployment.rs b/bin/core/src/monitor/alert/deployment.rs index a91d82587..6c19eb348 100644 --- a/bin/core/src/monitor/alert/deployment.rs +++ b/bin/core/src/monitor/alert/deployment.rs @@ -15,7 +15,7 @@ use crate::{ #[instrument(level = "debug")] pub async fn alert_deployments( ts: i64, - server_names: HashMap, + server_names: &HashMap, ) { let mut alerts = Vec::::new(); for status in deployment_status_cache().get_list().await { diff --git a/bin/core/src/monitor/alert/mod.rs b/bin/core/src/monitor/alert/mod.rs index ddb1a290e..9f00d0bc9 100644 --- a/bin/core/src/monitor/alert/mod.rs +++ 
b/bin/core/src/monitor/alert/mod.rs @@ -1,6 +1,3 @@ -mod deployment; -mod server; - use std::collections::HashMap; use anyhow::Context; @@ -12,21 +9,25 @@ use monitor_client::entities::{ use crate::resource; +mod deployment; +mod server; +mod stack; + // called after cache update #[instrument(level = "debug")] pub async fn check_alerts(ts: i64) { - let servers = get_all_servers_map().await; - - if let Err(e) = servers { - error!("{e:#?}"); - return; - } - - let (servers, server_names) = servers.unwrap(); + let (servers, server_names) = match get_all_servers_map().await { + Ok(res) => res, + Err(e) => { + error!("{e:#?}"); + return; + } + }; tokio::join!( server::alert_servers(ts, servers), - deployment::alert_deployments(ts, server_names) + deployment::alert_deployments(ts, &server_names), + stack::alert_stacks(ts, &server_names) ); } diff --git a/bin/core/src/monitor/alert/stack.rs b/bin/core/src/monitor/alert/stack.rs new file mode 100644 index 000000000..78086e288 --- /dev/null +++ b/bin/core/src/monitor/alert/stack.rs @@ -0,0 +1,82 @@ +use std::collections::HashMap; + +use monitor_client::entities::{ + alert::{Alert, AlertData}, + server::stats::SeverityLevel, + stack::{Stack, StackState}, + update::ResourceTarget, +}; + +use crate::{ + helpers::alert::send_alerts, + resource, + state::{db_client, stack_status_cache}, +}; + +#[instrument(level = "debug")] +pub async fn alert_stacks( + ts: i64, + server_names: &HashMap, +) { + let mut alerts = Vec::::new(); + for status in stack_status_cache().get_list().await { + // Don't alert if prev None + let Some(prev) = status.prev else { + continue; + }; + + // Don't alert if either prev or curr is Unknown. + // This will happen if server is unreachable, so this would be redundant. 
+ if status.curr.state == StackState::Unknown + || prev == StackState::Unknown + { + continue; + } + + if status.curr.state != prev { + // send alert + let Ok(stack) = + resource::get::(&status.curr.id) + .await + .inspect_err(|e| { + error!("failed to get stack from db | {e:#?}") + }) + else { + continue; + }; + if !stack.config.send_alerts { + continue; + } + let target: ResourceTarget = (&stack).into(); + let data = AlertData::StackStateChange { + id: status.curr.id.clone(), + name: stack.name, + server_name: server_names + .get(&stack.config.server_id) + .cloned() + .unwrap_or(String::from("unknown")), + server_id: stack.config.server_id, + from: prev, + to: status.curr.state, + }; + let alert = Alert { + id: Default::default(), + level: SeverityLevel::Warning, + resolved: true, + resolved_ts: ts.into(), + target, + data, + ts, + }; + alerts.push(alert); + } + } + if alerts.is_empty() { + return; + } + send_alerts(&alerts).await; + let res = db_client().await.alerts.insert_many(alerts).await; + if let Err(e) = res { + error!("failed to record stack status alerts to db | {e:#}"); + } +} diff --git a/bin/core/src/monitor/helpers.rs b/bin/core/src/monitor/helpers.rs index d4733275e..16ac10037 100644 --- a/bin/core/src/monitor/helpers.rs +++ b/bin/core/src/monitor/helpers.rs @@ -1,22 +1,26 @@ use monitor_client::entities::{ - deployment::{Deployment, DeploymentState}, + deployment::{ContainerSummary, Deployment, DeploymentState}, repo::Repo, server::{ + docker_image::ImageSummary, + docker_network::DockerNetwork, stats::{ ServerHealth, SeverityLevel, SingleDiskUsage, SystemStats, }, Server, ServerConfig, ServerState, }, + stack::{ComposeProject, Stack, StackState}, }; use serror::Serror; use crate::state::{ deployment_status_cache, repo_status_cache, server_status_cache, + stack_status_cache, }; use super::{ CachedDeploymentStatus, CachedRepoStatus, CachedServerStatus, - History, + CachedStackStatus, History, }; #[instrument(level = "debug", skip_all)] @@ -61,12 
+65,43 @@ pub async fn insert_repos_status_unknown(repos: Vec) { } } +#[instrument(level = "debug", skip_all)] +pub async fn insert_stacks_status_unknown(stacks: Vec) { + let status_cache = stack_status_cache(); + for stack in stacks { + let prev = + status_cache.get(&stack.id).await.map(|s| s.curr.state); + status_cache + .insert( + stack.id.clone(), + History { + curr: CachedStackStatus { + id: stack.id, + state: StackState::Unknown, + services: Vec::new(), + }, + prev, + } + .into(), + ) + .await; + } +} + +type DockerLists = ( + Option>, + Option>, + Option>, + Option>, +); + #[instrument(level = "debug", skip_all)] pub async fn insert_server_status( server: &Server, state: ServerState, version: String, stats: Option, + (containers, networks, images, projects): DockerLists, err: impl Into>, ) { let health = stats.as_ref().map(|s| get_server_health(server, s)); @@ -79,6 +114,10 @@ pub async fn insert_server_status( version, stats, health, + containers, + networks, + images, + projects, err: err.into(), } .into(), diff --git a/bin/core/src/monitor/lists.rs b/bin/core/src/monitor/lists.rs new file mode 100644 index 000000000..7efc3aac3 --- /dev/null +++ b/bin/core/src/monitor/lists.rs @@ -0,0 +1,52 @@ +use anyhow::Context; +use monitor_client::entities::{ + deployment::ContainerSummary, + server::{ + docker_image::ImageSummary, docker_network::DockerNetwork, + }, + stack::ComposeProject, +}; +use periphery_client::{ + api::{ + container::GetContainerList, GetDockerLists, + GetDockerListsResponse, + }, + PeripheryClient, +}; + +pub async fn get_docker_lists( + periphery: &PeripheryClient, +) -> anyhow::Result<( + Vec, + Vec, + Vec, + Vec, +)> { + if let Ok(GetDockerListsResponse { + containers, + networks, + images, + projects, + }) = periphery.request(GetDockerLists {}).await + { + // TODO: handle the errors + let (mut containers, mut networks, images, mut projects) = ( + containers.unwrap_or_default(), + networks.unwrap_or_default(), + images.unwrap_or_default(), 
+ projects.unwrap_or_default(), + ); + containers.sort_by(|a, b| a.name.cmp(&b.name)); + networks.sort_by(|a, b| a.name.cmp(&b.name)); + projects.sort_by(|a, b| a.name.cmp(&b.name)); + return Ok((containers, networks, images, projects)); + } + // Fallback to ListContainers for backward compat w/ v1.12 + let mut containers = + periphery + .request(GetContainerList {}) + .await + .context("failed to get docker container list")?; + containers.sort_by(|a, b| a.name.cmp(&b.name)); + Ok((containers, Vec::new(), Vec::new(), Vec::new())) +} diff --git a/bin/core/src/monitor/mod.rs b/bin/core/src/monitor/mod.rs index e3b23d1e8..6fcc9bb3c 100644 --- a/bin/core/src/monitor/mod.rs +++ b/bin/core/src/monitor/mod.rs @@ -1,11 +1,16 @@ use async_timing_util::wait_until_timelength; use futures::future::join_all; +use helpers::insert_stacks_status_unknown; use monitor_client::entities::{ deployment::{ContainerSummary, DeploymentState}, + monitor_timestamp, server::{ + docker_image::ImageSummary, + docker_network::DockerNetwork, stats::{ServerHealth, SystemStats}, Server, ServerState, }, + stack::{ComposeProject, Stack, StackService, StackState}, }; use mungos::{find::find_collect, mongodb::bson::doc}; use periphery_client::api::{self, git::GetLatestCommit}; @@ -15,6 +20,7 @@ use crate::{ config::core_config, helpers::periphery_client, monitor::{alert::check_alerts, record::record_server_stats}, + resource, state::{db_client, deployment_status_cache, repo_status_cache}, }; @@ -25,7 +31,9 @@ use self::helpers::{ mod alert; mod helpers; +mod lists; mod record; +mod resources; #[derive(Default, Debug)] pub struct History { @@ -40,11 +48,17 @@ pub struct CachedServerStatus { pub version: String, pub stats: Option, pub health: Option, + pub containers: Option>, + pub networks: Option>, + pub images: Option>, + pub projects: Option>, + /// Store the error in reaching periphery pub err: Option, } #[derive(Default, Clone, Debug)] pub struct CachedDeploymentStatus { + /// The deployment id 
pub id: String, pub state: DeploymentState, pub container: Option, @@ -56,74 +70,91 @@ pub struct CachedRepoStatus { pub latest_message: Option, } -pub fn spawn_monitor_loop() -> anyhow::Result<()> { - let interval: async_timing_util::Timelength = - core_config().monitoring_interval.try_into()?; +#[derive(Default, Clone, Debug)] +pub struct CachedStackStatus { + /// The stack id + pub id: String, + /// The stack state + pub state: StackState, + /// The services connected to the stack + pub services: Vec, +} + +const ADDITIONAL_MS: u128 = 500; + +pub fn spawn_monitor_loop() { + let interval: async_timing_util::Timelength = core_config() + .monitoring_interval + .try_into() + .expect("Invalid monitoring interval"); tokio::spawn(async move { + refresh_server_cache(monitor_timestamp()).await; loop { - let ts = - (wait_until_timelength(interval, 500).await - 500) as i64; - let servers = - match find_collect(&db_client().await.servers, None, None) - .await - { - Ok(servers) => servers, - Err(e) => { - error!( - "failed to get server list (manage status cache) | {e:#}" - ); - continue; - } - }; - let futures = servers.into_iter().map(|server| async move { - update_cache_for_server(&server).await; - }); - join_all(futures).await; - tokio::join!(check_alerts(ts), record_server_stats(ts)); + let ts = (wait_until_timelength(interval, ADDITIONAL_MS).await + - ADDITIONAL_MS) as i64; + refresh_server_cache(ts).await; } }); - Ok(()) +} + +async fn refresh_server_cache(ts: i64) { + let servers = match find_collect( + &db_client().await.servers, + None, + None, + ) + .await + { + Ok(servers) => servers, + Err(e) => { + error!( + "failed to get server list (manage status cache) | {e:#}" + ); + return; + } + }; + let futures = servers.into_iter().map(|server| async move { + update_cache_for_server(&server).await; + }); + join_all(futures).await; + tokio::join!(check_alerts(ts), record_server_stats(ts)); } #[instrument(level = "debug")] pub async fn update_cache_for_server(server: 
&Server) { - let deployments = match find_collect( - &db_client().await.deployments, - doc! { "config.server_id": &server.id }, - None, - ) - .await - { - Ok(deployments) => deployments, - Err(e) => { - error!("failed to get deployments list from mongo (update status cache) | server id: {} | {e:#}", server.id); - Vec::new() - } - }; + let (deployments, repos, stacks) = tokio::join!( + find_collect( + &db_client().await.deployments, + doc! { "config.server_id": &server.id }, + None, + ), + find_collect( + &db_client().await.repos, + doc! { "config.server_id": &server.id }, + None, + ), + find_collect( + &db_client().await.stacks, + doc! { "config.server_id": &server.id }, + None, + ) + ); - let repos = match find_collect( - &db_client().await.repos, - doc! { "config.server_id": &server.id }, - None, - ) - .await - { - Ok(repos) => repos, - Err(e) => { - error!("failed to get repos list from mongo (update status cache) | server id: {} | {e:#}", server.id); - Vec::new() - } - }; + let deployments = deployments.inspect_err(|e| error!("failed to get deployments list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default(); + let repos = repos.inspect_err(|e| error!("failed to get repos list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default(); + let stacks = stacks.inspect_err(|e| error!("failed to get stacks list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default(); // Handle server disabled if !server.config.enabled { insert_deployments_status_unknown(deployments).await; insert_repos_status_unknown(repos).await; + insert_stacks_status_unknown(stacks).await; insert_server_status( server, ServerState::Disabled, String::from("unknown"), None, + (None, None, None, None), None, ) .await; @@ -142,11 +173,13 @@ pub async fn update_cache_for_server(server: &Server) { Err(e) => { insert_deployments_status_unknown(deployments).await; insert_repos_status_unknown(repos).await; + 
insert_stacks_status_unknown(stacks).await; insert_server_status( server, ServerState::NotOk, String::from("unknown"), None, + (None, None, None, None), Serror::from(&e), ) .await; @@ -160,11 +193,13 @@ pub async fn update_cache_for_server(server: &Server) { Err(e) => { insert_deployments_status_unknown(deployments).await; insert_repos_status_unknown(repos).await; + insert_stacks_status_unknown(stacks).await; insert_server_status( server, ServerState::NotOk, String::from("unknown"), None, + (None, None, None, None), Serror::from(&e), ) .await; @@ -175,46 +210,44 @@ pub async fn update_cache_for_server(server: &Server) { None }; - insert_server_status(server, ServerState::Ok, version, stats, None) - .await; - - match periphery.request(api::container::GetContainerList {}).await { - Ok(containers) => { - let status_cache = deployment_status_cache(); - for deployment in deployments { - let container = containers - .iter() - .find(|c| c.name == deployment.name) - .cloned(); - let prev = status_cache - .get(&deployment.id) - .await - .map(|s| s.curr.state); - let state = container - .as_ref() - .map(|c| c.state) - .unwrap_or(DeploymentState::NotDeployed); - status_cache - .insert( - deployment.id.clone(), - History { - curr: CachedDeploymentStatus { - id: deployment.id, - state, - container, - }, - prev, - } - .into(), - ) - .await; - } + match lists::get_docker_lists(&periphery).await { + Ok((containers, networks, images, projects)) => { + tokio::join!( + resources::update_deployment_cache(deployments, &containers), + resources::update_stack_cache(stacks, &containers), + ); + insert_server_status( + server, + ServerState::Ok, + version, + stats, + ( + Some(containers.clone()), + Some(networks), + Some(images), + Some(projects), + ), + None, + ) + .await; } Err(e) => { - warn!("could not get containers list | {e:#}"); + warn!( + "could not get docker lists | (update status cache) | {e:#}" + ); insert_deployments_status_unknown(deployments).await; + 
insert_stacks_status_unknown(stacks).await; + insert_server_status( + server, + ServerState::Ok, + version, + stats, + (None, None, None, None), + Some(e.into()), + ) + .await; } - }; + } let status_cache = repo_status_cache(); for repo in repos { @@ -238,3 +271,19 @@ pub async fn update_cache_for_server(server: &Server) { .await; } } + +#[instrument(level = "debug")] +pub async fn update_cache_for_stack(stack: &Stack) { + if stack.config.server_id.is_empty() { + return; + } + let Ok(server) = resource::get::(&stack.config.server_id) + .await + .inspect_err(|e| { + warn!("Failed to get server for stack {} | {e:#}", stack.name) + }) + else { + return; + }; + update_cache_for_server(&server).await; +} diff --git a/bin/core/src/monitor/resources.rs b/bin/core/src/monitor/resources.rs new file mode 100644 index 000000000..b8b78a535 --- /dev/null +++ b/bin/core/src/monitor/resources.rs @@ -0,0 +1,102 @@ +use anyhow::Context; +use monitor_client::entities::{ + deployment::{ContainerSummary, Deployment, DeploymentState}, + stack::{Stack, StackService, StackServiceNames}, +}; + +use crate::{ + helpers::{ + query::get_stack_state_from_containers, + stack::{ + compose_container_match_regex, + services::extract_services_from_stack, + }, + }, + state::{deployment_status_cache, stack_status_cache}, +}; + +use super::{CachedDeploymentStatus, CachedStackStatus, History}; + +pub async fn update_deployment_cache( + deployments: Vec, + containers: &[ContainerSummary], +) { + let deployment_status_cache = deployment_status_cache(); + for deployment in deployments { + let container = containers + .iter() + .find(|container| container.name == deployment.name) + .cloned(); + let prev = deployment_status_cache + .get(&deployment.id) + .await + .map(|s| s.curr.state); + let state = container + .as_ref() + .map(|c| c.state) + .unwrap_or(DeploymentState::NotDeployed); + deployment_status_cache + .insert( + deployment.id.clone(), + History { + curr: CachedDeploymentStatus { + id: 
deployment.id, + state, + container, + }, + prev, + } + .into(), + ) + .await; + } +} + +pub async fn update_stack_cache( + stacks: Vec, + containers: &[ContainerSummary], +) { + let stack_status_cache = stack_status_cache(); + for stack in stacks { + let services = match extract_services_from_stack(&stack, false) + .await + { + Ok(services) => services, + Err(e) => { + warn!("failed to extract services for stack {}. cannot match services to containers. (update status cache) | {e:?}", stack.name); + continue; + } + }; + let mut services_with_containers = services.iter().map(|StackServiceNames { service_name, container_name }| { + let container = containers.iter().find(|container| { + match compose_container_match_regex(container_name) + .with_context(|| format!("failed to construct container name matching regex for service {service_name}")) + { + Ok(regex) => regex, + Err(e) => { + warn!("{e:#}"); + return false + } + }.is_match(&container.name) + }).cloned(); + StackService { + service: service_name.clone(), + container, + } + }).collect::>(); + services_with_containers + .sort_by(|a, b| a.service.cmp(&b.service)); + let prev = stack_status_cache + .get(&stack.id) + .await + .map(|s| s.curr.state); + let status = CachedStackStatus { + id: stack.id.clone(), + state: get_stack_state_from_containers(&services, containers), + services: services_with_containers, + }; + stack_status_cache + .insert(stack.id, History { curr: status, prev }.into()) + .await; + } +} diff --git a/bin/core/src/resource/build.rs b/bin/core/src/resource/build.rs index c7d3916ba..66e58531b 100644 --- a/bin/core/src/resource/build.rs +++ b/bin/core/src/resource/build.rs @@ -56,6 +56,8 @@ impl super::MonitorResource for Build { git_provider: build.config.git_provider, repo: build.config.repo, branch: build.config.branch, + built_hash: build.info.built_hash, + latest_hash: build.info.latest_hash, state, }, } diff --git a/bin/core/src/resource/deployment.rs b/bin/core/src/resource/deployment.rs 
index 00c654678..dbc458ab8 100644 --- a/bin/core/src/resource/deployment.rs +++ b/bin/core/src/resource/deployment.rs @@ -173,70 +173,71 @@ impl super::MonitorResource for Deployment { let state = get_deployment_state(deployment) .await .context("failed to get container state")?; - if !matches!( + if matches!( state, DeploymentState::NotDeployed | DeploymentState::Unknown ) { - // container needs to be destroyed - let server = match super::get::( - &deployment.config.server_id, - ) - .await - { - Ok(server) => server, - Err(e) => { - update.push_error_log( - "remove container", - format_serror( - &e.context(format!( - "failed to retrieve server at {} from db.", - deployment.config.server_id - )) - .into(), - ), - ); - return Ok(()); - } - }; - if !server.config.enabled { - // Don't need to - update.push_simple_log( + return Ok(()); + } + // container needs to be destroyed + let server = match super::get::( + &deployment.config.server_id, + ) + .await + { + Ok(server) => server, + Err(e) => { + update.push_error_log( "remove container", - "skipping container removal, server is disabled.", + format_serror( + &e.context(format!( + "failed to retrieve server at {} from db.", + deployment.config.server_id + )) + .into(), + ), ); return Ok(()); } - let periphery = match periphery_client(&server) { - Ok(periphery) => periphery, - Err(e) => { - // This case won't ever happen, as periphery_client only fallible if the server is disabled. 
- // Leaving it for completeness sake - update.push_error_log( - "remove container", - format_serror( - &e.context("failed to get periphery client").into(), - ), - ); - return Ok(()); - } - }; - match periphery - .request(RemoveContainer { - name: deployment.name.clone(), - signal: deployment.config.termination_signal.into(), - time: deployment.config.termination_timeout.into(), - }) - .await - { - Ok(log) => update.logs.push(log), - Err(e) => update.push_error_log( + }; + if !server.config.enabled { + // Don't need to + update.push_simple_log( + "remove container", + "skipping container removal, server is disabled.", + ); + return Ok(()); + } + let periphery = match periphery_client(&server) { + Ok(periphery) => periphery, + Err(e) => { + // This case won't ever happen, as periphery_client only fallible if the server is disabled. + // Leaving it for completeness sake + update.push_error_log( "remove container", format_serror( - &e.context("failed to remove container").into(), + &e.context("failed to get periphery client").into(), ), + ); + return Ok(()); + } + }; + match periphery + .request(RemoveContainer { + name: deployment.name.clone(), + signal: deployment.config.termination_signal.into(), + time: deployment.config.termination_timeout.into(), + }) + .await + { + Ok(log) => update.logs.push(log), + Err(e) => update.push_error_log( + "remove container", + format_serror( + &e.context("failed to remove container").into(), ), - }; - } + ), + }; Ok(()) } diff --git a/bin/core/src/resource/mod.rs b/bin/core/src/resource/mod.rs index 8d295d2a0..736dd0bb7 100644 --- a/bin/core/src/resource/mod.rs +++ b/bin/core/src/resource/mod.rs @@ -1,8 +1,11 @@ -use std::{collections::HashMap, str::FromStr}; +use std::{ + collections::{HashMap, HashSet}, + str::FromStr, +}; use anyhow::{anyhow, Context}; use formatting::format_serror; -use futures::future::join_all; +use futures::{future::join_all, FutureExt}; use monitor_client::{ api::write::CreateTag, entities::{ @@ -33,8 +36,8 
@@ use crate::{ helpers::{ create_permission, flatten_document, query::{ - get_resource_ids_for_user, get_tag, - get_user_permission_on_resource, id_or_name_filter, + get_tag, get_user_user_groups, id_or_name_filter, + user_target_query, }, update::{add_update, make_update}, }, @@ -49,6 +52,7 @@ mod procedure; mod repo; mod server; mod server_template; +mod stack; mod sync; pub use build::{ @@ -205,17 +209,16 @@ pub async fn get_check_permissions( ) -> anyhow::Result> { let resource = get::(id_or_name).await?; if user.admin + // Allow if its just read or below, and transparent mode enabled || (permission_level <= PermissionLevel::Read && core_config().transparent_mode) + // Allow if resource has base permission level greater than or equal to required permission level + || resource.base_permission >= permission_level { return Ok(resource); } - let permissions = get_user_permission_on_resource( - user, - T::resource_type(), - &resource.id, - ) - .await?; + let permissions = + get_user_permission_on_resource::(user, &resource.id).await?; if permissions >= permission_level { Ok(resource) } else { @@ -230,6 +233,147 @@ pub async fn get_check_permissions( // LIST // ====== +/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access). 
+#[instrument(level = "debug")] +pub async fn get_resource_ids_for_user( + user: &User, +) -> anyhow::Result>> { + // Check admin or transparent mode + if user.admin || core_config().transparent_mode { + return Ok(None); + } + + let resource_type = T::resource_type(); + + // Check user 'all' on variant + if let Some(level) = user.all.get(&resource_type).cloned() { + if level > PermissionLevel::None { + return Ok(None); + } + } + + // Check user groups 'all' on variant + let groups = get_user_user_groups(&user.id).await?; + for group in &groups { + if let Some(level) = group.all.get(&resource_type).cloned() { + if level > PermissionLevel::None { + return Ok(None); + } + } + } + + let (base, perms) = tokio::try_join!( + // Get any resources with non-none base permission, + find_collect( + T::coll().await, + doc! { "base_permission": { "$ne": "None" } }, + None, + ) + .map(|res| res.with_context(|| format!( + "failed to query {resource_type} on db" + ))), + // And any ids using the permissions table + find_collect( + &db_client().await.permissions, + doc! 
{ + "$or": user_target_query(&user.id, &groups)?, + "resource_target.type": resource_type.as_ref(), + "level": { "$in": ["Read", "Execute", "Write"] } + }, + None, + ) + .map(|res| res.context("failed to query permissions on db")) + )?; + + // Add specific ids + let ids = perms + .into_iter() + .map(|p| p.resource_target.extract_variant_id().1.to_string()) + // Chain in the ones with non-None base permissions + .chain(base.into_iter().map(|res| res.id)) + // collect into hashset first to remove any duplicates + .collect::>() + .into_iter() + .flat_map(|id| ObjectId::from_str(&id)) + .collect::>(); + + Ok(Some(ids.into_iter().collect())) +} + +#[instrument(level = "debug")] +pub async fn get_user_permission_on_resource( + user: &User, + resource_id: &str, +) -> anyhow::Result { + if user.admin { + return Ok(PermissionLevel::Write); + } + + let resource_type = T::resource_type(); + + // Start with base of Read or None + let mut base = if core_config().transparent_mode { + PermissionLevel::Read + } else { + PermissionLevel::None + }; + + // Add in the resource level global base permission + let resource_base = get::(resource_id).await?.base_permission; + if resource_base > base { + base = resource_base; + } + + // Overlay users base on resource variant + if let Some(level) = user.all.get(&resource_type).cloned() { + if level > base { + base = level; + } + } + if base == PermissionLevel::Write { + // No reason to keep going if already Write at this point. + return Ok(PermissionLevel::Write); + } + + // Overlay any user groups base on resource variant + let groups = get_user_user_groups(&user.id).await?; + for group in &groups { + if let Some(level) = group.all.get(&resource_type).cloned() { + if level > base { + base = level; + } + } + } + if base == PermissionLevel::Write { + // No reason to keep going if already Write at this point. 
+ return Ok(PermissionLevel::Write); + } + + // Overlay any specific permissions + let permission = find_collect( + &db_client().await.permissions, + doc! { + "$or": user_target_query(&user.id, &groups)?, + "resource_target.type": resource_type.as_ref(), + "resource_target.id": resource_id + }, + None, + ) + .await + .context("failed to query db for permissions")? + .into_iter() + // get the max permission user has between personal / any user groups + .fold(base, |level, permission| { + if permission.level > level { + permission.level + } else { + level + } + }); + Ok(permission) +} + +#[instrument(level = "debug")] pub async fn list_for_user( mut query: ResourceQuery, user: &User, @@ -251,6 +395,7 @@ pub async fn list_for_user_using_document( Ok(join_all(list).await) } +#[instrument(level = "debug")] pub async fn list_full_for_user( mut query: ResourceQuery, user: &User, @@ -261,13 +406,12 @@ pub async fn list_full_for_user( list_full_for_user_using_document::(filters, user).await } +#[instrument(level = "debug")] async fn list_full_for_user_using_document( mut filters: Document, user: &User, ) -> anyhow::Result>> { - if let Some(ids) = - get_resource_ids_for_user(user, T::resource_type()).await? - { + if let Some(ids) = get_resource_ids_for_user::(user).await? { filters.insert("_id", doc! 
{ "$in": ids }); } find_collect( @@ -281,8 +425,17 @@ async fn list_full_for_user_using_document( }) } +pub type IdResourceMap = HashMap< + String, + Resource< + ::Config, + ::Info, + >, +>; + +#[instrument(level = "debug")] pub async fn get_id_to_resource_map( -) -> anyhow::Result>> { +) -> anyhow::Result> { let res = find_collect(T::coll().await, None, None) .await .with_context(|| { @@ -328,6 +481,7 @@ pub async fn create( tags: Default::default(), config: config.into(), info: T::default_info().await?, + base_permission: PermissionLevel::None, }; let resource_id = T::coll() @@ -466,6 +620,7 @@ fn resource_target(id: String) -> ResourceTarget { ResourceTargetVariant::ResourceSync => { ResourceTarget::ResourceSync(id) } + ResourceTargetVariant::Stack => ResourceTarget::Stack(id), } } diff --git a/bin/core/src/resource/procedure.rs b/bin/core/src/resource/procedure.rs index cbc9e3d39..88cbd7d01 100644 --- a/bin/core/src/resource/procedure.rs +++ b/bin/core/src/resource/procedure.rs @@ -15,6 +15,7 @@ use monitor_client::{ repo::Repo, resource::Resource, server::Server, + stack::Stack, sync::ResourceSync, update::{ResourceTargetVariant, Update}, user::User, @@ -177,6 +178,15 @@ async fn validate_config( .await?; params.build = build.id; } + Execution::CancelBuild(params) => { + let build = super::get_check_permissions::( + ¶ms.build, + user, + PermissionLevel::Execute, + ) + .await?; + params.build = build.id; + } Execution::Deploy(params) => { let deployment = super::get_check_permissions::( @@ -197,6 +207,36 @@ async fn validate_config( .await?; params.deployment = deployment.id; } + Execution::RestartContainer(params) => { + let deployment = + super::get_check_permissions::( + ¶ms.deployment, + user, + PermissionLevel::Execute, + ) + .await?; + params.deployment = deployment.id; + } + Execution::PauseContainer(params) => { + let deployment = + super::get_check_permissions::( + ¶ms.deployment, + user, + PermissionLevel::Execute, + ) + .await?; + params.deployment = 
deployment.id; + } + Execution::UnpauseContainer(params) => { + let deployment = + super::get_check_permissions::( + ¶ms.deployment, + user, + PermissionLevel::Execute, + ) + .await?; + params.deployment = deployment.id; + } Execution::StopContainer(params) => { let deployment = super::get_check_permissions::( @@ -244,6 +284,24 @@ async fn validate_config( .await?; params.repo = repo.id; } + Execution::BuildRepo(params) => { + let repo = super::get_check_permissions::( + ¶ms.repo, + user, + PermissionLevel::Execute, + ) + .await?; + params.repo = repo.id; + } + Execution::CancelRepoBuild(params) => { + let repo = super::get_check_permissions::( + ¶ms.repo, + user, + PermissionLevel::Execute, + ) + .await?; + params.repo = repo.id; + } Execution::PruneNetworks(params) => { let server = super::get_check_permissions::( ¶ms.server, @@ -280,6 +338,69 @@ async fn validate_config( .await?; params.sync = sync.id; } + Execution::DeployStack(params) => { + let stack = super::get_check_permissions::( + ¶ms.stack, + user, + PermissionLevel::Execute, + ) + .await?; + params.stack = stack.id; + } + Execution::StartStack(params) => { + let stack = super::get_check_permissions::( + ¶ms.stack, + user, + PermissionLevel::Execute, + ) + .await?; + params.stack = stack.id; + } + Execution::RestartStack(params) => { + let stack = super::get_check_permissions::( + ¶ms.stack, + user, + PermissionLevel::Execute, + ) + .await?; + params.stack = stack.id; + } + Execution::PauseStack(params) => { + let stack = super::get_check_permissions::( + ¶ms.stack, + user, + PermissionLevel::Execute, + ) + .await?; + params.stack = stack.id; + } + Execution::UnpauseStack(params) => { + let stack = super::get_check_permissions::( + ¶ms.stack, + user, + PermissionLevel::Execute, + ) + .await?; + params.stack = stack.id; + } + Execution::StopStack(params) => { + let stack = super::get_check_permissions::( + ¶ms.stack, + user, + PermissionLevel::Execute, + ) + .await?; + params.stack = stack.id; + } + 
Execution::DestroyStack(params) => { + let stack = super::get_check_permissions::( + ¶ms.stack, + user, + PermissionLevel::Execute, + ) + .await?; + params.stack = stack.id; + } Execution::Sleep(_) => {} } } diff --git a/bin/core/src/resource/repo.rs b/bin/core/src/resource/repo.rs index 37ff34549..fe7d62b3d 100644 --- a/bin/core/src/resource/repo.rs +++ b/bin/core/src/resource/repo.rs @@ -60,12 +60,15 @@ impl super::MonitorResource for Repo { info: RepoListItemInfo { server_id: repo.config.server_id, last_pulled_at: repo.info.last_pulled_at, + last_built_at: repo.info.last_built_at, git_provider: repo.config.git_provider, repo: repo.config.repo, branch: repo.config.branch, state, - latest_hash: status.latest_hash.clone(), - latest_message: status.latest_message.clone(), + cloned_hash: status.latest_hash.clone(), + cloned_message: status.latest_message.clone(), + latest_hash: repo.info.latest_hash, + built_hash: repo.info.built_hash, }, } } diff --git a/bin/core/src/resource/server.rs b/bin/core/src/resource/server.rs index 05051ec4a..17ffec2c7 100644 --- a/bin/core/src/resource/server.rs +++ b/bin/core/src/resource/server.rs @@ -142,6 +142,14 @@ impl super::MonitorResource for Server { .await .context("failed to detach server from deployments")?; + db.stacks + .update_many( + doc! { "config.server_id": &id }, + doc! { "$set": { "config.server_id": "" } }, + ) + .await + .context("failed to detach server from stacks")?; + db.repos .update_many( doc! 
{ "config.server_id": &id }, diff --git a/bin/core/src/resource/stack.rs b/bin/core/src/resource/stack.rs new file mode 100644 index 000000000..1213e9de9 --- /dev/null +++ b/bin/core/src/resource/stack.rs @@ -0,0 +1,382 @@ +use anyhow::Context; +use formatting::format_serror; +use monitor_client::entities::{ + permission::PermissionLevel, + resource::Resource, + server::Server, + stack::{ + PartialStackConfig, Stack, StackConfig, StackConfigDiff, + StackInfo, StackListItem, StackListItemInfo, StackQuerySpecifics, + StackState, + }, + update::{ResourceTargetVariant, Update}, + user::User, + Operation, +}; +use mungos::mongodb::Collection; +use periphery_client::api::compose::ComposeExecution; + +use crate::{ + helpers::{periphery_client, query::get_stack_state}, + monitor::update_cache_for_server, + resource, + state::{ + action_states, db_client, server_status_cache, stack_status_cache, + }, +}; + +use super::get_check_permissions; + +impl super::MonitorResource for Stack { + type Config = StackConfig; + type PartialConfig = PartialStackConfig; + type ConfigDiff = StackConfigDiff; + type Info = StackInfo; + type ListItem = StackListItem; + type QuerySpecifics = StackQuerySpecifics; + + fn resource_type() -> ResourceTargetVariant { + ResourceTargetVariant::Stack + } + + async fn coll( + ) -> &'static Collection> { + &db_client().await.stacks + } + + async fn to_list_item( + stack: Resource, + ) -> Self::ListItem { + let status = stack_status_cache().get(&stack.id).await; + let state = + status.as_ref().map(|s| s.curr.state).unwrap_or_default(); + let project_name = stack.project_name(false); + let services = match ( + state, + stack.info.deployed_services, + stack.info.latest_services, + ) { + // Always use latest if its down. + (StackState::Down, _, latest_services) => latest_services, + // Also use latest if deployed services is empty. 
+ (_, Some(deployed_services), _) => deployed_services, + // Otherwise use deployed services + (_, _, latest_services) => latest_services, + } + .into_iter() + .map(|service| service.service_name) + .collect(); + // This is only true if it is KNOWN to be true. so other cases are false. + let (project_missing, status) = if stack.config.server_id.is_empty() + || matches!(state, StackState::Down | StackState::Unknown) + { + (false, None) + } else if let Some(status) = server_status_cache() + .get(&stack.config.server_id) + .await + .as_ref() + { + if let Some(projects) = &status.projects { + if let Some(project) = projects.iter().find(|project| project.name == project_name) { + (false, project.status.clone()) + } else { + // The project doesn't exist + (true, None) + } + } else { + (false, None) + } + } else { + (false, None) + }; + StackListItem { + id: stack.id, + name: stack.name, + tags: stack.tags, + resource_type: ResourceTargetVariant::Stack, + info: StackListItemInfo { + state, + status, + services, + project_missing, + server_id: stack.config.server_id, + missing_files: stack.info.missing_files, + git_provider: stack.config.git_provider, + repo: stack.config.repo, + branch: stack.config.branch, + latest_hash: stack.info.latest_hash, + deployed_hash: stack.info.deployed_hash, + }, + } + } + + async fn busy(id: &String) -> anyhow::Result { + action_states() + .stack + .get(id) + .await + .unwrap_or_default() + .busy() + } + + // CREATE + + fn create_operation() -> Operation { + Operation::CreateStack + } + + fn user_can_create(user: &User) -> bool { + user.admin + } + + async fn validate_create_config( + config: &mut Self::PartialConfig, + user: &User, + ) -> anyhow::Result<()> { + validate_config(config, user).await + } + + async fn post_create( + created: &Resource, + _update: &mut Update, + ) -> anyhow::Result<()> { + if !created.config.server_id.is_empty() { + let server = + resource::get::(&created.config.server_id).await?; + 
update_cache_for_server(&server).await; + } + Ok(()) + } + + // UPDATE + + fn update_operation() -> Operation { + Operation::UpdateStack + } + + async fn validate_update_config( + _id: &str, + config: &mut Self::PartialConfig, + user: &User, + ) -> anyhow::Result<()> { + validate_config(config, user).await + } + + async fn post_update( + updated: &Resource, + _update: &mut Update, + ) -> anyhow::Result<()> { + if !updated.config.server_id.is_empty() { + let server = + resource::get::(&updated.config.server_id).await?; + update_cache_for_server(&server).await; + } + Ok(()) + } + + // DELETE + + fn delete_operation() -> Operation { + Operation::DeleteStack + } + + async fn pre_delete( + stack: &Resource, + update: &mut Update, + ) -> anyhow::Result<()> { + // If it is Up, it should be taken down + let state = get_stack_state(stack) + .await + .context("failed to get stack state")?; + if matches!(state, StackState::Down | StackState::Unknown) { + return Ok(()); + } + // stack needs to be destroyed + let server = + match super::get::(&stack.config.server_id).await { + Ok(server) => server, + Err(e) => { + update.push_error_log( + "destroy stack", + format_serror( + &e.context(format!( + "failed to retrieve server at {} from db.", + stack.config.server_id + )) + .into(), + ), + ); + return Ok(()); + } + }; + + if !server.config.enabled { + // Don't need to + update.push_simple_log( + "destroy stack", + "skipping stack destroy, server is disabled.", + ); + return Ok(()); + } + + let periphery = match periphery_client(&server) { + Ok(periphery) => periphery, + Err(e) => { + // This case won't ever happen, as periphery_client only fallible if the server is disabled. 
+ // Leaving it for completeness sake + update.push_error_log( + "destroy stack", + format_serror( + &e.context("failed to get periphery client").into(), + ), + ); + return Ok(()); + } + }; + + match periphery + .request(ComposeExecution { + project: stack.project_name(false), + command: String::from("down --remove-orphans"), + }) + .await + { + Ok(log) => update.logs.push(log), + Err(e) => update.push_simple_log( + "Failed to destroy stack", + format_serror( + &e.context( + "failed to destroy stack on periphery server before delete", + ) + .into(), + ), + ), + }; + + Ok(()) + } + + async fn post_delete( + _resource: &Resource, + _update: &mut Update, + ) -> anyhow::Result<()> { + Ok(()) + } +} + +#[instrument(skip(user))] +async fn validate_config( + config: &mut PartialStackConfig, + user: &User, +) -> anyhow::Result<()> { + if let Some(server_id) = &config.server_id { + if !server_id.is_empty() { + let server = get_check_permissions::(server_id, user, PermissionLevel::Write) + .await + .context("cannot create stack on this server. 
user must have update permissions on the server to perform this action.")?; + // in case it comes in as name + config.server_id = Some(server.id); + } + } + Ok(()) +} + +// pub fn spawn_resource_sync_state_refresh_loop() { +// tokio::spawn(async move { +// loop { +// refresh_resource_sync_state_cache().await; +// tokio::time::sleep(Duration::from_secs(60)).await; +// } +// }); +// } + +// pub async fn refresh_resource_sync_state_cache() { +// let _ = async { +// let resource_syncs = +// find_collect(&db_client().await.resource_syncs, None, None) +// .await +// .context("failed to get resource_syncs from db")?; +// let cache = resource_sync_state_cache(); +// for resource_sync in resource_syncs { +// let state = +// get_resource_sync_state_from_db(&resource_sync.id).await; +// cache.insert(resource_sync.id, state).await; +// } +// anyhow::Ok(()) +// } +// .await +// .inspect_err(|e| { +// error!("failed to refresh resource_sync state cache | {e:#}") +// }); +// } + +// async fn get_resource_sync_state( +// id: &String, +// data: &PendingSyncUpdatesData, +// ) -> StackState { +// if let Some(state) = action_states() +// .resource_sync +// .get(id) +// .await +// .and_then(|s| { +// s.get() +// .map(|s| { +// if s.syncing { +// Some(StackState::Syncing) +// } else { +// None +// } +// }) +// .ok() +// }) +// .flatten() +// { +// return state; +// } +// let data = match data { +// PendingSyncUpdatesData::Err(_) => return StackState::Failed, +// PendingSyncUpdatesData::Ok(data) => data, +// }; +// if !data.no_updates() { +// return StackState::Pending; +// } +// resource_sync_state_cache() +// .get(id) +// .await +// .unwrap_or_default() +// } + +// async fn get_resource_sync_state_from_db(id: &str) -> StackState { +// async { +// let state = db_client() +// .await +// .updates +// .find_one(doc! { +// "target.type": "Stack", +// "target.id": id, +// "operation": "RunSync" +// }) +// .with_options( +// FindOneOptions::builder() +// .sort(doc! 
{ "start_ts": -1 }) +// .build(), +// ) +// .await? +// .map(|u| { +// if u.success { +// StackState::Ok +// } else { +// StackState::Failed +// } +// }) +// .unwrap_or(StackState::Ok); +// anyhow::Ok(state) +// } +// .await +// .inspect_err(|e| { +// warn!( +// "failed to get resource sync state from db for {id} | {e:#}" +// ) +// }) +// .unwrap_or(StackState::Unknown) +// } diff --git a/bin/core/src/state.rs b/bin/core/src/state.rs index 1bab3acb9..73fde28c4 100644 --- a/bin/core/src/state.rs +++ b/bin/core/src/state.rs @@ -3,13 +3,14 @@ use std::{ sync::{Arc, OnceLock}, }; -use anyhow::{anyhow, Context}; +use anyhow::Context; use monitor_client::entities::{ build::BuildState, config::core::{CoreConfig, GithubWebhookAppConfig}, deployment::DeploymentState, procedure::ProcedureState, repo::RepoState, + stack::StackState, sync::ResourceSyncState, }; use octorust::auth::{ @@ -24,7 +25,7 @@ use crate::{ helpers::{action_state::ActionStates, cache::Cache}, monitor::{ CachedDeploymentStatus, CachedRepoStatus, CachedServerStatus, - History, + CachedStackStatus, History, }, }; @@ -34,16 +35,29 @@ pub async fn db_client() -> &'static DbClient { static DB_CLIENT: OnceCell = OnceCell::const_new(); DB_CLIENT .get_or_init(|| async { - DbClient::new(&core_config().mongo) + match DbClient::new(&core_config().mongo) .await - .expect("failed to initialize mongo client") + .context("failed to initialize mongo client") + { + Ok(client) => client, + Err(e) => { + error!("{e:#}"); + panic!("Exiting"); + } + } }) .await } pub fn jwt_client() -> &'static JwtClient { static JWT_CLIENT: OnceLock = OnceLock::new(); - JWT_CLIENT.get_or_init(|| JwtClient::new(core_config())) + JWT_CLIENT.get_or_init(|| match JwtClient::new(core_config()) { + Ok(client) => client, + Err(e) => { + error!("failed to initialialize JwtClient | {e:#}"); + panic!("Exiting"); + } + }) } pub fn github_client( @@ -66,20 +80,29 @@ pub fn github_client( if *app_id == 0 || installations.is_empty() { return None; } - let 
private_key = std::fs::read(pk_path) - .context("github webhook app | failed to load private key") - .unwrap(); + let private_key = match std::fs::read(pk_path).with_context(|| format!("github webhook app | failed to load private key at {pk_path}")) { + Ok(key) => key, + Err(e) => { + error!("{e:#}"); + return None; + } + }; - let private_key = nom_pem::decode_block(&private_key) - .map_err(|e| anyhow!("{e:?}")) - .context("github webhook app | failed to decode private key") - .unwrap(); + let private_key = match nom_pem::decode_block(&private_key) { + Ok(key) => key, + Err(e) => { + error!("github webhook app | failed to decode private key at {pk_path} | {e:?}"); + return None; + } + }; - let jwt = JWTCredentials::new(*app_id, private_key.data) - .context( - "github webhook app | failed to make github JWTCredentials", - ) - .unwrap(); + let jwt = match JWTCredentials::new(*app_id, private_key.data).context("failed to initialize github JWTCredentials") { + Ok(jwt) => jwt, + Err(e) => { + error!("github webhook app | failed to make github JWTCredentials | pk path: {pk_path} | {e:#}"); + return None + } + }; let mut clients = HashMap::with_capacity(installations.capacity()); @@ -89,12 +112,16 @@ pub fn github_client( installation.id, jwt.clone(), ); - let client = octorust::Client::new( + let client = match octorust::Client::new( "github-app", Credentials::InstallationToken(token_generator), - ) - .context("failed to initialize github client") - .unwrap(); + ).with_context(|| format!("failed to initialize github webhook client for installation {}", installation.id)) { + Ok(client) => client, + Err(e) => { + error!("{e:#}"); + continue; + } + }; clients.insert(installation.namespace.to_string(), client); } @@ -119,6 +146,15 @@ pub fn deployment_status_cache() -> &'static DeploymentStatusCache { DEPLOYMENT_STATUS_CACHE.get_or_init(Default::default) } +pub type StackStatusCache = + Cache>>; + +pub fn stack_status_cache() -> &'static StackStatusCache { + static 
STACK_STATUS_CACHE: OnceLock = + OnceLock::new(); + STACK_STATUS_CACHE.get_or_init(Default::default) +} + pub type ServerStatusCache = Cache>; pub fn server_status_cache() -> &'static ServerStatusCache { diff --git a/bin/core/src/ws.rs b/bin/core/src/ws.rs index 61c9c970a..835567d04 100644 --- a/bin/core/src/ws.rs +++ b/bin/core/src/ws.rs @@ -25,7 +25,7 @@ use crate::{ auth::{auth_api_key_check_enabled, auth_jwt_check_enabled}, db::DbClient, helpers::{ - channel::update_channel, query::get_user_permission_on_resource, + channel::update_channel, query::get_user_permission_on_target, }, state::db_client, }; @@ -206,12 +206,13 @@ async fn user_can_see_update( if user.admin { return Ok(()); } - let (variant, id) = update_target.extract_variant_id(); let permissions = - get_user_permission_on_resource(user, variant, id).await?; + get_user_permission_on_target(user, update_target).await?; if permissions > PermissionLevel::None { Ok(()) } else { - Err(anyhow!("user does not have permissions on {variant} {id}")) + Err(anyhow!( + "user does not have permissions on {update_target:?}" + )) } } diff --git a/bin/migrator/Cargo.toml b/bin/migrator/Cargo.toml index 5093043d8..a0b27016f 100644 --- a/bin/migrator/Cargo.toml +++ b/bin/migrator/Cargo.toml @@ -17,7 +17,7 @@ mungos.workspace = true # tokio.workspace = true anyhow.workspace = true -dotenv.workspace = true +dotenvy.workspace = true envy.workspace = true serde.workspace = true tracing.workspace = true diff --git a/bin/migrator/src/legacy/v0/build.rs b/bin/migrator/src/legacy/v0/build.rs index 954aef92d..5b2531045 100644 --- a/bin/migrator/src/legacy/v0/build.rs +++ b/bin/migrator/src/legacy/v0/build.rs @@ -202,11 +202,18 @@ impl TryFrom for monitor_client::entities::build::Build { tags: Vec::new(), info: BuildInfo { last_built_at: unix_from_monitor_ts(&value.last_built_at)?, + built_hash: None, + built_message: None, + latest_hash: None, + latest_message: None, }, + base_permission: Default::default(), config: BuildConfig 
{ builder_id: String::new(), skip_secret_interp: value.skip_secret_interp, version: value.version.into(), + image_name: Default::default(), + image_tag: Default::default(), git_provider: String::from("github.com"), git_https: true, repo: value.repo.unwrap_or_default(), diff --git a/bin/migrator/src/legacy/v0/deployment.rs b/bin/migrator/src/legacy/v0/deployment.rs index b902ba993..c7812a8db 100644 --- a/bin/migrator/src/legacy/v0/deployment.rs +++ b/bin/migrator/src/legacy/v0/deployment.rs @@ -331,11 +331,7 @@ impl TryFrom id: value.id, name: value.name, description: value.description, - // permissions: value - // .permissions - // .into_iter() - // .map(|(id, p)| (id, p.into())) - // .collect(), + base_permission: Default::default(), updated_at: unix_from_monitor_ts(&value.updated_at)?, tags: Vec::new(), info: (), diff --git a/bin/migrator/src/legacy/v0/server.rs b/bin/migrator/src/legacy/v0/server.rs index e530a2f7d..cfa399df6 100644 --- a/bin/migrator/src/legacy/v0/server.rs +++ b/bin/migrator/src/legacy/v0/server.rs @@ -293,11 +293,7 @@ impl TryFrom for monitor_client::entities::server::Server { id: value.id, name: value.name, description: value.description, - // permissions: value - // .permissions - // .into_iter() - // .map(|(id, p)| (id, p.into())) - // .collect(), + base_permission: Default::default(), updated_at: unix_from_monitor_ts(&value.updated_at)?, tags: Vec::new(), info: (), diff --git a/bin/migrator/src/legacy/v0/update.rs b/bin/migrator/src/legacy/v0/update.rs index d62915a52..8aa0e367c 100644 --- a/bin/migrator/src/legacy/v0/update.rs +++ b/bin/migrator/src/legacy/v0/update.rs @@ -48,6 +48,7 @@ impl TryFrom for monitor_client::entities::update::Update { .and_then(|ts| unix_from_monitor_ts(&ts).ok()), status: value.status.into(), version: value.version.map(|v| v.into()).unwrap_or_default(), + commit_hash: Default::default(), other_data: Default::default(), }; Ok(update) diff --git a/bin/migrator/src/legacy/v1_11/build.rs 
b/bin/migrator/src/legacy/v1_11/build.rs index 043aabec6..3c9fd9382 100644 --- a/bin/migrator/src/legacy/v1_11/build.rs +++ b/bin/migrator/src/legacy/v1_11/build.rs @@ -18,8 +18,13 @@ impl From for monitor_client::entities::build::Build { tags: value.tags, info: monitor_client::entities::build::BuildInfo { last_built_at: value.info.last_built_at, + built_hash: None, + built_message: None, + latest_hash: None, + latest_message: None, }, config: value.config.into(), + base_permission: Default::default(), } } } @@ -134,6 +139,8 @@ impl From minor: value.version.minor, patch: value.version.patch, }, + image_name: Default::default(), + image_tag: Default::default(), git_provider: String::from("github.com"), git_https: true, repo: value.repo, diff --git a/bin/migrator/src/legacy/v1_11/deployment.rs b/bin/migrator/src/legacy/v1_11/deployment.rs index 0b1345c8d..8dd5723c0 100644 --- a/bin/migrator/src/legacy/v1_11/deployment.rs +++ b/bin/migrator/src/legacy/v1_11/deployment.rs @@ -24,6 +24,7 @@ impl From tags: value.tags, info: (), config: value.config.into(), + base_permission: Default::default(), } } } diff --git a/bin/migrator/src/legacy/v1_6/build.rs b/bin/migrator/src/legacy/v1_6/build.rs index 4d20667c2..3ec98d94b 100644 --- a/bin/migrator/src/legacy/v1_6/build.rs +++ b/bin/migrator/src/legacy/v1_6/build.rs @@ -20,7 +20,12 @@ impl From for monitor_client::entities::build::Build { tags: value.tags, info: monitor_client::entities::build::BuildInfo { last_built_at: value.info.last_built_at, + built_hash: None, + built_message: None, + latest_hash: None, + latest_message: None, }, + base_permission: Default::default(), config: value.config.into(), } } @@ -119,6 +124,8 @@ impl From minor: value.version.minor, patch: value.version.patch, }, + image_name: Default::default(), + image_tag: Default::default(), git_provider: String::from("github.com"), git_https: true, repo: value.repo, diff --git a/bin/migrator/src/legacy/v1_6/deployment.rs 
b/bin/migrator/src/legacy/v1_6/deployment.rs index 2c39db932..c398853f1 100644 --- a/bin/migrator/src/legacy/v1_6/deployment.rs +++ b/bin/migrator/src/legacy/v1_6/deployment.rs @@ -20,6 +20,7 @@ impl From tags: value.tags, info: value.info, config: value.config.into(), + base_permission: Default::default(), } } } diff --git a/bin/migrator/src/main.rs b/bin/migrator/src/main.rs index 8ce34004a..66fa429ca 100644 --- a/bin/migrator/src/main.rs +++ b/bin/migrator/src/main.rs @@ -35,7 +35,7 @@ struct Env { #[tokio::main] async fn main() -> anyhow::Result<()> { - dotenv::dotenv().ok(); + dotenvy::dotenv().ok(); logger::init(&Default::default())?; info!("starting migrator"); diff --git a/bin/periphery/Cargo.toml b/bin/periphery/Cargo.toml index e91c20525..7fc151d45 100644 --- a/bin/periphery/Cargo.toml +++ b/bin/periphery/Cargo.toml @@ -32,14 +32,16 @@ svi.workspace = true # external axum-extra.workspace = true serde_json.workspace = true +serde_yaml.workspace = true +futures.workspace = true tracing.workspace = true bollard.workspace = true sysinfo.workspace = true -dotenv.workspace = true +dotenvy.workspace = true anyhow.workspace = true tokio.workspace = true serde.workspace = true axum.workspace = true -envy.workspace = true clap.workspace = true +envy.workspace = true uuid.workspace = true diff --git a/bin/periphery/src/api/build.rs b/bin/periphery/src/api/build.rs index 05e33d1d5..33f0a27f5 100644 --- a/bin/periphery/src/api/build.rs +++ b/bin/periphery/src/api/build.rs @@ -14,8 +14,8 @@ use resolver_api::Resolve; use crate::{ config::periphery_config, - docker::docker_client, - helpers::{docker_login, parse_extra_args, parse_labels}, + docker::{docker_client, docker_login}, + helpers::{parse_extra_args, parse_labels}, State, }; @@ -27,6 +27,7 @@ impl Resolve for State { build, aws_ecr, registry_token, + additional_tags, replacers: core_replacers, }: build::Build, _: (), @@ -36,6 +37,7 @@ impl Resolve for State { config: BuildConfig { version, + image_tag, 
skip_secret_interp, build_path, dockerfile_path, @@ -91,7 +93,8 @@ impl Resolve for State { let labels = parse_labels(labels); let extra_args = parse_extra_args(extra_args); let buildx = if *use_buildx { " buildx" } else { "" }; - let image_tags = image_tags(&image_name, version); + let image_tags = + image_tags(&image_name, image_tag, version, &additional_tags); let push_command = should_push .then(|| { format!(" && docker image push --all-tags {image_name}") @@ -139,10 +142,25 @@ impl Resolve for State { } } -fn image_tags(image_name: &str, version: &Version) -> String { +fn image_tags( + image_name: &str, + custom_tag: &str, + version: &Version, + additional: &[String], +) -> String { let Version { major, minor, .. } = version; + let custom_tag = if custom_tag.is_empty() { + String::new() + } else { + format!("-{custom_tag}") + }; + let additional = additional + .iter() + .map(|tag| format!(" -t {image_name}:{tag}{custom_tag}")) + .collect::>() + .join(""); format!( - " -t {image_name}:latest -t {image_name}:{version} -t {image_name}:{major}.{minor} -t {image_name}:{major}", + " -t {image_name}:latest{custom_tag} -t {image_name}:{version}{custom_tag} -t {image_name}:{major}.{minor}{custom_tag} -t {image_name}:{major}{custom_tag}{additional}", ) } diff --git a/bin/periphery/src/api/compose.rs b/bin/periphery/src/api/compose.rs new file mode 100644 index 000000000..dbd1a3693 --- /dev/null +++ b/bin/periphery/src/api/compose.rs @@ -0,0 +1,168 @@ +use anyhow::{anyhow, Context}; +use command::run_monitor_command; +use formatting::format_serror; +use monitor_client::entities::{stack::ComposeProject, update::Log}; +use periphery_client::api::compose::*; +use resolver_api::Resolve; +use serde::{Deserialize, Serialize}; + +use crate::{ + compose::{compose_up, docker_compose}, + helpers::log_grep, + State, +}; + +impl Resolve for State { + #[instrument(name = "ComposeInfo", level = "debug", skip(self))] + async fn resolve( + &self, + ListComposeProjects {}: 
ListComposeProjects, + _: (), + ) -> anyhow::Result> { + let docker_compose = docker_compose(); + let res = run_monitor_command( + "list projects", + format!("{docker_compose} ls --format json"), + ) + .await; + + if !res.success { + return Err(anyhow!("{}", res.combined()).context(format!( + "failed to list compose projects using {docker_compose} ls" + ))); + } + + let res = + serde_json::from_str::>(&res.stdout) + .with_context(|| res.stdout.clone()) + .with_context(|| { + format!( + "failed to parse '{docker_compose} ls' response to json" + ) + })? + .into_iter() + .filter(|item| !item.name.is_empty()) + .map(|item| ComposeProject { + name: item.name, + status: item.status, + compose_files: item + .config_files + .split(',') + .map(str::to_string) + .collect(), + }) + .collect(); + + Ok(res) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DockerComposeLsItem { + #[serde(default, alias = "Name")] + pub name: String, + #[serde(alias = "Status")] + pub status: Option, + /// Comma seperated list of paths + #[serde(default, alias = "ConfigFiles")] + pub config_files: String, +} + +// + +impl Resolve for State { + #[instrument( + name = "GetComposeServiceLog", + level = "debug", + skip(self) + )] + async fn resolve( + &self, + GetComposeServiceLog { + project, + service, + tail, + }: GetComposeServiceLog, + _: (), + ) -> anyhow::Result { + let docker_compose = docker_compose(); + let command = format!( + "{docker_compose} -p {project} logs {service} --tail {tail}" + ); + Ok(run_monitor_command("get stack log", command).await) + } +} + +impl Resolve for State { + #[instrument( + name = "GetComposeServiceLogSearch", + level = "debug", + skip(self) + )] + async fn resolve( + &self, + GetComposeServiceLogSearch { + project, + service, + terms, + combinator, + invert, + }: GetComposeServiceLogSearch, + _: (), + ) -> anyhow::Result { + let docker_compose = docker_compose(); + let grep = log_grep(&terms, combinator, invert); + let command = 
format!("{docker_compose} -p {project} logs {service} --tail 5000 2>&1 | {grep}"); + Ok(run_monitor_command("get stack log grep", command).await) + } +} + +// + +impl Resolve for State { + #[instrument( + name = "ComposeUp", + skip(self, git_token, registry_token) + )] + async fn resolve( + &self, + ComposeUp { + stack, + service, + git_token, + registry_token, + }: ComposeUp, + _: (), + ) -> anyhow::Result { + let mut res = ComposeUpResponse::default(); + if let Err(e) = + compose_up(stack, service, git_token, registry_token, &mut res) + .await + { + res.logs.push(Log::error( + "compose up failed", + format_serror(&e.into()), + )); + }; + Ok(res) + } +} + +// + +impl Resolve for State { + #[instrument(name = "ComposeExecution", skip(self))] + async fn resolve( + &self, + ComposeExecution { project, command }: ComposeExecution, + _: (), + ) -> anyhow::Result { + let docker_compose = docker_compose(); + let log = run_monitor_command( + "compose command", + format!("{docker_compose} -p {project} {command}"), + ) + .await; + Ok(log) + } +} diff --git a/bin/periphery/src/api/container.rs b/bin/periphery/src/api/container.rs index 8a194925f..5e3f9b3db 100644 --- a/bin/periphery/src/api/container.rs +++ b/bin/periphery/src/api/container.rs @@ -1,28 +1,26 @@ use anyhow::{anyhow, Context}; use command::run_monitor_command; -use formatting::format_serror; +use futures::future::join_all; use monitor_client::entities::{ - build::{ImageRegistry, StandardRegistryConfig}, deployment::{ - extract_registry_domain, ContainerSummary, Conversion, - Deployment, DeploymentConfig, DeploymentImage, - DockerContainerStats, RestartMode, TerminationSignal, + ContainerSummary, DeploymentState, DockerContainerStats, }, to_monitor_name, update::Log, - EnvironmentVar, NoData, SearchCombinator, }; use periphery_client::api::container::*; use resolver_api::Resolve; -use run_command::async_run_command; use crate::{ - config::periphery_config, - docker::docker_client, - helpers::{docker_login, 
parse_extra_args, parse_labels}, + docker::{container_stats, docker_client, stop_container_command}, + helpers::log_grep, State, }; +// ====== +// READ +// ====== + // impl Resolve for State { @@ -72,18 +70,7 @@ impl Resolve for State { }: GetContainerLogSearch, _: (), ) -> anyhow::Result { - let maybe_invert = invert.then_some(" -v").unwrap_or_default(); - let grep = match combinator { - SearchCombinator::Or => { - format!("grep{maybe_invert} -E '{}'", terms.join("|")) - } - SearchCombinator::And => { - format!( - "grep{maybe_invert} -P '^(?=.*{})'", - terms.join(")(?=.*") - ) - } - }; + let grep = log_grep(&terms, combinator, invert); let command = format!("docker logs {name} --tail 5000 2>&1 | {grep}"); Ok(run_monitor_command("get container log grep", command).await) @@ -127,7 +114,9 @@ impl Resolve for State { } } -// +// ========= +// ACTIONS +// ========= impl Resolve for State { #[instrument(name = "StartContainer", skip(self))] @@ -148,6 +137,61 @@ impl Resolve for State { // +impl Resolve for State { + #[instrument(name = "RestartContainer", skip(self))] + async fn resolve( + &self, + RestartContainer { name }: RestartContainer, + _: (), + ) -> anyhow::Result { + Ok( + run_monitor_command( + "docker restart", + format!("docker restart {name}"), + ) + .await, + ) + } +} + +// + +impl Resolve for State { + #[instrument(name = "PauseContainer", skip(self))] + async fn resolve( + &self, + PauseContainer { name }: PauseContainer, + _: (), + ) -> anyhow::Result { + Ok( + run_monitor_command( + "docker pause", + format!("docker pause {name}"), + ) + .await, + ) + } +} + +impl Resolve for State { + #[instrument(name = "UnpauseContainer", skip(self))] + async fn resolve( + &self, + UnpauseContainer { name }: UnpauseContainer, + _: (), + ) -> anyhow::Result { + Ok( + run_monitor_command( + "docker unpause", + format!("docker unpause {name}"), + ) + .await, + ) + } +} + +// + impl Resolve for State { #[instrument(name = "StopContainer", skip(self))] async fn resolve( 
@@ -177,6 +221,38 @@ impl Resolve for State { // +impl Resolve for State { + #[instrument(name = "StopAllContainers", skip(self))] + async fn resolve( + &self, + StopAllContainers {}: StopAllContainers, + _: (), + ) -> anyhow::Result> { + let containers = docker_client() + .list_containers() + .await + .context("failed to list all containers on host")?; + let futures = containers.iter().filter_map( + |ContainerSummary { name, state, .. }| { + // only stop running containers. if not running, early exit. + if !matches!(state, DeploymentState::Running) { + return None; + } + Some(async move { + run_monitor_command( + &format!("docker stop {name}"), + stop_container_command(name, None, None), + ) + .await + }) + }, + ); + Ok(join_all(futures).await) + } +} + +// + impl Resolve for State { #[instrument(name = "RemoveContainer", skip(self))] async fn resolve( @@ -240,233 +316,3 @@ impl Resolve for State { Ok(run_monitor_command("prune containers", command).await) } } - -// - -impl Resolve for State { - #[instrument( - name = "Deploy", - skip(self, core_replacers, aws_ecr, registry_token) - )] - async fn resolve( - &self, - Deploy { - deployment, - stop_signal, - stop_time, - registry_token, - replacers: core_replacers, - aws_ecr, - }: Deploy, - _: (), - ) -> anyhow::Result { - let image = if let DeploymentImage::Image { image } = - &deployment.config.image - { - if image.is_empty() { - return Ok(Log::error( - "get image", - String::from("deployment does not have image attached"), - )); - } - image - } else { - return Ok(Log::error( - "get image", - String::from("deployment does not have image attached"), - )); - }; - - let image_registry = if aws_ecr.is_some() { - ImageRegistry::AwsEcr(String::new()) - } else if deployment.config.image_registry_account.is_empty() { - ImageRegistry::None(NoData {}) - } else { - ImageRegistry::Standard(StandardRegistryConfig { - account: deployment.config.image_registry_account.clone(), - domain: extract_registry_domain(image)?, - 
..Default::default() - }) - }; - - if let Err(e) = docker_login( - &image_registry, - registry_token.as_deref(), - aws_ecr.as_ref(), - ) - .await - { - return Ok(Log::error( - "docker login", - format_serror( - &e.context("failed to login to docker registry").into(), - ), - )); - } - - let _ = pull_image(image).await; - debug!("image pulled"); - let _ = State - .resolve( - RemoveContainer { - name: deployment.name.clone(), - signal: stop_signal, - time: stop_time, - }, - (), - ) - .await; - debug!("container stopped and removed"); - - let command = docker_run_command(&deployment, image); - debug!("docker run command: {command}"); - - if deployment.config.skip_secret_interp { - Ok(run_monitor_command("docker run", command).await) - } else { - let command = svi::interpolate_variables( - &command, - &periphery_config().secrets, - svi::Interpolator::DoubleBrackets, - true, - ) - .context( - "failed to interpolate secrets into docker run command", - ); - if let Err(e) = command { - return Ok(Log::error("docker run", format!("{e:?}"))); - } - let (command, mut replacers) = command.unwrap(); - replacers.extend(core_replacers); - let mut log = run_monitor_command("docker run", command).await; - log.command = svi::replace_in_string(&log.command, &replacers); - log.stdout = svi::replace_in_string(&log.stdout, &replacers); - log.stderr = svi::replace_in_string(&log.stderr, &replacers); - Ok(log) - } - } -} - -// - -fn docker_run_command( - Deployment { - name, - config: - DeploymentConfig { - volumes, - ports, - network, - command, - restart, - environment, - labels, - extra_args, - .. - }, - .. 
- }: &Deployment, - image: &str, -) -> String { - let name = to_monitor_name(name); - let ports = parse_conversions(ports, "-p"); - let volumes = volumes.to_owned(); - let volumes = parse_conversions(&volumes, "-v"); - let network = parse_network(network); - let restart = parse_restart(restart); - let environment = parse_environment(environment); - let labels = parse_labels(labels); - let command = parse_command(command); - let extra_args = parse_extra_args(extra_args); - format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}") -} - -fn parse_conversions( - conversions: &[Conversion], - flag: &str, -) -> String { - conversions - .iter() - .map(|p| format!(" {flag} {}:{}", p.local, p.container)) - .collect::>() - .join("") -} - -fn parse_environment(environment: &[EnvironmentVar]) -> String { - environment - .iter() - .map(|p| format!(" --env {}=\"{}\"", p.variable, p.value)) - .collect::>() - .join("") -} - -fn parse_network(network: &str) -> String { - format!(" --network {network}") -} - -fn parse_restart(restart: &RestartMode) -> String { - let restart = match restart { - RestartMode::OnFailure => "on-failure:10".to_string(), - _ => restart.to_string(), - }; - format!(" --restart {restart}") -} - -fn parse_command(command: &str) -> String { - if command.is_empty() { - String::new() - } else { - format!(" {command}") - } -} - -// - -async fn container_stats( - container_name: Option, -) -> anyhow::Result> { - let format = "--format \"{{ json . 
}}\""; - let container_name = match container_name { - Some(name) => format!(" {name}"), - None => "".to_string(), - }; - let command = - format!("docker stats{container_name} --no-stream {format}"); - let output = async_run_command(&command).await; - if output.success() { - let res = output - .stdout - .split('\n') - .filter(|e| !e.is_empty()) - .map(|e| { - let parsed = serde_json::from_str(e) - .context(format!("failed at parsing entry {e}"))?; - Ok(parsed) - }) - .collect::>>()?; - Ok(res) - } else { - Err(anyhow!("{}", output.stderr.replace('\n', ""))) - } -} - -#[instrument] -async fn pull_image(image: &str) -> Log { - let command = format!("docker pull {image}"); - run_monitor_command("docker pull", command).await -} - -fn stop_container_command( - container_name: &str, - signal: Option, - time: Option, -) -> String { - let container_name = to_monitor_name(container_name); - let signal = signal - .map(|signal| format!(" --signal {signal}")) - .unwrap_or_default(); - let time = time - .map(|time| format!(" --time {time}")) - .unwrap_or_default(); - format!("docker stop{signal}{time} {container_name}") -} diff --git a/bin/periphery/src/api/deploy.rs b/bin/periphery/src/api/deploy.rs new file mode 100644 index 000000000..8da84cf39 --- /dev/null +++ b/bin/periphery/src/api/deploy.rs @@ -0,0 +1,199 @@ +use anyhow::Context; +use command::run_monitor_command; +use formatting::format_serror; +use monitor_client::entities::{ + build::{ImageRegistry, StandardRegistryConfig}, + deployment::{ + extract_registry_domain, Conversion, Deployment, + DeploymentConfig, DeploymentImage, RestartMode, + }, + to_monitor_name, + update::Log, + EnvironmentVar, NoData, +}; +use periphery_client::api::container::{Deploy, RemoveContainer}; +use resolver_api::Resolve; + +use crate::{ + config::periphery_config, + docker::{docker_login, pull_image}, + helpers::{parse_extra_args, parse_labels}, + State, +}; + +impl Resolve for State { + #[instrument( + name = "Deploy", + skip(self, 
core_replacers, aws_ecr, registry_token) + )] + async fn resolve( + &self, + Deploy { + deployment, + stop_signal, + stop_time, + registry_token, + replacers: core_replacers, + aws_ecr, + }: Deploy, + _: (), + ) -> anyhow::Result { + let image = if let DeploymentImage::Image { image } = + &deployment.config.image + { + if image.is_empty() { + return Ok(Log::error( + "get image", + String::from("deployment does not have image attached"), + )); + } + image + } else { + return Ok(Log::error( + "get image", + String::from("deployment does not have image attached"), + )); + }; + + let image_registry = if aws_ecr.is_some() { + ImageRegistry::AwsEcr(String::new()) + } else if deployment.config.image_registry_account.is_empty() { + ImageRegistry::None(NoData {}) + } else { + ImageRegistry::Standard(StandardRegistryConfig { + account: deployment.config.image_registry_account.clone(), + domain: extract_registry_domain(image)?, + ..Default::default() + }) + }; + + if let Err(e) = docker_login( + &image_registry, + registry_token.as_deref(), + aws_ecr.as_ref(), + ) + .await + { + return Ok(Log::error( + "docker login", + format_serror( + &e.context("failed to login to docker registry").into(), + ), + )); + } + + let _ = pull_image(image).await; + debug!("image pulled"); + let _ = State + .resolve( + RemoveContainer { + name: deployment.name.clone(), + signal: stop_signal, + time: stop_time, + }, + (), + ) + .await; + debug!("container stopped and removed"); + + let command = docker_run_command(&deployment, image); + debug!("docker run command: {command}"); + + if deployment.config.skip_secret_interp { + Ok(run_monitor_command("docker run", command).await) + } else { + let command = svi::interpolate_variables( + &command, + &periphery_config().secrets, + svi::Interpolator::DoubleBrackets, + true, + ) + .context( + "failed to interpolate secrets into docker run command", + ); + let (command, mut replacers) = match command { + Ok(res) => res, + Err(e) => { + return 
Ok(Log::error("docker run", format!("{e:?}"))); + } + }; + replacers.extend(core_replacers); + let mut log = run_monitor_command("docker run", command).await; + log.command = svi::replace_in_string(&log.command, &replacers); + log.stdout = svi::replace_in_string(&log.stdout, &replacers); + log.stderr = svi::replace_in_string(&log.stderr, &replacers); + Ok(log) + } + } +} + +fn docker_run_command( + Deployment { + name, + config: + DeploymentConfig { + volumes, + ports, + network, + command, + restart, + environment, + labels, + extra_args, + .. + }, + .. + }: &Deployment, + image: &str, +) -> String { + let name = to_monitor_name(name); + let ports = parse_conversions(ports, "-p"); + let volumes = volumes.to_owned(); + let volumes = parse_conversions(&volumes, "-v"); + let network = parse_network(network); + let restart = parse_restart(restart); + let environment = parse_environment(environment); + let labels = parse_labels(labels); + let command = parse_command(command); + let extra_args = parse_extra_args(extra_args); + format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}") +} + +fn parse_conversions( + conversions: &[Conversion], + flag: &str, +) -> String { + conversions + .iter() + .map(|p| format!(" {flag} {}:{}", p.local, p.container)) + .collect::>() + .join("") +} + +fn parse_environment(environment: &[EnvironmentVar]) -> String { + environment + .iter() + .map(|p| format!(" --env {}=\"{}\"", p.variable, p.value)) + .collect::>() + .join("") +} + +fn parse_network(network: &str) -> String { + format!(" --network {network}") +} + +fn parse_restart(restart: &RestartMode) -> String { + let restart = match restart { + RestartMode::OnFailure => "on-failure:10".to_string(), + _ => restart.to_string(), + }; + format!(" --restart {restart}") +} + +fn parse_command(command: &str) -> String { + if command.is_empty() { + String::new() + } else { + format!(" {command}") + } +} diff --git 
a/bin/periphery/src/api/git.rs b/bin/periphery/src/api/git.rs index 8beabf132..419b63ed7 100644 --- a/bin/periphery/src/api/git.rs +++ b/bin/periphery/src/api/git.rs @@ -4,12 +4,11 @@ use monitor_client::entities::{ }; use periphery_client::api::git::{ CloneRepo, DeleteRepo, GetLatestCommit, PullRepo, + RepoActionResponse, RepoActionResponseV1_13, }; use resolver_api::Resolve; -use crate::{ - config::periphery_config, helpers::get_git_token, State, -}; +use crate::{config::periphery_config, State}; impl Resolve for State { async fn resolve( @@ -31,9 +30,15 @@ impl Resolve for State { #[instrument(name = "CloneRepo", skip(self))] async fn resolve( &self, - CloneRepo { args, git_token }: CloneRepo, + CloneRepo { + args, + git_token, + environment, + env_file_path, + skip_secret_interp, + }: CloneRepo, _: (), - ) -> anyhow::Result> { + ) -> anyhow::Result { let CloneArgs { provider, account, .. } = &args; @@ -46,14 +51,30 @@ impl Resolve for State { } (Some(_), Some(_), Some(token)) => Some(token), (Some(account), Some(provider), None) => Some( - get_git_token(provider, account) + crate::helpers::git_token(provider, account).map(ToString::to_string) .with_context( || format!("failed to get git token from periphery config | provider: {provider} | account: {account}") - )? 
- .clone(), + )?, ), }; - git::clone(args, &periphery_config().repo_dir, token).await + git::clone( + args, + &periphery_config().repo_dir, + token, + &environment, + &env_file_path, + (!skip_secret_interp).then_some(&periphery_config().secrets), + ) + .await + .map(|(logs, commit_hash, commit_message, env_file_path)| { + RepoActionResponseV1_13 { + logs, + commit_hash, + commit_message, + env_file_path, + } + .into() + }) } } @@ -68,18 +89,32 @@ impl Resolve for State { branch, commit, on_pull, + environment, + env_file_path, + skip_secret_interp, }: PullRepo, _: (), - ) -> anyhow::Result> { + ) -> anyhow::Result { let name = to_monitor_name(&name); - Ok( + let (logs, commit_hash, commit_message, env_file_path) = git::pull( &periphery_config().repo_dir.join(name), &branch, &commit, &on_pull, + &environment, + &env_file_path, + (!skip_secret_interp).then_some(&periphery_config().secrets), ) - .await, + .await; + Ok( + RepoActionResponseV1_13 { + logs, + commit_hash, + commit_message, + env_file_path, + } + .into(), ) } } diff --git a/bin/periphery/src/api/mod.rs b/bin/periphery/src/api/mod.rs index 24ec2e7fd..00007f23e 100644 --- a/bin/periphery/src/api/mod.rs +++ b/bin/periphery/src/api/mod.rs @@ -1,13 +1,12 @@ use anyhow::Context; use command::run_monitor_command; -use monitor_client::{ - api::read::ListGitProviders, - entities::{update::Log, SystemCommand}, -}; +use futures::TryFutureExt; +use monitor_client::entities::{update::Log, SystemCommand}; use periphery_client::api::{ - build::*, container::*, git::*, network::*, stats::*, GetHealth, - GetVersion, GetVersionResponse, ListDockerRegistries, ListSecrets, - PruneSystem, RunCommand, + build::*, compose::*, container::*, git::*, network::*, stats::*, + GetDockerLists, GetDockerListsResponse, GetHealth, GetVersion, + GetVersionResponse, ListDockerRegistries, ListGitProviders, + ListSecrets, PruneSystem, RunCommand, }; use resolver_api::{derive::Resolver, Resolve, ResolveToString}; use serde::{Deserialize, 
Serialize}; @@ -17,11 +16,14 @@ use crate::{ docker_registries_response, git_providers_response, secrets_response, }, + docker::docker_client, State, }; mod build; +mod compose; mod container; +mod deploy; mod git; mod network; mod stats; @@ -52,6 +54,9 @@ pub enum PeripheryRequest { GetSystemProcesses(GetSystemProcesses), GetLatestCommit(GetLatestCommit), + // All in one + GetDockerLists(GetDockerLists), + // Docker GetContainerList(GetContainerList), GetContainerLog(GetContainerLog), @@ -62,17 +67,36 @@ pub enum PeripheryRequest { // Actions RunCommand(RunCommand), + + // Repo CloneRepo(CloneRepo), PullRepo(PullRepo), DeleteRepo(DeleteRepo), + + // Build Build(Build), PruneImages(PruneImages), + + // Container Deploy(Deploy), StartContainer(StartContainer), + RestartContainer(RestartContainer), + PauseContainer(PauseContainer), + UnpauseContainer(UnpauseContainer), StopContainer(StopContainer), + StopAllContainers(StopAllContainers), RemoveContainer(RemoveContainer), RenameContainer(RenameContainer), PruneContainers(PruneContainers), + + // Compose + ListComposeProjects(ListComposeProjects), + GetComposeServiceLog(GetComposeServiceLog), + GetComposeServiceLogSearch(GetComposeServiceLogSearch), + ComposeUp(ComposeUp), + ComposeExecution(ComposeExecution), + + // Networks CreateNetwork(CreateNetwork), DeleteNetwork(DeleteNetwork), PruneNetworks(PruneNetworks), @@ -152,6 +176,29 @@ impl ResolveToString for State { } } +impl Resolve for State { + #[instrument(name = "GetDockerLists", skip(self))] + async fn resolve( + &self, + GetDockerLists {}: GetDockerLists, + _: (), + ) -> anyhow::Result { + let docker = docker_client(); + let (containers, networks, images, projects) = tokio::join!( + docker.list_containers().map_err(Into::into), + docker.list_networks().map_err(Into::into), + docker.list_images().map_err(Into::into), + self.resolve(ListComposeProjects {}, ()).map_err(Into::into) + ); + Ok(GetDockerListsResponse { + containers, + networks, + images, + projects, + 
}) + } +} + impl Resolve for State { #[instrument(name = "RunCommand", skip(self))] async fn resolve( diff --git a/bin/periphery/src/compose.rs b/bin/periphery/src/compose.rs new file mode 100644 index 000000000..df664e506 --- /dev/null +++ b/bin/periphery/src/compose.rs @@ -0,0 +1,346 @@ +use std::path::PathBuf; + +use anyhow::{anyhow, Context}; +use command::run_monitor_command; +use formatting::format_serror; +use git::write_environment_file; +use monitor_client::entities::{ + all_logs_success, + build::{ImageRegistry, StandardRegistryConfig}, + stack::{ComposeContents, Stack}, + to_monitor_name, + update::Log, + CloneArgs, +}; +use periphery_client::api::{ + compose::ComposeUpResponse, + git::{CloneRepo, RepoActionResponseV1_13}, +}; +use resolver_api::Resolve; +use tokio::fs; + +use crate::{ + config::periphery_config, docker::docker_login, + helpers::parse_extra_args, State, +}; + +pub fn docker_compose() -> &'static str { + if periphery_config().legacy_compose_cli { + "docker-compose" + } else { + "docker compose" + } +} + +/// If Err, remember to write result to the log before return. +pub async fn compose_up( + stack: Stack, + service: Option, + git_token: Option, + registry_token: Option, + res: &mut ComposeUpResponse, +) -> anyhow::Result<()> { + // Write the stack to local disk. For repos, will first delete any existing folder to ensure fresh deploy. + // Will also set additional fields on the reponse. + // Use the env_file_path in the compose command. 
+ let env_file_path = write_stack(&stack, git_token, res) + .await + .context("failed to write / clone compose file")?; + + let root = periphery_config() + .stack_dir + .join(to_monitor_name(&stack.name)); + let run_directory = root.join(&stack.config.run_directory); + let run_directory = run_directory.canonicalize().context( + "failed to validate run directory on host after stack write (canonicalize error)", + )?; + + let file_paths = stack + .file_paths() + .iter() + .map(|path| (path, run_directory.join(path))) + .collect::>(); + + for (path, full_path) in &file_paths { + if !full_path.exists() { + res.missing_files.push(path.to_string()); + } + } + if !res.missing_files.is_empty() { + return Err(anyhow!("A compose file doesn't exist after writing stack. Ensure the run_directory and file_paths are correct.")); + } + + for (path, full_path) in &file_paths { + let file_contents = + match fs::read_to_string(&full_path).await.with_context(|| { + format!( + "failed to read compose file contents at {full_path:?}" + ) + }) { + Ok(res) => res, + Err(e) => { + let error = format_serror(&e.into()); + res + .logs + .push(Log::error("read compose file", error.clone())); + // This should only happen for repo stacks, ie remote error + res.remote_errors.push(ComposeContents { + path: path.to_string(), + contents: error, + }); + return Err(anyhow!( + "failed to read compose file at {full_path:?}, stopping run" + )); + } + }; + res.file_contents.push(ComposeContents { + path: full_path.display().to_string(), + contents: file_contents, + }); + } + + let docker_compose = docker_compose(); + let run_dir = run_directory + .canonicalize() + .context("failed to canonicalize run directory on host")?; + let run_dir = run_dir.display(); + let service_arg = service + .as_ref() + .map(|service| format!(" {service}")) + .unwrap_or_default(); + let file_args = if stack.config.file_paths.is_empty() { + String::from("compose.yaml") + } else { + stack.config.file_paths.join(" -f ") + }; + let 
last_project_name = stack.project_name(false); + let project_name = stack.project_name(true); + + // Pull images before destroying to minimize downtime. + // If this fails, do not continue. + let log = run_monitor_command( + "compose pull", + format!( + "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args} pull{service_arg}", + ), + ) + .await; + if !log.success { + res.logs.push(log); + return Err(anyhow!( + "Failed to pull required images, stopping the run." + )); + } + + // Login to the registry to pull private images, if account is set + if !stack.config.registry_account.is_empty() { + let registry = ImageRegistry::Standard(StandardRegistryConfig { + domain: stack.config.registry_provider.clone(), + account: stack.config.registry_account.clone(), + ..Default::default() + }); + docker_login(®istry, registry_token.as_deref(), None) + .await + .with_context(|| { + format!( + "domain: {} | account: {}", + stack.config.registry_provider, + stack.config.registry_account + ) + }) + .context("failed to login to image registry")?; + } + + // Take down the existing containers. + // This one tries to use the previously deployed service name, to ensure the right stack is taken down. 
+ destroy_existing_containers(&last_project_name, service, res) + .await + .context("failed to destroy existing containers")?; + + // Run compose up + let extra_args = parse_extra_args(&stack.config.extra_args); + let env_file = env_file_path + .map(|path| format!(" --env-file {}", path.display())) + .unwrap_or_default(); + let log = run_monitor_command( + "compose up", + format!( + "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} up -d{extra_args}{service_arg}", + ), + ) + .await; + res.deployed = log.success; + res.logs.push(log); + + if let Err(e) = fs::remove_dir_all(&root).await.with_context(|| { + format!("failed to clean up files after deploy | path: {root:?}") + }) { + res + .logs + .push(Log::error("clean up files", format_serror(&e.into()))) + } + + Ok(()) +} + +/// Either writes the stack file_contents to a file, or clones the repo. +/// Returns the env file path, to maybe include in command with --env-file. +async fn write_stack( + stack: &Stack, + git_token: Option, + res: &mut ComposeUpResponse, +) -> anyhow::Result> { + let root = periphery_config() + .stack_dir + .join(to_monitor_name(&stack.name)); + let run_directory = root.join(&stack.config.run_directory); + + if stack.config.file_contents.is_empty() { + // Clone the repo + if stack.config.repo.is_empty() { + // Err response will be written to return, no need to add it to log here + return Err(anyhow!("Must either input compose file contents directly or provide a repo. 
Got neither.")); + } + let mut args: CloneArgs = stack.into(); + // Set the clone destination to the one created for this run + args.destination = Some(root.display().to_string()); + + let git_token = match git_token { + Some(token) => Some(token), + None => { + if !stack.config.git_account.is_empty() { + match crate::helpers::git_token( + &stack.config.git_provider, + &stack.config.git_account, + ) { + Ok(token) => Some(token.to_string()), + Err(e) => { + let error = format_serror(&e.into()); + res + .logs + .push(Log::error("no git token", error.clone())); + res.remote_errors.push(ComposeContents { + path: Default::default(), + contents: error, + }); + return Err(anyhow!( + "failed to find required git token, stopping run" + )); + } + } + } else { + None + } + } + }; + + // Ensure directory is clear going in. + fs::remove_dir_all(&root).await.ok(); + + let RepoActionResponseV1_13 { + logs, + commit_hash, + commit_message, + env_file_path, + } = match State + .resolve( + CloneRepo { + args, + git_token, + environment: stack.config.environment.clone(), + env_file_path: stack.config.env_file_path.clone(), + skip_secret_interp: stack.config.skip_secret_interp, + }, + (), + ) + .await + { + Ok(res) => res.into(), + Err(e) => { + let error = format_serror( + &e.context("failed to clone stack repo").into(), + ); + res.logs.push(Log::error("clone stack repo", error.clone())); + res.remote_errors.push(ComposeContents { + path: Default::default(), + contents: error, + }); + return Err(anyhow!( + "failed to clone stack repo, stopping run" + )); + } + }; + + res.logs.extend(logs); + res.commit_hash = commit_hash; + res.commit_message = commit_message; + + if !all_logs_success(&res.logs) { + return Err(anyhow!("Stopped after clone failure")); + } + + Ok(env_file_path) + } else { + // Ensure run directory exists + fs::create_dir_all(&run_directory).await.with_context(|| { + format!("failed to create stack run directory at {root:?}") + })?; + let env_file_path = match 
write_environment_file( + &stack.config.environment, + &stack.config.env_file_path, + stack + .config + .skip_secret_interp + .then_some(&periphery_config().secrets), + &run_directory, + &mut res.logs, + ) + .await + { + Ok(path) => path, + Err(_) => { + return Err(anyhow!("failed to write environment file")); + } + }; + let file_path = run_directory.join( + stack + .config + .file_paths + // only need the first one, or default + .first() + .map(String::as_str) + .unwrap_or("compose.yaml"), + ); + fs::write(&file_path, &stack.config.file_contents) + .await + .with_context(|| { + format!("failed to write compose file to {file_path:?}") + })?; + + Ok(env_file_path) + } +} + +async fn destroy_existing_containers( + project: &str, + service: Option, + res: &mut ComposeUpResponse, +) -> anyhow::Result<()> { + let docker_compose = docker_compose(); + let service_arg = service + .as_ref() + .map(|service| format!(" {service}")) + .unwrap_or_default(); + let log = run_monitor_command( + "destroy container", + format!("{docker_compose} -p {project} down{service_arg}"), + ) + .await; + let success = log.success; + res.logs.push(log); + if !success { + return Err(anyhow!("Failed to bring down existing container(s) with docker compose down. 
Stopping run.")); + } + + Ok(()) +} diff --git a/bin/periphery/src/config.rs b/bin/periphery/src/config.rs index efbb25991..cef326cc5 100644 --- a/bin/periphery/src/config.rs +++ b/bin/periphery/src/config.rs @@ -4,7 +4,7 @@ use clap::Parser; use merge_config_files::parse_config_paths; use monitor_client::entities::{ config::periphery::{CliArgs, Env, PeripheryConfig}, - logger::LogLevel, + logger::{LogConfig, LogLevel}, }; pub fn periphery_config() -> &'static PeripheryConfig { @@ -14,45 +14,58 @@ pub fn periphery_config() -> &'static PeripheryConfig { let env: Env = envy::from_env() .expect("failed to parse periphery environment"); let args = CliArgs::parse(); - let mut config = parse_config_paths::( - args.config_path.unwrap_or(env.monitor_config_paths), - args.config_keyword.unwrap_or(env.monitor_config_keywords), - args - .merge_nested_config - .unwrap_or(env.monitor_merge_nested_config), - args - .extend_config_arrays - .unwrap_or(env.monitor_extend_config_arrays), - ) - .expect("failed at parsing config from paths"); + let config_paths = + args.config_path.unwrap_or(env.monitor_config_paths); + let config = if config_paths.is_empty() { + PeripheryConfig::default() + } else { + parse_config_paths::( + config_paths, + args.config_keyword.unwrap_or(env.monitor_config_keywords), + args + .merge_nested_config + .unwrap_or(env.monitor_merge_nested_config), + args + .extend_config_arrays + .unwrap_or(env.monitor_extend_config_arrays), + ) + .expect("failed at parsing config from paths") + }; - // Overrides - config.port = env.monitor_port.unwrap_or(config.port); - config.repo_dir = env.monitor_repo_dir.unwrap_or(config.repo_dir); - config.stats_polling_rate = env - .monitor_stats_polling_rate - .unwrap_or(config.stats_polling_rate); - - // logging - config.logging.level = args - .log_level - .map(LogLevel::from) - .or(env.monitor_logging_level) - .unwrap_or(config.logging.level); - config.logging.stdio = - env.monitor_logging_stdio.unwrap_or(config.logging.stdio); - 
config.logging.otlp_endpoint = env - .monitor_logging_otlp_endpoint - .or(config.logging.otlp_endpoint); - config.logging.opentelemetry_service_name = env - .monitor_logging_opentelemetry_service_name - .unwrap_or(config.logging.opentelemetry_service_name); - - config.allowed_ips = - env.monitor_allowed_ips.unwrap_or(config.allowed_ips); - config.passkeys = env.monitor_passkeys.unwrap_or(config.passkeys); - - config + PeripheryConfig { + port: env.monitor_port.unwrap_or(config.port), + repo_dir: env.monitor_repo_dir.unwrap_or(config.repo_dir), + stack_dir: env.monitor_stack_dir.unwrap_or(config.stack_dir), + stats_polling_rate: env + .monitor_stats_polling_rate + .unwrap_or(config.stats_polling_rate), + legacy_compose_cli: env + .monitor_legacy_compose_cli + .unwrap_or(config.legacy_compose_cli), + logging: LogConfig { + level: args + .log_level + .map(LogLevel::from) + .or(env.monitor_logging_level) + .unwrap_or(config.logging.level), + stdio: env + .monitor_logging_stdio + .unwrap_or(config.logging.stdio), + otlp_endpoint: env + .monitor_logging_otlp_endpoint + .or(config.logging.otlp_endpoint), + opentelemetry_service_name: env + .monitor_logging_opentelemetry_service_name + .unwrap_or(config.logging.opentelemetry_service_name), + }, + allowed_ips: env + .monitor_allowed_ips + .unwrap_or(config.allowed_ips), + passkeys: env.monitor_passkeys.unwrap_or(config.passkeys), + secrets: config.secrets, + git_providers: config.git_providers, + docker_registries: config.docker_registries, + } }) } diff --git a/bin/periphery/src/docker.rs b/bin/periphery/src/docker.rs index a3ffa9a31..0d0fe5c27 100644 --- a/bin/periphery/src/docker.rs +++ b/bin/periphery/src/docker.rs @@ -1,13 +1,21 @@ use std::sync::OnceLock; -use anyhow::Context; +use anyhow::{anyhow, Context}; use bollard::{container::ListContainersOptions, Docker}; +use command::run_monitor_command; use monitor_client::entities::{ - deployment::ContainerSummary, + build::{ImageRegistry, StandardRegistryConfig}, + 
config::core::AwsEcrConfig, + deployment::{ + ContainerSummary, DockerContainerStats, TerminationSignal, + }, server::{ docker_image::ImageSummary, docker_network::DockerNetwork, }, + to_monitor_name, + update::Log, }; +use run_command::async_run_command; pub fn docker_client() -> &'static DockerClient { static DOCKER_CLIENT: OnceLock = OnceLock::new(); @@ -39,23 +47,31 @@ impl DockerClient { })) .await? .into_iter() - .map(|s| { + .map(|container| { let info = ContainerSummary { - id: s.id.unwrap_or_default(), - name: s + id: container.id.unwrap_or_default(), + name: container .names .context("no names on container")? .pop() .context("no names on container (empty vec)")? .replace('/', ""), - image: s.image.unwrap_or(String::from("unknown")), - state: s + image: container.image.unwrap_or(String::from("unknown")), + state: container .state .context("no container state")? .parse() .context("failed to parse container state")?, - status: s.status, - labels: s.labels.unwrap_or_default(), + status: container.status, + labels: container.labels.unwrap_or_default(), + network_mode: container + .host_config + .and_then(|config| config.network_mode), + networks: container.network_settings.and_then(|settings| { + settings + .networks + .map(|networks| networks.into_keys().collect()) + }), }; Ok::<_, anyhow::Error>(info) }) @@ -89,3 +105,116 @@ impl DockerClient { Ok(images) } } + +/// Returns whether build result should be pushed after build +#[instrument(skip(registry_token))] +pub async fn docker_login( + registry: &ImageRegistry, + // For local token override from core. + registry_token: Option<&str>, + // For local config override from core. 
+ aws_ecr: Option<&AwsEcrConfig>, +) -> anyhow::Result { + let (domain, account) = match registry { + // Early return for no login + ImageRegistry::None(_) => return Ok(false), + // Early return because Ecr is different + ImageRegistry::AwsEcr(label) => { + let AwsEcrConfig { region, account_id } = aws_ecr + .with_context(|| { + if label.is_empty() { + String::from("Could not find aws ecr config") + } else { + format!("Could not find aws ecr config for label {label}") + } + })?; + let registry_token = registry_token + .context("aws ecr build missing registry token from core")?; + let command = format!("docker login {account_id}.dkr.ecr.{region}.amazonaws.com -u AWS -p {registry_token}"); + let log = async_run_command(&command).await; + if log.success() { + return Ok(true); + } else { + return Err(anyhow!( + "aws ecr login error: stdout: {} | stderr: {}", + log.stdout, + log.stderr + )); + } + } + ImageRegistry::Standard(StandardRegistryConfig { + domain, + account, + .. + }) => (domain.as_str(), account), + }; + if account.is_empty() { + return Err(anyhow!("Must configure account for registry domain {domain}, got empty string")); + } + let registry_token = match registry_token { + Some(token) => token, + None => crate::helpers::registry_token(domain, account)?, + }; + let log = async_run_command(&format!( + "docker login {domain} -u {account} -p {registry_token}", + )) + .await; + if log.success() { + Ok(true) + } else { + Err(anyhow!( + "{domain} login error: stdout: {} | stderr: {}", + log.stdout, + log.stderr + )) + } +} + +#[instrument] +pub async fn pull_image(image: &str) -> Log { + let command = format!("docker pull {image}"); + run_monitor_command("docker pull", command).await +} + +pub fn stop_container_command( + container_name: &str, + signal: Option, + time: Option, +) -> String { + let container_name = to_monitor_name(container_name); + let signal = signal + .map(|signal| format!(" --signal {signal}")) + .unwrap_or_default(); + let time = time + 
.map(|time| format!(" --time {time}")) + .unwrap_or_default(); + format!("docker stop{signal}{time} {container_name}") +} + +pub async fn container_stats( + container_name: Option, +) -> anyhow::Result> { + let format = "--format \"{{ json . }}\""; + let container_name = match container_name { + Some(name) => format!(" {name}"), + None => "".to_string(), + }; + let command = + format!("docker stats{container_name} --no-stream {format}"); + let output = async_run_command(&command).await; + if output.success() { + let res = output + .stdout + .split('\n') + .filter(|e| !e.is_empty()) + .map(|e| { + let parsed = serde_json::from_str(e) + .context(format!("failed at parsing entry {e}"))?; + Ok(parsed) + }) + .collect::>>()?; + Ok(res) + } else { + Err(anyhow!("{}", output.stderr.replace('\n', ""))) + } +} diff --git a/bin/periphery/src/helpers.rs b/bin/periphery/src/helpers.rs index d8383e85d..7c4aa555a 100644 --- a/bin/periphery/src/helpers.rs +++ b/bin/periphery/src/helpers.rs @@ -1,36 +1,34 @@ -use anyhow::{anyhow, Context}; -use monitor_client::entities::{ - build::{ImageRegistry, StandardRegistryConfig}, - config::core::AwsEcrConfig, - EnvironmentVar, -}; -use run_command::async_run_command; +use anyhow::Context; +use monitor_client::entities::{EnvironmentVar, SearchCombinator}; use crate::config::periphery_config; -pub fn get_git_token( +pub fn git_token( domain: &str, account_username: &str, -) -> anyhow::Result<&'static String> { +) -> anyhow::Result<&'static str> { periphery_config() .git_providers .iter() - .find(|_provider| _provider.domain == domain) - .and_then(|provider| provider.accounts - .iter() - .find(|account| account.username == account_username).map(|account| &account.token)) + .find(|provider| provider.domain == domain) + .and_then(|provider| { + provider.accounts.iter().find(|account| account.username == account_username).map(|account| account.token.as_str()) + }) .with_context(|| format!("did not find token in config for git account 
{account_username} | domain {domain}")) } -pub fn get_docker_token( +pub fn registry_token( domain: &str, account_username: &str, -) -> anyhow::Result<&'static String> { +) -> anyhow::Result<&'static str> { periphery_config() .docker_registries - .iter().find(|registry| registry.domain == domain) - .and_then(|registry| registry.accounts.iter().find(|account| account.username == account_username).map(|account| &account.token)) - .with_context(|| format!("did not find token in config for docker account {account_username} | domain {domain}")) + .iter() + .find(|registry| registry.domain == domain) + .and_then(|registry| { + registry.accounts.iter().find(|account| account.username == account_username).map(|account| account.token.as_str()) + }) + .with_context(|| format!("did not find token in config for docker registry account {account_username} | domain {domain}")) } pub fn parse_extra_args(extra_args: &[String]) -> String { @@ -50,66 +48,21 @@ pub fn parse_labels(labels: &[EnvironmentVar]) -> String { .join("") } -/// Returns whether build result should be pushed after build -#[instrument(skip(registry_token))] -pub async fn docker_login( - registry: &ImageRegistry, - // For local token override from core. - registry_token: Option<&str>, - // For local config override from core. 
- aws_ecr: Option<&AwsEcrConfig>, -) -> anyhow::Result { - let (domain, account) = match registry { - // Early return for no login - ImageRegistry::None(_) => return Ok(false), - // Early return because Ecr is different - ImageRegistry::AwsEcr(label) => { - let AwsEcrConfig { region, account_id } = aws_ecr - .with_context(|| { - if label.is_empty() { - String::from("Could not find aws ecr config") - } else { - format!("Could not find aws ecr config for label {label}") - } - })?; - let registry_token = registry_token - .context("aws ecr build missing registry token from core")?; - let command = format!("docker login {account_id}.dkr.ecr.{region}.amazonaws.com -u AWS -p {registry_token}"); - let log = async_run_command(&command).await; - if log.success() { - return Ok(true); - } else { - return Err(anyhow!( - "aws ecr login error: stdout: {} | stderr: {}", - log.stdout, - log.stderr - )); - } +pub fn log_grep( + terms: &[String], + combinator: SearchCombinator, + invert: bool, +) -> String { + let maybe_invert = invert.then_some(" -v").unwrap_or_default(); + match combinator { + SearchCombinator::Or => { + format!("grep{maybe_invert} -E '{}'", terms.join("|")) + } + SearchCombinator::And => { + format!( + "grep{maybe_invert} -P '^(?=.*{})'", + terms.join(")(?=.*") + ) } - ImageRegistry::Standard(StandardRegistryConfig { - domain, - account, - .. 
- }) => (domain.as_str(), account), - }; - if account.is_empty() { - return Err(anyhow!("Must configure account for registry domain {domain}, got empty string")); - } - let registry_token = match registry_token { - Some(token) => token, - None => get_docker_token(domain, account)?, - }; - let log = async_run_command(&format!( - "docker login {domain} -u {account} -p {registry_token}", - )) - .await; - if log.success() { - Ok(true) - } else { - Err(anyhow!( - "{domain} login error: stdout: {} | stderr: {}", - log.stdout, - log.stderr - )) } } diff --git a/bin/periphery/src/main.rs b/bin/periphery/src/main.rs index 7b66de15f..01df72e31 100644 --- a/bin/periphery/src/main.rs +++ b/bin/periphery/src/main.rs @@ -6,6 +6,7 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::Context; mod api; +mod compose; mod config; mod docker; mod helpers; @@ -15,7 +16,7 @@ mod stats; struct State; async fn app() -> anyhow::Result<()> { - dotenv::dotenv().ok(); + dotenvy::dotenv().ok(); let config = config::periphery_config(); logger::init(&config.logging)?; diff --git a/bin/periphery/src/stats.rs b/bin/periphery/src/stats.rs index 51e40d722..95003234a 100644 --- a/bin/periphery/src/stats.rs +++ b/bin/periphery/src/stats.rs @@ -80,9 +80,7 @@ impl Default for StatsClient { impl StatsClient { fn refresh(&mut self) { - self.system.refresh_cpu(); - self.system.refresh_memory(); - self.system.refresh_processes(); + self.system.refresh_all(); self.disks.refresh(); } @@ -92,7 +90,7 @@ impl StatsClient { pub fn get_system_stats(&self) -> SystemStats { SystemStats { - cpu_perc: self.system.global_cpu_info().cpu_usage(), + cpu_perc: self.system.global_cpu_usage(), mem_used_gb: self.system.used_memory() as f64 / BYTES_PER_GB, mem_total_gb: self.system.total_memory() as f64 / BYTES_PER_GB, disks: self.get_disks(), @@ -132,13 +130,17 @@ impl StatsClient { let disk_usage = p.disk_usage(); SystemProcess { pid: pid.as_u32(), - name: p.name().to_string(), + name: 
p.name().to_string_lossy().to_string(), exe: p .exe() .map(|exe| exe.to_str().unwrap_or_default()) .unwrap_or_default() .to_string(), - cmd: p.cmd().to_vec(), + cmd: p + .cmd() + .iter() + .map(|cmd| cmd.to_string_lossy().to_string()) + .collect(), start_time: (p.start_time() * 1000) as f64, cpu_perc: p.cpu_usage(), mem_mb: p.memory() as f64 / BYTES_PER_MB, @@ -162,13 +164,17 @@ impl StatsClient { fn get_system_information( sys: &sysinfo::System, ) -> SystemInformation { - let cpu = sys.global_cpu_info(); SystemInformation { name: System::name(), os: System::long_os_version(), kernel: System::kernel_version(), host_name: System::host_name(), core_count: sys.physical_core_count().map(|c| c as u32), - cpu_brand: cpu.brand().to_string(), + cpu_brand: sys + .cpus() + .iter() + .next() + .map(|cpu| cpu.brand().to_string()) + .unwrap_or_default(), } } diff --git a/bin/tests/Cargo.toml b/bin/tests/Cargo.toml index 9de08d25b..8a48d5786 100644 --- a/bin/tests/Cargo.toml +++ b/bin/tests/Cargo.toml @@ -14,7 +14,7 @@ serde.workspace = true serde_json.workspace = true partial_derive2.workspace = true mungos.workspace = true -dotenv.workspace = true +dotenvy.workspace = true envy.workspace = true rand.workspace = true tracing.workspace = true diff --git a/bin/tests/src/core.rs b/bin/tests/src/core.rs index 391e8d252..ce5207edd 100644 --- a/bin/tests/src/core.rs +++ b/bin/tests/src/core.rs @@ -17,7 +17,7 @@ use crate::random_string; #[allow(unused)] pub async fn tests() -> anyhow::Result<()> { - dotenv::dotenv().ok(); + dotenvy::dotenv().ok(); let monitor = MonitorClient::new_from_env().await?; diff --git a/changelog.md b/changelog.md index e5a1f4c86..3d7af18ab 100644 --- a/changelog.md +++ b/changelog.md @@ -1,5 +1,19 @@ # Changelog +## Monitor v1.13 - Komodo (Aug 2024) +- This is the first named release, as I think it is really big. The Komodo Dragon is the largest species of Monitor lizard. +- **Deploy docker compose** with the new **Stack** resource. 
+ - Can define the compose file in the UI, or direct Monitor to clone a git repo containing compose files. + - Use webhooks to redeploy the stack on push to the repo + - Manage the environment variables passed to the compose command. +- **Builds** can now be configured with an alternate repository name to push the image under. + -An optional tag can also be configured to be postfixed onto the version, like image:1.13-aarch64. + This helps for pushing alternate build configurations under the same image repo, just under different tags. +- **Repos** can now be "built" using builders. The idea is, you spawn an AWS instance, clone a repo, execute a shell command +(like running a script in the repo), and terminating the instance. The script can build a binary, and push it to some binary repository. +Users will have to manage their own versioning though. +- **High level UI Updates** courtesy of @karamvirsingh98 + ## v1.12 (July 2024) - Break free of Github dependance. Use other git providers, including self hosted ones. - Same for Docker registry. You can also now use any docker registry for your images. 
diff --git a/client/core/rs/src/api/execute/build.rs b/client/core/rs/src/api/execute/build.rs index 6e8a4d594..be5af7e0c 100644 --- a/client/core/rs/src/api/execute/build.rs +++ b/client/core/rs/src/api/execute/build.rs @@ -4,7 +4,7 @@ use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; use typeshare::typeshare; -use crate::entities::{update::Update, NoData}; +use crate::entities::update::Update; use super::MonitorExecuteRequest; @@ -41,14 +41,18 @@ pub struct RunBuild { /// Response: [Update] #[typeshare] #[derive( - Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, )] #[empty_traits(MonitorExecuteRequest)] -#[response(CancelBuildResponse)] +#[response(Update)] pub struct CancelBuild { /// Can be id or name pub build: String, } - -#[typeshare] -pub type CancelBuildResponse = NoData; diff --git a/client/core/rs/src/api/execute/deployment.rs b/client/core/rs/src/api/execute/deployment.rs index 7f19948bb..ee0c7673e 100644 --- a/client/core/rs/src/api/execute/deployment.rs +++ b/client/core/rs/src/api/execute/deployment.rs @@ -34,8 +34,10 @@ pub struct Deploy { /// Name or id pub deployment: String, /// Override the default termination signal specified in the deployment. + /// Only used when deployment needs to be taken down before redeploy. pub stop_signal: Option, /// Override the default termination max time. + /// Only used when deployment needs to be taken down before redeploy. pub stop_time: Option, } @@ -64,6 +66,77 @@ pub struct StartContainer { // +/// Restarts the container for the target deployment. Response: [Update] +/// +/// 1. Runs `docker restart ${container_name}`. 
+#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct RestartContainer { + /// Name or id + pub deployment: String, +} + +// + +/// Pauses the container for the target deployment. Response: [Update] +/// +/// 1. Runs `docker pause ${container_name}`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct PauseContainer { + /// Name or id + pub deployment: String, +} + +// + +/// Unpauses the container for the target deployment. Response: [Update] +/// +/// 1. Runs `docker unpause ${container_name}`. +/// +/// Note. This is the only way to restart a paused container. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct UnpauseContainer { + /// Name or id + pub deployment: String, +} + +// + /// Stops the container for the target deployment. Response: [Update] /// /// 1. Runs `docker stop ${container_name}`. @@ -89,27 +162,6 @@ pub struct StopContainer { pub time: Option, } -/// Stops all deployments on the target server. Response: [Update] -/// -/// 1. Runs [StopContainer] on all deployments on the server concurrently. -#[typeshare] -#[derive( - Serialize, - Deserialize, - Debug, - Clone, - PartialEq, - Request, - EmptyTraits, - Parser, -)] -#[empty_traits(MonitorExecuteRequest)] -#[response(Update)] -pub struct StopAllContainers { - /// Name or id - pub server: String, -} - // /// Stops and removes the container for the target deployment. 
diff --git a/client/core/rs/src/api/execute/mod.rs b/client/core/rs/src/api/execute/mod.rs index e11cdc06b..279d7552c 100644 --- a/client/core/rs/src/api/execute/mod.rs +++ b/client/core/rs/src/api/execute/mod.rs @@ -11,6 +11,7 @@ mod procedure; mod repo; mod server; mod server_template; +mod stack; mod sync; pub use build::*; @@ -19,6 +20,7 @@ pub use procedure::*; pub use repo::*; pub use server::*; pub use server_template::*; +pub use stack::*; pub use sync::*; use crate::entities::{NoData, I64}; @@ -55,19 +57,25 @@ pub enum Execution { // BUILD RunBuild(RunBuild), + CancelBuild(CancelBuild), // DEPLOYMENT Deploy(Deploy), StartContainer(StartContainer), + RestartContainer(RestartContainer), + PauseContainer(PauseContainer), + UnpauseContainer(UnpauseContainer), StopContainer(StopContainer), - StopAllContainers(StopAllContainers), RemoveContainer(RemoveContainer), // REPO CloneRepo(CloneRepo), PullRepo(PullRepo), + BuildRepo(BuildRepo), + CancelRepoBuild(CancelRepoBuild), // SERVER + StopAllContainers(StopAllContainers), PruneNetworks(PruneNetworks), PruneImages(PruneImages), PruneContainers(PruneContainers), @@ -75,6 +83,15 @@ pub enum Execution { // SYNC RunSync(RunSync), + // STACK + DeployStack(DeployStack), + StartStack(StartStack), + RestartStack(RestartStack), + PauseStack(PauseStack), + UnpauseStack(UnpauseStack), + StopStack(StopStack), + DestroyStack(DestroyStack), + // SLEEP Sleep(Sleep), } diff --git a/client/core/rs/src/api/execute/repo.rs b/client/core/rs/src/api/execute/repo.rs index eec9fbc88..cae18592e 100644 --- a/client/core/rs/src/api/execute/repo.rs +++ b/client/core/rs/src/api/execute/repo.rs @@ -12,6 +12,8 @@ use super::MonitorExecuteRequest; /// Clones the target repo. Response: [Update]. /// +/// Note. Repo must have server attached at `server_id`. +/// /// 1. Clones the repo on the target server using `git clone https://{$token?}@github.com/${repo} -b ${branch}`. 
/// The token will only be used if a github account is specified, /// and must be declared in the periphery configuration on the target server. @@ -39,6 +41,8 @@ pub struct CloneRepo { /// Pulls the target repo. Response: [Update]. /// +/// Note. Repo must have server attached at `server_id`. +/// /// 1. Pulls the repo on the target server using `git pull`. /// 2. If `on_pull` is specified, it will be executed after the pull is complete. #[typeshare] @@ -58,3 +62,56 @@ pub struct PullRepo { /// Id or name pub repo: String, } + +// + +/// Builds the target repo, using the attached builder. Response: [Update]. +/// +/// Note. Repo must have builder attached at `builder_id`. +/// +/// 1. Spawns the target builder instance (For AWS type. For Server type, just use CloneRepo). +/// 2. Clones the repo on the builder using `git clone https://{$token?}@github.com/${repo} -b ${branch}`. +/// The token will only be used if a github account is specified, +/// and must be declared in the periphery configuration on the builder instance. +/// 3. If `on_clone` and `on_pull` are specified, they will be executed. +/// `on_clone` will be executed before `on_pull`. +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct BuildRepo { + /// Id or name + pub repo: String, +} + +// + +/// Cancels the target repo build. +/// Only does anything if the repo build is `building` when called. 
+/// Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct CancelRepoBuild { + /// Can be id or name + pub repo: String, +} diff --git a/client/core/rs/src/api/execute/server.rs b/client/core/rs/src/api/execute/server.rs index fc7820d5c..131a3706f 100644 --- a/client/core/rs/src/api/execute/server.rs +++ b/client/core/rs/src/api/execute/server.rs @@ -10,6 +10,27 @@ use super::MonitorExecuteRequest; // +/// Stops all containers on the target server. Response: [Update] +#[typeshare] +#[derive( + Serialize, + Deserialize, + Debug, + Clone, + PartialEq, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct StopAllContainers { + /// Name or id + pub server: String, +} + +// + /// Prunes the docker networks on the target server. Response: [Update]. /// /// 1. Runs `docker network prune -f`. @@ -75,4 +96,4 @@ pub struct PruneImages { pub struct PruneContainers { /// Id or name pub server: String, -} +} \ No newline at end of file diff --git a/client/core/rs/src/api/execute/stack.rs b/client/core/rs/src/api/execute/stack.rs new file mode 100644 index 000000000..80e70e562 --- /dev/null +++ b/client/core/rs/src/api/execute/stack.rs @@ -0,0 +1,178 @@ +use clap::Parser; +use derive_empty_traits::EmptyTraits; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::update::Update; + +use super::MonitorExecuteRequest; + +/// Deploys the target stack. `docker compose up`. Response: [Update] +/// +/// Note. If the stack is already deployed, it will be destroyed first. 
+#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct DeployStack { + /// Id or name + pub stack: String, + /// Override the default termination max time. + /// Only used if the stack needs to be taken down first. + pub stop_time: Option, +} + +// + +/// Starts the target stack. `docker compose start`. Response: [Update] +#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct StartStack { + /// Id or name + pub stack: String, + /// Optionally specify a specific service to start + pub service: Option, +} + +// + +/// Restarts the target stack. `docker compose restart`. Response: [Update] +#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct RestartStack { + /// Id or name + pub stack: String, + /// Optionally specify a specific service to restart + pub service: Option, +} + +// + +/// Pauses the target stack. `docker compose pause`. Response: [Update] +#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct PauseStack { + /// Id or name + pub stack: String, + /// Optionally specify a specific service to pause + pub service: Option, +} + +// + +/// Unpauses the target stack. `docker compose unpause`. Response: [Update]. +/// +/// Note. This is the only way to restart a paused container. 
+#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct UnpauseStack { + /// Id or name + pub stack: String, + /// Optionally specify a specific service to unpause + pub service: Option, +} + +// + +/// Starts the target stack. `docker compose stop`. Response: [Update] +#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct StopStack { + /// Id or name + pub stack: String, + /// Override the default termination max time. + pub stop_time: Option, + /// Optionally specify a specific service to stop + pub service: Option, +} + +// + +/// Destoys the target stack. `docker compose down`. Response: [Update] +#[typeshare] +#[derive( + Debug, + Clone, + PartialEq, + Serialize, + Deserialize, + Request, + EmptyTraits, + Parser, +)] +#[empty_traits(MonitorExecuteRequest)] +#[response(Update)] +pub struct DestroyStack { + /// Id or name + pub stack: String, + /// Pass `--remove-orphans` + #[serde(default)] + pub remove_orphans: bool, + /// Override the default termination max time. + pub stop_time: Option, +} diff --git a/client/core/rs/src/api/mod.rs b/client/core/rs/src/api/mod.rs index 831f3e619..9d1caa0b9 100644 --- a/client/core/rs/src/api/mod.rs +++ b/client/core/rs/src/api/mod.rs @@ -12,8 +12,9 @@ //! - X-Api-Secret: `your_api_secret` //! - Use either Authorization *or* X-Api-Key and X-Api-Secret to authenticate requests. //! - Body: JSON specifying the request type (`type`) and the parameters (`params`). -//! The request type matches the name of the the request struct definition, -//! and the params match the fields of the request struct. +//! +//! To call the api, construct JSON bodies following +//! the schemas given in [read], [mod@write], [execute], and so on. //! //! 
For example, this is an example body for [read::GetDeployment]: //! ``` diff --git a/client/core/rs/src/api/read/mod.rs b/client/core/rs/src/api/read/mod.rs index fb32f728c..e45b1de57 100644 --- a/client/core/rs/src/api/read/mod.rs +++ b/client/core/rs/src/api/read/mod.rs @@ -10,10 +10,12 @@ mod builder; mod deployment; mod permission; mod procedure; +mod provider; mod repo; mod search; mod server; mod server_template; +mod stack; mod sync; mod tag; mod toml; @@ -29,10 +31,12 @@ pub use builder::*; pub use deployment::*; pub use permission::*; pub use procedure::*; +pub use provider::*; pub use repo::*; pub use search::*; pub use server::*; pub use server_template::*; +pub use stack::*; pub use sync::*; pub use tag::*; pub use toml::*; @@ -101,8 +105,8 @@ pub struct GetCoreInfoResponse { // -/// List the git providers. -/// Response: [ListGitProvidersResponse]. +/// List the git providers available in Core / Periphery config files. +/// Response: [ListGitProvidersFromConfigResponse]. /// /// Includes: /// - providers in core config @@ -113,20 +117,20 @@ pub struct GetCoreInfoResponse { Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] #[empty_traits(MonitorReadRequest)] -#[response(ListGitProvidersResponse)] -pub struct ListGitProviders { +#[response(ListGitProvidersFromConfigResponse)] +pub struct ListGitProvidersFromConfig { /// Accepts an optional Server or Builder target to expand the core list with /// providers available on that specific resource. pub target: Option, } #[typeshare] -pub type ListGitProvidersResponse = Vec; +pub type ListGitProvidersFromConfigResponse = Vec; // -/// List the suggested docker registry providers. -/// Response: [ListDockerRegistriesResponse]. +/// List the docker registry providers available in Core / Periphery config files. +/// Response: [ListDockerRegistriesFromConfigResponse]. 
/// /// Includes: /// - registries in core config @@ -137,15 +141,15 @@ pub type ListGitProvidersResponse = Vec; Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] #[empty_traits(MonitorReadRequest)] -#[response(ListDockerRegistriesResponse)] -pub struct ListDockerRegistries { +#[response(ListDockerRegistriesFromConfigResponse)] +pub struct ListDockerRegistriesFromConfig { /// Accepts an optional Server or Builder target to expand the core list with /// providers available on that specific resource. pub target: Option, } #[typeshare] -pub type ListDockerRegistriesResponse = Vec; +pub type ListDockerRegistriesFromConfigResponse = Vec; // diff --git a/client/core/rs/src/api/read/provider.rs b/client/core/rs/src/api/read/provider.rs new file mode 100644 index 000000000..748463150 --- /dev/null +++ b/client/core/rs/src/api/read/provider.rs @@ -0,0 +1,83 @@ +use derive_empty_traits::EmptyTraits; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::provider::{ + DockerRegistryAccount, GitProviderAccount, +}; + +use super::MonitorReadRequest; + +/// Get a specific git provider account. +/// Response: [GetGitProviderAccountResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(GetGitProviderAccountResponse)] +pub struct GetGitProviderAccount { + pub id: String, +} + +#[typeshare] +pub type GetGitProviderAccountResponse = GitProviderAccount; + +// + +/// List git provider accounts matching optional query. +/// Response: [ListGitProviderAccountsResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(ListGitProviderAccountsResponse)] +pub struct ListGitProviderAccounts { + /// Optionally filter by accounts with a specific domain. 
+ pub domain: Option, + /// Optionally filter by accounts with a specific username. + pub username: Option, +} + +#[typeshare] +pub type ListGitProviderAccountsResponse = Vec; + +// + +/// Get a specific docker registry account. +/// Response: [GetDockerRegistryAccountResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(GetDockerRegistryAccountResponse)] +pub struct GetDockerRegistryAccount { + pub id: String, +} + +#[typeshare] +pub type GetDockerRegistryAccountResponse = DockerRegistryAccount; + +// + +/// List docker registry accounts matching optional query. +/// Response: [ListDockerRegistryAccountsResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(ListDockerRegistryAccountsResponse)] +pub struct ListDockerRegistryAccounts { + /// Optionally filter by accounts with a specific domain. + pub domain: Option, + /// Optionally filter by accounts with a specific username. + pub username: Option, +} + +#[typeshare] +pub type ListDockerRegistryAccountsResponse = + Vec; diff --git a/client/core/rs/src/api/read/server.rs b/client/core/rs/src/api/read/server.rs index b88926ad0..a177f021c 100644 --- a/client/core/rs/src/api/read/server.rs +++ b/client/core/rs/src/api/read/server.rs @@ -15,6 +15,7 @@ use crate::entities::{ Server, ServerActionState, ServerListItem, ServerQuery, ServerState, }, + stack::ComposeProject, Timelength, I64, }; @@ -141,59 +142,78 @@ pub struct GetPeripheryVersionResponse { // -/// Get the docker networks on the server. Response: [GetDockerNetworksResponse]. +/// List the docker networks on the server. Response: [ListDockerNetworksResponse]. 
#[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] #[empty_traits(MonitorReadRequest)] -#[response(GetDockerNetworksResponse)] -pub struct GetDockerNetworks { +#[response(ListDockerNetworksResponse)] +pub struct ListDockerNetworks { /// Id or name #[serde(alias = "id", alias = "name")] pub server: String, } #[typeshare] -pub type GetDockerNetworksResponse = Vec; +pub type ListDockerNetworksResponse = Vec; // -/// Get the docker images locally cached on the target server. -/// Response: [GetDockerImagesResponse]. +/// List the docker images locally cached on the target server. +/// Response: [ListDockerImagesResponse]. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] #[empty_traits(MonitorReadRequest)] -#[response(GetDockerImagesResponse)] -pub struct GetDockerImages { +#[response(ListDockerImagesResponse)] +pub struct ListDockerImages { /// Id or name #[serde(alias = "id", alias = "name")] pub server: String, } #[typeshare] -pub type GetDockerImagesResponse = Vec; +pub type ListDockerImagesResponse = Vec; // -/// Get all docker containers on the target server. -/// Response: [GetDockerContainersResponse]. +/// List all docker containers on the target server. +/// Response: [ListDockerContainersResponse]. #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] #[empty_traits(MonitorReadRequest)] -#[response(GetDockerContainersResponse)] -pub struct GetDockerContainers { +#[response(ListDockerContainersResponse)] +pub struct ListDockerContainers { /// Id or name #[serde(alias = "id", alias = "name")] pub server: String, } #[typeshare] -pub type GetDockerContainersResponse = Vec; +pub type ListDockerContainersResponse = Vec; + +// + +/// List all compose projects on the target server. +/// Response: [ListComposeProjectsResponse]. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(ListComposeProjectsResponse)] +pub struct ListComposeProjects { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub server: String, +} + +#[typeshare] +pub type ListComposeProjectsResponse = Vec; // @@ -238,8 +258,8 @@ pub type GetSystemStatsResponse = SystemStats; // -/// Get the processes running on the target server. -/// Response: [GetSystemProcessesResponse]. +/// List the processes running on the target server. +/// Response: [ListSystemProcessesResponse]. /// /// Note. This does not hit the server directly. The procedures come from an /// in memory cache on the core, which hits the server periodically @@ -249,15 +269,15 @@ pub type GetSystemStatsResponse = SystemStats; Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, )] #[empty_traits(MonitorReadRequest)] -#[response(GetSystemProcessesResponse)] -pub struct GetSystemProcesses { +#[response(ListSystemProcessesResponse)] +pub struct ListSystemProcesses { /// Id or name #[serde(alias = "id", alias = "name")] pub server: String, } #[typeshare] -pub type GetSystemProcessesResponse = Vec; +pub type ListSystemProcessesResponse = Vec; // diff --git a/client/core/rs/src/api/read/stack.rs b/client/core/rs/src/api/read/stack.rs new file mode 100644 index 000000000..e60d33b9d --- /dev/null +++ b/client/core/rs/src/api/read/stack.rs @@ -0,0 +1,250 @@ +use derive_empty_traits::EmptyTraits; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::{ + stack::{ + Stack, StackActionState, StackListItem, StackQuery, StackService, + }, + update::Log, + SearchCombinator, U64, +}; + +use super::MonitorReadRequest; + +// + +/// Get a specific stack. Response: [Stack]. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(GetStackResponse)] +pub struct GetStack { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, +} + +#[typeshare] +pub type GetStackResponse = Stack; + +// + +/// Lists a specific stack's services (the containers). Response: [ListStackServicesResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(ListStackServicesResponse)] +pub struct ListStackServices { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, +} + +#[typeshare] +pub type ListStackServicesResponse = Vec; + +// + +/// Get a stack service's log. Response: [GetStackServiceLogResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(GetStackServiceLogResponse)] +pub struct GetStackServiceLog { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, + /// The service to get the log for. + pub service: String, + /// The number of lines of the log tail to include. + /// Default: 50. + /// Max: 5000. + #[serde(default = "default_tail")] + pub tail: U64, +} + +fn default_tail() -> u64 { + 50 +} + +#[typeshare] +pub type GetStackServiceLogResponse = Log; + +// + +/// Search the stack service log's tail using `grep`. All lines go to stdout. +/// Response: [Log]. +/// +/// Note. This call will hit the underlying server directly for most up to date log. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(SearchStackServiceLogResponse)] +pub struct SearchStackServiceLog { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, + /// The service to get the log for. 
+ pub service: String, + /// The terms to search for. + pub terms: Vec, + /// When searching for multiple terms, can use `AND` or `OR` combinator. + /// + /// - `AND`: Only include lines with **all** terms present in that line. + /// - `OR`: Include lines that have one or more matches in the terms. + #[serde(default)] + pub combinator: SearchCombinator, + /// Invert the results, ie return all lines that DON'T match the terms / combinator. + #[serde(default)] + pub invert: bool, +} + +#[typeshare] +pub type SearchStackServiceLogResponse = Log; + +// + +/// Gets a list of existing values used as extra args across other stacks. +/// Useful to offer suggestions. Response: [ListCommonStackExtraArgsResponse] +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(ListCommonStackExtraArgsResponse)] +pub struct ListCommonStackExtraArgs { + /// optional structured query to filter stacks. + #[serde(default)] + pub query: StackQuery, +} + +#[typeshare] +pub type ListCommonStackExtraArgsResponse = Vec; + +// + +/// List stacks matching optional query. Response: [ListStacksResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(ListStacksResponse)] +pub struct ListStacks { + /// optional structured query to filter syncs. + #[serde(default)] + pub query: StackQuery, +} + +#[typeshare] +pub type ListStacksResponse = Vec; + +// + +/// List stacks matching optional query. Response: [ListFullStacksResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Default, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(ListFullStacksResponse)] +pub struct ListFullStacks { + /// optional structured query to filter stacks. 
+ #[serde(default)] + pub query: StackQuery, +} + +#[typeshare] +pub type ListFullStacksResponse = Vec; + +// + +/// Get current action state for the stack. Response: [StackActionState]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(GetStackActionStateResponse)] +pub struct GetStackActionState { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, +} + +#[typeshare] +pub type GetStackActionStateResponse = StackActionState; + +// + +/// Gets a summary of data relating to all stacks. +/// Response: [GetStacksSummaryResponse]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(GetStacksSummaryResponse)] +pub struct GetStacksSummary {} + +/// Response for [GetStacksSummary] +#[typeshare] +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +pub struct GetStacksSummaryResponse { + /// The total number of stacks + pub total: u32, + /// The number of stacks with Running state. + pub running: u32, + /// The number of stacks with Paused state. + pub paused: u32, + /// The number of stacks with Stopped state. + pub stopped: u32, + /// The number of stacks with Restarting state. + pub restarting: u32, + /// The number of stacks with Dead state. + pub dead: u32, + /// The number of stacks with Unhealthy state. + pub unhealthy: u32, + /// The number of stacks with Down state. + pub down: u32, + /// The number of stacks with Unknown state. + pub unknown: u32, +} + +// + +/// Get a target stack's configured webhooks. Response: [GetStackWebhooksEnabledResponse]. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorReadRequest)] +#[response(GetStackWebhooksEnabledResponse)] +pub struct GetStackWebhooksEnabled { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, +} + +/// Response for [GetStackWebhooksEnabled] +#[typeshare] +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct GetStackWebhooksEnabledResponse { + /// Whether the repo webhooks can even be managed. + /// The repo owner must be in `github_webhook_app.owners` list to be managed. + pub managed: bool, + /// Whether pushes to branch trigger refresh. Will always be false if managed is false. + pub refresh_enabled: bool, + /// Whether pushes to branch trigger stack execution. Will always be false if managed is false. + pub deploy_enabled: bool, +} diff --git a/client/core/rs/src/api/write/build.rs b/client/core/rs/src/api/write/build.rs index 2d674831b..7cbf4e6e7 100644 --- a/client/core/rs/src/api/write/build.rs +++ b/client/core/rs/src/api/write/build.rs @@ -83,6 +83,20 @@ pub struct UpdateBuild { // +/// Trigger a refresh of the cached latest hash and message. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(NoData)] +pub struct RefreshBuildCache { + /// Id or name + pub build: String, +} + +// + /// Create a webhook on the github repo attached to the build /// passed in request. Response: [CreateBuildWebhookResponse] #[typeshare] diff --git a/client/core/rs/src/api/write/deployment.rs b/client/core/rs/src/api/write/deployment.rs index 785bd831e..de818e181 100644 --- a/client/core/rs/src/api/write/deployment.rs +++ b/client/core/rs/src/api/write/deployment.rs @@ -66,6 +66,9 @@ pub struct DeleteDeployment { /// Update the deployment at the given id, and return the updated deployment. /// Response: [Deployment]. /// +/// Note. 
If the attached server for the deployment changes, +/// the deployment will be deleted / cleaned up on the old server. +/// /// Note. This method updates only the fields which are set in the [_PartialDeploymentConfig], /// effectively merging diffs into the final document. /// This is helpful when multiple users are using diff --git a/client/core/rs/src/api/write/mod.rs b/client/core/rs/src/api/write/mod.rs index 758223afe..a703424ed 100644 --- a/client/core/rs/src/api/write/mod.rs +++ b/client/core/rs/src/api/write/mod.rs @@ -6,9 +6,11 @@ mod deployment; mod description; mod permissions; mod procedure; +mod provider; mod repo; mod server; mod server_template; +mod stack; mod sync; mod tags; mod user; @@ -23,9 +25,11 @@ pub use deployment::*; pub use description::*; pub use permissions::*; pub use procedure::*; +pub use provider::*; pub use repo::*; pub use server::*; pub use server_template::*; +pub use stack::*; pub use sync::*; pub use tags::*; pub use user::*; diff --git a/client/core/rs/src/api/write/provider.rs b/client/core/rs/src/api/write/provider.rs new file mode 100644 index 000000000..9e82b967e --- /dev/null +++ b/client/core/rs/src/api/write/provider.rs @@ -0,0 +1,118 @@ +use derive_empty_traits::EmptyTraits; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::provider::*; + +use super::MonitorWriteRequest; + +/// **Admin only.** Create a git provider account. +/// Response: [GitProviderAccount]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(CreateGitProviderAccountResponse)] +pub struct CreateGitProviderAccount { + /// The initial account config. Anything in the _id field will be ignored, + /// as this is generated on creation. 
+ pub account: _PartialGitProviderAccount, +} + +#[typeshare] +pub type CreateGitProviderAccountResponse = GitProviderAccount; + +// + +/// **Admin only.** Update a git provider account. +/// Response: [GitProviderAccount]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(UpdateGitProviderAccountResponse)] +pub struct UpdateGitProviderAccount { + /// The id of the git provider account to update. + pub id: String, + /// The partial git provider account. + pub account: _PartialGitProviderAccount, +} + +#[typeshare] +pub type UpdateGitProviderAccountResponse = GitProviderAccount; + +// + +/// **Admin only.** Delete a git provider account. +/// Response: [GitProviderAccount]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(DeleteGitProviderAccountResponse)] +pub struct DeleteGitProviderAccount { + /// The id of the git provider to delete + pub id: String, +} + +#[typeshare] +pub type DeleteGitProviderAccountResponse = GitProviderAccount; + +// + +/// **Admin only.** Create a docker registry account. +/// Response: [DockerRegistryAccount]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(CreateDockerRegistryAccountResponse)] +pub struct CreateDockerRegistryAccount { + pub account: _PartialDockerRegistryAccount, +} + +#[typeshare] +pub type CreateDockerRegistryAccountResponse = DockerRegistryAccount; + +// + +/// **Admin only.** Update a docker registry account. +/// Response: [DockerRegistryAccount]. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(UpdateDockerRegistryAccountResponse)] +pub struct UpdateDockerRegistryAccount { + /// The id of the docker registry to update + pub id: String, + /// The partial docker registry account. + pub account: _PartialDockerRegistryAccount, +} + +#[typeshare] +pub type UpdateDockerRegistryAccountResponse = DockerRegistryAccount; + +// + +/// **Admin only.** Delete a docker registry account. +/// Response: [DockerRegistryAccount]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(DeleteDockerRegistryAccountResponse)] +pub struct DeleteDockerRegistryAccount { + /// The id of the docker registry account to delete + pub id: String, +} + +#[typeshare] +pub type DeleteDockerRegistryAccountResponse = DockerRegistryAccount; diff --git a/client/core/rs/src/api/write/repo.rs b/client/core/rs/src/api/write/repo.rs index 3e6badfba..67fd32d28 100644 --- a/client/core/rs/src/api/write/repo.rs +++ b/client/core/rs/src/api/write/repo.rs @@ -86,6 +86,20 @@ pub struct UpdateRepo { // +/// Trigger a refresh of the cached latest hash and message. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(NoData)] +pub struct RefreshRepoCache { + /// Id or name + pub repo: String, +} + +// + #[typeshare] #[derive(Debug, Clone, Serialize, Deserialize)] pub enum RepoWebhookAction { diff --git a/client/core/rs/src/api/write/stack.rs b/client/core/rs/src/api/write/stack.rs new file mode 100644 index 000000000..fed83925c --- /dev/null +++ b/client/core/rs/src/api/write/stack.rs @@ -0,0 +1,166 @@ +use derive_empty_traits::EmptyTraits; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use crate::entities::{ + stack::{Stack, _PartialStackConfig}, update::Update, NoData +}; + +use super::MonitorWriteRequest; + +// + +/// Create a stack. Response: [Stack]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(Stack)] +pub struct CreateStack { + /// The name given to newly created stack. + pub name: String, + /// Optional partial config to initialize the stack with. + pub config: _PartialStackConfig, +} + +// + +/// Creates a new stack with given `name` and the configuration +/// of the stack at the given `id`. Response: [Stack]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(Stack)] +pub struct CopyStack { + /// The name of the new stack. + pub name: String, + /// The id of the stack to copy. + pub id: String, +} + +// + +/// Deletes the stack at the given id, and returns the deleted stack. +/// Response: [Stack] +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(Stack)] +pub struct DeleteStack { + /// The id or name of the stack to delete. 
+ pub id: String, +} + +// + +/// Update the stack at the given id, and return the updated stack. +/// Response: [Stack]. +/// +/// Note. If the attached server for the stack changes, +/// the stack will be deleted / cleaned up on the old server. +/// +/// Note. This method updates only the fields which are set in the [_PartialStackConfig], +/// merging diffs into the final document. +/// This is helpful when multiple users are using +/// the same resources concurrently by ensuring no unintentional +/// field changes occur from out of date local state. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(Stack)] +pub struct UpdateStack { + /// The id of the Stack to update. + pub id: String, + /// The partial config update to apply. + pub config: _PartialStackConfig, +} + +// + +/// Rename the stack at id to the given name. Response: [Update]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(Update)] +pub struct RenameStack { + /// The id of the stack to rename. + pub id: String, + /// The new name. + pub name: String, +} + +// + +/// Trigger a refresh of the cached compose file contents. +/// Refreshes: +/// - Whether the remote file is missing +/// - The latest json, and for repos, the remote contents, hash, and message. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(NoData)] +pub struct RefreshStackCache { + /// Id or name + pub stack: String, +} + +// + +#[typeshare] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum StackWebhookAction { + Refresh, + Deploy, +} + +/// Create a webhook on the github repo attached to the stack +/// passed in request. 
Response: [CreateStackWebhookResponse] +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(CreateStackWebhookResponse)] +pub struct CreateStackWebhook { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, + /// "Refresh" or "Deploy" + pub action: StackWebhookAction, +} + +#[typeshare] +pub type CreateStackWebhookResponse = NoData; + +// + +/// Delete the webhook on the github repo attached to the stack +/// passed in request. Response: [DeleteStackWebhookResponse] +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(MonitorWriteRequest)] +#[response(DeleteStackWebhookResponse)] +pub struct DeleteStackWebhook { + /// Id or name + #[serde(alias = "id", alias = "name")] + pub stack: String, + /// "Refresh" or "Deploy" + pub action: StackWebhookAction, +} + +#[typeshare] +pub type DeleteStackWebhookResponse = NoData; diff --git a/client/core/rs/src/api/write/sync.rs b/client/core/rs/src/api/write/sync.rs index 742131508..4888de188 100644 --- a/client/core/rs/src/api/write/sync.rs +++ b/client/core/rs/src/api/write/sync.rs @@ -63,9 +63,6 @@ pub struct DeleteResourceSync { /// Update the sync at the given id, and return the updated sync. /// Response: [ResourceSync]. /// -/// Note. If the attached server for the sync changes, -/// the sync will be deleted / cleaned up on the old server. -/// /// Note. This method updates only the fields which are set in the [_PartialResourceSyncConfig], /// effectively merging diffs into the final document. 
/// This is helpful when multiple users are using diff --git a/client/core/rs/src/busy.rs b/client/core/rs/src/busy.rs index dde21d7fc..5cca4232d 100644 --- a/client/core/rs/src/busy.rs +++ b/client/core/rs/src/busy.rs @@ -1,7 +1,8 @@ use crate::entities::{ build::BuildActionState, deployment::DeploymentActionState, procedure::ProcedureActionState, repo::RepoActionState, - server::ServerActionState, sync::ResourceSyncActionState, + server::ServerActionState, stack::StackActionState, + sync::ResourceSyncActionState, }; pub trait Busy { @@ -19,13 +20,26 @@ impl Busy for ServerActionState { impl Busy for DeploymentActionState { fn busy(&self) -> bool { self.deploying - || self.removing || self.starting + || self.restarting + || self.pausing || self.stopping + || self.removing || self.renaming } } +impl Busy for StackActionState { + fn busy(&self) -> bool { + self.deploying + || self.starting + || self.restarting + || self.pausing + || self.stopping + || self.destroying + } +} + impl Busy for BuildActionState { fn busy(&self) -> bool { self.building diff --git a/client/core/rs/src/entities/alert.rs b/client/core/rs/src/entities/alert.rs index 21f57b901..aba893d0a 100644 --- a/client/core/rs/src/entities/alert.rs +++ b/client/core/rs/src/entities/alert.rs @@ -8,7 +8,7 @@ use crate::entities::{MongoId, I64}; use super::{ _Serror, deployment::DeploymentState, server::stats::SeverityLevel, - update::ResourceTarget, Version, + stack::StackState, update::ResourceTarget, Version, }; /// Representation of an alert in the system. @@ -133,7 +133,7 @@ pub enum AlertData { id: String, /// The name of the deployment name: String, - /// The server id of server deployment is on + /// The server id of server that the deployment is on server_id: String, /// The server name server_name: String, @@ -143,6 +143,22 @@ pub enum AlertData { to: DeploymentState, }, + /// A stack's state has changed unexpectedly. 
+ StackStateChange { + /// The id of the stack + id: String, + /// The name of the stack + name: String, + /// The server id of server that the stack is on + server_id: String, + /// The server name + server_name: String, + /// The previous stack state + from: StackState, + /// The current stack state + to: StackState, + }, + /// An AWS builder failed to terminate. @@ -168,6 +184,14 @@ pub enum AlertData { /// The version that failed to build version: Version, }, + + /// A repo build has failed + RepoBuildFailed { + /// The id of the repo + id: String, + /// The name of the repo + name: String, + }, } impl Default for AlertData { diff --git a/client/core/rs/src/entities/build.rs b/client/core/rs/src/entities/build.rs index 6f6a444de..b4f942982 100644 --- a/client/core/rs/src/entities/build.rs +++ b/client/core/rs/src/entities/build.rs @@ -34,6 +34,10 @@ pub struct BuildListItemInfo { pub branch: String, /// State of the build. Reflects whether most recent build successful. pub state: BuildState, + /// Latest built short commit hash, or null. + pub built_hash: Option, + /// Latest short commit hash, or null. Only for repo based builds + pub latest_hash: Option, } #[typeshare] @@ -56,6 +60,14 @@ pub enum BuildState { #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct BuildInfo { pub last_built_at: I64, + /// Latest built short commit hash, or null. + pub built_hash: Option, + /// Latest built commit message, or null. Only for repo based builds + pub built_message: Option, + /// Latest remote short commit hash, or null. + pub latest_hash: Option, + /// Latest remote commit message, or null + pub latest_message: Option, } #[typeshare(serialized_as = "Partial")] @@ -78,6 +90,27 @@ pub struct BuildConfig { #[builder(default)] pub version: Version, + /// An alternate name for the image pushed to the repository. + /// If this is empty, it will use the build name. 
+ /// + /// Can be used in conjunction with `image_tag` to direct multiple builds + /// with different configs to push to the same image registry, under different, + /// independently versioned tags. + #[serde(default)] + #[builder(default)] + pub image_name: String, + + /// An extra tag put before the build version, for the image pushed to the repository. + /// Eg. an image tag of `aarch64` would push to mbecker20/monitor_core:aarch64-1.13.2. + /// If this is empty, the image tag will just be the build version. + /// + /// Can be used in conjunction with `image_name` to direct multiple builds + /// with different configs to push to the same image registry, under different, + /// independently versioned tags. + #[serde(default)] + #[builder(default)] + pub image_tag: String, + /// The git provider domain. Default: github.com + #[serde(default = "default_git_provider")] + #[builder(default = "default_git_provider()")] + pub git_provider: String, @@ -113,7 +146,7 @@ pub struct BuildConfig { /// /// Note. A token for the account must be available in the core config or the builder server's periphery config /// for the configured git provider. 
- #[serde(default, alias = "github_account")] + #[serde(default)] #[builder(default)] pub git_account: String, @@ -245,6 +278,8 @@ impl Default for BuildConfig { builder_id: Default::default(), skip_secret_interp: Default::default(), version: Default::default(), + image_name: Default::default(), + image_tag: Default::default(), git_provider: default_git_provider(), git_https: default_git_https(), repo: Default::default(), diff --git a/client/core/rs/src/entities/config/core.rs b/client/core/rs/src/entities/config/core.rs index c2d330ee3..379dbe850 100644 --- a/client/core/rs/src/entities/config/core.rs +++ b/client/core/rs/src/entities/config/core.rs @@ -45,10 +45,20 @@ pub struct Env { pub monitor_port: Option, /// Override `passkey` pub monitor_passkey: Option, - /// Override `jwt_valid_for` - pub monitor_jwt_valid_for: Option, - /// Override `sync_directory` - pub monitor_sync_directory: Option, + /// Override `jwt_secret` + pub monitor_jwt_secret: Option, + /// Override `jwt_ttl` + pub monitor_jwt_ttl: Option, + /// Override `repo_directory` + pub monitor_repo_directory: Option, + /// Override `sync_poll_interval` + pub monitor_sync_poll_interval: Option, + /// Override `stack_poll_interval` + pub monitor_stack_poll_interval: Option, + /// Override `build_poll_interval` + pub monitor_build_poll_interval: Option, + /// Override `repo_poll_interval` + pub monitor_repo_poll_interval: Option, /// Override `monitoring_interval` pub monitor_monitoring_interval: Option, /// Override `keep_stats_for_days` @@ -73,6 +83,8 @@ pub struct Env { pub monitor_transparent_mode: Option, /// Override `ui_write_disabled` pub monitor_ui_write_disabled: Option, + /// Override `enable_new_users` + pub monitor_enable_new_users: Option, /// Override `local_auth` pub monitor_local_auth: Option, @@ -142,164 +154,7 @@ fn default_config_path() -> String { /// to `/config/config.toml` inside the container, or simply override whichever fields /// you need using the environment. 
/// -/// ## Example TOML -/// ```toml -/// ## this will be the document title on the web page (shows up as text in the browser tab). -/// ## default: 'Monitor' -/// title = "Monitor" -/// -/// ## Required for oauth functionality. This should be the url used to access monitor in browser, -/// ## potentially behind DNS. -/// ## Eg https://monitor.dev or http://12.34.56.78:9000. -/// ## This should match the address configured in your oauth app. -/// ## Required (no default). -/// host = "https://monitor.dev" -/// -/// ## The port the core system will run on. If running core in docker container, -/// ## Leave as this port as 9000 and use port bind eg. -p 9001:9000 -/// ## Default: 9000 -/// port = 9000 -/// -/// ## Must match a passkey in periphery config to communicate with periphery. -/// ## Required (No default) -/// passkey = "a_random_passkey" -/// -/// ## Specify the log level of the monitor core application. -/// ## Default: `info`. -/// ## Options: `off`, `error`, `warn`, `info`, `debug`, `trace`. -/// logging.level = "info" -/// -/// ## Specify the logging format for stdout / stderr. -/// ## Default: standard -/// ## Options: `standard`, `json`, `none` -/// logging.stdio = "standard" -/// -/// ## Specify a opentelemetry otlp endpoint to send traces to. -/// ## Optional, default unassigned (don't export telemetry). -/// # logging.otlp_endpoint = "http://localhost:4317" -/// -/// ## Specify how long an issued jwt stays valid. -/// ## All jwts are invalidated on application restart. -/// ## Default: `1-day`. -/// ## Options: `1-hr`, `12-hr`, `1-day`, `3-day`, `1-wk`, `2-wk`, `30-day`. -/// jwt_valid_for = "1-day" -/// -/// ## Controls the granularity of the system stats collection by monitor core. -/// ## Options: `5-sec`, `15-sec`, `30-sec`, `1-min`, `2-min`, `5-min`. -/// ## Default: `15-sec`. -/// monitoring_interval = "15-sec" -/// -/// ## Number of days to store stats, or 0 to disable stats pruning. 
-/// ## Stats older than this number of days are deleted daily -/// ## Default: 0 (pruning disabled) -/// keep_stats_for_days = 14 -/// -/// ## Number of days to store alerts, or 0 to disable alert pruning. -/// ## Alerts older than this number of days are deleted daily -/// ## Default: 0 (pruning disabled) -/// keep_alerts_for_days = 14 -/// -/// ## These will be available . -/// ## When attached to build, image will be pushed to repo under the specified organization. -/// ## if empty, the "docker organization" config option will not be shown. -/// ## default: empty -/// # docker_organizations = ["your_docker_org1", "your_docker_org_2"] -/// -/// ## allows all users to have read access on all resources -/// ## default: false -/// # transparent_mode = true -/// -/// ## disables write support on resources in the UI -/// ## default: false -/// # ui_write_disabled = true -/// -/// ## allow or deny user login with username / password -/// ## default: false -/// # local_auth = true -/// -/// ## Use to configure google oauth -/// # google_oauth.enabled = true -/// # google_oauth.id = "your_google_client_id" -/// # google_oauth.secret = "your_google_client_secret" -/// -/// ## Use to configure github oauth -/// # github_oauth.enabled = true -/// # github_oauth.id = "your_github_client_id" -/// # github_oauth.secret = "your_github_client_secret" -/// -/// ## an alternate base url that is used to recieve github webhook requests -/// ## if empty or not specified, will use 'host' address as base -/// ## default: empty (none) -/// # github_webhook_base_url = "https://github-webhook.monitor.dev" -/// -/// ## token that has to be given to github during repo webhook config as the secret -/// ## default: empty (none) -/// github_webhook_secret = "your_random_webhook_secret" -/// -/// ## Configure github webhook app. Enables webhook management apis. -/// # github_webhook_app.app_id = 1234455 # Find on the app page. 
-/// # github_webhook_app.installations = [ -/// # ## Find the id after installing the app to user / organization. "namespace" is the username / organization name. -/// # { id = 1234, namespace = "mbecker20" } -/// # ] -/// -/// ## Path to github webhook app private key. -/// ## This is defaulted to `/github/private-key.pem`, and doesn't need to be changed if running in Docker. -/// ## Just mount the private key pem file on the host to `/github/private-key.pem` in the container. -/// # github_webhook_app.pk_path = "/path/to/pk.pem" -/// -/// ## MUST comment back in some way to configure mongo. -/// # mongo.uri = "mongodb://username:password@localhost:27017" -/// ## ==== or ==== -/// mongo.address = "localhost:27017" -/// # mongo.username = "username" -/// # mongo.password = "password" -/// ## ==== other ==== -/// ## default: monitor. this is the name of the mongo database that monitor will create its collections in. -/// mongo.db_name = "monitor" -/// ## default: monitor_core. this is the assigned app_name of the mongo client -/// mongo.app_name = "monitor_core" -/// -/// ## provide aws api keys for ephemeral builders -/// # aws.access_key_id = "your_aws_key_id" -/// # aws.secret_access_key = "your_aws_secret_key" -/// -/// ## provide hetzner api token for ephemeral builders -/// # hetzner.token = "your_hetzner_token" -/// -/// ## provide core-base secrets -/// [secrets] -/// # SECRET_1 = "value_1" -/// # SECRET_2 = "value_2" -/// -/// ## configure git providers -/// # [[git_provider]] -/// # domain = "git.mogh.tech" # use a custom provider, like self-hosted gitea -/// # accounts = [ -/// # { username = "mbecker20", token = "access_token_for_account" }, -/// # ] -/// -/// ## configure docker registries -/// # [[docker_registry]] -/// # domain = "docker.io" -/// # accounts = [ -/// # { username = "mbecker2020", token = "access_token_for_account" } -/// # ] -/// # organizations = ["DockerhubOrganization"] -/// -/// ## configure aws ecr registries -/// # 
[aws_ecr_registry.label_1] -/// # region = "us-east-1" -/// # account_id = "123456677" -/// # access_key_id = "your_aws_key_id_1" -/// # secret_access_key = "your_aws_secret_key_1" -/// -/// # [aws_ecr_registry.label_2] -/// # region = "us-west-1" -/// # account_id = "123456677" -/// # access_key_id = "your_aws_key_id_2" -/// # secret_access_key = "your_aws_secret_key_2" -/// ``` +/// Refer to the [example file](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml) for a full example. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CoreConfig { /// The title of this monitor deployment. Will be used in the browser page title. @@ -314,7 +169,7 @@ pub struct CoreConfig { pub host: String, /// Port the core web server runs on. - /// Default: 9000. + /// Default: 9120. #[serde(default = "default_core_port")] pub port: u16, @@ -322,30 +177,63 @@ pub struct CoreConfig { /// Should be some secure hash, maybe 20-40 chars. pub passkey: String, + /// Optionally provide a specific jwt secret. + /// Passing nothing or an empty string will cause one to be generated. + /// Default: "" (empty string) + #[serde(default)] + pub jwt_secret: String, + /// Control how long distributed JWT remain valid for. /// Default: `1-day`. - #[serde(default = "default_jwt_valid_for")] - pub jwt_valid_for: Timelength, + #[serde(default = "default_jwt_ttl")] + pub jwt_ttl: Timelength, - /// Specify the directory used to clone sync repos. The default is fine when using a container. + /// Specify the directory used to clone stack / repo / build repos, for latest hash / contents. + /// The default is fine when using a container. /// This directory has no need for persistence, so no need to mount it. 
- /// Default: `/syncs` - #[serde(default = "default_sync_directory")] - pub sync_directory: PathBuf, + /// Default: `/repos` + #[serde(default = "default_repo_directory")] + pub repo_directory: PathBuf, + + /// Interval at which to poll stacks for any updates / automated actions. + /// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` + /// Default: `5-min`. + #[serde(default = "default_poll_interval")] + pub stack_poll_interval: Timelength, + + /// Interval at which to poll syncs for any updates / automated actions. + /// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` + /// Default: `5-min`. + #[serde(default = "default_poll_interval")] + pub sync_poll_interval: Timelength, + + /// Interval at which to poll build commit hash for any updates / automated actions. + /// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` + /// Default: `5-min`. + #[serde(default = "default_poll_interval")] + pub build_poll_interval: Timelength, + + /// Interval at which to poll repo commit hash for any updates / automated actions. + /// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` + /// Default: `5-min`. + #[serde(default = "default_poll_interval")] + pub repo_poll_interval: Timelength, /// Interval at which to collect server stats and send any alerts. /// Default: `15-sec` #[serde(default = "default_monitoring_interval")] pub monitoring_interval: Timelength, - /// Number of days to keep stats, or 0 to disable pruning. stats older than this number of days are deleted on a daily cycle - /// Default: 0 (no pruning). - #[serde(default)] + /// Number of days to keep stats, or 0 to disable pruning. + /// Stats older than this number of days are deleted on a daily cycle + /// Default: 14 + #[serde(default = "default_prune_days")] pub keep_stats_for_days: u64, - /// Number of days to keep alerts, or 0 to disable pruning. alerts older than this number of days are deleted on a daily cycle - /// Default: 0 (no pruning). 
- #[serde(default)] + /// Number of days to keep alerts, or 0 to disable pruning. + /// Alerts older than this number of days are deleted on a daily cycle + /// Default: 14 + #[serde(default = "default_prune_days")] pub keep_alerts_for_days: u64, /// Configure logging @@ -372,6 +260,11 @@ pub struct CoreConfig { #[serde(default)] pub github_oauth: OauthCredentials, + /// New users will be automatically enabled. + /// Combined with transparent mode, this is suitable for a demo instance. + #[serde(default)] + pub enable_new_users: bool, + /// Used to verify validity from webhooks. /// Should be some secure hash maybe 20-40 chars. /// It is given to git provider when configuring the webhook. @@ -431,16 +324,24 @@ fn default_title() -> String { } fn default_core_port() -> u16 { - 9000 + 9120 } -fn default_jwt_valid_for() -> Timelength { +fn default_jwt_ttl() -> Timelength { Timelength::OneDay } -fn default_sync_directory() -> PathBuf { - // `/syncs` will always be valid path - PathBuf::from_str("/syncs").unwrap() +fn default_repo_directory() -> PathBuf { + // unwrap ok: `/repos` will always be valid path + PathBuf::from_str("/repos").unwrap() +} + +fn default_prune_days() -> u64 { + 14 +} + +fn default_poll_interval() -> Timelength { + Timelength::FiveMinutes } fn default_monitoring_interval() -> Timelength { @@ -449,58 +350,99 @@ fn default_monitoring_interval() -> Timelength { impl CoreConfig { pub fn sanitized(&self) -> CoreConfig { - let mut config = self.clone(); - - config.passkey = empty_or_redacted(&config.passkey); - config.webhook_secret = empty_or_redacted(&config.webhook_secret); - - config.github_oauth.id = - empty_or_redacted(&config.github_oauth.id); - config.github_oauth.secret = - empty_or_redacted(&config.github_oauth.secret); - - config.google_oauth.id = - empty_or_redacted(&config.google_oauth.id); - config.google_oauth.secret = - empty_or_redacted(&config.google_oauth.secret); - - config.mongo.uri = - config.mongo.uri.map(|cur| 
empty_or_redacted(&cur)); - config.mongo.username = - config.mongo.username.map(|cur| empty_or_redacted(&cur)); - config.mongo.password = - config.mongo.password.map(|cur| empty_or_redacted(&cur)); - - config.aws.access_key_id = - empty_or_redacted(&config.aws.access_key_id); - config.aws.secret_access_key = - empty_or_redacted(&config.aws.secret_access_key); - - config.hetzner.token = empty_or_redacted(&config.hetzner.token); - - config.secrets.iter_mut().for_each(|(_, secret)| { - *secret = empty_or_redacted(secret); - }); - - config.git_providers.iter_mut().for_each(|provider| { - provider.accounts.iter_mut().for_each(|account| { - account.token = empty_or_redacted(&account.token); - }) - }); - - config.docker_registries.iter_mut().for_each(|provider| { - provider.accounts.iter_mut().for_each(|account| { - account.token = empty_or_redacted(&account.token); - }) - }); - - config.aws_ecr_registries.iter_mut().for_each(|ecr| { - ecr.access_key_id = empty_or_redacted(&ecr.access_key_id); - ecr.secret_access_key = - empty_or_redacted(&ecr.secret_access_key); - }); - - config + let config = self.clone(); + CoreConfig { + title: config.title, + host: config.host, + port: config.port, + passkey: empty_or_redacted(&config.passkey), + jwt_secret: empty_or_redacted(&config.jwt_secret), + jwt_ttl: config.jwt_ttl, + repo_directory: config.repo_directory, + sync_poll_interval: config.sync_poll_interval, + stack_poll_interval: config.stack_poll_interval, + build_poll_interval: config.build_poll_interval, + repo_poll_interval: config.repo_poll_interval, + monitoring_interval: config.monitoring_interval, + keep_stats_for_days: config.keep_stats_for_days, + keep_alerts_for_days: config.keep_alerts_for_days, + logging: config.logging, + transparent_mode: config.transparent_mode, + ui_write_disabled: config.ui_write_disabled, + enable_new_users: config.enable_new_users, + local_auth: config.local_auth, + google_oauth: OauthCredentials { + enabled: config.google_oauth.enabled, + id: 
empty_or_redacted(&config.google_oauth.id), + secret: empty_or_redacted(&config.google_oauth.id), + }, + github_oauth: OauthCredentials { + enabled: config.github_oauth.enabled, + id: empty_or_redacted(&config.github_oauth.id), + secret: empty_or_redacted(&config.github_oauth.id), + }, + webhook_secret: empty_or_redacted(&config.webhook_secret), + webhook_base_url: config.webhook_base_url, + github_webhook_app: config.github_webhook_app, + mongo: MongoConfig { + uri: config.mongo.uri.map(|cur| empty_or_redacted(&cur)), + address: config.mongo.address, + username: config + .mongo + .username + .map(|cur| empty_or_redacted(&cur)), + password: config + .mongo + .password + .map(|cur| empty_or_redacted(&cur)), + app_name: config.mongo.app_name, + db_name: config.mongo.db_name, + }, + aws: AwsCredentials { + access_key_id: empty_or_redacted(&config.aws.access_key_id), + secret_access_key: empty_or_redacted( + &config.aws.secret_access_key, + ), + }, + hetzner: HetznerCredentials { + token: empty_or_redacted(&config.hetzner.token), + }, + secrets: config + .secrets + .into_iter() + .map(|(id, secret)| (id, empty_or_redacted(&secret))) + .collect(), + git_providers: config + .git_providers + .into_iter() + .map(|mut provider| { + provider.accounts.iter_mut().for_each(|account| { + account.token = empty_or_redacted(&account.token); + }); + provider + }) + .collect(), + docker_registries: config + .docker_registries + .into_iter() + .map(|mut provider| { + provider.accounts.iter_mut().for_each(|account| { + account.token = empty_or_redacted(&account.token); + }); + provider + }) + .collect(), + aws_ecr_registries: config + .aws_ecr_registries + .into_iter() + .map(|mut ecr| { + ecr.access_key_id = empty_or_redacted(&ecr.access_key_id); + ecr.secret_access_key = + empty_or_redacted(&ecr.secret_access_key); + ecr + }) + .collect(), + } } } diff --git a/client/core/rs/src/entities/config/periphery.rs b/client/core/rs/src/entities/config/periphery.rs index 564161949..db0c3a364 
100644 --- a/client/core/rs/src/entities/config/periphery.rs +++ b/client/core/rs/src/entities/config/periphery.rs @@ -54,7 +54,7 @@ pub struct CliArgs { #[arg(long)] pub config_keyword: Option>, - /// Merges nested configs, eg. secrets, docker_accounts, github_accounts. + /// Merges nested configs, eg. secrets, providers. /// Will override the equivalent env configuration. /// Default: false #[arg(long)] @@ -82,10 +82,10 @@ pub struct CliArgs { pub struct Env { /// Specify the config paths (files or folders) used to build up the /// final [PeripheryConfig]. - /// Default: `~/.config/monitor/periphery.config.toml`. + /// If not provided, will use Default config. /// /// Note. This is overridden if the equivalent arg is passed in [CliArgs]. - #[serde(default = "default_config_paths")] + #[serde(default)] pub monitor_config_paths: Vec, /// If specifying folders, use this to narrow down which /// files will be matched to parse into the final [PeripheryConfig]. @@ -96,7 +96,7 @@ pub struct Env { #[serde(default)] pub monitor_config_keywords: Vec, - /// Will merge nested config object (eg. secrets, github_accounts) across multiple + /// Will merge nested config object (eg. secrets, providers) across multiple /// config files. Default: `false` /// /// Note. This is overridden if the equivalent arg is passed in [CliArgs]. 
@@ -114,8 +114,12 @@ pub struct Env { pub monitor_port: Option, /// Override `repo_dir` pub monitor_repo_dir: Option, + /// Override `stack_dir` + pub monitor_stack_dir: Option, /// Override `stats_polling_rate` pub monitor_stats_polling_rate: Option, + /// Override `legacy_compose_cli` + pub monitor_legacy_compose_cli: Option, // LOGGING /// Override `logging.level` @@ -133,10 +137,6 @@ pub struct Env { pub monitor_passkeys: Option>, } -fn default_config_paths() -> Vec { - vec!["~/.config/monitor/periphery.config.toml".to_string()] -} - /// # Periphery Configuration File /// /// The periphery agent initializes it's configuration by reading the environment, @@ -148,14 +148,22 @@ fn default_config_paths() -> Vec { /// ## optional. 8120 is default /// port = 8120 /// -/// ## optional. /repos is default. -/// repo_dir = "/repos" +/// ## optional. `/etc/monitor/repos` is default. +/// repo_dir = "/etc/monitor/repos" +/// +/// ## optional. `/etc/monitor/stacks` is default. +/// stack_dir = "/etc/monitor/stacks" /// /// ## optional. 5-sec is default. /// ## can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. /// ## controls granularity of system stats recorded /// stats_polling_rate = "5-sec" /// +/// ## Whether stack actions should use `docker-compose ...` +/// ## instead of `docker compose ...`. +/// ## default: false +/// legacy_compose_cli = false +/// /// ## optional. default is empty, which will not block any request by ip. /// allowed_ips = ["127.0.0.1"] /// @@ -191,7 +199,7 @@ fn default_config_paths() -> Vec { /// # accounts = [ /// # { username = "mbecker20", token = "access_token_for_account" }, /// # ] -/// +/// /// ## configure periphery-based docker registries /// # [[docker_registry]] /// # domain = "docker.io" @@ -208,15 +216,26 @@ pub struct PeripheryConfig { pub port: u16, /// The system directory where monitor managed repos will be cloned. 
- /// Default: `/repos` + /// Default: `/etc/monitor/repos` #[serde(default = "default_repo_dir")] pub repo_dir: PathBuf, + /// The system directory where stacks will managed. + /// Default: `/etc/monitor/stacks` + #[serde(default = "default_stack_dir")] + pub stack_dir: PathBuf, + /// The rate at which the system stats will be polled to update the cache. /// Default: `5-sec` - #[serde(default = "default_stats_refresh_interval")] + #[serde(default = "default_stats_polling_rate")] pub stats_polling_rate: Timelength, + /// Whether stack actions should use `docker-compose ...` + /// instead of `docker compose ...`. + /// Default: false + #[serde(default)] + pub legacy_compose_cli: bool, + /// Logging configuration #[serde(default)] pub logging: LogConfig, @@ -259,6 +278,28 @@ fn default_repo_dir() -> PathBuf { "/etc/monitor/repos".parse().unwrap() } -fn default_stats_refresh_interval() -> Timelength { +fn default_stack_dir() -> PathBuf { + "/etc/monitor/stacks".parse().unwrap() +} + +fn default_stats_polling_rate() -> Timelength { Timelength::FiveSeconds } + +impl Default for PeripheryConfig { + fn default() -> Self { + Self { + port: default_periphery_port(), + repo_dir: default_repo_dir(), + stack_dir: default_stack_dir(), + stats_polling_rate: default_stats_polling_rate(), + legacy_compose_cli: Default::default(), + logging: Default::default(), + allowed_ips: Default::default(), + passkeys: Default::default(), + secrets: Default::default(), + git_providers: Default::default(), + docker_registries: Default::default(), + } + } +} \ No newline at end of file diff --git a/client/core/rs/src/entities/deployment.rs b/client/core/rs/src/entities/deployment.rs index fd63d9046..f063e7c21 100644 --- a/client/core/rs/src/entities/deployment.rs +++ b/client/core/rs/src/entities/deployment.rs @@ -305,7 +305,11 @@ pub fn conversions_from_str( .split('\n') .map(|line| line.trim()) .enumerate() - .filter(|(_, line)| !line.is_empty() && !line.starts_with('#')) + .filter(|(_, line)| 
{ + !line.is_empty() + && !line.starts_with('#') + && !line.starts_with("//") + }) .map(|(i, line)| { let (local, container) = line .split_once('=') @@ -443,6 +447,10 @@ pub struct ContainerSummary { pub state: DeploymentState, /// The status string of the docker container. pub status: Option, + /// The network mode of the container. + pub network_mode: Option, + /// Network names attached to the container + pub networks: Option>, } #[typeshare] @@ -598,7 +606,11 @@ pub fn term_signal_labels_from_str( .split('\n') .map(|line| line.trim()) .enumerate() - .filter(|(_, line)| !line.is_empty() && !line.starts_with('#')) + .filter(|(_, line)| { + !line.is_empty() + && !line.starts_with('#') + && !line.starts_with("//") + }) .map(|(i, line)| { let (signal, label) = line .split_once('=') @@ -732,8 +744,11 @@ impl<'de> Visitor<'de> for OptionTermSignalLabelVisitor { #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] pub struct DeploymentActionState { pub deploying: bool, - pub stopping: bool, pub starting: bool, + pub restarting: bool, + pub pausing: bool, + pub unpausing: bool, + pub stopping: bool, pub removing: bool, pub renaming: bool, } diff --git a/client/core/rs/src/entities/mod.rs b/client/core/rs/src/entities/mod.rs index f4551f838..a1ce6b226 100644 --- a/client/core/rs/src/entities/mod.rs +++ b/client/core/rs/src/entities/mod.rs @@ -37,6 +37,8 @@ pub mod logger; pub mod permission; /// Subtypes of [Procedure][procedure::Procedure]. pub mod procedure; +/// Subtypes of [ProviderAccount][provider::ProviderAccount] +pub mod provider; /// Subtypes of [Repo][repo::Repo]. pub mod repo; /// Subtypes of [Resource][resource::Resource]. @@ -45,6 +47,8 @@ pub mod resource; pub mod server; /// Subtypes of [ServerTemplate][server_template::ServerTemplate]. pub mod server_template; +/// Subtypes of [Stack][stack::Stack] +pub mod stack; /// Subtypes of [ResourceSync][sync::ResourceSync] pub mod sync; /// Subtypes of [Tag][tag::Tag]. 
@@ -66,6 +70,8 @@ pub type I64 = i64; pub type U64 = u64; #[typeshare(serialized_as = "any")] pub type MongoDocument = bson::Document; +#[typeshare(serialized_as = "any")] +pub type JsonValue = serde_json::Value; #[typeshare(serialized_as = "MongoIdObj")] pub type MongoId = String; #[typeshare(serialized_as = "__Serror")] @@ -110,12 +116,21 @@ pub fn optional_string(string: &str) -> Option { pub fn get_image_name( build::Build { name, - config: build::BuildConfig { image_registry, .. }, + config: + build::BuildConfig { + image_name, + image_registry, + .. + }, .. }: &build::Build, aws_ecr: impl FnOnce(&String) -> Option, ) -> anyhow::Result { - let name = to_monitor_name(name); + let name = if image_name.is_empty() { + to_monitor_name(name) + } else { + to_monitor_name(image_name) + }; let name = match image_registry { build::ImageRegistry::None(_) => name, build::ImageRegistry::AwsEcr(label) => { @@ -323,7 +338,7 @@ impl Version { #[typeshare] #[derive( - Debug, Clone, Default, PartialEq, Serialize, Deserialize, + Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, )] pub struct EnvironmentVar { pub variable: String, @@ -351,7 +366,11 @@ pub fn environment_vars_from_str( .split('\n') .map(|line| line.trim()) .enumerate() - .filter(|(_, line)| !line.is_empty() && !line.starts_with('#')) + .filter(|(_, line)| { + !line.is_empty() + && !line.starts_with('#') + && !line.starts_with("//") + }) .map(|(i, line)| { let (variable, value) = line .split_once('=') @@ -549,13 +568,30 @@ impl From<&self::sync::ResourceSync> for CloneArgs { destination: None, on_clone: None, on_pull: None, - provider: Some(String::from("github.com")), + provider: optional_string(&sync.config.git_provider), https: sync.config.git_https, account: optional_string(&sync.config.git_account), } } } +impl From<&self::stack::Stack> for CloneArgs { + fn from(stack: &self::stack::Stack) -> Self { + CloneArgs { + name: stack.name.clone(), + repo: optional_string(&stack.config.repo), + branch: 
optional_string(&stack.config.branch), + commit: optional_string(&stack.config.commit), + destination: None, + on_clone: None, + on_pull: None, + provider: optional_string(&stack.config.git_provider), + https: stack.config.git_https, + account: optional_string(&stack.config.git_account), + } + } +} + #[typeshare] #[derive( Serialize, @@ -698,8 +734,11 @@ pub enum Operation { UpdateDeployment, DeleteDeployment, Deploy, - StopContainer, StartContainer, + RestartContainer, + PauseContainer, + UnpauseContainer, + StopContainer, RemoveContainer, RenameDeployment, @@ -709,6 +748,8 @@ pub enum Operation { DeleteRepo, CloneRepo, PullRepo, + BuildRepo, + CancelRepoBuild, // alerter CreateAlerter, @@ -733,10 +774,41 @@ pub enum Operation { DeleteResourceSync, RunSync, + // stack + CreateStack, + UpdateStack, + RenameStack, + DeleteStack, + RefreshStackCache, + DeployStack, + StartStack, + RestartStack, + PauseStack, + UnpauseStack, + StopStack, + DestroyStack, + + // stack (service) + StartStackService, + RestartStackService, + PauseStackService, + UnpauseStackService, + StopStackService, + // variable CreateVariable, UpdateVariableValue, DeleteVariable, + + // git provider + CreateGitProviderAccount, + UpdateGitProviderAccount, + DeleteGitProviderAccount, + + // docker registry + CreateDockerRegistryAccount, + UpdateDockerRegistryAccount, + DeleteDockerRegistryAccount, } #[typeshare] diff --git a/client/core/rs/src/entities/provider.rs b/client/core/rs/src/entities/provider.rs new file mode 100644 index 000000000..39946df0d --- /dev/null +++ b/client/core/rs/src/entities/provider.rs @@ -0,0 +1,111 @@ +use partial_derive2::Partial; +use serde::{Deserialize, Serialize}; +use typeshare::typeshare; + +use super::MongoId; + +#[typeshare(serialized_as = "Partial")] +pub type _PartialGitProviderAccount = PartialGitProviderAccount; + +/// Configuration to access private git repos from various git providers. +/// Note. Cannot create two accounts with the same domain and username. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Default, Partial, +)] +#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)] +#[partial(skip_serializing_none, from, diff)] +#[cfg_attr( + feature = "mongo", + derive(mongo_indexed::derive::MongoIndexed) +)] +#[cfg_attr(feature = "mongo", unique_doc_index({ "domain": 1, "username": 1 }))] +pub struct GitProviderAccount { + /// The Mongo ID of the git provider account. + /// This field is de/serialized from/to JSON as + /// `{ "_id": { "$oid": "..." }, ...(rest of serialized User) }` + #[serde( + default, + rename = "_id", + skip_serializing_if = "String::is_empty", + with = "bson::serde_helpers::hex_string_as_object_id" + )] + pub id: MongoId, + /// The domain of the provider. + /// + /// For git, this cannot include the protocol eg 'http://', + /// which is controlled with 'https' field. + #[cfg_attr(feature = "mongo", index)] + #[serde(default = "default_git_domain")] + #[partial_default(default_git_domain())] + pub domain: String, + /// Whether git provider is accessed over http or https. + #[serde(default = "default_https")] + #[partial_default(default_https())] + pub https: bool, + /// The account username + #[cfg_attr(feature = "mongo", index)] + #[serde(default)] + pub username: String, + /// The token in plain text on the db. + /// If the database / host can be accessed this is insecure. + #[serde(default)] + pub token: String, +} + +fn default_git_domain() -> String { + String::from("github.com") +} + +fn default_https() -> bool { + true +} + +#[typeshare(serialized_as = "Partial")] +pub type _PartialDockerRegistryAccount = PartialDockerRegistryAccount; + +/// Configuration to access private image repositories on various registries. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Default, Partial, +)] +#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)] +#[partial(skip_serializing_none, from, diff)] +#[cfg_attr( + feature = "mongo", + derive(mongo_indexed::derive::MongoIndexed) +)] +#[cfg_attr(feature = "mongo", unique_doc_index({ "domain": 1, "username": 1 }))] +pub struct DockerRegistryAccount { + /// The Mongo ID of the docker registry account. + /// This field is de/serialized from/to JSON as + /// `{ "_id": { "$oid": "..." }, ...(rest of DockerRegistryAccount) }` + #[serde( + default, + rename = "_id", + skip_serializing_if = "String::is_empty", + with = "bson::serde_helpers::hex_string_as_object_id" + )] + pub id: MongoId, + /// The domain of the provider. + /// + /// For docker registry, this can include 'http://...', + /// however this is not recommended and won't work unless "insecure registries" are enabled + /// on your hosts. See [https://docs.docker.com/reference/cli/dockerd/#insecure-registries]. + #[cfg_attr(feature = "mongo", index)] + #[serde(default = "default_registry_domain")] + #[partial_default(default_registry_domain())] + pub domain: String, + /// The account username + #[cfg_attr(feature = "mongo", index)] + #[serde(default)] + pub username: String, + /// The token in plain text on the db. + /// If the database / host can be accessed this is insecure. 
+ #[serde(default)] + pub token: String, +} + +fn default_registry_domain() -> String { + String::from("docker.io") +} \ No newline at end of file diff --git a/client/core/rs/src/entities/repo.rs b/client/core/rs/src/entities/repo.rs index ba9613054..0ba5eceb1 100644 --- a/client/core/rs/src/entities/repo.rs +++ b/client/core/rs/src/entities/repo.rs @@ -10,7 +10,7 @@ use crate::entities::I64; use super::{ resource::{Resource, ResourceListItem, ResourceQuery}, - SystemCommand, + EnvironmentVar, SystemCommand, }; #[typeshare] @@ -23,6 +23,8 @@ pub struct RepoListItemInfo { pub server_id: String, /// Repo last cloned / pulled timestamp in ms. pub last_pulled_at: I64, + /// Repo last built timestamp in ms. + pub last_built_at: I64, /// The git provider domain pub git_provider: String, /// The configured repo @@ -31,10 +33,14 @@ pub struct RepoListItemInfo { pub branch: String, /// The repo state pub state: RepoState, - /// If the repo is cloned, will be the latest short commit hash. + /// If the repo is cloned, will be the cloned short commit hash. + pub cloned_hash: Option, + /// If the repo is cloned, will be the cloned commit message. + pub cloned_message: Option, + /// If the repo is built, will be the latest built short commit hash. + pub built_hash: Option, + /// Will be the latest remote short commit hash. pub latest_hash: Option, - /// If the repo is cloned, will be the latest commit message. - pub latest_message: Option, } #[typeshare] @@ -62,7 +68,19 @@ pub type Repo = Resource; #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct RepoInfo { /// When repo was last pulled + #[serde(default)] pub last_pulled_at: I64, + /// When repo was last built + #[serde(default)] + pub last_built_at: I64, + /// Latest built short commit hash, or null. + pub built_hash: Option, + /// Latest built commit message, or null. Only for repo based stacks + pub built_message: Option, + /// Latest remote short commit hash, or null. 
+ pub latest_hash: Option, + /// Latest remote commit message, or null + pub latest_message: Option, } #[typeshare(serialized_as = "Partial")] @@ -74,11 +92,15 @@ pub type _PartialRepoConfig = PartialRepoConfig; #[partial(skip_serializing_none, from, diff)] pub struct RepoConfig { /// The server to clone the repo on. - #[serde(default, alias = "server")] - #[partial_attr(serde(alias = "server"))] + #[serde(default)] #[builder(default)] pub server_id: String, + /// Attach a builder to 'build' the repo. + #[serde(default)] + #[builder(default)] + pub builder_id: String, + /// The git provider domain. Default: github.com #[serde(default = "default_git_provider")] #[builder(default = "default_git_provider()")] @@ -106,7 +128,7 @@ pub struct RepoConfig { /// /// Note. A token for the account must be available in the core config or the builder server's periphery config /// for the configured git provider. - #[serde(default, alias = "github_account")] + #[serde(default)] #[builder(default)] pub git_account: String, @@ -118,7 +140,7 @@ pub struct RepoConfig { #[partial_default(default_git_https())] pub git_https: bool, - /// Explicitly specificy the folder to clone the repo in. + /// Explicitly specify the folder to clone the repo in. #[serde(default)] #[builder(default)] pub path: String, @@ -135,6 +157,35 @@ pub struct RepoConfig { #[builder(default)] pub on_pull: SystemCommand, + /// The environment variables passed to the compose file. + /// They will be written to path defined in env_file_path, + /// which is given relative to the run directory. + /// + /// If it is empty, no file will be written. + #[serde( + default, + deserialize_with = "super::env_vars_deserializer" + )] + #[partial_attr(serde( + default, + deserialize_with = "super::option_env_vars_deserializer" + ))] + #[builder(default)] + pub environment: Vec, + + /// The name of the written environment file before `docker compose up`. + /// Relative to the repo root. 
+ /// Default: .env + #[serde(default = "default_env_file_path")] + #[builder(default = "default_env_file_path()")] + #[partial_default(default_env_file_path())] + pub env_file_path: String, + + /// Whether to skip secret interpolation into the repo environment variable file. + #[serde(default)] + #[builder(default)] + pub skip_secret_interp: bool, + /// Whether incoming webhooks actually trigger action. #[serde(default = "default_webhook_enabled")] #[builder(default = "default_webhook_enabled()")] @@ -160,6 +211,10 @@ fn default_branch() -> String { String::from("main") } +fn default_env_file_path() -> String { + String::from(".env") +} + fn default_webhook_enabled() -> bool { true } @@ -168,6 +223,7 @@ impl Default for RepoConfig { fn default() -> Self { Self { server_id: Default::default(), + builder_id: Default::default(), git_provider: default_git_provider(), git_https: default_git_https(), repo: Default::default(), @@ -177,6 +233,9 @@ impl Default for RepoConfig { path: Default::default(), on_clone: Default::default(), on_pull: Default::default(), + environment: Default::default(), + env_file_path: default_env_file_path(), + skip_secret_interp: Default::default(), webhook_enabled: default_webhook_enabled(), } } @@ -189,6 +248,8 @@ pub struct RepoActionState { pub cloning: bool, /// Whether repo currently pulling pub pulling: bool, + /// Whether repo currently building, using the attached builder. 
+ pub building: bool, } #[typeshare] diff --git a/client/core/rs/src/entities/resource.rs b/client/core/rs/src/entities/resource.rs index 0c4c06744..6a9da84c7 100644 --- a/client/core/rs/src/entities/resource.rs +++ b/client/core/rs/src/entities/resource.rs @@ -1,6 +1,4 @@ -use std::str::FromStr; - -use bson::{doc, oid::ObjectId, Document}; +use bson::{doc, Document}; use derive_builder::Builder; use derive_default_builder::DefaultBuilder; use serde::{Deserialize, Serialize}; @@ -8,7 +6,9 @@ use typeshare::typeshare; use crate::entities::{MongoId, I64}; -use super::update::ResourceTargetVariant; +use super::{ + permission::PermissionLevel, update::ResourceTargetVariant, +}; #[typeshare] #[derive(Debug, Clone, Serialize, Deserialize, Builder)] @@ -53,6 +53,12 @@ pub struct Resource { #[serde(default)] #[builder(default)] pub config: Config, + + /// Set a base permission level that all users will have on the + /// resource. + #[serde(default)] + #[builder(default)] + pub base_permission: PermissionLevel, } #[typeshare] @@ -77,8 +83,6 @@ pub struct ResourceListItem { Serialize, Deserialize, Debug, Clone, Default, DefaultBuilder, )] pub struct ResourceQuery { - #[serde(default)] - pub ids: Vec, #[serde(default)] pub names: Vec, /// Pass Vec of tag ids or tag names @@ -108,14 +112,6 @@ impl AddFilters for () {} impl AddFilters for ResourceQuery { fn add_filters(&self, filters: &mut Document) { - if !self.ids.is_empty() { - let ids = self - .ids - .iter() - .flat_map(|id| ObjectId::from_str(id)) - .collect::>(); - filters.insert("_id", doc! { "$in": &ids }); - } if !self.names.is_empty() { filters.insert("name", doc! 
{ "$in": &self.names }); } diff --git a/client/core/rs/src/entities/server/mod.rs b/client/core/rs/src/entities/server/mod.rs index 197de4e2a..4ce7cb961 100644 --- a/client/core/rs/src/entities/server/mod.rs +++ b/client/core/rs/src/entities/server/mod.rs @@ -44,7 +44,10 @@ pub type _PartialServerConfig = PartialServerConfig; #[partial(skip_serializing_none, from, diff)] pub struct ServerConfig { /// The http address of the periphery client. - /// Example: http://localhost:8120 + /// Default: http://localhost:8120 + #[serde(default = "default_address")] + #[builder(default = "default_address()")] + #[partial_default(default_address())] pub address: String, /// An optional region label @@ -142,6 +145,10 @@ impl ServerConfig { } } +fn default_address() -> String { + String::from("http://localhost:8120") +} + fn default_enabled() -> bool { false } diff --git a/client/core/rs/src/entities/stack.rs b/client/core/rs/src/entities/stack.rs new file mode 100644 index 000000000..e7220267d --- /dev/null +++ b/client/core/rs/src/entities/stack.rs @@ -0,0 +1,483 @@ +use std::{collections::HashMap, sync::OnceLock}; + +use bson::{doc, Document}; +use derive_builder::Builder; +use derive_default_builder::DefaultBuilder; +use partial_derive2::Partial; +use serde::{Deserialize, Serialize}; +use strum::Display; +use typeshare::typeshare; + +use super::{ + deployment::ContainerSummary, + resource::{Resource, ResourceListItem, ResourceQuery}, + to_monitor_name, EnvironmentVar, +}; + +#[typeshare] +pub type Stack = Resource; + +impl Stack { + /// If fresh is passed, it will bypass the deployed project name. + /// and get the most up to date one from just project_name field falling back to stack name. 
+ pub fn project_name(&self, fresh: bool) -> String { + if !fresh { + if let Some(project_name) = &self.info.deployed_project_name { + return project_name.clone(); + } + } + self + .config + .project_name + .is_empty() + .then(|| to_monitor_name(&self.name)) + .unwrap_or_else(|| to_monitor_name(&self.config.project_name)) + } + + pub fn file_paths(&self) -> &[String] { + if self.config.file_paths.is_empty() { + default_stack_file_paths() + } else { + &self.config.file_paths + } + } +} + +fn default_stack_file_paths() -> &'static [String] { + static DEFAULT_FILE_PATHS: OnceLock> = OnceLock::new(); + DEFAULT_FILE_PATHS + .get_or_init(|| vec![String::from("compose.yaml")]) +} + +#[typeshare] +pub type StackListItem = ResourceListItem; + +#[typeshare] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct StackListItemInfo { + /// The server that stack is deployed on. + pub server_id: String, + /// The git provider domain + pub git_provider: String, + /// The configured repo + pub repo: String, + /// The configured branch + pub branch: String, + /// The stack state + pub state: StackState, + /// A string given by docker conveying the status of the stack. + pub status: Option, + /// The service names that are part of the stack. + /// If deployed, will be `deployed_services`. + /// Otherwise, its `latest_services` + pub services: Vec, + /// Whether the compose project is missing on the host. + /// Ie, it does not show up in `docker compose ls`. + /// If true, and the stack is not Down, this is an unhealthy state. + pub project_missing: bool, + /// If any compose files are missing in the repo, the path will be here. + /// If there are paths here, this is an unhealthy state, and deploying will fail. + pub missing_files: Vec, + /// Deployed short commit hash, or null. Only for repo based stacks. + pub deployed_hash: Option, + /// Latest short commit hash, or null. 
Only for repo based stacks + pub latest_hash: Option, +} + +#[typeshare] +#[derive( + Debug, + Clone, + Copy, + Default, + PartialEq, + Eq, + Serialize, + Deserialize, + Display, +)] +// Do this one snake_case in line with DeploymentState. +// Also in line with docker terminology. +#[serde(rename_all = "snake_case")] +#[strum(serialize_all = "snake_case")] +pub enum StackState { + /// All containers are running. + Running, + /// All containers are paused + Paused, + /// All containers are stopped + Stopped, + /// All containers are restarting + Restarting, + /// All containers are dead + Dead, + /// The containers are in a mix of states + Unhealthy, + /// The stack is not deployed + Down, + /// Server not reachable + #[default] + Unknown, +} + +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct StackInfo { + /// If any of the expected files are missing in the repo, + /// they will be stored here. + #[serde(default)] + pub missing_files: Vec, + + /// The deployed project name. + /// This is updated whenever Monitor successfully deploys the stack. + /// If it is present, Monitor will use it for actions over other options, + /// to ensure control is maintained after changing the project name (there is no rename compose project api). + pub deployed_project_name: Option, + + /// Deployed short commit hash, or null. Only for repo based stacks. + pub deployed_hash: Option, + /// Deployed commit message, or null. Only for repo based stacks + pub deployed_message: Option, + /// Cached json representation of the deployed compose file contents + /// Obtained by calling `docker compose config`. Will be of the deployed config if it exists. + pub deployed_json: Option>, + /// If there was an error in calling `docker compose config`, the message will be here with the associated file path. + pub deployed_json_errors: Option>, + /// The deployed compose file contents. This is updated whenever Monitor successfully deploys the stack. 
+ pub deployed_contents: Option>, + /// The deployed service names. + /// This is updated whenever it is empty, or deployed contents is updated. + pub deployed_services: Option>, + + /// Cached json representation of the compose file contents. + /// Obtained by calling `docker compose config`. Will be of the latest config, not the deployed config. + #[serde(default)] + pub latest_json: Vec, + /// If there was an error in calling `docker compose config` on the latest contents, the message will be here + #[serde(default)] + pub latest_json_errors: Vec, + /// The latest service names. + /// This is updated whenever the stack cache refreshes, using the latest file contents (either db defined or remote). + #[serde(default)] + pub latest_services: Vec, + + /// The remote compose file contents. This is updated whenever Monitor refreshes the stack cache. + /// It will be empty if the file is defined directly in the stack config. + pub remote_contents: Option>, + /// If there was an error in getting the remote contents, it will be here. + pub remote_errors: Option>, + + /// Latest commit hash, or null + pub latest_hash: Option, + /// Latest commit message, or null + pub latest_message: Option, +} + +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ComposeProject { + /// The compose project name. + pub name: String, + /// The status of the project, as returned by docker. + pub status: Option, + /// The compose files included in the project. 
+ pub compose_files: Vec, +} + +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ComposeContents { + /// The path of the file on the host + pub path: String, + /// The contents of the file + pub contents: String, +} + +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct StackServiceNames { + /// The name of the service + pub service_name: String, + /// Will either be the declared container_name in the compose file, + /// or a pattern to match auto named containers. + /// + /// Auto named containers are composed of three parts: + /// + /// 1. The name of the compose project (top level name field of compose file). + /// This defaults to the name of the parent folder of the compose file. + /// Monitor will always set it to be the name of the stack, but imported stacks + /// will have a different name. + /// 2. The service name + /// 3. The replica number + /// + /// Example: stacko-mongo-1. + /// + /// This stores only 1. and 2., ie stacko-mongo. + /// Containers will be matched via regex like `^container_name-?[0-9]*$`` + pub container_name: String, +} + +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct StackService { + /// The service name + pub service: String, + /// The container + pub container: Option, +} + +#[typeshare(serialized_as = "Partial")] +pub type _PartialStackConfig = PartialStackConfig; + +/// The compose file configuration. +#[typeshare] +#[derive(Debug, Clone, Serialize, Deserialize, Builder, Partial)] +#[partial_derive(Debug, Clone, Default, Serialize, Deserialize)] +#[partial(skip_serializing_none, from, diff)] +pub struct StackConfig { + /// The server to deploy the stack on. + #[serde(default)] + #[builder(default)] + pub server_id: String, + + /// Optionally specify a custom project name for the stack. + /// If this is empty string, it will default to the stack name. + /// Used with `docker compose -p {project_name}`. + /// + /// Note. 
Can be used to import pre-existing stacks. + #[serde(default)] + #[builder(default)] + pub project_name: String, + + /// Directory to change to (`cd`) before running `docker compose up -d`. + /// Default: `./` (the repo root) + #[serde(default = "default_run_directory")] + #[builder(default = "default_run_directory()")] + #[partial_default(default_run_directory())] + pub run_directory: String, + + /// Add paths to compose files, relative to the run path. + /// If this is empty, will use file `compose.yaml`. + #[serde(default)] + #[builder(default)] + pub file_paths: Vec, + + /// Used with `registry_account` to login to a registry before docker compose up. + #[serde(default)] + #[builder(default)] + pub registry_provider: String, + + /// Used with `registry_provider` to login to a registry before docker compose up. + #[serde(default)] + #[builder(default)] + pub registry_account: String, + + /// The extra arguments to pass after `docker compose up -d`. + /// If empty, no extra arguments will be passed. + #[serde(default)] + #[builder(default)] + pub extra_args: Vec, + + /// The environment variables passed to the compose file. + /// They will be written to path defined in env_file_path, + /// which is given relative to the run directory. + /// + /// If it is empty, no file will be written. + #[serde( + default, + deserialize_with = "super::env_vars_deserializer" + )] + #[partial_attr(serde( + default, + deserialize_with = "super::option_env_vars_deserializer" + ))] + #[builder(default)] + pub environment: Vec, + + /// The name of the written environment file before `docker compose up`. + /// Relative to the repo root. + /// Default: .env + #[serde(default = "default_env_file_path")] + #[builder(default = "default_env_file_path()")] + #[partial_default(default_env_file_path())] + pub env_file_path: String, + + /// Whether to skip secret interpolation into the stack environment variables. 
+ #[serde(default)] + #[builder(default)] + pub skip_secret_interp: bool, + + /// The contents of the file directly, for management in the UI. + /// If this is empty, it will fall back to checking git config for + /// repo based compose file. + #[serde(default)] + #[builder(default)] + pub file_contents: String, + + /// The git provider domain. Default: github.com + #[serde(default = "default_git_provider")] + #[builder(default = "default_git_provider()")] + #[partial_default(default_git_provider())] + pub git_provider: String, + + /// Whether to use https to clone the repo (versus http). Default: true + /// + /// Note. Monitor does not currently support cloning repos via ssh. + #[serde(default = "default_git_https")] + #[builder(default = "default_git_https()")] + #[partial_default(default_git_https())] + pub git_https: bool, + + /// The git account used to access private repos. + /// Passing empty string can only clone public repos. + /// + /// Note. A token for the account must be available in the core config or the builder server's periphery config + /// for the configured git provider. + #[serde(default)] + #[builder(default)] + pub git_account: String, + + /// The Github repo used as the source of the build. + #[serde(default)] + #[builder(default)] + pub repo: String, + + /// The branch of the repo. + #[serde(default = "default_branch")] + #[builder(default = "default_branch()")] + #[partial_default(default_branch())] + pub branch: String, + + /// Optionally set a specific commit hash. + #[serde(default)] + #[builder(default)] + pub commit: String, + + /// Whether incoming webhooks actually trigger action. + #[serde(default = "default_webhook_enabled")] + #[builder(default = "default_webhook_enabled()")] + #[partial_default(default_webhook_enabled())] + pub webhook_enabled: bool, + + /// Whether to send StackStateChange alerts for this stack. 
+ #[serde(default = "default_send_alerts")] + #[builder(default = "default_send_alerts()")] + #[partial_default(default_send_alerts())] + pub send_alerts: bool, +} + +impl StackConfig { + pub fn builder() -> StackConfigBuilder { + StackConfigBuilder::default() + } +} + +fn default_env_file_path() -> String { + String::from(".env") +} + +fn default_git_provider() -> String { + String::from("github.com") +} + +fn default_git_https() -> bool { + true +} + +fn default_branch() -> String { + String::from("main") +} + +fn default_run_directory() -> String { + String::from("./") +} + +fn default_webhook_enabled() -> bool { + true +} + +fn default_send_alerts() -> bool { + true +} + +impl Default for StackConfig { + fn default() -> Self { + Self { + server_id: Default::default(), + project_name: Default::default(), + run_directory: default_run_directory(), + file_paths: Default::default(), + registry_provider: Default::default(), + registry_account: Default::default(), + file_contents: Default::default(), + extra_args: Default::default(), + environment: Default::default(), + env_file_path: default_env_file_path(), + skip_secret_interp: Default::default(), + git_provider: default_git_provider(), + git_https: default_git_https(), + repo: Default::default(), + branch: default_branch(), + commit: Default::default(), + git_account: Default::default(), + webhook_enabled: default_webhook_enabled(), + send_alerts: default_send_alerts(), + } + } +} + +#[typeshare] +#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)] +pub struct StackActionState { + pub deploying: bool, + pub starting: bool, + pub restarting: bool, + pub pausing: bool, + pub unpausing: bool, + pub stopping: bool, + pub destroying: bool, +} + +#[typeshare] +pub type StackQuery = ResourceQuery; + +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Default, DefaultBuilder, +)] +pub struct StackQuerySpecifics { + /// Filter syncs by their repo. 
+ pub repos: Vec, +} + +impl super::resource::AddFilters for StackQuerySpecifics { + fn add_filters(&self, filters: &mut Document) { + if !self.repos.is_empty() { + filters.insert("config.repo", doc! { "$in": &self.repos }); + } + } +} + +/// Keeping this minimal for now as it's only needed to parse the service names / container names +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ComposeFile { + /// If not provided, will default to the parent folder holding the compose file. + pub name: Option, + #[serde(default)] + pub services: HashMap, +} + +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ComposeService { + pub image: Option, + pub container_name: Option, + pub deploy: Option, +} + +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ComposeServiceDeploy { + pub replicas: u8, +} diff --git a/client/core/rs/src/entities/sync.rs b/client/core/rs/src/entities/sync.rs index d94989cdc..6e21598c5 100644 --- a/client/core/rs/src/entities/sync.rs +++ b/client/core/rs/src/entities/sync.rs @@ -99,10 +99,14 @@ impl Default for PendingSyncUpdatesData { #[typeshare] #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct PendingSyncUpdatesDataOk { - /// Readable log of any pending server updates - pub server_updates: Option, + /// Readable log of any deploy actions that will be performed + pub deploy_updates: Option, /// Readable log of any pending deployment updates pub deployment_updates: Option, + /// Readable log of any pending stack updates + pub stack_updates: Option, + /// Readable log of any pending server updates + pub server_updates: Option, /// Readable log of any pending build updates pub build_updates: Option, /// Readable log of any pending repo updates @@ -125,8 +129,10 @@ pub struct PendingSyncUpdatesDataOk { impl PendingSyncUpdatesDataOk { pub fn no_updates(&self) -> bool { - self.server_updates.is_none() + self.deploy_updates.is_none() && 
self.deployment_updates.is_none() + && self.stack_updates.is_none() + && self.server_updates.is_none() && self.build_updates.is_none() && self.repo_updates.is_none() && self.procedure_updates.is_none() @@ -152,6 +158,15 @@ pub struct SyncUpdate { pub log: String, } +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct SyncDeployUpdate { + /// Resources to deploy + pub to_deploy: i32, + /// A readable log of all the changes to be applied + pub log: String, +} + #[typeshare] #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PendingSyncUpdatesDataErr { @@ -202,12 +217,13 @@ pub struct ResourceSyncConfig { /// /// Note. A token for the account must be available in the core config or the builder server's periphery config /// for the configured git provider. - #[serde(default, alias = "github_account")] + #[serde(default)] #[builder(default)] pub git_account: String, - /// The github account used to clone (used to access private repos). - /// Empty string is public clone (only public repos). + /// The path of the resource file(s) to sync, relative to the repo root. + /// Can be a specific file, or a directory containing multiple files / folders. + /// See `https://docs.monitor.dev/docs/sync-resources` for more information. 
#[serde(default = "default_resource_path")] #[builder(default = "default_resource_path()")] #[partial_default(default_resource_path())] diff --git a/client/core/rs/src/entities/toml.rs b/client/core/rs/src/entities/toml.rs index 2b23f5b9b..77e790512 100644 --- a/client/core/rs/src/entities/toml.rs +++ b/client/core/rs/src/entities/toml.rs @@ -12,6 +12,7 @@ use super::{ repo::PartialRepoConfig, server::PartialServerConfig, server_template::PartialServerTemplateConfig, + stack::PartialStackConfig, sync::PartialResourceSyncConfig, update::{ResourceTarget, ResourceTargetVariant}, variable::Variable, @@ -34,6 +35,13 @@ pub struct ResourcesToml { )] pub deployments: Vec>, + #[serde( + default, + rename = "stack", + skip_serializing_if = "Vec::is_empty" + )] + pub stacks: Vec>, + #[serde( default, rename = "build", @@ -112,20 +120,29 @@ pub struct ResourceToml { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub tags: Vec, - /// Optional. Only relevant for deployments. + /// Optional. Only relevant for deployments / stacks. /// - /// Will ensure deployment is running with the latest configuration. + /// Will ensure deployment / stack is running with the latest configuration. /// Deploy actions to achieve this will be included in the sync. + /// Default is false. #[serde(default, skip_serializing_if = "is_false")] pub deploy: bool, - /// Optional. Only relevant for deployments using the 'deploy' sync feature. + /// Optional. Only relevant for deployments / stacks using the 'deploy' sync feature. /// - /// Specify other deployments as dependencies. - /// The sync will ensure the deployment will only be deployed 'after' its dependencies. + /// Specify other deployments / stacks by name as dependencies. + /// The sync will ensure the deployment / stack will only be deployed 'after' its dependencies. #[serde(default, skip_serializing_if = "Vec::is_empty")] pub after: Vec, + /// Optional. Only relevant for stacks / repos. 
+ /// + /// Will ensure stacks with 'deploy = true' are running with the latest commit hash for the repo. + /// Deploy actions to achieve this will be included in the sync. + /// Default is false. + #[serde(default, skip_serializing_if = "is_false")] + pub latest_hash: bool, + /// Resource specific configuration. #[serde(default)] pub config: PartialConfig, diff --git a/client/core/rs/src/entities/update.rs b/client/core/rs/src/entities/update.rs index 9577d2601..e6ac61dea 100644 --- a/client/core/rs/src/entities/update.rs +++ b/client/core/rs/src/entities/update.rs @@ -11,7 +11,7 @@ use crate::entities::{ use super::{ alerter::Alerter, build::Build, builder::Builder, deployment::Deployment, procedure::Procedure, repo::Repo, - server::Server, server_template::ServerTemplate, + server::Server, server_template::ServerTemplate, stack::Stack, sync::ResourceSync, Version, }; @@ -67,10 +67,14 @@ pub struct Update { /// - `Queued` /// - `InProgress` /// - `Complete` + #[cfg_attr(feature = "mongo", index)] pub status: UpdateStatus, /// An optional version on the update, ie build version or deployed version. #[serde(default, skip_serializing_if = "Version::is_none")] pub version: Version, + /// An optional commit hash associated with the update, ie cloned hash or deployed hash. + #[serde(default, skip_serializing_if = "String::is_empty")] + pub commit_hash: String, /// Some unstructured, operation specific data. Not for general usage. 
#[serde(default, skip_serializing_if = "String::is_empty")] pub other_data: String, @@ -184,6 +188,18 @@ impl Log { ..Default::default() } } + + /// Combines stdout / stderr into one log + pub fn combined(&self) -> String { + match (self.stdout.is_empty(), self.stderr.is_empty()) { + (true, true) => String::from("No log"), + (true, false) => self.stderr.to_string(), + (false, true) => self.stdout.to_string(), + (false, false) => { + format!("stdout: {}\n\nstderr: {}", self.stdout, self.stderr) + } + } + } } /// Used to reference a specific resource across all resource types @@ -225,6 +241,7 @@ pub enum ResourceTarget { Procedure(String), ServerTemplate(String), ResourceSync(String), + Stack(String), } impl ResourceTarget { @@ -242,6 +259,7 @@ impl ResourceTarget { ResourceTarget::Procedure(id) => id, ResourceTarget::ServerTemplate(id) => id, ResourceTarget::ResourceSync(id) => id, + ResourceTarget::Stack(id) => id, }; (self.extract_variant(), id) } @@ -311,6 +329,12 @@ impl From<&ResourceSync> for ResourceTarget { } } +impl From<&Stack> for ResourceTarget { + fn from(resource_sync: &Stack) -> Self { + Self::Stack(resource_sync.id.clone()) + } +} + /// An update's status #[typeshare] #[derive( diff --git a/client/core/rs/src/entities/user.rs b/client/core/rs/src/entities/user.rs index 194256cc6..225570b84 100644 --- a/client/core/rs/src/entities/user.rs +++ b/client/core/rs/src/entities/user.rs @@ -21,7 +21,7 @@ use super::{ pub struct User { /// The Mongo ID of the User. /// This field is de/serialized from/to JSON as - /// `{ "_id": { "$oid": "..." }, ...(rest of serialized User) }` + /// `{ "_id": { "$oid": "..." }, ...(rest of User schema) }` #[serde( default, rename = "_id", @@ -81,7 +81,14 @@ impl User { pub fn is_service_user(user_id: &str) -> bool { matches!( user_id, - "Procedure" | "Github" | "Auto Redeploy" | "Resource Sync" + "Procedure" + | "Github" // Github can be removed later, just keeping for backward compat. 
+ | "Git Webhook" + | "Auto Redeploy" + | "Resource Sync" + | "Stack Wizard" + | "Build Manager" + | "Repo Manager" ) } } @@ -89,9 +96,14 @@ impl User { pub fn admin_service_user(user_id: &str) -> Option { match user_id { "Procedure" => procedure_user().to_owned().into(), - "Github" => github_user().to_owned().into(), + // Github should be removed later, replaced by Git Webhook, just keeping for backward compat. + "Github" => git_webhook_user().to_owned().into(), + "Git Webhook" => git_webhook_user().to_owned().into(), "Auto Redeploy" => auto_redeploy_user().to_owned().into(), "Resource Sync" => sync_user().to_owned().into(), + "Stack Wizard" => stack_user().to_owned().into(), + "Build Manager" => build_user().to_owned().into(), + "Repo Manager" => repo_user().to_owned().into(), _ => None, } } @@ -109,10 +121,10 @@ pub fn procedure_user() -> &'static User { }) } -pub fn github_user() -> &'static User { - static PROCEDURE_USER: OnceLock = OnceLock::new(); - PROCEDURE_USER.get_or_init(|| { - let id_name = String::from("Github"); +pub fn git_webhook_user() -> &'static User { + static GIT_WEBHOOK_USER: OnceLock = OnceLock::new(); + GIT_WEBHOOK_USER.get_or_init(|| { + let id_name = String::from("Git Webhook"); User { id: id_name.clone(), username: id_name, @@ -148,6 +160,45 @@ pub fn sync_user() -> &'static User { }) } +pub fn stack_user() -> &'static User { + static STACK_USER: OnceLock = OnceLock::new(); + STACK_USER.get_or_init(|| { + let id_name = String::from("Stack Wizard"); + User { + id: id_name.clone(), + username: id_name, + admin: true, + ..Default::default() + } + }) +} + +pub fn build_user() -> &'static User { + static BUILD_USER: OnceLock = OnceLock::new(); + BUILD_USER.get_or_init(|| { + let id_name = String::from("Build Manager"); + User { + id: id_name.clone(), + username: id_name, + admin: true, + ..Default::default() + } + }) +} + +pub fn repo_user() -> &'static User { + static REPO_USER: OnceLock = OnceLock::new(); + REPO_USER.get_or_init(|| { + let 
id_name = String::from("Repo Manager"); + User { + id: id_name.clone(), + username: id_name, + admin: true, + ..Default::default() + } + }) +} + #[typeshare] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type", content = "data")] diff --git a/client/core/rs/src/lib.rs b/client/core/rs/src/lib.rs index d23e70d5b..3b0f7a546 100644 --- a/client/core/rs/src/lib.rs +++ b/client/core/rs/src/lib.rs @@ -16,7 +16,7 @@ //! //! ## Client Example //! ``` -//! dotenv::dotenv().ok(); +//! dotenvy::dotenv().ok(); //! //! let client = MonitorClient::new_from_env()?; //! diff --git a/client/core/ts/src/responses.ts b/client/core/ts/src/responses.ts index ced3297a8..5aee72a49 100644 --- a/client/core/ts/src/responses.ts +++ b/client/core/ts/src/responses.ts @@ -20,8 +20,8 @@ export type ReadResponses = { GetCoreInfo: Types.GetCoreInfoResponse; ListAwsEcrLabels: Types.ListAwsEcrLabelsResponse; ListSecrets: Types.ListSecretsResponse; - ListGitProviders: Types.ListGitProvidersResponse; - ListDockerRegistries: Types.ListDockerRegistriesResponse; + ListGitProvidersFromConfig: Types.ListGitProvidersFromConfigResponse; + ListDockerRegistriesFromConfig: Types.ListDockerRegistriesFromConfigResponse; // ==== USER ==== GetUsername: Types.GetUsernameResponse; @@ -58,10 +58,10 @@ export type ReadResponses = { GetServer: Types.GetServerResponse; GetServerState: Types.GetServerStateResponse; GetPeripheryVersion: Types.GetPeripheryVersionResponse; - GetSystemInformation: Types.GetSystemInformationResponse; - GetDockerContainers: Types.GetDockerContainersResponse; - GetDockerImages: Types.GetDockerImagesResponse; - GetDockerNetworks: Types.GetDockerNetworksResponse; + ListDockerContainers: Types.ListDockerContainersResponse; + ListDockerImages: Types.ListDockerImagesResponse; + ListDockerNetworks: Types.ListDockerNetworksResponse; + ListComposeProjects: Types.ListComposeProjectsResponse; GetServerActionState: Types.GetServerActionStateResponse; GetHistoricalServerStats: 
Types.GetHistoricalServerStatsResponse; ListServers: Types.ListServersResponse; @@ -98,6 +98,26 @@ export type ReadResponses = { ListRepos: Types.ListReposResponse; ListFullRepos: Types.ListFullReposResponse; + // ==== SYNC ==== + GetResourceSyncsSummary: Types.GetResourceSyncsSummaryResponse; + GetResourceSync: Types.GetResourceSyncResponse; + GetResourceSyncActionState: Types.GetResourceSyncActionStateResponse; + GetSyncWebhooksEnabled: Types.GetSyncWebhooksEnabledResponse; + ListResourceSyncs: Types.ListResourceSyncsResponse; + ListFullResourceSyncs: Types.ListFullResourceSyncsResponse; + + // ==== STACK ==== + GetStacksSummary: Types.GetStacksSummaryResponse; + GetStack: Types.GetStackResponse; + GetStackActionState: Types.GetStackActionStateResponse; + GetStackWebhooksEnabled: Types.GetStackWebhooksEnabledResponse; + GetStackServiceLog: Types.GetStackServiceLogResponse; + SearchStackServiceLog: Types.SearchStackServiceLogResponse; + ListStacks: Types.ListStacksResponse; + ListFullStacks: Types.ListFullStacksResponse; + ListStackServices: Types.ListStackServicesResponse; + ListCommonStackExtraArgs: Types.ListCommonStackExtraArgsResponse; + // ==== BUILDER ==== GetBuildersSummary: Types.GetBuildersSummaryResponse; GetBuilder: Types.GetBuilderResponse; @@ -110,14 +130,6 @@ export type ReadResponses = { ListAlerters: Types.ListAlertersResponse; ListFullAlerters: Types.ListFullAlertersResponse; - // ==== SYNC ==== - GetResourceSyncsSummary: Types.GetResourceSyncsSummaryResponse; - GetResourceSync: Types.GetResourceSyncResponse; - GetResourceSyncActionState: Types.GetResourceSyncActionStateResponse; - GetSyncWebhooksEnabled: Types.GetSyncWebhooksEnabledResponse; - ListResourceSyncs: Types.ListResourceSyncsResponse; - ListFullResourceSyncs: Types.ListFullResourceSyncsResponse; - // ==== TOML ==== ExportAllResourcesToToml: Types.ExportAllResourcesToTomlResponse; ExportResourcesToToml: Types.ExportResourcesToTomlResponse; @@ -135,12 +147,19 @@ export type ReadResponses 
= { GetAlert: Types.GetAlertResponse; // ==== SERVER STATS ==== + GetSystemInformation: Types.GetSystemInformationResponse; GetSystemStats: Types.GetSystemStatsResponse; - GetSystemProcesses: Types.GetSystemProcessesResponse; + ListSystemProcesses: Types.ListSystemProcessesResponse; // ==== VARIABLE ==== GetVariable: Types.GetVariableResponse; ListVariables: Types.ListVariablesResponse; + + // ==== PROVIDER ==== + GetGitProviderAccount: Types.GetGitProviderAccountResponse; + ListGitProviderAccounts: Types.ListGitProviderAccountsResponse; + GetDockerRegistryAccount: Types.GetDockerRegistryAccountResponse; + ListDockerRegistryAccounts: Types.ListDockerRegistryAccountsResponse; }; export type WriteResponses = { @@ -187,6 +206,7 @@ export type WriteResponses = { CopyBuild: Types.Build; DeleteBuild: Types.Build; UpdateBuild: Types.Build; + RefreshBuildCache: Types.NoData; CreateBuildWebhook: Types.CreateBuildWebhookResponse; DeleteBuildWebhook: Types.DeleteBuildWebhookResponse; @@ -207,6 +227,7 @@ export type WriteResponses = { CopyRepo: Types.Repo; DeleteRepo: Types.Repo; UpdateRepo: Types.Repo; + RefreshRepoCache: Types.NoData; CreateRepoWebhook: Types.CreateRepoWebhookResponse; DeleteRepoWebhook: Types.DeleteRepoWebhookResponse; @@ -231,6 +252,16 @@ export type WriteResponses = { CreateSyncWebhook: Types.CreateSyncWebhookResponse; DeleteSyncWebhook: Types.DeleteSyncWebhookResponse; + // ==== STACK ==== + CreateStack: Types.Stack; + CopyStack: Types.Stack; + DeleteStack: Types.Stack; + UpdateStack: Types.Stack; + RenameStack: Types.Update; + RefreshStackCache: Types.NoData; + CreateStackWebhook: Types.CreateStackWebhookResponse; + DeleteStackWebhook: Types.DeleteStackWebhookResponse; + // ==== TAG ==== CreateTag: Types.Tag; DeleteTag: Types.Tag; @@ -242,10 +273,19 @@ export type WriteResponses = { UpdateVariableValue: Types.UpdateVariableValueResponse; UpdateVariableDescription: Types.UpdateVariableDescriptionResponse; DeleteVariable: Types.DeleteVariableResponse; + + 
// ==== PROVIDERS ==== + CreateGitProviderAccount: Types.CreateGitProviderAccountResponse; + UpdateGitProviderAccount: Types.UpdateGitProviderAccountResponse; + DeleteGitProviderAccount: Types.DeleteGitProviderAccountResponse; + CreateDockerRegistryAccount: Types.CreateDockerRegistryAccountResponse; + UpdateDockerRegistryAccount: Types.UpdateDockerRegistryAccountResponse; + DeleteDockerRegistryAccount: Types.DeleteDockerRegistryAccountResponse; }; export type ExecuteResponses = { // ==== SERVER ==== + StopAllContainers: Types.Update; PruneContainers: Types.Update; PruneImages: Types.Update; PruneNetworks: Types.Update; @@ -253,17 +293,21 @@ export type ExecuteResponses = { // ==== DEPLOYMENT ==== Deploy: Types.Update; StartContainer: Types.Update; + RestartContainer: Types.Update; + PauseContainer: Types.Update; + UnpauseContainer: Types.Update; StopContainer: Types.Update; - StopAllContainers: Types.Update; RemoveContainer: Types.Update; // ==== BUILD ==== RunBuild: Types.Update; - CancelBuild: Types.CancelBuildResponse; + CancelBuild: Types.Update; // ==== REPO ==== CloneRepo: Types.Update; PullRepo: Types.Update; + BuildRepo: Types.Update; + CancelRepoBuild: Types.Update; // ==== PROCEDURE ==== RunProcedure: Types.Update; @@ -273,4 +317,22 @@ export type ExecuteResponses = { // ==== SYNC ==== RunSync: Types.Update; + + // ==== STACK ==== + DeployStack: Types.Update; + StartStack: Types.Update; + RestartStack: Types.Update; + StopStack: Types.Update; + PauseStack: Types.Update; + UnpauseStack: Types.Update; + DestroyStack: Types.Update; + + // ==== STACK Service ==== + DeployStackService: Types.Update; + StartStackService: Types.Update; + RestartStackService: Types.Update; + StopStackService: Types.Update; + PauseStackService: Types.Update; + UnpauseStackService: Types.Update; + DestroyStackService: Types.Update; }; diff --git a/client/core/ts/src/types.ts b/client/core/ts/src/types.ts index 6e90c051f..0f24bdef5 100644 --- a/client/core/ts/src/types.ts +++ 
b/client/core/ts/src/types.ts @@ -61,7 +61,7 @@ export interface User { /** * The Mongo ID of the User. * This field is de/serialized from/to JSON as - * `{ "_id": { "$oid": "..." }, ...(rest of serialized User) }` + * `{ "_id": { "$oid": "..." }, ...(rest of User schema) }` */ _id?: MongoId; /** The globally unique username for the user. */ @@ -87,12 +87,6 @@ export interface User { export type GetUserResponse = User; -/** Represents an empty json object: `{}` */ -export interface NoData { -} - -export type CancelBuildResponse = NoData; - /** Severity level of problem. */ export enum SeverityLevel { /** No problem. */ @@ -114,7 +108,8 @@ export type ResourceTarget = | { type: "Alerter", id: string } | { type: "Procedure", id: string } | { type: "ServerTemplate", id: string } - | { type: "ResourceSync", id: string }; + | { type: "ResourceSync", id: string } + | { type: "Stack", id: string }; /** The variants of data related to the alert. */ export type AlertData = @@ -177,7 +172,7 @@ export type AlertData = id: string; /** The name of the deployment */ name: string; - /** The server id of server deployment is on */ + /** The server id of server that the deployment is on */ server_id: string; /** The server name */ server_name: string; @@ -185,6 +180,21 @@ export type AlertData = from: DeploymentState; /** The current container state */ to: DeploymentState; +}} + /** A stack's state has changed unexpectedly. */ + | { type: "StackStateChange", data: { + /** The id of the stack */ + id: string; + /** The name of the stack */ + name: string; + /** The server id of server that the stack is on */ + server_id: string; + /** The server name */ + server_name: string; + /** The previous stack state */ + from: StackState; + /** The current stack state */ + to: StackState; }} /** An AWS builder failed to terminate. 
*/ | { type: "AwsBuilderTerminationFailed", data: { @@ -208,6 +218,13 @@ export type AlertData = name: string; /** The version that failed to build */ version: Version; +}} + /** A repo has failed */ + | { type: "RepoBuildFailed", data: { + /** The id of the repo */ + id: string; + /** The name of the repo */ + name: string; }}; /** Representation of an alert in the system. */ @@ -256,6 +273,11 @@ export interface Resource { info?: Info; /** Resource-specific configuration. */ config?: Config; + /** + * Set a base permission level that all users will have on the + * resource. + */ + base_permission?: PermissionLevel; } export type AlerterEndpoint = @@ -352,6 +374,25 @@ export interface BuildConfig { builder_id?: string; /** The current version of the build. */ version?: Version; + /** + * An alternate name for the image pushed to the repository. + * If this is empty, it will use the build name. + * + * Can be used in conjunction with `image_tag` to direct multiple builds + * with different configs to push to the same image registry, under different, + * independently versioned tags. + */ + image_name?: string; + /** + * An extra tag put before the build version, for the image pushed to the repository. + * Eg. in image tag of `aarch64` would push to mbecker20/monitor_core:aarch64-1.13.2. + * If this is empty, the image tag will just be the build version. + * + * Can be used in conjunction with `image_name` to direct multiple builds + * with different configs to push to the same image registry, under different, + * independently versioned tags. + */ + image_tag?: string; /** The git provider domain. Default: github.com */ git_provider: string; /** @@ -418,6 +459,14 @@ export interface BuildConfig { export interface BuildInfo { last_built_at: I64; + /** Latest built short commit hash, or null. */ + built_hash?: string; + /** Latest built commit message, or null. Only for repo based stacks */ + built_message?: string; + /** Latest remote short commit hash, or null. 
*/ + latest_hash?: string; + /** Latest remote commit message, or null */ + latest_message?: string; } export type Build = Resource; @@ -448,6 +497,10 @@ export interface BuildListItemInfo { branch: string; /** State of the build. Reflects whether most recent build successful. */ state: BuildState; + /** Latest built short commit hash, or null. */ + built_hash?: string; + /** Latest short commit hash, or null. Only for repo based stacks */ + latest_hash?: string; } export type BuildListItem = ResourceListItem; @@ -682,8 +735,11 @@ export type GetDeploymentStatsResponse = DockerContainerStats; export interface DeploymentActionState { deploying: boolean; - stopping: boolean; starting: boolean; + restarting: boolean; + pausing: boolean; + unpausing: boolean; + stopping: boolean; removing: boolean; renaming: boolean; } @@ -696,7 +752,7 @@ export interface ProviderAccount { /** The account username. Required. */ username: string; /** The account access token. Required. */ - token: string; + token?: string; } export interface GitProvider { @@ -708,7 +764,7 @@ export interface GitProvider { accounts: ProviderAccount[]; } -export type ListGitProvidersResponse = GitProvider[]; +export type ListGitProvidersFromConfigResponse = GitProvider[]; export interface DockerRegistry { /** The docker provider domain. Default: `docker.io`. 
*/ @@ -722,7 +778,7 @@ export interface DockerRegistry { organizations?: string[]; } -export type ListDockerRegistriesResponse = DockerRegistry[]; +export type ListDockerRegistriesFromConfigResponse = DockerRegistry[]; export type ListAwsEcrLabelsResponse = string[]; @@ -758,17 +814,30 @@ export type Execution = | { type: "None", params: NoData } | { type: "RunProcedure", params: RunProcedure } | { type: "RunBuild", params: RunBuild } + | { type: "CancelBuild", params: CancelBuild } | { type: "Deploy", params: Deploy } | { type: "StartContainer", params: StartContainer } + | { type: "RestartContainer", params: RestartContainer } + | { type: "PauseContainer", params: PauseContainer } + | { type: "UnpauseContainer", params: UnpauseContainer } | { type: "StopContainer", params: StopContainer } - | { type: "StopAllContainers", params: StopAllContainers } | { type: "RemoveContainer", params: RemoveContainer } | { type: "CloneRepo", params: CloneRepo } | { type: "PullRepo", params: PullRepo } + | { type: "BuildRepo", params: BuildRepo } + | { type: "CancelRepoBuild", params: CancelRepoBuild } + | { type: "StopAllContainers", params: StopAllContainers } | { type: "PruneNetworks", params: PruneNetworks } | { type: "PruneImages", params: PruneImages } | { type: "PruneContainers", params: PruneContainers } | { type: "RunSync", params: RunSync } + | { type: "DeployStack", params: DeployStack } + | { type: "StartStack", params: StartStack } + | { type: "RestartStack", params: RestartStack } + | { type: "PauseStack", params: PauseStack } + | { type: "UnpauseStack", params: UnpauseStack } + | { type: "StopStack", params: StopStack } + | { type: "DestroyStack", params: DestroyStack } | { type: "Sleep", params: Sleep }; /** Allows to enable / disabled procedures in the sequence / parallel vec on the fly */ @@ -835,9 +904,73 @@ export interface ProcedureActionState { export type GetProcedureActionStateResponse = ProcedureActionState; +/** + * Configuration to access private git 
repos from various git providers. + Note. Cannot create two accounts with the same domain and username. + */ +export interface GitProviderAccount { + /** + * The Mongo ID of the git provider account. + * This field is de/serialized from/to JSON as + * `{ "_id": { "$oid": "..." }, ...(rest of serialized GitProviderAccount) }` + */ + _id?: MongoId; + /** + * The domain of the provider. + * + * For git, this cannot include the protocol eg 'http://', + * which is controlled with 'https' field. + */ + domain: string; + /** Whether git provider is accessed over http or https. */ + https: boolean; + /** The account username */ + username?: string; + /** + * The token in plain text on the db. + * If the database / host can be accessed this is insecure. + */ + token?: string; +} + +export type GetGitProviderAccountResponse = GitProviderAccount; + +export type ListGitProviderAccountsResponse = GitProviderAccount[]; + +/** Configuration to access private image repositories on various registries. */ +export interface DockerRegistryAccount { + /** + * The Mongo ID of the docker registry account. + * This field is de/serialized from/to JSON as + * `{ "_id": { "$oid": "..." }, ...(rest of DockerRegistryAccount) }` + */ + _id?: MongoId; + /** + * The domain of the provider. + * + * For docker registry, this can include 'http://...', + * however this is not recommended and won't work unless "insecure registries" are enabled + * on your hosts. See [https://docs.docker.com/reference/cli/dockerd/#insecure-registries]. + */ + domain: string; + /** The account username */ + username?: string; + /** + * The token in plain text on the db. + * If the database / host can be accessed this is insecure. + */ + token?: string; +} + +export type GetDockerRegistryAccountResponse = DockerRegistryAccount; + +export type ListDockerRegistryAccountsResponse = DockerRegistryAccount[]; + export interface RepoConfig { /** The server to clone the repo on. 
*/ server_id?: string; + /** Attach a builder to 'build' the repo. */ + builder_id?: string; /** The git provider domain. Default: github.com */ git_provider: string; /** The github repo to clone. */ @@ -860,7 +993,7 @@ export interface RepoConfig { * Note. Monitor does not currently support cloning repos via ssh. */ git_https: boolean; - /** Explicitly specificy the folder to clone the repo in. */ + /** Explicitly specify the folder to clone the repo in. */ path?: string; /** * Command to be run after the repo is cloned. @@ -872,13 +1005,39 @@ export interface RepoConfig { * The path is relative to the root of the repo. */ on_pull?: SystemCommand; + /** + * The environment variables passed to the compose file. + * They will be written to path defined in env_file_path, + * which is given relative to the run directory. + * + * If it is empty, no file will be written. + */ + environment?: EnvironmentVar[] | string; + /** + * The name of the written environment file before `docker compose up`. + * Relative to the repo root. + * Default: .env + */ + env_file_path: string; + /** Whether to skip secret interpolation into the repo environment variable file. */ + skip_secret_interp?: boolean; /** Whether incoming webhooks actually trigger action. */ webhook_enabled: boolean; } export interface RepoInfo { /** When repo was last pulled */ - last_pulled_at: I64; + last_pulled_at?: I64; + /** When repo was last built */ + last_built_at?: I64; + /** Latest built short commit hash, or null. */ + built_hash?: string; + /** Latest built commit message, or null. Only for repo based stacks */ + built_message?: string; + /** Latest remote short commit hash, or null. */ + latest_hash?: string; + /** Latest remote commit message, or null */ + latest_message?: string; } export type Repo = Resource; @@ -903,6 +1062,8 @@ export interface RepoListItemInfo { server_id: string; /** Repo last cloned / pulled timestamp in ms. */ last_pulled_at: I64; + /** Repo last built timestamp in ms. 
*/ + last_built_at: I64; /** The git provider domain */ git_provider: string; /** The configured repo */ @@ -911,10 +1072,14 @@ export interface RepoListItemInfo { branch: string; /** The repo state */ state: RepoState; - /** If the repo is cloned, will be the latest short commit hash. */ + /** If the repo is cloned, will be the cloned short commit hash. */ + cloned_hash?: string; + /** If the repo is cloned, will be the cloned commit message. */ + cloned_message?: string; + /** If the repo is built, will be the latest built short commit hash. */ + built_hash?: string; + /** Will be the latest remote short commit hash. */ latest_hash?: string; - /** If the repo is cloned, will be the latest commit message. */ - latest_message?: string; } export type RepoListItem = ResourceListItem; @@ -928,6 +1093,8 @@ export interface RepoActionState { cloning: boolean; /** Whether repo currently pulling */ pulling: boolean; + /** Whether repo currently building, using the attached builder. */ + building: boolean; } export type GetRepoActionStateResponse = RepoActionState; @@ -936,7 +1103,7 @@ export type GetRepoActionStateResponse = RepoActionState; export interface ServerConfig { /** * The http address of the periphery client. - * Example: http://localhost:8120 + * Default: http://localhost:8120 */ address: string; /** An optional region label */ @@ -1075,7 +1242,7 @@ export interface DockerNetwork { Labels?: Record; } -export type GetDockerNetworksResponse = DockerNetwork[]; +export type ListDockerNetworksResponse = DockerNetwork[]; /** Summary of a docker image cached on a server */ export interface ImageSummary { @@ -1101,7 +1268,7 @@ export interface ImageSummary { Containers: I64; } -export type GetDockerImagesResponse = ImageSummary[]; +export type ListDockerImagesResponse = ImageSummary[]; /** A summary of a docker container on a server. 
*/ export interface ContainerSummary { @@ -1117,9 +1284,24 @@ export interface ContainerSummary { state: DeploymentState; /** The status string of the docker container. */ status?: string; + /** The network mode of the container. */ + network_mode?: string; + /** Network names attached to the container */ + networks?: string[]; } -export type GetDockerContainersResponse = ContainerSummary[]; +export type ListDockerContainersResponse = ContainerSummary[]; + +export interface ComposeProject { + /** The compose project name. */ + name: string; + /** The status of the project, as returned by docker. */ + status?: string; + /** The compose files included in the project. */ + compose_files: string[]; +} + +export type ListComposeProjectsResponse = ComposeProject[]; /** System information of a server */ export interface SystemInformation { @@ -1221,7 +1403,7 @@ export interface SystemProcess { disk_write_kb: number; } -export type GetSystemProcessesResponse = SystemProcess[]; +export type ListSystemProcessesResponse = SystemProcess[]; export type ServerTemplateConfig = /** Template to launch an AWS EC2 instance */ @@ -1246,6 +1428,266 @@ export type ListServerTemplatesResponse = ServerTemplateListItem[]; export type ListFullServerTemplatesResponse = ServerTemplate[]; +/** The compose file configuration. */ +export interface StackConfig { + /** The server to deploy the stack on. */ + server_id?: string; + /** + * Optionally specify a custom project name for the stack. + * If this is empty string, it will default to the stack name. + * Used with `docker compose -p {project_name}`. + * + * Note. Can be used to import pre-existing stacks. + */ + project_name?: string; + /** + * Directory to change to (`cd`) before running `docker compose up -d`. + * Default: `./` (the repo root) + */ + run_directory: string; + /** + * Add paths to compose files, relative to the run path. + * If this is empty, will use file `compose.yaml`. 
+ */ + file_paths?: string[]; + /** Used with `registry_account` to login to a registry before docker compose up. */ + registry_provider?: string; + /** Used with `registry_provider` to login to a registry before docker compose up. */ + registry_account?: string; + /** + * The extra arguments to pass after `docker compose up -d`. + * If empty, no extra arguments will be passed. + */ + extra_args?: string[]; + /** + * The environment variables passed to the compose file. + * They will be written to path defined in env_file_path, + * which is given relative to the run directory. + * + * If it is empty, no file will be written. + */ + environment?: EnvironmentVar[] | string; + /** + * The name of the written environment file before `docker compose up`. + * Relative to the repo root. + * Default: .env + */ + env_file_path: string; + /** Whether to skip secret interpolation into the stack environment variables. */ + skip_secret_interp?: boolean; + /** + * The contents of the file directly, for management in the UI. + * If this is empty, it will fall back to checking git config for + * repo based compose file. + */ + file_contents?: string; + /** The git provider domain. Default: github.com */ + git_provider: string; + /** + * Whether to use https to clone the repo (versus http). Default: true + * + * Note. Monitor does not currently support cloning repos via ssh. + */ + git_https: boolean; + /** + * The git account used to access private repos. + * Passing empty string can only clone public repos. + * + * Note. A token for the account must be available in the core config or the builder server's periphery config + * for the configured git provider. + */ + git_account?: string; + /** The Github repo used as the source of the build. */ + repo?: string; + /** The branch of the repo. */ + branch: string; + /** Optionally set a specific commit hash. */ + commit?: string; + /** Whether incoming webhooks actually trigger action. 
*/ + webhook_enabled: boolean; + /** Whether to send StackStateChange alerts for this stack. */ + send_alerts: boolean; +} + +export interface ComposeContents { + /** The path of the file on the host */ + path: string; + /** The contents of the file */ + contents: string; +} + +export interface StackServiceNames { + /** The name of the service */ + service_name: string; + /** + * Will either be the declared container_name in the compose file, + * or a pattern to match auto named containers. + * + * Auto named containers are composed of three parts: + * + * 1. The name of the compose project (top level name field of compose file). + * This defaults to the name of the parent folder of the compose file. + * Monitor will always set it to be the name of the stack, but imported stacks + * will have a different name. + * 2. The service name + * 3. The replica number + * + * Example: stacko-mongo-1. + * + * This stores only 1. and 2., ie stacko-mongo. + * Containers will be matched via regex like `^container_name-?[0-9]*$`` + */ + container_name: string; +} + +export interface StackInfo { + /** + * If any of the expected files are missing in the repo, + * they will be stored here. + */ + missing_files?: string[]; + /** + * The deployed project name. + * This is updated whenever Monitor successfully deploys the stack. + * If it is present, Monitor will use it for actions over other options, + * to ensure control is maintained after changing the project name (there is no rename compose project api). + */ + deployed_project_name?: string; + /** Deployed short commit hash, or null. Only for repo based stacks. */ + deployed_hash?: string; + /** Deployed commit message, or null. Only for repo based stacks */ + deployed_message?: string; + /** + * Cached json representation of the deployed compose file contents + * Obtained by calling `docker compose config`. Will be of the deployed config if it exists. 
+ */ + deployed_json?: ComposeContents[]; + /** If there was an error in calling `docker compose config`, the message will be here with the associated file path. */ + deployed_json_errors?: ComposeContents[]; + /** The deployed compose file contents. This is updated whenever Monitor successfully deploys the stack. */ + deployed_contents?: ComposeContents[]; + /** + * The deployed service names. + * This is updated whenever it is empty, or deployed contents is updated. + */ + deployed_services?: StackServiceNames[]; + /** + * Cached json representation of the compose file contents. + * Obtained by calling `docker compose config`. Will be of the latest config, not the deployed config. + */ + latest_json?: ComposeContents[]; + /** If there was an error in calling `docker compose config` on the latest contents, the message will be here */ + latest_json_errors?: ComposeContents[]; + /** + * The latest service names. + * This is updated whenever the stack cache refreshes, using the latest file contents (either db defined or remote). + */ + latest_services?: StackServiceNames[]; + /** + * The remote compose file contents. This is updated whenever Monitor refreshes the stack cache. + * It will be empty if the file is defined directly in the stack config. + */ + remote_contents?: ComposeContents[]; + /** If there was an error in getting the remote contents, it will be here. 
*/ + remote_errors?: ComposeContents[]; + /** Latest commit hash, or null */ + latest_hash?: string; + /** Latest commit message, or null */ + latest_message?: string; +} + +export type Stack = Resource; + +export type GetStackResponse = Stack; + +export interface StackService { + /** The service name */ + service: string; + /** The container */ + container?: ContainerSummary; +} + +export type ListStackServicesResponse = StackService[]; + +export type GetStackServiceLogResponse = Log; + +export type SearchStackServiceLogResponse = Log; + +export type ListCommonStackExtraArgsResponse = string[]; + +export enum StackState { + /** All containers are running. */ + Running = "running", + /** All containers are paused */ + Paused = "paused", + /** All containers are stopped */ + Stopped = "stopped", + /** All containers are restarting */ + Restarting = "restarting", + /** All containers are dead */ + Dead = "dead", + /** The containers are in a mix of states */ + Unhealthy = "unhealthy", + /** The stack is not deployed */ + Down = "down", + /** Server not reachable */ + Unknown = "unknown", +} + +export interface StackListItemInfo { + /** The server that stack is deployed on. */ + server_id: string; + /** The git provider domain */ + git_provider: string; + /** The configured repo */ + repo: string; + /** The configured branch */ + branch: string; + /** The stack state */ + state: StackState; + /** A string given by docker conveying the status of the stack. */ + status?: string; + /** + * The service names that are part of the stack. + * If deployed, will be `deployed_services`. + * Otherwise, it's `latest_services` + */ + services: string[]; + /** + * Whether the compose project is missing on the host. + * Ie, it does not show up in `docker compose ls`. + * If true, and the stack is not Down, this is an unhealthy state. + */ + project_missing: boolean; + /** + * If any compose files are missing in the repo, the path will be here. 
+ * If there are paths here, this is an unhealthy state, and deploying will fail. + */ + missing_files: string[]; + /** Deployed short commit hash, or null. Only for repo based stacks. */ + deployed_hash?: string; + /** Latest short commit hash, or null. Only for repo based stacks */ + latest_hash?: string; +} + +export type StackListItem = ResourceListItem; + +export type ListStacksResponse = StackListItem[]; + +export type ListFullStacksResponse = Stack[]; + +export interface StackActionState { + deploying: boolean; + starting: boolean; + restarting: boolean; + pausing: boolean; + unpausing: boolean; + stopping: boolean; + destroying: boolean; +} + +export type GetStackActionStateResponse = StackActionState; + /** The sync configuration. */ export interface ResourceSyncConfig { /** The git provider domain. Default: github.com */ @@ -1271,8 +1713,9 @@ export interface ResourceSyncConfig { */ git_account?: string; /** - * The github account used to clone (used to access private repos). - * Empty string is public clone (only public repos). + * The path of the resource file(s) to sync, relative to the repo root. + * Can be a specific file, or a directory containing multiple files / folders. + * See `https://docs.monitor.dev/docs/sync-resources` for more information. 
*/ resource_path: string; /** @@ -1406,8 +1849,11 @@ export enum Operation { UpdateDeployment = "UpdateDeployment", DeleteDeployment = "DeleteDeployment", Deploy = "Deploy", - StopContainer = "StopContainer", StartContainer = "StartContainer", + RestartContainer = "RestartContainer", + PauseContainer = "PauseContainer", + UnpauseContainer = "UnpauseContainer", + StopContainer = "StopContainer", RemoveContainer = "RemoveContainer", RenameDeployment = "RenameDeployment", CreateRepo = "CreateRepo", @@ -1415,6 +1861,8 @@ export enum Operation { DeleteRepo = "DeleteRepo", CloneRepo = "CloneRepo", PullRepo = "PullRepo", + BuildRepo = "BuildRepo", + CancelRepoBuild = "CancelRepoBuild", CreateAlerter = "CreateAlerter", UpdateAlerter = "UpdateAlerter", DeleteAlerter = "DeleteAlerter", @@ -1430,9 +1878,32 @@ export enum Operation { UpdateResourceSync = "UpdateResourceSync", DeleteResourceSync = "DeleteResourceSync", RunSync = "RunSync", + CreateStack = "CreateStack", + UpdateStack = "UpdateStack", + RenameStack = "RenameStack", + DeleteStack = "DeleteStack", + RefreshStackCache = "RefreshStackCache", + DeployStack = "DeployStack", + StartStack = "StartStack", + RestartStack = "RestartStack", + PauseStack = "PauseStack", + UnpauseStack = "UnpauseStack", + StopStack = "StopStack", + DestroyStack = "DestroyStack", + StartStackService = "StartStackService", + RestartStackService = "RestartStackService", + PauseStackService = "PauseStackService", + UnpauseStackService = "UnpauseStackService", + StopStackService = "StopStackService", CreateVariable = "CreateVariable", UpdateVariableValue = "UpdateVariableValue", DeleteVariable = "DeleteVariable", + CreateGitProviderAccount = "CreateGitProviderAccount", + UpdateGitProviderAccount = "UpdateGitProviderAccount", + DeleteGitProviderAccount = "DeleteGitProviderAccount", + CreateDockerRegistryAccount = "CreateDockerRegistryAccount", + UpdateDockerRegistryAccount = "UpdateDockerRegistryAccount", + DeleteDockerRegistryAccount = 
"DeleteDockerRegistryAccount", } /** An update's status */ @@ -1483,6 +1954,8 @@ export interface Update { status: UpdateStatus; /** An optional version on the update, ie build version or deployed version. */ version?: Version; + /** An optional commit hash associated with the update, ie cloned hash or deployed hash. */ + commit_hash?: string; /** Some unstructured, operation specific data. Not for general usage. */ other_data?: string; } @@ -1562,6 +2035,10 @@ export type GetVariableResponse = Variable; export type ListVariablesResponse = Variable[]; +/** Represents an empty json object: `{}` */ +export interface NoData { +} + export type PushRecentlyViewedResponse = NoData; export type SetLastSeenUpdateResponse = NoData; @@ -1605,10 +2082,26 @@ export type DeleteProcedureResponse = Procedure; export type UpdateProcedureResponse = Procedure; +export type CreateGitProviderAccountResponse = GitProviderAccount; + +export type UpdateGitProviderAccountResponse = GitProviderAccount; + +export type DeleteGitProviderAccountResponse = GitProviderAccount; + +export type CreateDockerRegistryAccountResponse = DockerRegistryAccount; + +export type UpdateDockerRegistryAccountResponse = DockerRegistryAccount; + +export type DeleteDockerRegistryAccountResponse = DockerRegistryAccount; + export type CreateRepoWebhookResponse = NoData; export type DeleteRepoWebhookResponse = NoData; +export type CreateStackWebhookResponse = NoData; + +export type DeleteStackWebhookResponse = NoData; + export type CreateSyncWebhookResponse = NoData; export type DeleteSyncWebhookResponse = NoData; @@ -1638,7 +2131,6 @@ export enum TagBehavior { /** Passing empty Vec is the same as not filtering by that field */ export interface ResourceQuery { - ids?: string[]; names?: string[]; /** Pass Vec of tag ids or tag names */ tags?: string[]; @@ -1701,6 +2193,8 @@ export type U64 = number; export type MongoDocument = any; +export type JsonValue = any; + export interface __Serror { error: string; trace: 
string[]; @@ -1715,6 +2209,10 @@ export interface ProcedureQuerySpecifics { export type ProcedureQuery = ResourceQuery; +export type _PartialGitProviderAccount = Partial; + +export type _PartialDockerRegistryAccount = Partial; + export type _PartialRepoConfig = Partial; export interface RepoQuerySpecifics { @@ -1742,6 +2240,15 @@ export interface ServerTemplateQuerySpecifics { export type ServerTemplateQuery = ResourceQuery; +export type _PartialStackConfig = Partial; + +export interface StackQuerySpecifics { + /** Filter syncs by their repo. */ + repos: string[]; +} + +export type StackQuery = ResourceQuery; + export type _PartialResourceSyncConfig = Partial; export interface ResourceSyncQuerySpecifics { @@ -1853,9 +2360,15 @@ export interface CancelBuild { export interface Deploy { /** Name or id */ deployment: string; - /** Override the default termination signal specified in the deployment. */ + /** + * Override the default termination signal specified in the deployment. + * Only used when deployment needs to be taken down before redeploy. + */ stop_signal?: TerminationSignal; - /** Override the default termination max time. */ + /** + * Override the default termination max time. + * Only used when deployment needs to be taken down before redeploy. + */ stop_time?: number; } @@ -1869,6 +2382,38 @@ export interface StartContainer { deployment: string; } +/** + * Restarts the container for the target deployment. Response: [Update] + * + * 1. Runs `docker restart ${container_name}`. + */ +export interface RestartContainer { + /** Name or id */ + deployment: string; +} + +/** + * Pauses the container for the target deployment. Response: [Update] + * + * 1. Runs `docker pause ${container_name}`. + */ +export interface PauseContainer { + /** Name or id */ + deployment: string; +} + +/** + * Unpauses the container for the target deployment. Response: [Update] + * + * 1. Runs `docker unpause ${container_name}`. + * + * Note. 
This is the only way to restart a paused container. + */ +export interface UnpauseContainer { + /** Name or id */ + deployment: string; +} + /** * Stops the container for the target deployment. Response: [Update] * @@ -1883,16 +2428,6 @@ export interface StopContainer { time?: number; } -/** - * Stops all deployments on the target server. Response: [Update] - * - * 1. Runs [StopContainer] on all deployments on the server concurrently. - */ -export interface StopAllContainers { - /** Name or id */ - server: string; -} - /** * Stops and removes the container for the target deployment. * Reponse: [Update]. @@ -1921,6 +2456,8 @@ export interface RunProcedure { /** * Clones the target repo. Response: [Update]. * + * Note. Repo must have server attached at `server_id`. + * * 1. Clones the repo on the target server using `git clone https://{$token?}@github.com/${repo} -b ${branch}`. * The token will only be used if a github account is specified, * and must be declared in the periphery configuration on the target server. @@ -1935,6 +2472,8 @@ export interface CloneRepo { /** * Pulls the target repo. Response: [Update]. * + * Note. Repo must have server attached at `server_id`. + * * 1. Pulls the repo on the target server using `git pull`. * 2. If `on_pull` is specified, it will be executed after the pull is complete. */ @@ -1943,6 +2482,39 @@ export interface PullRepo { repo: string; } +/** + * Builds the target repo, using the attached builder. Response: [Update]. + * + * Note. Repo must have builder attached at `builder_id`. + * + * 1. Spawns the target builder instance (For AWS type. For Server type, just use CloneRepo). + * 2. Clones the repo on the builder using `git clone https://{$token?}@github.com/${repo} -b ${branch}`. + * The token will only be used if a github account is specified, + * and must be declared in the periphery configuration on the builder instance. + * 3. If `on_clone` and `on_pull` are specified, they will be executed. 
+ * `on_clone` will be executed before `on_pull`. + */ +export interface BuildRepo { + /** Id or name */ + repo: string; +} + +/** + * Cancels the target repo build. + * Only does anything if the repo build is `building` when called. + * Response: [Update] + */ +export interface CancelRepoBuild { + /** Can be id or name */ + repo: string; +} + +/** Stops all containers on the target server. Response: [Update] */ +export interface StopAllContainers { + /** Name or id */ + server: string; +} + /** * Prunes the docker networks on the target server. Response: [Update]. * @@ -1984,6 +2556,77 @@ export interface LaunchServer { server_template: string; } +/** + * Deploys the target stack. `docker compose up`. Response: [Update] + * + * Note. If the stack is already deployed, it will be destroyed first. + */ +export interface DeployStack { + /** Id or name */ + stack: string; + /** + * Override the default termination max time. + * Only used if the stack needs to be taken down first. + */ + stop_time?: number; +} + +/** Starts the target stack. `docker compose start`. Response: [Update] */ +export interface StartStack { + /** Id or name */ + stack: string; + /** Optionally specify a specific service to start */ + service?: string; +} + +/** Restarts the target stack. `docker compose restart`. Response: [Update] */ +export interface RestartStack { + /** Id or name */ + stack: string; + /** Optionally specify a specific service to restart */ + service?: string; +} + +/** Pauses the target stack. `docker compose pause`. Response: [Update] */ +export interface PauseStack { + /** Id or name */ + stack: string; + /** Optionally specify a specific service to pause */ + service?: string; +} + +/** + * Unpauses the target stack. `docker compose unpause`. Response: [Update]. + * + * Note. This is the only way to restart a paused container. 
+ */ +export interface UnpauseStack { + /** Id or name */ + stack: string; + /** Optionally specify a specific service to unpause */ + service?: string; +} + +/** Starts the target stack. `docker compose stop`. Response: [Update] */ +export interface StopStack { + /** Id or name */ + stack: string; + /** Override the default termination max time. */ + stop_time?: number; + /** Optionally specify a specific service to stop */ + service?: string; +} + +/** Destoys the target stack. `docker compose down`. Response: [Update] */ +export interface DestroyStack { + /** Id or name */ + stack: string; + /** Pass `--remove-orphans` */ + remove_orphans?: boolean; + /** Override the default termination max time. */ + stop_time?: number; +} + /** Runs the target resource sync. Response: [Update] */ export interface RunSync { /** Id or name */ @@ -2390,15 +3033,15 @@ export interface GetCoreInfoResponse { } /** - * List the git providers. - * Response: [ListGitProvidersResponse]. + * List the git providers available in Core / Periphery config files. + * Response: [ListGitProvidersFromConfigResponse]. * * Includes: * - providers in core config * - providers configured on builds, repos, syncs * - providers on the optional Server or Builder */ -export interface ListGitProviders { +export interface ListGitProvidersFromConfig { /** * Accepts an optional Server or Builder target to expand the core list with * providers available on that specific resource. @@ -2407,15 +3050,15 @@ export interface ListGitProviders { } /** - * List the suggested docker registry providers. - * Response: [ListDockerRegistriesResponse]. + * List the docker registry providers available in Core / Periphery config files. + * Response: [ListDockerRegistriesFromConfigResponse]. 
* * Includes: * - registries in core config * - registries configured on builds, deployments * - registries on the optional Server or Builder */ -export interface ListDockerRegistries { +export interface ListDockerRegistriesFromConfig { /** * Accepts an optional Server or Builder target to expand the core list with * providers available on that specific resource. @@ -2514,6 +3157,44 @@ export interface GetProceduresSummaryResponse { unknown: number; } +/** + * Get a specific git provider account. + * Response: [GetGitProviderAccountResponse]. + */ +export interface GetGitProviderAccount { + id: string; +} + +/** + * List git provider accounts matching optional query. + * Response: [ListGitProvidersResponse]. + */ +export interface ListGitProviderAccounts { + /** Optionally filter by accounts with a specific domain. */ + domain?: string; + /** Optionally filter by accounts with a specific username. */ + username?: string; +} + +/** + * Get a specific docker registry account. + * Response: [GetDockerRegistryAccountResponse]. + */ +export interface GetDockerRegistryAccount { + id: string; +} + +/** + * List docker registry accounts matching optional query. + * Response: [ListDockerRegistrysResponse]. + */ +export interface ListDockerRegistryAccounts { + /** Optionally filter by accounts with a specific domain. */ + domain?: string; + /** Optionally filter by accounts with a specific username. */ + username?: string; +} + /** Get a specific repo. Response: [Repo]. */ export interface GetRepo { /** Id or name */ @@ -2653,26 +3334,35 @@ export interface GetPeripheryVersionResponse { version: string; } -/** Get the docker networks on the server. Response: [GetDockerNetworksResponse]. */ -export interface GetDockerNetworks { +/** List the docker networks on the server. Response: [ListDockerNetworksResponse]. */ +export interface ListDockerNetworks { /** Id or name */ server: string; } /** - * Get the docker images locally cached on the target server. 
- * Response: [GetDockerImagesResponse]. + * List the docker images locally cached on the target server. + * Response: [ListDockerImagesResponse]. */ -export interface GetDockerImages { +export interface ListDockerImages { /** Id or name */ server: string; } /** - * Get all docker containers on the target server. - * Response: [GetDockerContainersResponse]. + * List all docker containers on the target server. + * Response: [ListDockerContainersResponse]. */ -export interface GetDockerContainers { +export interface ListDockerContainers { + /** Id or name */ + server: string; +} + +/** + * List all compose projects on the target server. + * Response: [ListComposeProjectsResponse]. + */ +export interface ListComposeProjects { /** Id or name */ server: string; } @@ -2699,14 +3389,14 @@ export interface GetSystemStats { } /** - * Get the processes running on the target server. - * Response: [GetSystemProcessesResponse]. + * List the processes running on the target server. + * Response: [ListSystemProcessesResponse]. * * Note. This does not hit the server directly. The procedures come from an * in memory cache on the core, which hits the server periodically * to keep it up to date. */ -export interface GetSystemProcesses { +export interface ListSystemProcesses { /** Id or name */ server: string; } @@ -2803,6 +3493,131 @@ export interface GetServerTemplatesSummaryResponse { total: number; } +/** Get a specific stack. Response: [Stack]. */ +export interface GetStack { + /** Id or name */ + stack: string; +} + +/** Lists a specific stacks services (the containers). Response: [ListStackServicesResponse]. */ +export interface ListStackServices { + /** Id or name */ + stack: string; +} + +/** Get a stack service's log. Response: [GetStackContainersResponse]. */ +export interface GetStackServiceLog { + /** Id or name */ + stack: string; + /** The service to get the log for. */ + service: string; + /** + * The number of lines of the log tail to include. + * Default: 100. 
+ * Max: 5000. + */ + tail: U64; +} + +/** + * Search the deployment log's tail using `grep`. All lines go to stdout. + * Response: [Log]. + * + * Note. This call will hit the underlying server directly for most up to date log. + */ +export interface SearchStackServiceLog { + /** Id or name */ + stack: string; + /** The service to get the log for. */ + service: string; + /** The terms to search for. */ + terms: string[]; + /** + * When searching for multiple terms, can use `AND` or `OR` combinator. + * + * - `AND`: Only include lines with **all** terms present in that line. + * - `OR`: Include lines that have one or more matches in the terms. + */ + combinator?: SearchCombinator; + /** Invert the results, ie return all lines that DON'T match the terms / combinator. */ + invert?: boolean; +} + +/** + * Gets a list of existing values used as extra args across other stacks. + * Useful to offer suggestions. Response: [ListCommonStackExtraArgsResponse] + */ +export interface ListCommonStackExtraArgs { + /** optional structured query to filter stacks. */ + query?: StackQuery; +} + +/** List stacks matching optional query. Response: [ListStacksResponse]. */ +export interface ListStacks { + /** optional structured query to filter syncs. */ + query?: StackQuery; +} + +/** List stacks matching optional query. Response: [ListFullStacksResponse]. */ +export interface ListFullStacks { + /** optional structured query to filter stacks. */ + query?: StackQuery; +} + +/** Get current action state for the stack. Response: [StackActionState]. */ +export interface GetStackActionState { + /** Id or name */ + stack: string; +} + +/** + * Gets a summary of data relating to all syncs. + * Response: [GetStacksSummaryResponse]. + */ +export interface GetStacksSummary { +} + +/** Response for [GetStacksSummary] */ +export interface GetStacksSummaryResponse { + /** The total number of stacks */ + total: number; + /** The number of stacks with Running state. 
*/ + running: number; + /** The number of stacks with Paused state. */ + paused: number; + /** The number of stacks with Stopped state. */ + stopped: number; + /** The number of stacks with Restarting state. */ + restarting: number; + /** The number of stacks with Dead state. */ + dead: number; + /** The number of stacks with Unhealthy state. */ + unhealthy: number; + /** The number of stacks with Down state. */ + down: number; + /** The number of stacks with Unknown state. */ + unknown: number; +} + +/** Get a target stack's configured webhooks. Response: [GetStackWebhooksEnabledResponse]. */ +export interface GetStackWebhooksEnabled { + /** Id or name */ + stack: string; +} + +/** Response for [GetStackWebhooksEnabled] */ +export interface GetStackWebhooksEnabledResponse { + /** + * Whether the repo webhooks can even be managed. + * The repo owner must be in `github_webhook_app.owners` list to be managed. + */ + managed: boolean; + /** Whether pushes to branch trigger refresh. Will always be false if managed is false. */ + refresh_enabled: boolean; + /** Whether pushes to branch trigger stack execution. Will always be false if managed is false. */ + deploy_enabled: boolean; +} + /** Get a specific sync. Response: [ResourceSync]. */ export interface GetResourceSync { /** Id or name */ @@ -3215,6 +4030,12 @@ export interface UpdateBuild { config: _PartialBuildConfig; } +/** Trigger a refresh of the cached latest hash and message. */ +export interface RefreshBuildCache { + /** Id or name */ + build: string; +} + /** * Create a webhook on the github repo attached to the build * passed in request. Response: [CreateBuildWebhookResponse] @@ -3318,6 +4139,9 @@ export interface DeleteDeployment { * Update the deployment at the given id, and return the updated deployment. * Response: [Deployment]. * + * Note. If the attached server for the deployment changes, + * the deployment will be deleted / cleaned up on the old server. + * * Note. 
This method updates only the fields which are set in the [_PartialDeploymentConfig], * effectively merging diffs into the final document. * This is helpful when multiple users are using @@ -3441,6 +4265,66 @@ export interface UpdateProcedure { config: _PartialProcedureConfig; } +/** + * **Admin only.** Create a git provider account. + * Response: [GitProviderAccount]. + */ +export interface CreateGitProviderAccount { + /** + * The initial account config. Anything in the _id field will be ignored, + * as this is generated on creation. + */ + account: _PartialGitProviderAccount; +} + +/** + * **Admin only.** Update a git provider account. + * Response: [GitProviderAccount]. + */ +export interface UpdateGitProviderAccount { + /** The id of the git provider account to update. */ + id: string; + /** The partial git provider account. */ + account: _PartialGitProviderAccount; +} + +/** + * **Admin only.** Delete a git provider account. + * Response: [User]. + */ +export interface DeleteGitProviderAccount { + /** The id of the git provider to delete */ + id: string; +} + +/** + * **Admin only.** Create a docker registry account. + * Response: [DockerRegistryAccount]. + */ +export interface CreateDockerRegistryAccount { + account: _PartialDockerRegistryAccount; +} + +/** + * **Admin only.** Update a docker registry account. + * Response: [DockerRegistryAccount]. + */ +export interface UpdateDockerRegistryAccount { + /** The id of the docker registry to update */ + id: string; + /** The partial docker registry account. */ + account: _PartialDockerRegistryAccount; +} + +/** + * **Admin only.** Delete a docker registry account. + * Response: [DockerRegistryAccount]. + */ +export interface DeleteDockerRegistryAccount { + /** The id of the docker registry account to delete */ + id: string; +} + /** Create a repo. Response: [Repo]. */ export interface CreateRepo { /** The name given to newly created repo. 
*/ @@ -3489,6 +4373,12 @@ export interface UpdateRepo { config: _PartialRepoConfig; } +/** Trigger a refresh of the cached latest hash and message. */ +export interface RefreshRepoCache { + /** Id or name */ + repo: string; +} + export enum RepoWebhookAction { Clone = "Clone", Pull = "Pull", @@ -3629,6 +4519,100 @@ export interface UpdateServerTemplate { config: PartialServerTemplateConfig; } +/** Create a stack. Response: [Stack]. */ +export interface CreateStack { + /** The name given to newly created stack. */ + name: string; + /** Optional partial config to initialize the stack with. */ + config: _PartialStackConfig; +} + +/** + * Creates a new stack with given `name` and the configuration + * of the stack at the given `id`. Response: [Stack]. + */ +export interface CopyStack { + /** The name of the new stack. */ + name: string; + /** The id of the stack to copy. */ + id: string; +} + +/** + * Deletes the stack at the given id, and returns the deleted stack. + * Response: [Stack] + */ +export interface DeleteStack { + /** The id or name of the stack to delete. */ + id: string; +} + +/** + * Update the stack at the given id, and return the updated stack. + * Response: [Stack]. + * + * Note. If the attached server for the stack changes, + * the stack will be deleted / cleaned up on the old server. + * + * Note. This method updates only the fields which are set in the [_PartialStackConfig], + * merging diffs into the final document. + * This is helpful when multiple users are using + * the same resources concurrently by ensuring no unintentional + * field changes occur from out of date local state. + */ +export interface UpdateStack { + /** The id of the Stack to update. */ + id: string; + /** The partial config update to apply. */ + config: _PartialStackConfig; +} + +/** Rename the stack at id to the given name. Response: [Update]. */ +export interface RenameStack { + /** The id of the stack to rename. */ + id: string; + /** The new name. 
*/ + name: string; +} + +/** + * Trigger a refresh of the cached compose file contents. + * Refreshes: + * - Whether the remote file is missing + * - The latest json, and for repos, the remote contents, hash, and message. + */ +export interface RefreshStackCache { + /** Id or name */ + stack: string; +} + +export enum StackWebhookAction { + Refresh = "Refresh", + Deploy = "Deploy", +} + +/** + * Create a webhook on the github repo attached to the stack + * passed in request. Response: [CreateStackWebhookResponse] + */ +export interface CreateStackWebhook { + /** Id or name */ + stack: string; + /** "Refresh" or "Deploy" */ + action: StackWebhookAction; +} + +/** + * Delete the webhook on the github repo attached to the stack + * passed in request. Response: [DeleteStackWebhookResponse] + */ +export interface DeleteStackWebhook { + /** Id or name */ + stack: string; + /** "Refresh" or "Deploy" */ + action: StackWebhookAction; +} + /** Create a sync. Response: [ResourceSync]. */ export interface CreateResourceSync { /** The name given to newly created sync. */ @@ -3661,9 +4645,6 @@ export interface DeleteResourceSync { * Update the sync at the given id, and return the updated sync. * Response: [ResourceSync]. * - * Note. If the attached server for the sync changes, - * the sync will be deleted / cleaned up on the old server. - * * Note. This method updates only the fields which are set in the [_PartialResourceSyncConfig], * effectively merging diffs into the final document. 
* This is helpful when multiple users are using @@ -4126,6 +5107,30 @@ export interface HetznerServerTemplateConfig { port: number; } +export interface ComposeServiceDeploy { + replicas: number; +} + +export interface ComposeService { + image?: string; + container_name?: string; + deploy?: ComposeServiceDeploy; +} + +/** Keeping this minimal for now as its only needed to parse the service names / container names */ +export interface ComposeFile { + /** If not provided, will default to the parent folder holding the compose file. */ + name?: string; + services?: Record; +} + +export interface SyncDeployUpdate { + /** Resources to deploy */ + to_deploy: number; + /** A readable log of all the changes to be applied */ + log: string; +} + export interface SyncUpdate { /** Resources to create */ to_create: number; @@ -4138,10 +5143,14 @@ export interface SyncUpdate { } export interface PendingSyncUpdatesDataOk { - /** Readable log of any pending server updates */ - server_updates?: SyncUpdate; + /** Readable log of any deploy actions that will be performed */ + deploy_updates?: SyncDeployUpdate; /** Readable log of any pending deployment updates */ deployment_updates?: SyncUpdate; + /** Readable log of any pending deployment updates */ + stack_updates?: SyncUpdate; + /** Readable log of any pending server updates */ + server_updates?: SyncUpdate; /** Readable log of any pending build updates */ build_updates?: SyncUpdate; /** Readable log of any pending repo updates */ @@ -4174,18 +5183,30 @@ export type AuthRequest = | { type: "GetUser", params: GetUser }; export type ExecuteRequest = + | { type: "StopAllContainers", params: StopAllContainers } | { type: "PruneContainers", params: PruneContainers } | { type: "PruneImages", params: PruneImages } | { type: "PruneNetworks", params: PruneNetworks } | { type: "Deploy", params: Deploy } | { type: "StartContainer", params: StartContainer } + | { type: "RestartContainer", params: RestartContainer } + | { type: "PauseContainer", 
params: PauseContainer } + | { type: "UnpauseContainer", params: UnpauseContainer } | { type: "StopContainer", params: StopContainer } - | { type: "StopAllContainers", params: StopAllContainers } | { type: "RemoveContainer", params: RemoveContainer } + | { type: "DeployStack", params: DeployStack } + | { type: "StartStack", params: StartStack } + | { type: "RestartStack", params: RestartStack } + | { type: "StopStack", params: StopStack } + | { type: "PauseStack", params: PauseStack } + | { type: "UnpauseStack", params: UnpauseStack } + | { type: "DestroyStack", params: DestroyStack } | { type: "RunBuild", params: RunBuild } | { type: "CancelBuild", params: CancelBuild } | { type: "CloneRepo", params: CloneRepo } | { type: "PullRepo", params: PullRepo } + | { type: "BuildRepo", params: BuildRepo } + | { type: "CancelRepoBuild", params: CancelRepoBuild } | { type: "RunProcedure", params: RunProcedure } | { type: "LaunchServer", params: LaunchServer } | { type: "RunSync", params: RunSync }; @@ -4195,8 +5216,8 @@ export type ReadRequest = | { type: "GetCoreInfo", params: GetCoreInfo } | { type: "ListAwsEcrLabels", params: ListAwsEcrLabels } | { type: "ListSecrets", params: ListSecrets } - | { type: "ListGitProviders", params: ListGitProviders } - | { type: "ListDockerRegistries", params: ListDockerRegistries } + | { type: "ListGitProvidersFromConfig", params: ListGitProvidersFromConfig } + | { type: "ListDockerRegistriesFromConfig", params: ListDockerRegistriesFromConfig } | { type: "GetUsername", params: GetUsername } | { type: "GetPermissionLevel", params: GetPermissionLevel } | { type: "FindUser", params: FindUser } @@ -4221,13 +5242,14 @@ export type ReadRequest = | { type: "GetServer", params: GetServer } | { type: "GetServerState", params: GetServerState } | { type: "GetPeripheryVersion", params: GetPeripheryVersion } - | { type: "GetDockerContainers", params: GetDockerContainers } - | { type: "GetDockerImages", params: GetDockerImages } - | { type: 
"GetDockerNetworks", params: GetDockerNetworks } | { type: "GetServerActionState", params: GetServerActionState } | { type: "GetHistoricalServerStats", params: GetHistoricalServerStats } | { type: "ListServers", params: ListServers } | { type: "ListFullServers", params: ListFullServers } + | { type: "ListDockerContainers", params: ListDockerContainers } + | { type: "ListDockerNetworks", params: ListDockerNetworks } + | { type: "ListDockerImages", params: ListDockerImages } + | { type: "ListComposeProjects", params: ListComposeProjects } | { type: "GetDeploymentsSummary", params: GetDeploymentsSummary } | { type: "GetDeployment", params: GetDeployment } | { type: "GetDeploymentContainer", params: GetDeploymentContainer } @@ -4259,6 +5281,16 @@ export type ReadRequest = | { type: "GetSyncWebhooksEnabled", params: GetSyncWebhooksEnabled } | { type: "ListResourceSyncs", params: ListResourceSyncs } | { type: "ListFullResourceSyncs", params: ListFullResourceSyncs } + | { type: "GetStacksSummary", params: GetStacksSummary } + | { type: "GetStack", params: GetStack } + | { type: "GetStackActionState", params: GetStackActionState } + | { type: "GetStackWebhooksEnabled", params: GetStackWebhooksEnabled } + | { type: "GetStackServiceLog", params: GetStackServiceLog } + | { type: "SearchStackServiceLog", params: SearchStackServiceLog } + | { type: "ListStacks", params: ListStacks } + | { type: "ListFullStacks", params: ListFullStacks } + | { type: "ListStackServices", params: ListStackServices } + | { type: "ListCommonStackExtraArgs", params: ListCommonStackExtraArgs } | { type: "GetBuildersSummary", params: GetBuildersSummary } | { type: "GetBuilder", params: GetBuilder } | { type: "ListBuilders", params: ListBuilders } @@ -4277,9 +5309,13 @@ export type ReadRequest = | { type: "GetAlert", params: GetAlert } | { type: "GetSystemInformation", params: GetSystemInformation } | { type: "GetSystemStats", params: GetSystemStats } - | { type: "GetSystemProcesses", params: 
GetSystemProcesses } + | { type: "ListSystemProcesses", params: ListSystemProcesses } | { type: "GetVariable", params: GetVariable } - | { type: "ListVariables", params: ListVariables }; + | { type: "ListVariables", params: ListVariables } + | { type: "GetGitProviderAccount", params: GetGitProviderAccount } + | { type: "ListGitProviderAccounts", params: ListGitProviderAccounts } + | { type: "GetDockerRegistryAccount", params: GetDockerRegistryAccount } + | { type: "ListDockerRegistryAccounts", params: ListDockerRegistryAccounts }; export type UserRequest = | { type: "PushRecentlyViewed", params: PushRecentlyViewed } @@ -4317,6 +5353,7 @@ export type WriteRequest = | { type: "CopyBuild", params: CopyBuild } | { type: "DeleteBuild", params: DeleteBuild } | { type: "UpdateBuild", params: UpdateBuild } + | { type: "RefreshBuildCache", params: RefreshBuildCache } | { type: "CreateBuildWebhook", params: CreateBuildWebhook } | { type: "DeleteBuildWebhook", params: DeleteBuildWebhook } | { type: "CreateBuilder", params: CreateBuilder } @@ -4331,6 +5368,7 @@ export type WriteRequest = | { type: "CopyRepo", params: CopyRepo } | { type: "DeleteRepo", params: DeleteRepo } | { type: "UpdateRepo", params: UpdateRepo } + | { type: "RefreshRepoCache", params: RefreshRepoCache } | { type: "CreateRepoWebhook", params: CreateRepoWebhook } | { type: "DeleteRepoWebhook", params: DeleteRepoWebhook } | { type: "CreateAlerter", params: CreateAlerter } @@ -4348,6 +5386,14 @@ export type WriteRequest = | { type: "RefreshResourceSyncPending", params: RefreshResourceSyncPending } | { type: "CreateSyncWebhook", params: CreateSyncWebhook } | { type: "DeleteSyncWebhook", params: DeleteSyncWebhook } + | { type: "CreateStack", params: CreateStack } + | { type: "CopyStack", params: CopyStack } + | { type: "DeleteStack", params: DeleteStack } + | { type: "UpdateStack", params: UpdateStack } + | { type: "RenameStack", params: RenameStack } + | { type: "RefreshStackCache", params: RefreshStackCache } 
+ | { type: "CreateStackWebhook", params: CreateStackWebhook } + | { type: "DeleteStackWebhook", params: DeleteStackWebhook } | { type: "CreateTag", params: CreateTag } | { type: "DeleteTag", params: DeleteTag } | { type: "RenameTag", params: RenameTag } @@ -4355,7 +5401,13 @@ export type WriteRequest = | { type: "CreateVariable", params: CreateVariable } | { type: "UpdateVariableValue", params: UpdateVariableValue } | { type: "UpdateVariableDescription", params: UpdateVariableDescription } - | { type: "DeleteVariable", params: DeleteVariable }; + | { type: "DeleteVariable", params: DeleteVariable } + | { type: "CreateGitProviderAccount", params: CreateGitProviderAccount } + | { type: "UpdateGitProviderAccount", params: UpdateGitProviderAccount } + | { type: "DeleteGitProviderAccount", params: DeleteGitProviderAccount } + | { type: "CreateDockerRegistryAccount", params: CreateDockerRegistryAccount } + | { type: "UpdateDockerRegistryAccount", params: UpdateDockerRegistryAccount } + | { type: "DeleteDockerRegistryAccount", params: DeleteDockerRegistryAccount }; export type WsLoginMessage = | { type: "Jwt", params: { diff --git a/client/periphery/rs/src/api/build.rs b/client/periphery/rs/src/api/build.rs index 6bef1df42..9018bf9c4 100644 --- a/client/periphery/rs/src/api/build.rs +++ b/client/periphery/rs/src/api/build.rs @@ -16,6 +16,9 @@ pub struct Build { /// Propogate any secret replacers from core interpolation. #[serde(default)] pub replacers: Vec<(String, String)>, + /// Add more tags for this build in addition to the version tags. 
+ #[serde(default)] + pub additional_tags: Vec, } pub type BuildResponse = Vec; diff --git a/client/periphery/rs/src/api/compose.rs b/client/periphery/rs/src/api/compose.rs new file mode 100644 index 000000000..74b72d753 --- /dev/null +++ b/client/periphery/rs/src/api/compose.rs @@ -0,0 +1,102 @@ +use monitor_client::entities::{ + stack::{ComposeContents, ComposeProject, Stack}, update::Log, SearchCombinator, +}; +use resolver_api::derive::Request; +use serde::{Deserialize, Serialize}; + +/// List the compose project names that are on the host. +/// List running `docker compose ls` +/// +/// Incoming from docker like: +/// [{"Name":"project_name","Status":"running(1)","ConfigFiles":"/root/compose/compose.yaml,/root/compose/compose2.yaml"}] +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Vec)] +pub struct ListComposeProjects {} + +// + +/// The stack folder must already exist for this to work +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Log)] +pub struct GetComposeServiceLog { + /// The name of the project + pub project: String, + /// The service name + pub service: String, + /// pass `--tail` for only recent log contents + #[serde(default = "default_tail")] + pub tail: u64, +} + +fn default_tail() -> u64 { + 50 +} + +// + +/// The stack folder must already exist for this to work +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Log)] +pub struct GetComposeServiceLogSearch { + /// The name of the project + pub project: String, + /// The service name + pub service: String, + /// The search terms. + pub terms: Vec, + /// And: Only lines matching all terms + /// Or: Lines matching any one of the terms + #[serde(default)] + pub combinator: SearchCombinator, + /// Invert the search (search for everything not matching terms) + #[serde(default)] + pub invert: bool, +} + +// + +/// Rewrites the compose directory, pulls any images, takes down existing containers, +/// and runs docker compose up. 
+#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(ComposeUpResponse)] +pub struct ComposeUp { + /// The stack to deploy + pub stack: Stack, + /// Only deploy one service + pub service: Option, + /// If provided, use it to login in. Otherwise check periphery local registries. + pub git_token: Option, + /// If provided, use it to login in. Otherwise check periphery local registries. + pub registry_token: Option, +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct ComposeUpResponse { + /// If any of the required files are missing, they will be here. + pub missing_files: Vec, + /// The logs produced by the deploy + pub logs: Vec, + /// whether stack was successfully deployed + pub deployed: bool, + /// The deploy compose file contents if they could be acquired, or empty vec. + pub file_contents: Vec, + /// The error in getting remote file contents at the path, or null + pub remote_errors: Vec, + /// If its a repo based stack, will include the latest commit hash + pub commit_hash: Option, + /// If its a repo based stack, will include the latest commit message + pub commit_message: Option, +} + +// + +/// General compose command runner +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Log)] +pub struct ComposeExecution { + /// The compose project name to run the execution on. + /// Usually its he name of the stack / folder under the `stack_dir`. 
+ pub project: String, + /// The command in `docker compose -p {project} {command}` + pub command: String, +} diff --git a/client/periphery/rs/src/api/container.rs b/client/periphery/rs/src/api/container.rs index 1f6909da2..63726faf6 100644 --- a/client/periphery/rs/src/api/container.rs +++ b/client/periphery/rs/src/api/container.rs @@ -57,6 +57,23 @@ pub struct GetContainerStatsList {} // +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct Deploy { + pub deployment: Deployment, + pub stop_signal: Option, + pub stop_time: Option, + /// Override registry token with one sent from core. + pub registry_token: Option, + /// Propogate AwsEcrConfig from core + pub aws_ecr: Option, + /// Propogate any secret replacers from core interpolation. + #[serde(default)] + pub replacers: Vec<(String, String)>, +} + +// + #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(Log)] pub struct StartContainer { @@ -65,6 +82,30 @@ pub struct StartContainer { // +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct RestartContainer { + pub name: String, +} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct PauseContainer { + pub name: String, +} + +// + +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Log)] +pub struct UnpauseContainer { + pub name: String, +} + +// + #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(Log)] pub struct StopContainer { @@ -75,6 +116,12 @@ pub struct StopContainer { // +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(Vec)] +pub struct StopAllContainers {} + +// + #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(Log)] pub struct RemoveContainer { @@ -99,18 +146,3 @@ pub struct RenameContainer { pub struct PruneContainers {} // - -#[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(Log)] -pub struct Deploy { - pub deployment: Deployment, - 
pub stop_signal: Option, - pub stop_time: Option, - /// Override registry token with one sent from core. - pub registry_token: Option, - /// Propogate AwsEcrConfig from core - pub aws_ecr: Option, - /// Propogate any secret replacers from core interpolation. - #[serde(default)] - pub replacers: Vec<(String, String)>, -} diff --git a/client/periphery/rs/src/api/git.rs b/client/periphery/rs/src/api/git.rs index 57f6c9e2f..306cfcc95 100644 --- a/client/periphery/rs/src/api/git.rs +++ b/client/periphery/rs/src/api/git.rs @@ -1,5 +1,7 @@ +use std::path::PathBuf; + use monitor_client::entities::{ - update::Log, CloneArgs, LatestCommit, SystemCommand, + update::Log, CloneArgs, EnvironmentVar, LatestCommit, SystemCommand, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; @@ -11,22 +13,80 @@ pub struct GetLatestCommit { } #[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(Vec)] +#[response(RepoActionResponse)] pub struct CloneRepo { pub args: CloneArgs, + #[serde(default)] + pub environment: Vec, + #[serde(default = "default_env_file_path")] + pub env_file_path: String, + #[serde(default)] + pub skip_secret_interp: bool, /// Override git token with one sent from core. pub git_token: Option, } +fn default_env_file_path() -> String { + String::from(".env") +} + // #[derive(Serialize, Deserialize, Debug, Clone, Request)] -#[response(Vec)] +#[response(RepoActionResponse)] pub struct PullRepo { pub name: String, pub branch: Option, pub commit: Option, pub on_pull: Option, + #[serde(default)] + pub environment: Vec, + #[serde(default = "default_env_file_path")] + pub env_file_path: String, + #[serde(default)] + pub skip_secret_interp: bool, +} + +// + +/// Backward compat adapter for v1.13 upgrade. 
+#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(untagged)] +pub enum RepoActionResponse { + V1_13(RepoActionResponseV1_13), + V1_12(Vec), +} + +impl From for RepoActionResponseV1_13 { + fn from(value: RepoActionResponse) -> Self { + match value { + RepoActionResponse::V1_13(response) => response, + RepoActionResponse::V1_12(logs) => RepoActionResponseV1_13 { + logs, + commit_hash: None, + commit_message: None, + env_file_path: None, + }, + } + } +} + +impl From for RepoActionResponse { + fn from(value: RepoActionResponseV1_13) -> Self { + RepoActionResponse::V1_13(value) + } +} + +// + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct RepoActionResponseV1_13 { + pub logs: Vec, + pub commit_hash: Option, + pub commit_message: Option, + /// Don't need to send this one to core, its only needed for calls local to single periphery + #[serde(skip_serializing)] + pub env_file_path: Option, } // diff --git a/client/periphery/rs/src/api/mod.rs b/client/periphery/rs/src/api/mod.rs index be53186fa..b3b256482 100644 --- a/client/periphery/rs/src/api/mod.rs +++ b/client/periphery/rs/src/api/mod.rs @@ -1,12 +1,19 @@ use monitor_client::entities::{ config::{DockerRegistry, GitProvider}, + deployment::ContainerSummary, + server::{ + docker_image::ImageSummary, docker_network::DockerNetwork, + }, + stack::ComposeProject, update::Log, SystemCommand, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; +use serror::Serror; pub mod build; +pub mod compose; pub mod container; pub mod git; pub mod network; @@ -32,6 +39,19 @@ pub struct GetVersionResponse { pub version: String, } +/// Returns all containers, networks, images, compose projects +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(GetDockerListsResponse)] +pub struct GetDockerLists {} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct GetDockerListsResponse { + pub containers: Result, Serror>, + pub networks: Result, Serror>, + pub images: Result, 
Serror>, + pub projects: Result, Serror>, +} + // #[derive(Serialize, Deserialize, Debug, Clone, Request)] diff --git a/config_example/core.compose.yaml b/config_example/core.compose.yaml new file mode 100644 index 000000000..a07bcbca0 --- /dev/null +++ b/config_example/core.compose.yaml @@ -0,0 +1,56 @@ +services: + monitor-core: + image: ghcr.io/mbecker20/monitor:latest ## use ghcr.io/mbecker20/monitor:latest-aarch64 for arm support + restart: unless-stopped + depends_on: + monitor-mongo: + logging: + driver: local # enable log rotation by default. see `https://docs.docker.com/config/containers/logging/local/` + networks: + - monitor-network + ports: + - 9120:9120 + extra_hosts: # allows for local periphery connection at "http://host.docker.internal:8120" + - host.docker.internal:host-gateway + environment: # https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml + MONITOR_HOST: https://demo.monitor.dev + ## MONGO + MONITOR_MONGO_ADDRESS: monitor-mongo:27017 + MONITOR_MONGO_USERNAME: admin # match ones below + MONITOR_MONGO_PASSWORD: admin + ## KEYS + MONITOR_PASSKEY: a_random_passkey # used to auth against periphery + MONITOR_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks + MONITOR_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart. + ## AUTH + MONITOR_LOCAL_AUTH: true # the default is false. 
+ # MONITOR_GITHUB_OAUTH_ENABLED: true # also support google oauth + # MONITOR_GITHUB_OAUTH_ID: your_oauth_id + # MONITOR_GITHUB_OAUTH_SECRET: your_oauth_secret + ## AWS + # MONITOR_AWS_ACCESS_KEY_ID: your_aws_key_id + # MONITOR_AWS_SECRET_ACCESS_KEY: your_secret_access_key + ## HETZNER + # MONITOR_HETZNER_TOKEN: your_hetzner_token + + monitor-mongo: + image: mongo + command: --quiet # suppress mongo logs a bit + restart: unless-stopped + logging: + driver: local + networks: + - monitor-network + ports: + - 27017:27017 + volumes: + - db-data:/data/db + environment: + MONGO_INITDB_ROOT_USERNAME: admin # change these + MONGO_INITDB_ROOT_PASSWORD: admin + +volumes: + db-data: + +networks: + monitor-network: {} \ No newline at end of file diff --git a/config_example/core.config.example.toml b/config_example/core.config.example.toml index 962c90c99..f8a07f5ef 100644 --- a/config_example/core.config.example.toml +++ b/config_example/core.config.example.toml @@ -1,121 +1,298 @@ -## this will be the document title on the web page (shows up as text in the browser tab). -## default: 'Monitor' +####################### +# MONITOR CORE CONFIG # +####################### + +## This is the offical "Default" config file for Monitor. +## It serves as documentation for the meaning of the fields. +## It is located at [https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml). + +## This file is bundled into the official image, `ghcr.io/mbecker20/monitor`, +## as the default config at `/config/config.toml`. +## Monitor can start with no external config file mounted. + +## There is usually no need to create this file on your host. +## Most fields can instead be configured using environment variables. + +## This will be the document title on the web page (shows up as text in the browser tab). 
+## Env: MONITOR_TITLE +## Default: 'Monitor' # title = "Monitor-02" -## required for oauth functionality. this should be the url used to access monitor in browser, potentially behind DNS. -## eg https://monitor.dev or http://12.34.56.78:9000. this should match the address configured in your oauth app. -## no default +## This should be the url used to access Monitor in browser, potentially behind DNS. +## Eg https://monitor.dev or http://12.34.56.78:9120. This should match the address configured in your Oauth app. +## Env: MONITOR_HOST +## Required to start Monitor, no default. host = "https://monitor.dev" -## the port the core system will run on. if running core in docker container, leave as this port as 9000 and use port bind eg. -p 9001:9000 -## default: 9000 -# port = 9001 +## The port the core system will run on. +## Env: MONITOR_PORT +## Default: 9120 +# port = 9121 -## required to match a passkey in periphery config. token used to authenticate core requests to periphery -## no default +## This is the token used to authenticate core requests to periphery. +## Ensure this matches a passkey in the connected periphery configs. +## If the periphery servers don't have passkeys configured, this doesn't need to be changed. +## Env: MONITOR_PASSKEY +## Required to start Monitor, no default passkey = "a_random_passkey" -## token that has to be given to git provider during repo webhook config as the secret -## default: empty (none) -webhook_secret = "a_random_webhook_secret" - -## an alternate base url that is used to recieve git webhook requests -## if empty or not specified, will use 'host' address as base -## default: empty (none) -# webhook_base_url = "https://git-webhook.monitor.dev" - -## specify the log level of the monitor core application -## default: info -## options: off, error, warn, info, debug, trace -# logging.level = "info" - -## specify the logging format for stdout / stderr. 
-## default: standard -## options: standard, json, none -# logging.stdio = "standard" - -## specify a opentelemetry otlp endpoint to send traces to -## optional, default unassigned -# logging.otlp_endpoint = "http://localhost:4317" - -## Set the opentelemetry service name attached to the telemetry this core will send. -## Default: "Monitor" -# logging.opentelemetry_service_name = "Monitor-02" - -## specify how long an issued jwt stays valid. all jwts are invalidated on application restart. -## default: 1-day. -## options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day -# jwt_valid_for = "3-day" - -## controls the granularity of the system stats collection by monitor core -## default: 15-sec -## options: 5-sec, 15-sec, 30-sec, 1-min, 2-min, 5-min, 15-min -# monitoring_interval = "5-sec" - -## number of days to keep stats around, or 0 to disable pruning. -## stats older than this number of days are deleted daily -## default: 0 (pruning disabled) -# keep_stats_for_days = 14 - -## number of days to keep alerts around, or 0 to disable pruning. -## alerts older than this number of days are deleted daily -## default: 0 (pruning disabled) -# keep_alerts_for_days = 14 - -## allows all users to have read access on all resources -## default: false -# transparent_mode = true - -## disables write support on resources in the UI -## default: false +## Disables write support on resources in the UI. +## This protects users that that would normally have write priviledges during their UI usage, +## when they intend to fully rely on ResourceSyncs to manage config. +## Env: MONITOR_UI_WRITE_DISABLED +## Default: false # ui_write_disabled = true -## allow or deny user login with username / password -## default: false +############ +# DATABASE # +############ + +## Configure the database connection in one of the following ways: + +## Pass a full Mongo URI. Suitable for Mongo Atlas. 
+## Env: MONITOR_MONGO_URI +# mongo.uri = "mongodb://username:password@localhost:27017" + +## ==== * OR * ==== ## + +# Construct the address as mongodb://{username}:{password}@{address} +## Env: MONITOR_MONGO_ADDRESS +mongo.address = "localhost:27017" +## Env: MONITOR_MONGO_USERNAME +# mongo.username = "admin" +## Env: MONITOR_MONGO_PASSWORD +# mongo.password = "admin" + +## ==== other ==== + +## Monitor will create its collections under this database name. +## The only reason to change this is if multiple Monitors share the same db. +## Env: MONITOR_MONGO_DB_NAME +## Default: monitor. +# mongo.db_name = "monitor" + +## This is the assigned app_name of the mongo client. +## The only reason to change this is if multiple Monitors share the same db. +## Env: MONITOR_MONGO_APP_NAME +## Default: monitor_core. +# mongo.app_name = "monitor_core" + +################ +# AUTH / LOGIN # +################ + +## Allow user login with a username / password. +## The password will be hashed and stored in the db for login comparison. +## +## NOTE: +## Monitor has no API to recover account logins, but if this happens you can doctor the db using Mongo Compass. +## Create a new user, login to the database with Compass, note down your old users username and _id. +## Then delete the old user, and update the new user to have the same username and _id. +## Make sure to set `enabled: true` and maybe `admin: true` on the new user as well, while using Compass. +## +## Env: MONITOR_LOCAL_AUTH +## Default: false # local_auth = true +## Allows all users to have Read level access to all resources. +## Env: MONITOR_TRANSPARENT_MODE +## Default: false +# transparent_mode = true + +## New users will be automatically enabled when they sign up. +## Otherwise, new users will be disabled on first login. +## The first user to login will always be enabled on creation. +## Env: MONITOR_ENABLE_NEW_USERS +## Default: false +# enable_new_users = true + +## Optionally provide a specific jwt secret. 
+## Passing nothing or an empty string will cause one to be generated on every startup. +## This means users will have to log in again if Monitor restarts. +## Env: MONITOR_JWT_SECRET +# jwt_secret = "your_random_secret" + +## Specify how long a user can stay logged in before they have to log in again. +## All jwts are invalidated on application restart unless `jwt_secret` is set. +## Env: MONITOR_JWT_TTL +## Default: 1-day. +## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day +# jwt_ttl = "3-day" + +######### +# OAUTH # +######### + +## Google + +## Env: MONITOR_GOOGLE_OAUTH_ENABLED +## Default: false # google_oauth.enabled = true + +## Env: MONITOR_GOOGLE_OAUTH_ID +## Required if google_oauth is enabled. # google_oauth.id = "your_google_client_id" + +## Env: MONITOR_GOOGLE_OAUTH_SECRET +## Required if google_oauth is enabled. # google_oauth.secret = "your_google_client_secret" +## Github + +## Env: MONITOR_GITHUB_OAUTH_ENABLED +## Default: false # github_oauth.enabled = true + +## Env: MONITOR_GITHUB_OAUTH_ID +## Required if github_oauth is enabled. # github_oauth.id = "your_github_client_id" + +## Env: MONITOR_GITHUB_OAUTH_SECRET +## Required if github_oauth is enabled. # github_oauth.secret = "your_github_client_secret" -## Configure github webhook app. Enables webhook management apis. +############ +# WEBHOOKS # +############ + +## This token must be given to git provider during repo webhook config. +## The secret configured on the git provider side must match the secret configured here. +## Env: MONITOR_WEBHOOK_SECRET +## Default: empty (none) +webhook_secret = "a_random_webhook_secret" + +## An alternate base url that is used to recieve git webhook requests. +## If empty or not specified, will use 'host' address as base. +## This is useful if Monitor is on an internal network, but can have a +## proxy just allowing through the webhook api using NGINX. 
+## Env: MONITOR_WEBHOOK_BASE_URL +## Default: empty (none) +# webhook_base_url = "https://git-webhook.monitor.dev" + +## Configure Github webhook app. Enables webhook management apis. +## +## Env: MONITOR_GITHUB_WEBHOOK_APP_APP_ID # github_webhook_app.app_id = 1234455 # Find on the app page. +## Env: +## - MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS +## - MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES # github_webhook_app.installations = [ # ## Find the id after installing the app to user / organization. "namespace" is the username / organization name. # { id = 1234, namespace = "mbecker20" } # ] -## Path to github webhook app private key. -## This is defaulted to `/github/private-key.pem`, and doesn't need to be changed if running in Docker. +## The path to Github webhook app private key. +## This is defaulted to `/github/private-key.pem`, and doesn't need to be changed if running core in Docker. ## Just mount the private key pem file on the host to `/github/private-key.pem` in the container. +## Eg. `/your/path/to/key.pem : /github/private-key.pem` +## Env: MONITOR_GITHUB_WEBHOOK_APP_PK_PATH # github_webhook_app.pk_path = "/path/to/pk.pem" -# mongo.uri = "mongodb://username:password@localhost:27017" -## ==== or ==== -mongo.address = "localhost:27017" -# mongo.username = "username" -# mongo.password = "password" -## ==== other ==== -## default: monitor. this is the name of the mongo database that monitor will create its collections in. -# mongo.db_name = "monitor" -## default: monitor_core. this is the assigned app_name of the mongo client -# mongo.app_name = "monitor_core" +########### +# LOGGING # +########### -## provide aws api keys for ephemeral builders / server launch +## Specify the log level of the monitor core application +## Env: MONITOR_LOGGING_LEVEL +## Options: off, error, warn, info, debug, trace +## Default: info +# logging.level = "info" + +## Specify the logging format for stdout / stderr. 
+## Env: MONITOR_LOGGING_STDIO +## Options: standard, json, none +## Default: standard +# logging.stdio = "standard" + +## Optionally specify a opentelemetry otlp endpoint to send traces to. +## Env: MONITOR_OTLP_ENDPOINT +# logging.otlp_endpoint = "http://localhost:4317" + +## Set the opentelemetry service name. +## This will be attached to the telemetry Monitor will send. +## Env: MONITOR_OPENTELEMETRY_SERVICE_NAME +## Default: "Monitor" +# logging.opentelemetry_service_name = "Monitor-02" + +########### +# PRUNING # +########### + +## The number of days to keep historical system stats around, or 0 to disable pruning. +## Stats older that are than this number of days are deleted on a daily cycle. +## Env: MONITOR_KEEP_STATS_FOR_DAYS +## Default: 14 +# keep_stats_for_days = 14 + +## The number of days to keep alerts around, or 0 to disable pruning. +## Alerts older that are than this number of days are deleted on a daily cycle. +## Env: MONITOR_KEEP_ALERTS_FOR_DAYS +## Default: 14 +# keep_alerts_for_days = 14 + +################## +# POLL INTERVALS # +################## + +## Interval at which to poll Stacks for any updates / automated actions. +## Env: MONITOR_STACK_POLL_INTERVAL +## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. +## Default: `5-min`. +# stack_poll_interval = "1-min" + +## Interval at which to poll Syncs for any updates / automated actions. +## Env: MONITOR_SYNC_POLL_INTERVAL +## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. +## Default: `5-min`. +# sync_poll_interval = "1-min" + +## Interval at which to poll Builds (latest commit hash) for any updates / automated actions. +## Env: MONITOR_STACK_POLL_INTERVAL +## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. +## Default: `5-min`. +# build_poll_interval = "1-min" + +## Interval at which to poll Repos (latest commit hash) for any updates / automated actions. +## Env: MONITOR_REPO_POLL_INTERVAL +## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. +## Default: `5-min`. 
+# repo_poll_interval = "1-min" + +## Controls the rate at which servers are polled for health, system stats, and container status. +## This affects network usage, and the size of the stats stored in mongo. +## Default: 15-sec +## Options: 5-sec, 15-sec, 30-sec, 1-min, 2-min, 5-min, 15-min +# monitoring_interval = "5-sec" + +################### +# CLOUD PROVIDERS # +################### + +## Monitor can build images on purpose deployed AWS EC2 instances, +## and afterwards destroying the instance. + +## Additionally, Monitor can deploy cloud VPS on AWS EC2 and Hetzner. +## Use the Template resource to configure launch preferences. +## Hetzner is not supported for builds as their pricing model is by the hour, +## while AWS is by the minute. This is very important for builds. + +## Provide aws api keys for ephemeral builders / server launch +## Env: MONITOR_AWS_ACCESS_KEY_ID # aws.access_key_id = "your_aws_key_id" +## Env: MONITOR_AWS_SECRET_ACCESS_KEY # aws.secret_access_key = "your_aws_secret_key" -## provide hetzner api token for ephemeral builders / server launch +## Provide hetzner api token for server launch +## Env: MONITOR_HETZNER_TOKEN # hetzner.token = "your_hetzner_token" -## provide core-base secrets -# [secrets] -# SECRET_1 = "value_1" -# SECRET_2 = "value_2" +################# +# GIT PROVIDERS # +################# + +## These will be available to attach to Builds, Repos, Stacks, and Syncs. +## They allow these Resources to clone private repositories. +## They cannot be configured on the environment. ## configure git providers # [[git_provider]] @@ -138,6 +315,14 @@ mongo.address = "localhost:27017" # { username = "mbecker20", token = "access_token_for_account" }, # ] +###################### +# REGISTRY PROVIDERS # +###################### + +## These will be available to attach to Builds and Stacks. +## They allow these Resources to pull private images. +## They cannot be configured on the environment. 
+ ## configure docker registries # [[docker_registry]] # domain = "docker.io" @@ -153,7 +338,11 @@ mongo.address = "localhost:27017" # ] # organizations = ["Mogh"] # These become available in the UI -## configure aws ecr registries +## Configure AWS ECR registries. +## Ecr is a special case of registry, as using it is pretty different than others. +## You can configure multiple of these with different "labels", and select +## then by label in the UI. + # [aws_ecr_registry.label_1] # region = "us-east-1" # account_id = "1234455" @@ -164,4 +353,19 @@ mongo.address = "localhost:27017" # region = "us-west-1" # account_id = "1234455" # access_key_id = "your_aws_key_id_2" -# secret_access_key = "your_aws_secret_key_2" \ No newline at end of file +# secret_access_key = "your_aws_secret_key_2" + +########### +# SECRETS # +########### + +## Provide core-based secrets. +## These will be available to interpolate into your Deployment / Stack environments, +## and will be hidden in the UI and logs. +## These are available to use on any periphery (Server), +## but you can also limit access more by placing them in a single peripheries config instead. +## These cannot be configured on the environment. + +# [secrets] +# SECRET_1 = "value_1" +# SECRET_2 = "value_2" \ No newline at end of file diff --git a/config_example/core.env.example b/config_example/core.env.example deleted file mode 100644 index 1c8ef4887..000000000 --- a/config_example/core.env.example +++ /dev/null @@ -1,48 +0,0 @@ -# optional. default is /frontend -MONITOR_FRONTEND_PATH=/frontend - -# optional. default is /config/config.toml -MONITOR_CONFIG_PATH=/config/config.toml - -## All config file fields are optionally available to override on environment -## The config fields are prefixed with 'MONITOR_' -## Nested config fields are set by converting nesting to 'MONITOR_{field}', where field is uppercase - -## Note. 
ALL the following are optional, and could also be specified in the config file -MONITOR_TITLE=Monitor -MONITOR_HOST=https://monitor.dev -MONITOR_PORT=9000 -MONITOR_PASSKEY=asdfasdf -MONITOR_JWT_VALID_FOR=1-day -MONITOR_MONITORING_INTERVAL=15-sec -MONITOR_KEEP_STATS_FOR_DAYS=0 -MONITOR_KEEP_ALERTS_FOR_DAYS=0 -MONITOR_WEBHOOK_SECRET=asdfasdf -MONITOR_WEBHOOK_BASE_URL=https://github-listener.monitor.dev - -MONITOR_LOGGING_LEVEL=info -MONITOR_LOGGING_STDIO=standard -MONITOR_LOGGING_OTLP_ENDPOINT=http://localhost:4317 -MONITOR_LOGGING_OPENTELEMETRY_SERVICE_NAME=Monitor - -MONITOR_LOCAL_AUTH=true - -MONITOR_GITHUB_OAUTH_ENABLED=true -MONITOR_GITHUB_OAUTH_ID=asdfasdf -MONITOR_GITHUB_OAUTH_SECRET=asdfasdf - -MONITOR_GOOGLE_OAUTH_ENABLED=true -MONITOR_GOOGLE_OAUTH_ID=asdfasdf -MONITOR_GOOGLE_OAUTH_SECRET=asdfasdf - -MONITOR_MONGO_URI=mongodb://admin:admin@localhost:27017 -# or -MONITOR_MONGO_ADDRESS=localhost:27017 -MONITOR_MONGO_USERNAME=admin -MONITOR_MONGO_PASSWORD=admin - -MONITOR_MONGO_APP_NAME=monitor_core -MONITOR_MONGO_DB_NAME=monitor - -MONITOR_AWS_ACCESS_KEY_ID=asdfasdf -MONITOR_AWS_SECRET_ACCESS_KEY=asdfasdf diff --git a/config_example/periphery.config.example.toml b/config_example/periphery.config.example.toml index 0dd02f913..e9fe6415f 100644 --- a/config_example/periphery.config.example.toml +++ b/config_example/periphery.config.example.toml @@ -1,41 +1,53 @@ -## optional. 8120 is default +############################ +# MONITOR PERIPHERY CONFIG # +############################ + +## Optional. The port the server runs on. 8120 is default # port = 8120 -## optional. /etc/monitor/repos is default. +## Optional. /etc/monitor/repos is default. +## The directory periphery will use to manage repos. +## The periphery user must have write access to this directory. # repo_dir = "/home/ubuntu/monitor/repos" -## optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded +## Optional. 
/etc/monitor/stacks is default. +## The directory periphery will use to manage stacks. +## The periphery user must have write access to this directory. +# stack_dir = "/home/ubuntu/monitor/stacks" + +## Optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded # stats_polling_rate = "1-sec" -## optional. default is empty, which will not block any request by ip. +## Whether stack actions should use `docker-compose ...` +## instead of `docker compose ...`. +## default: false +# legacy_compose_cli = true + +######## +# AUTH # +######## + +## Optional. Limit the ip addresses which can call the periphery api. +## Default is empty, which will not block any request by ip. # allowed_ips = ["127.0.0.1"] -## optional. default is empty, which will not require any passkey to be passed by core. +## Optional. Require callers to provide on of the provided passkeys to access the periphery api. +## Default is empty, which will not require any passkey to be passed by core. # passkeys = ["abcdefghijk"] -## specify the log level of the monitor core application -## default: info -## options: off, error, warn, info, debug, trace -# logging.level = "debug" - -## specify the logging format for stdout / stderr. -## default: standard -## options: standard, json, none -# logging.stdio = "json" - -## specify a opentelemetry otlp endpoint to send traces to -## optional, default unassigned -# logging.otlp_endpoint = "http://localhost:4317" - -## Set the opentelemetry service name attached to the telemetry this periphery will send. 
-## Default: "Monitor" -# logging.opentelemetry_service_name = "Monitor-02" +########### +# SECRETS # +########### ## provide periphery-based secrets # [secrets] # SECRET_1 = "value_1" # SECRET_2 = "value_2" +################# +# GIT PROVIDERS # +################# + ## configure periphery-based git providers # [[git_provider]] # domain = "github.com" @@ -57,6 +69,10 @@ # { username = "mbecker20", token = "access_token_for_account" }, # ] +###################### +# REGISTRY PROVIDERS # +###################### + ## configure periphery-based docker registries # [[docker_registry]] # domain = "docker.io" @@ -71,3 +87,25 @@ # { username = "mbecker20", token = "access_token_for_account" }, # ] # organizations = ["Mogh"] # These become available in the UI + +########### +# LOGGING # +########### + +## Specify the log level of the monitor core application +## Default: info +## Options: off, error, warn, info, debug, trace +# logging.level = "debug" + +## Specify the logging format for stdout / stderr. +## Default: standard +## Options: standard, json, none +# logging.stdio = "json" + +## Specify a opentelemetry otlp endpoint to send traces to +## Optional, default unassigned +# logging.otlp_endpoint = "http://localhost:4317" + +## Set the opentelemetry service name attached to the telemetry this periphery will send. +## Default: "Monitor" +# logging.opentelemetry_service_name = "Periphery-02" \ No newline at end of file diff --git a/docsite/docs/api.md b/docsite/docs/api.md index 16bdf9428..51271afd4 100644 --- a/docsite/docs/api.md +++ b/docsite/docs/api.md @@ -1,3 +1,6 @@ # API -Monitor Core exposes an http API to read data, write configuration, and execute actions. The API documentation is generated from the code and is [available here](https://docs.rs/monitor_client/latest/monitor_client/api/index.html). \ No newline at end of file +Monitor Core exposes an http API to read data, write configuration, and execute actions. 
The API documentation is generated from the code and is [available here](https://docs.rs/monitor_client/latest/monitor_client/api/index.html). + +You can also install the [Monitor CLI](https://crates.io/crates/monitor_cli) to execute actions like RunBuild or DeployStack from the command line. +This can be coupled with scripts in Monitor Repos to achieve unlimited automation. \ No newline at end of file diff --git a/docsite/docs/build-images/index.mdx b/docsite/docs/build-images/index.mdx index 7a5aa130e..6bac601bb 100644 --- a/docsite/docs/build-images/index.mdx +++ b/docsite/docs/build-images/index.mdx @@ -7,9 +7,6 @@ slug: /build-images Monitor builds docker images by cloning the source repository from the configured git provider, running `docker build`, and pushing the resulting image to the configured docker registry. Any repo containing a `Dockerfile` is buildable using this method. -Build configuration involves passing file / directory paths, -for more details about passing file paths, see the [file paths doc](/docs/file-paths). - ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docsite/docs/connecting-servers/setup-periphery.md b/docsite/docs/connecting-servers.md similarity index 81% rename from docsite/docs/connecting-servers/setup-periphery.md rename to docsite/docs/connecting-servers.md index 2455b72b6..b0d920f02 100644 --- a/docsite/docs/connecting-servers/setup-periphery.md +++ b/docsite/docs/connecting-servers.md @@ -1,18 +1,33 @@ -# Setup Monitor Periphery +# Connecting Servers -The easiest way to setup periphery is to use the setup script (as root user): +Connecting a server to monitor has 2 steps: + +1. Install the Periphery agent on the server +2. Adding the server to monitor via the core API + +Once step 1. is complete, you can just connect the server to Monitor Core from the UI. 
+ +## Install the Periphery agent + +The easiest way to setup and update periphery is to use the setup script (as root user): ```sh curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3 ``` +Periphery can also be installed to run as the calling user, just note this comes with some additional configuration. + +```sh +curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3 - --user +``` + You can find more information (and view the script) in the [readme](https://github.com/mbecker20/monitor/tree/main/scripts). -:::note +:::info This script can be run multiple times without issue, and it won't change existing config after the first run. Just run it again after a Monitor version release, and it will update the periphery version. ::: -### Manual install steps +## Manual install steps 1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases). diff --git a/docsite/docs/connecting-servers/add-server.md b/docsite/docs/connecting-servers/add-server.md deleted file mode 100644 index 786f8c1ab..000000000 --- a/docsite/docs/connecting-servers/add-server.md +++ /dev/null @@ -1,10 +0,0 @@ -# Adding Servers to Monitor - -The easiest way to add servers is with the GUI. -Navigate to the Servers page, click the New Server button, input the name, and hit create. -This will navigate to the created server, where you can configure it's address. -The address is the full http/s url to the periphery server, eg `http://12.34.56.78:8120`. - -Once it is added, you can use access the GUI to modify some config, like the alerting thresholds for cpu, memory and disk usage. A server can also be temporarily disabled, this will prevent alerting if it goes offline. - -Since no state is stored on the periphery servers, you can easily redirect all deployments to be hosted on a different server. Just update the address to point to the new server. 
\ No newline at end of file diff --git a/docsite/docs/connecting-servers/index.mdx b/docsite/docs/connecting-servers/index.mdx deleted file mode 100644 index 7ae24e08e..000000000 --- a/docsite/docs/connecting-servers/index.mdx +++ /dev/null @@ -1,16 +0,0 @@ ---- -slug: /connecting-servers ---- - -# Connecting Servers - -Integrating a device into the monitor system has 2 steps: - - 1. Setup and start the periphery agent on the server - 2. Adding the server to monitor via the core API - -```mdx-code-block -import DocCardList from '@theme/DocCardList'; - - -``` diff --git a/docsite/docs/connecting-servers/templates.md b/docsite/docs/connecting-servers/templates.md deleted file mode 100644 index ca2fd85c3..000000000 --- a/docsite/docs/connecting-servers/templates.md +++ /dev/null @@ -1,2 +0,0 @@ -# Cloud Templates - diff --git a/docsite/docs/core-setup.md b/docsite/docs/core-setup.md index 64030b6b0..d7c06a02f 100644 --- a/docsite/docs/core-setup.md +++ b/docsite/docs/core-setup.md @@ -1,12 +1,51 @@ -# Core Setup +# Monitor Core Setup -To run Monitor Core, you will need: +To run Monitor Core, you will need Docker. See [the docker install docs](https://docs.docker.com/engine/install/). - - A valid configuration file. - - An instance of MongoDB to which Core can connect. - - Docker must be installed on the host. See [the install docs](https://docs.docker.com/engine/install/). +:::info +Monitor Core itself can really only run remote builds. +You also have to [**install the Monitor Periphery agent**](/docs/connecting-servers) on your hosts and connect them as **Servers** +in order to alert / deploy etc. -## Mongo +You can currently and always will be able to **connect as many servers an you like** using the Periphery agent. 
+::: + +### Deploy Monitor Core with Docker Compose + +There is an example compose file here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml](https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml). + +Copy the contents to a `compose.yaml`, and deploy it with `docker compose up -d`. + +### Configuration + +You can configure Monitor with environment variables, or using a config file. + +The example config file in the Monitor repo documents all the configuration options, along with the corresponding environment variables. +It can be found here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml](https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml). + +Note that configuration passed in environment variables will take precedent over what is given in the file. + +:::note +To enable OAuth2 login, you must create a client on the respective OAuth provider, +for example [google](https://developers.google.com/identity/protocols/oauth2) +or [github](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps). +Monitor uses the `web application` login flow. +The redirect uri is `/auth/google/callback` for google and `/auth/github/callback` for github. +::: + +### First login + +Core should now be accessible on the specified port, so navigating to `http://
:` will display the login page. + +The first user to log in will be auto enabled and made an admin. Any additional users to create accounts will be disabled by default, and must be enabled by an admin. + +### Tls + +Core itself only supports http, so a reverse proxy like [caddy](https://caddyserver.com/) should be used for https. + +## Deploy with Docker cli + +### 1. Start Mongo Mongo can be run locally using the docker cli: @@ -22,48 +61,25 @@ docker run --name monitor-mongo \ You should replace the username and password with your own. See [the image docs](https://hub.docker.com/_/mongo) for more details. +Note that this uses "host" networking, which will allow core to connect over localhost. +Many users will prefer the default "bridge" network, and to use port mapping with `-p 27017:27017`. + :::note The disk space requirements of Monitor are dominated by the storage of system stats. This depends on the number of connected servers (more system stats being produces / stored), stats collection frequency, and your stats pruning configuration. -If you need to save on space, you can configure these fields in your core config: - - Stats poll frequency can be reduced using, for example, `monitoring_interval = "15-sec"` - - Pruning can be tuned more aggresively using, for example, `keep_stats_for_days = 7`. +If you need to save on space, you can configure these fields in your core config: - Stats poll frequency can be reduced using, for example, `monitoring_interval = "15-sec"` - Pruning can be tuned more aggresively using, for example, `keep_stats_for_days = 7`. ::: -## 1. Create the configuration file +### 2. Start Monitor core -Create a configuration file on the system, for example at `~/.config/monitor/core.config.toml`, and copy the [example config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml). Fill in all the necessary information before continuing. 
- -:::note -To enable OAuth2 login, you must create a client on the respective OAuth provider, -for example [google](https://developers.google.com/identity/protocols/oauth2) -or [github](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps). -Monitor uses the `web application` login flow. -The redirect uri is `/auth/google/callback` for google and `/auth/github/callback` for github. -::: - -:::note -Most configuration can additionally be passed using environment variables, which override the value in the config file. -See [config docs](https://docs.rs/monitor_client/latest/monitor_client/entities/config/core/index.html). -::: - -## 2. Start monitor core - -Monitor core is distributed via Github Container Registry under the package [mbecker20/monitor_core](https://github.com/mbecker20/monitor/pkgs/container/monitor_core). +Monitor core is distributed via Github Container Registry under the package [mbecker20/monitor](https://github.com/mbecker20/monitor/pkgs/container/monitor). ```sh docker run -d --name monitor-core \ --network host \ -v $HOME/.monitor/core.config.toml:/config/config.toml \ - ghcr.io/mbecker20/monitor_core + ghcr.io/mbecker20/monitor:latest ``` -## First login - -Core should now be accessible on the specified port, so navigating to `http://
:` will display the login page. - -The first user to log in will be auto enabled and made an admin. Any additional users to create accounts will be disabled by default, and must be enabled by an admin. - -## Tls - -Core itself only supports http, so a reverse proxy like [caddy](https://caddyserver.com/) should be used for https. \ No newline at end of file +Note that this uses "host" networking, which will allow it to connect to a local periphery agent on localhost. +Many users will prefer the default "bridge" network, and to use port mapping with `-p 9120:9120`. diff --git a/docsite/docs/deploy-containers/index.mdx b/docsite/docs/deploy-containers/index.mdx index 40c871e49..f3fa3d522 100644 --- a/docsite/docs/deploy-containers/index.mdx +++ b/docsite/docs/deploy-containers/index.mdx @@ -4,8 +4,6 @@ Monitor can deploy any docker images that it can access with the configured dock It works by parsing the deployment configuration into a `docker run` command, which is then run on the target system. The configuration is stored on MongoDB, and records of all actions (update config, deploy, stop, etc.) are stored as well. -Deployment configuration involves passing file / directory paths, for more details about passing file paths, see the [file paths doc](/docs/file-paths). - ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docsite/docs/intro.md b/docsite/docs/intro.md index f533b84c2..d84c899a9 100644 --- a/docsite/docs/intro.md +++ b/docsite/docs/intro.md @@ -8,11 +8,13 @@ Monitor is a web app to provide structure for managing your servers, builds, dep With Monitor you can: - 1. Connect all of your remote servers (across clouds + on premise) to be viewable from a unified 'monitor' (hence the name). - 2. Build application source into auto-versioned Docker images. - 3. Create, start, stop, and restart Docker containers on the connected servers, and view their status and logs. - 4. Manage repositories on connected servers. - 5. 
Keep a record of all the actions that are performed and by whom. + - Connect all of your servers, and alert on CPU usage, memory usage, and disk usage. + - Create, start, stop, and restart Docker containers on the connected servers, and view their status and logs. + - Deploy docker compose stacks. The file can be defined in UI, or in a git repo, with auto deploy on git push. + - Build application source into auto-versioned Docker images, auto built on webhook. Deploy single-use AWS instances for infinite capacity. + - Manage repositories on connected servers, which can perform automation via scripting / webhooks. + - Manage all your configuration / environment variables, with shared global variable and secret interpolation. + - Keep a record of all the actions that are performed and by whom. ## Docker diff --git a/docsite/docs/resources.md b/docsite/docs/resources.md index 9acf10b16..8e00a6ae5 100644 --- a/docsite/docs/resources.md +++ b/docsite/docs/resources.md @@ -1,25 +1,62 @@ # Resources -Entities like `Server`, `Deployment`, and `Build` all fall under the `Resource` abstraction. A server is a type of resource, a build is a type of resource, and so on. +Monitor is extendible through the **Resource** abstraction. Entities like `Server`, `Deployment`, and `Stack` are all **Monitor Resources**. + All resources have common traits, such as a unique `name` and `id` amongst all other resources of the same resource type. All resources can be assigned `tags`, which can be used to group related resources. -Here is a list of the resources and their description: -- `Server`: Represents a connected server. - - Holds server config, like the address. -- `Deployment`: Represents a docker container on a server, whether it is actually deployed or not. - - Holds deployment config, like the server it should deploy on, and the image / build to deploy. -- `Build`: Represents a docker image. 
- - Holds build config, like the source repo, Dockerfile location, and version -- `Repo`: Represents a repo on a server, whether it is cloned or not. - - Holds repo config, like the source repo, and the `on_clone` and `on_pull` commands, which run after the repo is cloned / pulled -- `Procedure`: Configure higher level actions by composing lower level actions. - - Holds the actions to execute, like `RunBuild build_1` and `Deploy deployment_1`, and the order to execute them -- `Alerter`: Route the various alerts produced by monitor to alerting endpoints - - Holds the alerting endpoint (Slack channel or Custom http POST), the alerting types to forward (eg. `ServerUnreachable` or `ContainerStateChange`). -- `Builder`: Represents a server used as the "builder" for builds. Can be connected server or ephemeral AWS server. - - Holds builder config, like the AWS ami-id and security groups to allow for builder reachability. -- `ServerTemplate`: Configure cloud server templates (currently AWS and Hetzner) to easily launch more instances and auto connect them to Monitor - - Holds the cloud server config -- `ResourceSync`: Declare Monitor resources in TOML files, push them to a git repo, and sync Monitor config from them. - - Holds config for the source repo containing the files. Will display the computed diff and wait for user to execute. \ No newline at end of file +:::note +Many resources need access to git repos / docker registries. There is an in-built token management system (managed in UI or in config file) to give resources access to credentials. +All resources which depend on git repos / docker registries are able to use these credentials to access private repos. +::: + +## Server + +-- Configure the connection to periphery agents.

+-- Set alerting thresholds.

+-- Can be attached to **Deployments**, **Stacks**, **Repos**, and **Builders**. + +## Deployment + +-- Deploy a docker container on the attached Server.

+-- Manage services at the container level, perform orchestration using **Procedures** and **ResourceSyncs**. + +## Stack + +-- Deploy with docker compose.

+-- Provide the compose file in UI, or move the files to a git repo and use a webhook for auto redeploy on push.

+-- Supports composing multiple compose files using `docker compose -f ... -f ...`. +-- Pass environment variables usable within the compose file. Interpolate in app-wide variables / secrets. + +## Repo + +-- Put scripts in git repos, and run them on a server every time they are pushed to. + +## Build + +-- Build application source into docker images, and push them to the configured docker registry. + +## Builder + +-- Either points to a connected server, or holds configuration to launch a single-use AWS instance to build the image.

+-- Can be attached to **Builds** and **Repos**. + +## Procedure + +-- Compose many actions on other resource types, like `RunBuild` or `DeployStack`, and run them on button push (or with a webhook).

+-- Can run one or more actions in parallel "stages", and compose a series of parallel stages to run sequentially. + +## ResourceSync + +-- Orchestrate all your configuration declaratively by defining it in `toml` files, which are checked into a git repo.

+-- Can deploy **Deployments** and **Stacks** if changes are suggested. Specify deploy ordering with `after` array (like docker compose `depends_on`, but can span across servers). + +## Alerter + +-- Route alerts to various endpoints.

+-- Can configure rules on each Alerter, such as resource whitelist, blacklist, or alert type filter. + +## ServerTemplate + +-- Easily expand your cloud network by storing cloud server launch templates on various providers.

+-- Auto connect the server to monitor on launch, using `User Data` launch scripts. diff --git a/docsite/docs/sync-resources.md b/docsite/docs/sync-resources.md index 17cb53f28..7b8d636e5 100644 --- a/docsite/docs/sync-resources.md +++ b/docsite/docs/sync-resources.md @@ -129,6 +129,24 @@ VARIABLE_2 = value_2""" config.labels = "deployment.type = logger" ``` +### Stack + +- [Stack config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/stack/struct.StackConfig.html) + +```toml +[[stack]] +name = "test-stack" +description = "stack test" +deploy = true +after = ["test-logger-01"] # Stacks can depend on deployments, and vice versa. +tags = ["test"] +config.server_id = "monitor-01" +config.file_paths = ["mongo.yaml", "redis.yaml"] +config.git_provider = "git.mogh.tech" +config.git_account = "mbecker20" # clone private repo by specifying account +config.repo = "mbecker20/stack_test" +``` + ### Procedure - [Procedure config schema](https://docs.rs/monitor_client/latest/monitor_client/entities/procedure/struct.ProcedureConfig.html) diff --git a/docsite/docusaurus.config.ts b/docsite/docusaurus.config.ts index e0597152c..e0bae46ff 100644 --- a/docsite/docusaurus.config.ts +++ b/docsite/docusaurus.config.ts @@ -6,12 +6,12 @@ import dotenv from "dotenv" dotenv.config(); const config: Config = { - title: "monitor", - tagline: "build and deployment system", + title: "Monitor", + tagline: "Build and deployment system", favicon: "img/favicon.ico", // Set the production url of your site here - url: "https://docs.monitor.mogh.tech", + url: "https://docs.monitor.dev", // Set the // pathname under which your site is served // For GitHub pages deployment, it is often '//' // baseUrl: "/monitor/", @@ -69,11 +69,11 @@ const config: Config = { }, }, navbar: { - title: "monitor", + title: "Monitor", logo: { alt: "monitor lizard", - src: "img/monitor-lizard.png", - width: "46px" + src: "img/logo512.png", + width: "34px", }, items: [ { diff --git a/docsite/sidebars.ts 
b/docsite/sidebars.ts index e29c37d8b..ab662e472 100644 --- a/docsite/sidebars.ts +++ b/docsite/sidebars.ts @@ -15,18 +15,7 @@ const sidebars: SidebarsConfig = { "intro", "resources", "core-setup", - { - type: "category", - label: "Connecting Servers", - link: { - type: "doc", - id: "connecting-servers/index", - }, - items: [ - "connecting-servers/setup-periphery", - "connecting-servers/add-server", - ], - }, + "connecting-servers", { type: "category", label: "Build Images", diff --git a/docsite/src/components/HomepageFeatures/index.tsx b/docsite/src/components/HomepageFeatures/index.tsx index 5f0ead90b..e1316e2f3 100644 --- a/docsite/src/components/HomepageFeatures/index.tsx +++ b/docsite/src/components/HomepageFeatures/index.tsx @@ -12,7 +12,7 @@ const FeatureList: FeatureItem[] = [ title: "automated builds 🛠️", description: ( <> - build auto versioned docker images from github repos, trigger builds on + Build auto versioned docker images from github repos, trigger builds on git push ), @@ -21,7 +21,7 @@ const FeatureList: FeatureItem[] = [ title: "deploy docker containers 🚀", description: ( <> - deploy your builds (or any docker image), see uptime and logs across all + Deploy containers, deploy docker compose, see uptime and logs across all your servers ), diff --git a/docsite/src/pages/index.tsx b/docsite/src/pages/index.tsx index 08b0707c5..83387fdcf 100644 --- a/docsite/src/pages/index.tsx +++ b/docsite/src/pages/index.tsx @@ -25,7 +25,7 @@ function HomepageHeader() { transform: "translate(-50%, -50%)", }} > - monitor + Monitor diff --git a/docsite/static/img/favicon.ico b/docsite/static/img/favicon.ico index 82a418d29..2ef3d3ad2 100644 Binary files a/docsite/static/img/favicon.ico and b/docsite/static/img/favicon.ico differ diff --git a/docsite/static/img/logo.svg b/docsite/static/img/logo.svg deleted file mode 100644 index 9db6d0d06..000000000 --- a/docsite/static/img/logo.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git 
a/docsite/static/img/logo512.png b/docsite/static/img/logo512.png new file mode 100644 index 000000000..4b070d7b7 Binary files /dev/null and b/docsite/static/img/logo512.png differ diff --git a/docsite/static/img/monitor-summary.png b/docsite/static/img/monitor-summary.png deleted file mode 100644 index b88125dfc..000000000 Binary files a/docsite/static/img/monitor-summary.png and /dev/null differ diff --git a/frontend/index.html b/frontend/index.html index 1ac5fb45f..28cace14a 100644 --- a/frontend/index.html +++ b/frontend/index.html @@ -7,13 +7,13 @@ - - + + Monitor - +
diff --git a/frontend/package.json b/frontend/package.json index 22566fcd3..548f7d6d7 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -11,55 +11,55 @@ "build-client": "cd ../client/core/ts && yarn && yarn build && yarn link" }, "dependencies": { - "@radix-ui/react-checkbox": "1.0.4", - "@radix-ui/react-dialog": "1.0.5", - "@radix-ui/react-dropdown-menu": "2.0.6", - "@radix-ui/react-hover-card": "^1.0.7", + "@radix-ui/react-checkbox": "^1.1.1", + "@radix-ui/react-dialog": "^1.1.1", + "@radix-ui/react-dropdown-menu": "^2.1.1", + "@radix-ui/react-hover-card": "^1.1.1", "@radix-ui/react-icons": "1.3.0", - "@radix-ui/react-label": "2.0.2", - "@radix-ui/react-popover": "1.0.7", - "@radix-ui/react-progress": "1.0.3", - "@radix-ui/react-select": "2.0.0", - "@radix-ui/react-separator": "^1.0.3", - "@radix-ui/react-slot": "1.0.2", - "@radix-ui/react-switch": "1.0.3", - "@radix-ui/react-tabs": "1.0.4", - "@radix-ui/react-toast": "1.1.5", - "@radix-ui/react-toggle": "^1.0.3", - "@radix-ui/react-toggle-group": "^1.0.4", - "@tanstack/react-query": "5.35.1", - "@tanstack/react-table": "8.16.0", + "@radix-ui/react-label": "^2.1.0", + "@radix-ui/react-popover": "^1.1.1", + "@radix-ui/react-progress": "^1.1.0", + "@radix-ui/react-select": "^2.1.1", + "@radix-ui/react-separator": "^1.1.0", + "@radix-ui/react-slot": "^1.1.0", + "@radix-ui/react-switch": "^1.1.0", + "@radix-ui/react-tabs": "^1.1.0", + "@radix-ui/react-toast": "^1.2.1", + "@radix-ui/react-toggle": "^1.1.0", + "@radix-ui/react-toggle-group": "^1.1.0", + "@tanstack/react-query": "5.51.23", + "@tanstack/react-table": "8.20.1", "ansi-to-html": "0.7.2", "class-variance-authority": "0.7.0", "clsx": "2.1.1", "cmdk": "1.0.0", - "jotai": "2.8.0", - "lightweight-charts": "4.1.4", - "lucide-react": "0.378.0", + "jotai": "2.9.2", + "lightweight-charts": "4.2.0", + "lucide-react": "0.427.0", "react": "18.3.1", "react-dom": "18.3.1", "react-minimal-pie-chart": "8.4.0", - "react-router-dom": "6.23.0", + 
"react-router-dom": "6.26.0", "sanitize-html": "2.13.0", - "tailwind-merge": "2.3.0", + "tailwind-merge": "2.4.0", "tailwindcss-animate": "1.0.7" }, "devDependencies": { - "@types/react": "18.3.1", + "@types/react": "18.3.3", "@types/react-dom": "18.3.0", "@types/sanitize-html": "2.11.0", - "@typescript-eslint/eslint-plugin": "7.8.0", - "@typescript-eslint/parser": "7.8.0", - "@vitejs/plugin-react": "4.2.1", - "autoprefixer": "10.4.19", - "eslint": "9.2.0", + "@typescript-eslint/eslint-plugin": "8.0.1", + "@typescript-eslint/parser": "8.0.1", + "@vitejs/plugin-react": "4.3.1", + "autoprefixer": "10.4.20", + "eslint": "9.9.0", "eslint-plugin-react-hooks": "4.6.2", - "eslint-plugin-react-refresh": "0.4.7", - "postcss": "8.4.38", - "tailwindcss": "3.4.3", - "typescript": "5.4.5", - "vite": "5.2.11", - "vite-tsconfig-paths": "4.3.2" + "eslint-plugin-react-refresh": "0.4.9", + "postcss": "8.4.41", + "tailwindcss": "3.4.9", + "typescript": "5.5.4", + "vite": "5.4.0", + "vite-tsconfig-paths": "5.0.1" }, "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e" } diff --git a/frontend/src/components/alert/details.tsx b/frontend/src/components/alert/details.tsx index 3facc8e3f..3a886f9f2 100644 --- a/frontend/src/components/alert/details.tsx +++ b/frontend/src/components/alert/details.tsx @@ -31,6 +31,7 @@ export const AlertDetailsDialog = ({ id }: { id: string }) => { set(false)} />
{fmt_date_with_minutes(new Date(alert.ts))} diff --git a/frontend/src/components/alert/index.tsx b/frontend/src/components/alert/index.tsx index 2fa33ac57..84e79f276 100644 --- a/frontend/src/components/alert/index.tsx +++ b/frontend/src/components/alert/index.tsx @@ -1,16 +1,12 @@ import { Section } from "@components/layouts"; -import { - alert_level_intention, - bg_color_class_by_intention, -} from "@lib/color"; +import { alert_level_intention } from "@lib/color"; import { useRead, atomWithStorage } from "@lib/hooks"; import { Types } from "@monitor/client"; import { Button } from "@ui/button"; import { useAtom } from "jotai"; import { AlertTriangle } from "lucide-react"; import { AlertsTable } from "./table"; -import { Card, CardHeader } from "@ui/card"; -import { cn } from "@lib/utils"; +import { StatusBadge } from "@components/util"; const openAtom = atomWithStorage("show-alerts-v0", true); @@ -34,11 +30,11 @@ export const OpenAlerts = () => { ); }; -export const AlertLevel = ({ level }: { level: Types.SeverityLevel }) => { - const color = bg_color_class_by_intention(alert_level_intention(level)); - return ( - - {level} - - ); +export const AlertLevel = ({ + level, +}: { + level: Types.SeverityLevel | undefined; +}) => { + if (!level) return null; + return ; }; diff --git a/frontend/src/components/alert/topbar.tsx b/frontend/src/components/alert/topbar.tsx new file mode 100644 index 000000000..3c4add289 --- /dev/null +++ b/frontend/src/components/alert/topbar.tsx @@ -0,0 +1,121 @@ +import { useRead } from "@lib/hooks"; +import { Button } from "@ui/button"; +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from "@ui/dropdown-menu"; +import { AlertTriangle, Clock } from "lucide-react"; +import { AlertLevel } from "."; +import { ResourceLink } from "@components/resources/common"; +import { UsableResource } from "@types"; +import { + Dialog, + DialogContent, + DialogDescription, + DialogHeader, + DialogTitle, +} from 
"@ui/dialog"; +import { Types } from "@monitor/client"; +import { useState } from "react"; + +export const TopbarAlerts = () => { + const { data } = useRead( + "ListAlerts", + { query: { resolved: false } }, + { refetchInterval: 3000 } + ); + const [open, setOpen] = useState(false); + + // If this is set, details will open. + const [alert, setAlert] = useState(); + + if (!data || data.alerts.length === 0) { + return null; + } + + return ( + <> + + + + + + {data?.alerts.map((alert) => ( + setAlert(alert)} + > +
+ +
+
+
+ setOpen(false)} + /> +
+
+

{alert.data.type}

+
+ ))} +
+
+ setAlert(undefined)} /> + + ); +}; + +const AlertDetails = ({ + alert, + onClose, +}: { + alert: Types.Alert | undefined; + onClose: () => void; +}) => ( + <> + {alert && ( + !o && onClose()}> + + + Alert - {alert?.data.type} + + + {new Date(alert?.ts!).toLocaleString()} + + +
+
+
+

{alert?.target.type}:

+ +
+
+

Alert Level:

+ +
+
+
+
{JSON.stringify(alert.data.data, undefined, 2)}
+
+
+
+
+ )} + +); diff --git a/frontend/src/components/config/index.tsx b/frontend/src/components/config/index.tsx index ff6caca1c..183d226cd 100644 --- a/frontend/src/components/config/index.tsx +++ b/frontend/src/components/config/index.tsx @@ -8,6 +8,7 @@ import { cn } from "@lib/utils"; import { Types } from "@monitor/client"; import { Button } from "@ui/button"; import { Card, CardHeader, CardTitle, CardContent } from "@ui/card"; +import { HoverCard, HoverCardContent, HoverCardTrigger } from "@ui/hover-card"; import { Select, SelectContent, @@ -15,7 +16,7 @@ import { SelectTrigger, SelectValue, } from "@ui/select"; -import { AlertTriangle, History, Settings } from "lucide-react"; +import { AlertTriangle, History, Info, Settings } from "lucide-react"; import { Fragment, ReactNode, SetStateAction, useState } from "react"; const keys = >(obj: T) => @@ -82,12 +83,18 @@ export const ConfigLayout = < ); }; -type PrimitiveConfigArgs = { placeholder?: string; label?: string }; +type PrimitiveConfigArgs = { + placeholder?: string; + label?: string; + boldLabel?: boolean; + description?: string; +}; type ConfigComponent = { label: string; icon?: ReactNode; actions?: ReactNode; + description?: ReactNode; hidden?: boolean; labelHidden?: boolean; contentHidden?: boolean; @@ -175,6 +182,7 @@ export const Config = ({ labelHidden, icon, actions, + description, hidden, contentHidden, components, @@ -188,10 +196,24 @@ export const Config = ({ !contentHidden && "border-b" )} > - - {icon} - {label} - +
+ + {icon} + {label} + + {description && ( + + + + + + + + {description} + + + )} +
{actions} )} @@ -264,6 +286,8 @@ export const ConfigAgain = < onChange={(value) => set({ [key]: value } as Partial)} disabled={disabled} placeholder={args?.placeholder} + description={args?.description} + boldLabel={args?.boldLabel} /> ); case "number": @@ -277,6 +301,8 @@ export const ConfigAgain = < } disabled={disabled} placeholder={args?.placeholder} + description={args?.description} + boldLabel={args?.boldLabel} /> ); case "boolean": @@ -287,6 +313,8 @@ export const ConfigAgain = < value={value} onChange={(value) => set({ [key]: value } as Partial)} disabled={disabled} + description={args?.description} + boldLabel={args?.boldLabel} /> ); default: diff --git a/frontend/src/components/config/util.tsx b/frontend/src/components/config/util.tsx index 5baa0c2ba..415f415e2 100644 --- a/frontend/src/components/config/util.tsx +++ b/frontend/src/components/config/util.tsx @@ -13,12 +13,13 @@ import { Input } from "@ui/input"; import { Switch } from "@ui/switch"; import { CheckCircle, + Info, MinusCircle, PlusCircle, Save, SearchX, } from "lucide-react"; -import { ReactNode, useState } from "react"; +import { ReactNode, RefObject, useState } from "react"; import { cn, filterBySplit } from "@lib/utils"; import { Dialog, @@ -39,13 +40,19 @@ import { CommandItem, CommandList, } from "@ui/command"; +import { HoverCard, HoverCardContent, HoverCardTrigger } from "@ui/hover-card"; +import { Card } from "@ui/card"; export const ConfigItem = ({ label, + boldLabel, + description, children, className, }: { label?: string; + boldLabel?: boolean; + description?: ReactNode; children: ReactNode; className?: string; }) => ( @@ -56,7 +63,25 @@ export const ConfigItem = ({ className )} > - {label &&
{snake_case_to_upper_space_case(label)}
} +
+ {label && ( +
+ {snake_case_to_upper_space_case(label)} +
+ )} + {description && ( + + + + + + + + {description} + + + )} +
{children}
@@ -65,20 +90,24 @@ export const ConfigItem = ({ export const ConfigInput = ({ label, + boldLabel, value, + description, disabled, placeholder, onChange, onBlur, }: { label: string; + boldLabel?: boolean; value: string | number | undefined; + description?: string; disabled?: boolean; placeholder?: string; onChange?: (value: string) => void; onBlur?: (value: string) => void; }) => ( - + void; }) => ( - + ); @@ -197,9 +230,14 @@ export const ProviderSelector = ({ onSelect: (provider: string) => void; showCustom?: boolean; }) => { - const request = - account_type === "git" ? "ListGitProviders" : "ListDockerRegistries"; - const providers = useRead(request, {}).data; + const [db_request, config_request]: + | ["ListGitProviderAccounts", "ListGitProvidersFromConfig"] + | ["ListDockerRegistryAccounts", "ListDockerRegistriesFromConfig"] = + account_type === "git" + ? ["ListGitProviderAccounts", "ListGitProvidersFromConfig"] + : ["ListDockerRegistryAccounts", "ListDockerRegistriesFromConfig"]; + const db_providers = useRead(db_request, {}).data; + const config_providers = useRead(config_request, {}).data; const [customMode, setCustomMode] = useState(false); if (customMode) { @@ -210,11 +248,26 @@ export const ProviderSelector = ({ onChange={(e) => onSelect(e.target.value)} className="max-w-[75%] lg:max-w-[400px]" onBlur={() => setCustomMode(false)} + onKeyDown={(e) => { + if (e.key === "Enter") { + setCustomMode(false) + } + }} autoFocus /> ); } + const domains = new Set(); + for (const provider of db_providers ?? []) { + domains.add(provider.domain); + } + for (const provider of config_providers ?? 
[]) { + domains.add(provider.domain); + } + const providers = [...domains]; + providers.sort(); + return ( set(e.target.value)} + className="max-w-[75%] lg:max-w-[400px]" + onBlur={() => setCustomMode(false)} + onKeyDown={(e) => { + if (e.key === "Enter") { + setCustomMode(false); + } + }} + autoFocus + /> + ); + } + + const orgs = + selected === "" || organizations.includes(selected) + ? organizations + : [...organizations, selected]; + orgs.sort(); + return ( ); @@ -911,3 +1033,55 @@ export const PermissionLevelSelector = ({ ); }; + +/// Takes in env +export const SecretsForEnvironment = ({ + env, + setEnv, + envRef, +}: { + /// Environment file + env?: string; + setEnv: (env: string) => void; + envRef: RefObject; +}) => { + const variables = useRead("ListVariables", {}).data ?? []; + const secrets = useRead("ListSecrets", {}).data ?? []; + + const _env = env || ""; + + if (variables.length === 0 && secrets.length === 0) return; + + return ( +
+ {variables.length > 0 && ( + v.name)} + onSelect={(variable) => + setEnv( + _env.slice(0, envRef.current?.selectionStart) + + `[[${variable}]]` + + _env.slice(envRef.current?.selectionStart, undefined) + ) + } + disabled={false} + /> + )} + {secrets.length > 0 && ( + + setEnv( + _env.slice(0, envRef.current?.selectionStart) + + `[[${secret}]]` + + _env.slice(envRef.current?.selectionStart, undefined) + ) + } + disabled={false} + /> + )} +
+ ); +}; diff --git a/frontend/src/components/layouts.tsx b/frontend/src/components/layouts.tsx index d54f1ca55..956ed59d3 100644 --- a/frontend/src/components/layouts.tsx +++ b/frontend/src/components/layouts.tsx @@ -15,7 +15,7 @@ import { ResourceComponents } from "./resources"; import { Card, CardHeader, CardTitle, CardContent, CardFooter } from "@ui/card"; import { ResourceTags } from "./tags"; import { Topbar } from "./topbar"; -import { usableResourcePath } from "@lib/utils"; +import { cn, usableResourcePath } from "@lib/utils"; import { Sidebar } from "./sidebar"; import { ResourceName } from "./resources/common"; import { useShiftKeyListener } from "@lib/hooks"; @@ -28,13 +28,14 @@ export const Layout = () => { useShiftKeyListener("B", () => nav("/builds")); useShiftKeyListener("R", () => nav("/repos")); useShiftKeyListener("P", () => nav("/procedures")); + return ( <> -
- -
-
+
+
+ +
@@ -51,10 +52,12 @@ interface PageProps { children?: ReactNode; subtitle?: ReactNode; actions?: ReactNode; + superHeader?: ReactNode; wrapSize?: "md" | "lg" | "xl" | "2xl"; } export const Page = ({ + superHeader, title, icon, titleRight, @@ -63,21 +66,42 @@ export const Page = ({ actions, children, }: PageProps) => ( -
- {(title || icon || subtitle || actions) && ( -
-
-
- {icon} -

{title}

- {titleRight} +
+ {superHeader ? ( +
+ {superHeader} + {(title || icon || subtitle || actions) && ( +
+
+
+ {icon} +

{title}

+ {titleRight} +
+
{subtitle}
+
+ {actions}
-
{subtitle}
-
- {actions} + )}
+ ) : ( + (title || icon || subtitle || actions) && ( +
+
+
+ {icon} +

{title}

+ {titleRight} +
+
{subtitle}
+
+ {actions} +
+ ) )} {titleOther} {children} @@ -85,6 +109,7 @@ export const Page = ({ ); export const PageXlRow = ({ + superHeader, title, icon, titleRight, @@ -94,20 +119,41 @@ export const PageXlRow = ({ children, }: PageProps) => (
- {(title || icon || subtitle || actions) && ( -
-
-
- {icon} -

{title}

- {titleRight} + {superHeader ? ( +
+ {superHeader} + {(title || icon || subtitle || actions) && ( +
+
+
+ {icon} +

{title}

+ {titleRight} +
+
{subtitle}
+
+ {actions}
-
{subtitle}
-
- {actions} + )}
+ ) : ( + (title || icon || subtitle || actions) && ( +
+
+
+ {icon} +

{title}

+ {titleRight} +
+
{subtitle}
+
+ {actions} +
+ ) )} {titleOther} {children} @@ -120,6 +166,8 @@ interface SectionProps { titleOther?: ReactNode; children?: ReactNode; actions?: ReactNode; + // otherwise items-start + itemsCenterTitleRow?: boolean; } export const Section = ({ @@ -128,11 +176,17 @@ export const Section = ({ titleOther, actions, children, + itemsCenterTitleRow, }: SectionProps) => ( -
-
+
+
{title || icon ? ( -
+
{icon} {title &&

{title}

}
diff --git a/frontend/src/components/log.tsx b/frontend/src/components/log.tsx new file mode 100644 index 000000000..09cc953fa --- /dev/null +++ b/frontend/src/components/log.tsx @@ -0,0 +1,67 @@ +import { logToHtml } from "@lib/utils"; +import { Types } from "@monitor/client"; +import { Button } from "@ui/button"; +import { Select, SelectContent, SelectGroup, SelectItem, SelectTrigger, SelectValue } from "@ui/select"; +import { ChevronDown } from "lucide-react"; +import { useEffect, useRef } from "react"; + +export const Log = ({ + log, + stream, +}: { + log: Types.Log | undefined; + stream: "stdout" | "stderr"; +}) => { + const _log = log?.[stream as keyof typeof log] as string | undefined; + const ref = useRef(null); + const scroll = () => + ref.current?.scroll({ + top: ref.current.scrollHeight, + behavior: "smooth", + }); + useEffect(scroll, [_log]); + return ( + <> +
+
+      
+ + + ); +}; + +export const TailLengthSelector = ({ + selected, + onSelect, + disabled, +}: { + selected: string; + onSelect: (value: string) => void; + disabled?: boolean; +}) => ( + +); diff --git a/frontend/src/components/omnibar.tsx b/frontend/src/components/omnibar.tsx index 19c1d010e..707aa0ba1 100644 --- a/frontend/src/components/omnibar.tsx +++ b/frontend/src/components/omnibar.tsx @@ -23,6 +23,7 @@ import { AlerterComponents } from "./resources/alerter"; import { ServerTemplateComponents } from "./resources/server-template"; import { Badge } from "@ui/badge"; import { ResourceSyncComponents } from "./resources/resource-sync"; +import { StackComponents } from "./resources/stack"; export const OmniSearch = ({ className, @@ -55,6 +56,7 @@ export const OmniSearch = ({ }; type OmniItem = { + key: string; label: string; icon: ReactNode; onSelect: () => void; @@ -90,9 +92,10 @@ export const OmniDialog = ({ {i !== 0 && } - {items.map(({ label, icon, onSelect }) => ( + {items.map(({ key, label, icon, onSelect }) => ( @@ -115,6 +118,7 @@ const useOmniItems = ( const user = useUser().data; const servers = useRead("ListServers", {}).data; const deployments = useRead("ListDeployments", {}).data; + const stacks = useRead("ListStacks", {}).data; const builds = useRead("ListBuilds", {}).data; const repos = useRead("ListRepos", {}).data; const procedures = useRead("ListProcedures", {}).data; @@ -130,56 +134,73 @@ const useOmniItems = ( () => ({ "": [ { + key: "Home", label: "Home", icon: , onSelect: () => nav("/"), }, { + key: "Servers", label: "Servers", icon: , onSelect: () => nav("/servers"), }, { + key: "Deployments", label: "Deployments", icon: , onSelect: () => nav("/deployments"), }, { + key: "Stacks", + label: "Stacks", + icon: , + onSelect: () => nav("/stacks"), + }, + { + key: "Builds", label: "Builds", icon: , onSelect: () => nav("/builds"), }, { + key: "Repos", label: "Repos", icon: , onSelect: () => nav("/repos"), }, { + key: "Procedures", label: 
"Procedures", icon: , onSelect: () => nav("/procedures"), }, { + key: "Builders", label: "Builders", icon: , onSelect: () => nav("/builders"), }, { + key: "Alerters", label: "Alerters", icon: , onSelect: () => nav("/alerters"), }, { + key: "Templates", label: "Templates", icon: , onSelect: () => nav("/server-templates"), }, { + key: "Syncs", label: "Syncs", icon: , onSelect: () => nav("/resource-syncs"), }, (user?.admin && { + key: "Users", label: "Users", icon: , onSelect: () => nav("/users"), @@ -206,6 +227,7 @@ const useOmniItems = ( ) ) .map((server) => ({ + key: "server-" + server.name, label: server.name, icon: , onSelect: () => nav(`/servers/${server.id}`), @@ -223,11 +245,30 @@ const useOmniItems = ( ) ) .map((deployment) => ({ + key: "deployment-" + deployment.name, label: deployment.name, icon: , onSelect: () => nav(`/deployments/${deployment.id}`), })) || [], + Stacks: + stacks + ?.filter( + (item) => + searchTerms.length === 0 || + searchTerms.every( + (term) => + item.name.toLowerCase().includes(term) || + "stack".includes(term) + ) + ) + .map((stack) => ({ + key: "stack-" + stack.name, + label: stack.name, + icon: , + onSelect: () => nav(`/stacks/${stack.id}`), + })) || [], + Build: builds ?.filter( @@ -240,6 +281,7 @@ const useOmniItems = ( ) ) .map((build) => ({ + key: "build-" + build.name, label: build.name, icon: , onSelect: () => nav(`/builds/${build.id}`), @@ -257,6 +299,7 @@ const useOmniItems = ( ) ) .map((repo) => ({ + key: "repo-" + repo.name, label: repo.name, icon: , onSelect: () => nav(`/repos/${repo.id}`), @@ -274,6 +317,7 @@ const useOmniItems = ( ) ) .map((procedure) => ({ + key: "procedure-" + procedure.name, label: procedure.name, icon: , onSelect: () => nav(`/procedures/${procedure.id}`), @@ -291,6 +335,7 @@ const useOmniItems = ( ) ) .map((builder) => ({ + key: "builder-" + builder.name, label: builder.name, icon: , onSelect: () => nav(`/builders/${builder.id}`), @@ -308,6 +353,7 @@ const useOmniItems = ( ) ) .map((alerter) => ({ 
+ key: "alerter-" + alerter.name, label: alerter.name, icon: , onSelect: () => nav(`/alerters/${alerter.id}`), @@ -325,6 +371,7 @@ const useOmniItems = ( ) ) .map((template) => ({ + key: "template-" + template.name, label: template.name, icon: , onSelect: () => nav(`/server-templates/${template.id}`), @@ -341,16 +388,18 @@ const useOmniItems = ( "sync".includes(term) ) ) - .map((template) => ({ - label: template.name, - icon: , - onSelect: () => nav(`/resource-syncs/${template.id}`), + .map((sync) => ({ + key: "sync-" + sync.name, + label: sync.name, + icon: , + onSelect: () => nav(`/resource-syncs/${sync.id}`), })) || [], }), [ user, servers, deployments, + stacks, builds, repos, procedures, diff --git a/frontend/src/components/resources/alerter/config/alert_types.tsx b/frontend/src/components/resources/alerter/config/alert_types.tsx index 53ad22acd..312929103 100644 --- a/frontend/src/components/resources/alerter/config/alert_types.tsx +++ b/frontend/src/components/resources/alerter/config/alert_types.tsx @@ -34,8 +34,8 @@ export const AlertTypeConfig = ({ ); return ( -
-
+
+
{alert_types.map((type) => ( export const AlerterComponents: RequiredResourceComponents = { list_item: (id) => useAlerter(id), + Description: () => ( + <>Route alerts to various endpoints. + ), + Dashboard: () => { const alerters_count = useRead("ListAlerters", {}).data?.length; return ( diff --git a/frontend/src/components/resources/build/config.tsx b/frontend/src/components/resources/build/config.tsx index a910d3d62..5d7c66978 100644 --- a/frontend/src/components/resources/build/config.tsx +++ b/frontend/src/components/resources/build/config.tsx @@ -6,7 +6,7 @@ import { ImageRegistryConfig, InputList, ProviderSelectorConfig, - SecretSelector, + SecretsForEnvironment, SystemCommand, } from "@components/config/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; @@ -15,11 +15,12 @@ import { Types } from "@monitor/client"; import { Button } from "@ui/button"; import { Textarea } from "@ui/textarea"; import { Ban, CirclePlus, PlusCircle } from "lucide-react"; -import { ReactNode, RefObject, createRef, useState } from "react"; -import { CopyGithubWebhook, LabelsConfig, ResourceSelector } from "../common"; +import { ReactNode, createRef, useState } from "react"; +import { BuilderSelector, CopyGithubWebhook, LabelsConfig } from "../common"; import { useToast } from "@ui/use-toast"; import { text_color_class_by_intention } from "@lib/color"; import { ConfirmButton } from "@components/util"; +import { Link } from "react-router-dom"; export const BuildConfig = ({ id, @@ -55,7 +56,17 @@ export const BuildConfig = ({ components={{ general: [ { - label: "General", + label: "Builder", + labelHidden: true, + components: { + builder_id: (id, set) => ( + + ), + }, + }, + { + label: "Version", + labelHidden: true, components: { version: (version, set) => { const { major, minor, patch } = version ?? { @@ -64,7 +75,10 @@ export const BuildConfig = ({ patch: 0, }; return ( - +
v{major}.{minor}.{patch} @@ -97,17 +111,20 @@ export const BuildConfig = ({ ); }, - builder_id: (id, set) => ( - - set({ builder_id })} - disabled={disabled} - align="end" - /> - - ), + }, + }, + { + label: "Custom Name / Tag", + components: { + image_name: { + description: "Optional. Push the image under a different name", + placeholder: "Custom image name", + }, + image_tag: { + description: + "Optional. Postfix the image version with a custom tag.", + placeholder: "Custom image tag", + }, }, }, { @@ -138,16 +155,35 @@ export const BuildConfig = ({ placeholder="None" /> ), - repo: { placeholder: "Enter repo" }, - branch: { placeholder: "Enter branch" }, + repo: { + placeholder: "Enter repo", + description: + "The repo path on the provider. {namespace}/{repo_name}", + }, + branch: { + placeholder: "Enter branch", + description: "Select a custom branch, or default to 'main'.", + }, commit: { placeholder: "Enter a specific commit hash. Optional.", + description: + "Switch to a specific hash after cloning the branch.", }, }, }, { - label: "Docker", + label: "Image", components: { + build_path: { + placeholder: ".", + description: + "The cwd to run 'docker build', relative to the root of the repo.", + }, + dockerfile_path: { + placeholder: "Dockerfile", + description: + "The path to the dockerfile, relative to the build path.", + }, image_registry: (registry, set) => ( ), - build_path: true, - dockerfile_path: true, }, }, { label: "Extra Args", + description: ( +
+
Pass extra arguments to 'docker build'.
+ + + +
+ ), contentHidden: (update.extra_args ?? config.extra_args)?.length === 0, actions: !disabled && ( @@ -193,6 +240,7 @@ export const BuildConfig = ({ }, { label: "Labels", + description: "Attach --labels to image.", contentHidden: (update.labels ?? config.labels)?.length === 0, actions: !disabled && (