diff --git a/.gitignore b/.gitignore index a5892e08a..fd8d847c0 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ dist .env.development creds.toml .syncs -.stacks \ No newline at end of file +.stacks +.DS_Store \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index b8e2e792a..ebfa82dee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -41,7 +41,7 @@ dependencies = [ [[package]] name = "alerter" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "axum", @@ -120,9 +120,15 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "async-recursion" @@ -176,8 +182,8 @@ checksum = "4ecbb56dce3eb772cfb3f5a29e802838b3ba1fdb06303d4f1ff54e0d278dc876" dependencies = [ "serde", "serde_derive", - "strum 0.26.3", - "strum_macros 0.26.4", + "strum", + "strum_macros", "tokio", ] @@ -195,9 +201,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-config" -version = "1.5.5" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e95816a168520d72c0e7680c405a5a8c1fb6a035b4bc4b9d7b0de8e1a941697" +checksum = "8191fb3091fa0561d1379ef80333c3c7191c6f0435d986e85821bcf7acbd1126" dependencies = [ "aws-credential-types", "aws-runtime", @@ -237,9 +243,9 @@ dependencies = [ [[package]] name = "aws-runtime" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2424565416eef55906f9f8cece2072b6b6a76075e3ff81483ebe938a89a4c05f" +checksum = 
"a10d5c055aa540164d9561a0e2e74ad30f0dcf7393c3a92f6733ddf9c5762468" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -262,9 +268,9 @@ dependencies = [ [[package]] name = "aws-sdk-ec2" -version = "1.70.0" +version = "1.75.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b504f61ac8793bb504d9aff11743caf35cfe30515ee5a92415a250539cf6f7" +checksum = "f6787d920877cca6a4ee3953093f6a47cefe26de95a4f7b3681c5850bfe657b4" dependencies = [ "aws-credential-types", "aws-runtime", @@ -284,33 +290,11 @@ dependencies = [ "tracing", ] -[[package]] -name = "aws-sdk-ecr" -version = "1.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e4c9459176ab063f753eb710186f5ea7d3db6e3bad3dadb9b3a9fe8d107651d" -dependencies = [ - "aws-credential-types", - "aws-runtime", - "aws-smithy-async", - "aws-smithy-http", - "aws-smithy-json", - "aws-smithy-runtime", - "aws-smithy-runtime-api", - "aws-smithy-types", - "aws-types", - "bytes", - "http 0.2.12", - "once_cell", - "regex-lite", - "tracing", -] - [[package]] name = "aws-sdk-sso" -version = "1.40.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5879bec6e74b648ce12f6085e7245417bc5f6d672781028384d2e494be3eb6d" +checksum = "0b90cfe6504115e13c41d3ea90286ede5aa14da294f3fe077027a6e83850843c" dependencies = [ "aws-credential-types", "aws-runtime", @@ -330,9 +314,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.41.0" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef4cd9362f638c22a3b959fd8df292e7e47fdf170270f86246b97109b5f2f7d" +checksum = "167c0fad1f212952084137308359e8e4c4724d1c643038ce163f06de9662c1d0" dependencies = [ "aws-credential-types", "aws-runtime", @@ -352,9 +336,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.40.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0b1e2735d2ab28b35ecbb5496c9d41857f52a0d6a0075bbf6a8af306045ea6f6" +checksum = "2cb5f98188ec1435b68097daa2a37d74b9d17c9caa799466338a8d1544e71b9d" dependencies = [ "aws-credential-types", "aws-runtime", @@ -375,9 +359,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.2.3" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5df1b0fa6be58efe9d4ccc257df0a53b89cd8909e86591a13ca54817c87517be" +checksum = "cc8db6904450bafe7473c6ca9123f88cc11089e41a025408f992db4e22d3be68" dependencies = [ "aws-credential-types", "aws-smithy-http", @@ -409,9 +393,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.60.10" +version = "0.60.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01dbcb6e2588fd64cfb6d7529661b06466419e4c54ed1c62d6510d2d0350a728" +checksum = "5c8bc3e8fdc6b8d07d976e301c02fe553f72a39b7a9fea820e023268467d7ab6" dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", @@ -492,9 +476,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.2.4" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "273dcdfd762fae3e1650b8024624e7cd50e484e37abdab73a7a706188ad34543" +checksum = "147100a7bea70fa20ef224a6bad700358305f5dc0f84649c53769761395b355b" dependencies = [ "base64-simd", "bytes", @@ -518,9 +502,9 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.60.8" +version = "0.60.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d123fbc2a4adc3c301652ba8e149bf4bc1d1725affb9784eb20c953ace06bf55" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" dependencies = [ "xmlparser", ] @@ -541,14 +525,14 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = 
"504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", "axum-macros", - "base64 0.21.7", + "base64 0.22.1", "bytes", "futures-util", "http 1.1.0", @@ -570,8 +554,8 @@ dependencies = [ "sha1", "sync_wrapper 1.0.1", "tokio", - "tokio-tungstenite 0.21.0", - "tower", + "tokio-tungstenite", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -579,9 +563,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -592,7 +576,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -600,9 +584,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0be6ea09c9b96cb5076af0de2e383bd2bc0c18f827cf1967bdd353e0b910d733" +checksum = "73c3220b188aea709cf1b6c5f9b01c3bd936bb08bd2b5184a12b35ac8131b1f9" dependencies = [ "axum", "axum-core", @@ -615,7 +599,7 @@ dependencies = [ "mime", "pin-project-lite", "serde", - "tower", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -623,16 +607,37 @@ dependencies = [ [[package]] name = "axum-macros" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ - "heck 0.4.1", "proc-macro2", "quote", "syn 2.0.77", ] +[[package]] +name = "axum-server" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "openssl", + "pin-project-lite", + "tokio", + "tokio-openssl", + "tower 0.4.13", + "tower-service", +] + [[package]] name = "backtrace" version = "0.3.73" @@ -648,6 +653,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -676,6 +687,12 @@ dependencies = [ "vsimd", ] +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bcrypt" version = "0.15.1" @@ -778,9 +795,9 @@ dependencies = [ [[package]] name = "bson" -version = "2.11.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a88e82b9106923b5c4d6edfca9e7db958d4e98a478ec115022e81b9b38e2c8" +checksum = "068208f2b6fcfa27a7f1ee37488d2bb8ba2640f68f5475d08e1d9130696aba59" dependencies = [ "ahash", "base64 0.13.1", @@ -870,9 +887,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.16" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" dependencies = [ "clap_builder", "clap_derive", @@ -880,9 +897,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.15" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" +checksum = 
"a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" dependencies = [ "anstream", "anstyle", @@ -892,9 +909,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.13" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -926,12 +943,18 @@ dependencies = [ [[package]] name = "command" -version = "1.14.2" +version = "1.15.0" dependencies = [ "komodo_client", "run_command", ] +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "convert_case" version = "0.4.0" @@ -988,6 +1011,18 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -998,6 +1033,33 @@ dependencies = [ "typenum", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "darling" version = "0.20.10" @@ -1033,12 +1095,37 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + [[package]] name = "deranged" version = "0.3.11" @@ -1163,6 +1250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", + "const-oid", "crypto-common", "subtle", ] @@ -1179,12 +1267,71 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" 
+dependencies = [ + "pkcs8", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "serde", + "sha2", + "subtle", + "zeroize", +] + [[package]] name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array", + "group", + "hkdf", + "pem-rfc7468", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encoding_rs" version = "0.8.34" @@ -1206,6 +1353,13 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "environment_file" +version = "1.15.0" +dependencies = [ + "thiserror", +] + [[package]] name = "envy" version = "0.4.2" @@ -1237,6 +1391,22 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "fnv" version = "1.0.7" @@ -1269,7 +1439,7 @@ dependencies = [ [[package]] name = "formatting" -version = "1.14.2" +version = "1.15.0" dependencies = [ "serror", ] 
@@ -1377,6 +1547,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check 0.9.5", + "zeroize", ] [[package]] @@ -1400,7 +1571,7 @@ checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "git" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "command", @@ -1418,6 +1589,17 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + [[package]] name = "h2" version = "0.3.26" @@ -1570,6 +1752,15 @@ dependencies = [ "tracing", ] +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1770,19 +1961,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.30", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "hyper-tls" version = "0.6.0" @@ -1814,7 +1992,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -1950,6 +2128,15 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +[[package]] +name = "itertools" +version = "0.10.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.13.0" @@ -2005,7 +2192,7 @@ dependencies = [ [[package]] name = "komodo_cli" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "clap", @@ -2021,7 +2208,7 @@ dependencies = [ [[package]] name = "komodo_client" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "async_timing_util", @@ -2035,15 +2222,15 @@ dependencies = [ "futures", "mongo_indexed", "partial_derive2", - "reqwest 0.12.7", + "reqwest 0.12.8", "resolver_api", "serde", "serde_json", "serror", - "strum 0.26.3", + "strum", "thiserror", "tokio", - "tokio-tungstenite 0.23.1", + "tokio-tungstenite", "tokio-util", "tracing", "typeshare", @@ -2052,19 +2239,21 @@ dependencies = [ [[package]] name = "komodo_core" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "async_timing_util", "aws-config", "aws-sdk-ec2", - "aws-sdk-ecr", "axum", "axum-extra", + "axum-server", "base64 0.22.1", "bcrypt", + "dashmap", "derive_variants", "dotenvy", + "environment_file", "envy", "formatting", "futures", @@ -2079,14 +2268,14 @@ dependencies = [ "mungos", "nom_pem", "octorust", + "openidconnect", "ordered_hash_map", "partial_derive2", "periphery_client", "rand", "regex", - "reqwest 0.12.7", + "reqwest 0.12.8", "resolver_api", - "run_command", "serde", "serde_json", "serde_yaml", @@ -2107,16 +2296,18 @@ dependencies = [ [[package]] name = "komodo_periphery" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "async_timing_util", "axum", "axum-extra", + "axum-server", "bollard", "clap", "command", "dotenvy", + "environment_file", "envy", "formatting", "futures", @@ -2142,6 +2333,9 @@ name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin 0.9.8", +] [[package]] name = "libc" @@ -2149,6 +2343,12 @@ version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2182,7 +2382,7 @@ dependencies = [ [[package]] name = "logger" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "komodo_client", @@ -2246,14 +2446,12 @@ dependencies = [ [[package]] name = "migrator" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "dotenvy", "envy", - "komodo_client", "logger", - "mungos", "serde", "tokio", "tracing", @@ -2286,14 +2484,13 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ - "hermit-abi", "libc", "wasi", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -2457,6 +2654,23 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + [[package]] name = "num-conv" version = "0.1.0" @@ -2472,6 +2686,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -2479,6 +2704,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "oauth2" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c38841cdd844847e3e7c8d29cef9dcfed8877f8f56f9071f77843ecf3baf937f" +dependencies = [ + "base64 0.13.1", + "chrono", + "getrandom", + "http 0.2.12", + "rand", + "reqwest 0.11.27", + "serde", + "serde_json", + "serde_path_to_error", + "sha2", + "thiserror", + "url", ] [[package]] @@ -2529,6 +2785,38 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +[[package]] +name = "openidconnect" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f47e80a9cfae4462dd29c41e987edd228971d6565553fbc14b8a11e666d91590" +dependencies = [ + "base64 0.13.1", + "chrono", + "dyn-clone", + "ed25519-dalek", + "hmac", + "http 0.2.12", + "itertools 0.10.5", + "log", + "oauth2", + "p256", + "p384", + "rand", + "rsa", + "serde", + "serde-value", + "serde_derive", + "serde_json", + "serde_path_to_error", + "serde_plain", + "serde_with", + "sha2", + "subtle", + "thiserror", + "url", +] + [[package]] name = "openssl" version = "0.10.66" @@ -2575,9 +2863,9 @@ dependencies = [ [[package]] name = "opentelemetry" -version = 
"0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c365a63eec4f55b7efeceb724f1336f26a9cf3427b70e59e2cd2a5b947fba96" +checksum = "803801d3d3b71cd026851a53f974ea03df3d179cb758b260136a6c9e22e196af" dependencies = [ "futures-core", "futures-sink", @@ -2589,9 +2877,9 @@ dependencies = [ [[package]] name = "opentelemetry-otlp" -version = "0.17.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b925a602ffb916fb7421276b86756027b37ee708f9dce2dbdcc51739f07e727" +checksum = "596b1719b3cab83addb20bcbffdf21575279d9436d9ccccfe651a3bf0ab5ab06" dependencies = [ "async-trait", "futures-core", @@ -2607,9 +2895,9 @@ dependencies = [ [[package]] name = "opentelemetry-proto" -version = "0.7.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee9f20bff9c984511a02f082dc8ede839e4a9bf15cc2487c8d6fea5ad850d9" +checksum = "2c43620e8f93359eb7e627a3b16ee92d8585774986f24f2ab010817426c5ce61" dependencies = [ "opentelemetry", "opentelemetry_sdk", @@ -2619,15 +2907,15 @@ dependencies = [ [[package]] name = "opentelemetry-semantic-conventions" -version = "0.16.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cefe0543875379e47eb5f1e68ff83f45cc41366a92dfd0d073d513bf68e9a05" +checksum = "9b8e442487022a943e2315740e443dc5ee95fd541c18f509a5a6251b408a9f95" [[package]] name = "opentelemetry_sdk" -version = "0.24.1" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692eac490ec80f24a17828d49b40b60f5aeaccdfe6a503f939713afd22bc28df" +checksum = "e0da0d6b47a3dbc6e9c9e36a0520e25cf943e046843818faaa3f87365a548c82" dependencies = [ "async-trait", "futures-channel", @@ -2644,6 +2932,15 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + [[package]] name = "ordered_hash_map" version = "0.4.0" @@ -2666,6 +2963,30 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + +[[package]] +name = "p384" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + [[package]] name = "parking_lot" version = "0.11.2" @@ -2763,6 +3084,15 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -2771,11 +3101,11 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "periphery_client" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "komodo_client", - "reqwest 0.12.7", + "reqwest 0.12.8", "resolver_api", "serde", "serde_json", @@ -2815,6 +3145,27 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + 
"der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + [[package]] name = "pkg-config" version = "0.3.30" @@ -2836,6 +3187,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + [[package]] name = "proc-macro2" version = "1.0.86" @@ -2862,7 +3222,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.77", @@ -2959,9 +3319,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", @@ -2971,9 +3331,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -2988,9 +3348,9 @@ checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -3008,13 +3368,11 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.30", "hyper-rustls 0.24.2", - "hyper-tls 0.5.0", "ipnet", "js-sys", "log", "mime", "mime_guess", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", @@ -3026,7 +3384,6 @@ dependencies = [ "sync_wrapper 0.1.2", "system-configuration 0.5.1", "tokio", - "tokio-native-tls", "tokio-rustls 0.24.1", "tower-service", "url", @@ -3039,9 +3396,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", "bytes", @@ -3054,7 +3411,7 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-rustls 0.27.2", - "hyper-tls 0.6.0", + "hyper-tls", "hyper-util", "ipnet", "js-sys", @@ -3191,6 +3548,16 @@ dependencies = [ "rand", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -3221,6 +3588,26 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rsa" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "spki", + "subtle", + "zeroize", +] + [[package]] name = "run_command" version = "0.0.6" @@ -3416,6 +3803,20 @@ dependencies = [ 
"untrusted 0.9.0", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "security-framework" version = "2.11.1" @@ -3447,13 +3848,23 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_bytes" version = "0.11.15" @@ -3465,9 +3876,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -3487,9 +3898,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "indexmap 2.5.0", "itoa", @@ -3508,6 +3919,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_plain" +version = "1.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1fc6db65a611022b23a0dec6975d63fb80a302cb3388835ff02c097258d50" +dependencies = [ + "serde", +] + [[package]] name = "serde_repr" version = "0.1.19" @@ -3652,6 +4072,16 @@ dependencies = [ "libc", ] +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core", +] + [[package]] name = "simple_asn1" version = "0.6.2" @@ -3675,17 +4105,17 @@ dependencies = [ [[package]] name = "slack_client_rs" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c78a07312af77255ee84e41fe32546f97ffc06c4166e753f005888dd12d967f" +checksum = "1df44b4cf36c18e1fae89698ef556bf7ba08848d1ef3adcc97e225288ecb7a80" dependencies = [ "anyhow", - "reqwest 0.11.27", + "reqwest 0.12.8", "serde", "serde_derive", "serde_json", - "strum 0.25.0", - "strum_macros 0.25.3", + "strum", + "strum_macros", ] [[package]] @@ -3716,6 +4146,16 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "stringprep" version = "0.1.5" @@ -3733,32 +4173,13 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" -[[package]] -name = "strum" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" - [[package]] name = "strum" version = "0.26.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ - "strum_macros 0.26.4", -] - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.77", + "strum_macros", ] [[package]] @@ -3918,18 +4339,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", @@ -3994,27 +4415,28 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "eb2caba9f80616f438e09748d5acda951967e1ea58508ef53d9c6402485a46df" dependencies = [ "backtrace", "bytes", "libc", "mio", + "num_cpus", "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", @@ -4031,6 +4453,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-openssl" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59df6849caa43bb7567f9a36f863c447d95a11d5903c9cc334ba32576a27eadd" +dependencies = [ + "openssl", + "openssl-sys", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -4065,33 +4498,21 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.21.0", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite 0.23.0", + "tungstenite", ] [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -4171,7 +4592,7 @@ dependencies = [ "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -4198,10 +4619,26 @@ dependencies = [ ] [[package]] -name = "tower-http" -version = "0.5.2" +name = "tower" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "bitflags 2.6.0", "bytes", @@ -4280,9 +4717,9 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9784ed4da7d921bc8df6963f8c80a0e4ce34ba6ba76668acadd3edbd985ff3b" +checksum = "5eabc56d23707ad55ba2a0750fc24767125d5a0f51993ba41ad2c441cc7b8dea" dependencies = [ "js-sys", "once_cell", @@ -4331,28 +4768,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.21.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" -dependencies = [ - "byteorder", - "bytes", - "data-encoding", - "http 1.1.0", - "httparse", - "log", - "rand", - "sha1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "tungstenite" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" +checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" dependencies = [ "byteorder", "bytes", @@ -4461,7 +4879,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "update_logger" -version = "1.14.2" +version = "1.15.0" dependencies = [ "anyhow", "komodo_client", diff --git a/Cargo.toml b/Cargo.toml index 
9970d3ee7..7741a8d0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ resolver = "2" members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"] [workspace.package] -version = "1.14.2" +version = "1.15.0" edition = "2021" authors = ["mbecker20 "] license = "GPL-3.0-or-later" @@ -15,9 +15,10 @@ homepage = "https://komo.do" [workspace.dependencies] # LOCAL -# komodo_client = "1.14.2" +# komodo_client = "1.14.3" komodo_client = { path = "client/core/rs" } periphery_client = { path = "client/periphery/rs" } +environment_file = { path = "lib/environment_file" } formatting = { path = "lib/formatting" } command = { path = "lib/command" } logger = { path = "lib/logger" } @@ -26,7 +27,7 @@ git = { path = "lib/git" } # MOGH run_command = { version = "0.0.6", features = ["async_tokio"] } serror = { version = "0.4.6", default-features = false } -slack = { version = "0.1.0", package = "slack_client_rs" } +slack = { version = "0.2.0", package = "slack_client_rs" } derive_default_builder = "0.1.8" derive_empty_traits = "0.1.0" merge_config_files = "0.1.5" @@ -40,46 +41,48 @@ mungos = "1.1.0" svi = "1.0.1" # ASYNC -tokio = { version = "1.40.0", features = ["full"] } -reqwest = { version = "0.12.7", features = ["json"] } -tokio-util = "0.7.11" +reqwest = { version = "0.12.8", features = ["json"] } +tokio = { version = "1.38.1", features = ["full"] } +tokio-util = "0.7.12" futures = "0.3.30" futures-util = "0.3.30" # SERVER -axum = { version = "0.7.5", features = ["ws", "json"] } -axum-extra = { version = "0.9.3", features = ["typed-header"] } -tower-http = { version = "0.5.2", features = ["fs", "cors"] } -tokio-tungstenite = "0.23.1" +axum-extra = { version = "0.9.4", features = ["typed-header"] } +tower-http = { version = "0.6.1", features = ["fs", "cors"] } +axum-server = { version = "0.7.1", features = ["tls-openssl"] } +axum = { version = "0.7.7", features = ["ws", "json"] } +tokio-tungstenite = "0.24.0" # SER/DE ordered_hash_map = { version = "0.4.0", 
features = ["serde"] } -serde = { version = "1.0.209", features = ["derive"] } +serde = { version = "1.0.210", features = ["derive"] } strum = { version = "0.26.3", features = ["derive"] } -serde_json = "1.0.127" +serde_json = "1.0.128" serde_yaml = "0.9.34" toml = "0.8.19" # ERROR -anyhow = "1.0.86" -thiserror = "1.0.63" +anyhow = "1.0.89" +thiserror = "1.0.64" # LOGGING -opentelemetry_sdk = { version = "0.24.1", features = ["rt-tokio"] } +opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio"] } tracing-subscriber = { version = "0.3.18", features = ["json"] } -opentelemetry-semantic-conventions = "0.16.0" -tracing-opentelemetry = "0.25.0" -opentelemetry-otlp = "0.17.0" -opentelemetry = "0.24.0" +opentelemetry-semantic-conventions = "0.25.0" +tracing-opentelemetry = "0.26.0" +opentelemetry-otlp = "0.25.0" +opentelemetry = "0.25.0" tracing = "0.1.40" # CONFIG -clap = { version = "4.5.16", features = ["derive"] } +clap = { version = "4.5.19", features = ["derive"] } dotenvy = "0.15.7" envy = "0.4.2" -# CRYPTO +# CRYPTO / AUTH uuid = { version = "1.10.0", features = ["v4", "fast-rng", "serde"] } +openidconnect = "3.5.0" urlencoding = "2.1.3" nom_pem = "4.0.0" bcrypt = "0.15.1" @@ -95,15 +98,14 @@ bollard = "0.17.1" sysinfo = "0.31.4" # CLOUD -aws-config = "1.5.5" -aws-sdk-ec2 = "1.70.0" -aws-sdk-ecr = "1.42.0" +aws-config = "1.5.7" +aws-sdk-ec2 = "1.75.0" # MISC derive_builder = "0.20.1" typeshare = "1.0.3" octorust = "0.7.0" +dashmap = "6.1.0" colored = "2.1.0" -regex = "1.10.6" -bson = "2.11.0" - +regex = "1.11.0" +bson = "2.13.0" \ No newline at end of file diff --git a/bin/core/Cargo.toml b/bin/core/Cargo.toml index 58f004828..919609fd6 100644 --- a/bin/core/Cargo.toml +++ b/bin/core/Cargo.toml @@ -17,6 +17,7 @@ path = "src/main.rs" # local komodo_client = { workspace = true, features = ["mongo"] } periphery_client.workspace = true +environment_file.workspace = true formatting.workspace = true logger.workspace = true git.workspace = true @@ -29,15 
+30,15 @@ derive_variants.workspace = true mongo_indexed.workspace = true resolver_api.workspace = true toml_pretty.workspace = true -run_command.workspace = true mungos.workspace = true slack.workspace = true svi.workspace = true # external +axum-server.workspace = true ordered_hash_map.workspace = true +openidconnect.workspace = true urlencoding.workspace = true aws-sdk-ec2.workspace = true -aws-sdk-ecr.workspace = true aws-config.workspace = true tokio-util.workspace = true axum-extra.workspace = true @@ -46,6 +47,7 @@ serde_json.workspace = true serde_yaml.workspace = true typeshare.workspace = true octorust.workspace = true +dashmap.workspace = true tracing.workspace = true reqwest.workspace = true futures.workspace = true diff --git a/bin/core/alpine.Dockerfile b/bin/core/alpine.Dockerfile new file mode 100644 index 000000000..aa8dc7ad8 --- /dev/null +++ b/bin/core/alpine.Dockerfile @@ -0,0 +1,45 @@ +## This one produces smaller images, +## but alpine uses `musl` instead of `glibc`. +## This makes it take longer / more resources to build, +## and may negatively affect runtime performance. + +# Build Core +FROM rust:1.81.0-alpine AS core-builder +WORKDIR /builder +RUN apk update && apk --no-cache add musl-dev openssl-dev openssl-libs-static +COPY . . 
+RUN cargo build -p komodo_core --release + +# Build Frontend +FROM node:20.12-alpine AS frontend-builder +WORKDIR /builder +COPY ./frontend ./frontend +COPY ./client/core/ts ./client +RUN cd client && yarn && yarn build && yarn link +RUN cd frontend && yarn link @komodo/client && yarn && yarn build + +# Final Image +FROM alpine:3.20 + +# Install Deps +RUN apk update && apk add --no-cache --virtual .build-deps \ + openssl ca-certificates git git-lfs + +# Setup an application directory +WORKDIR /app + +# Copy +COPY ./config/core.config.toml /config/config.toml +COPY --from=core-builder /builder/target/release/core /app +COPY --from=frontend-builder /builder/frontend/dist /app/frontend + +# Hint at the port +EXPOSE 9120 + +# Label for Ghcr +LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo +LABEL org.opencontainers.image.description="Komodo Core" +LABEL org.opencontainers.image.licenses=GPL-3.0 + +# Using ENTRYPOINT allows cli args to be passed, eg using "command" in docker compose. +ENTRYPOINT [ "/app/core" ] \ No newline at end of file diff --git a/bin/core/Dockerfile b/bin/core/debian.Dockerfile similarity index 52% rename from bin/core/Dockerfile rename to bin/core/debian.Dockerfile index ccbbbae7c..6de909eda 100644 --- a/bin/core/Dockerfile +++ b/bin/core/debian.Dockerfile @@ -1,5 +1,5 @@ # Build Core -FROM rust:1.80.1-bookworm AS core-builder +FROM rust:1.81.0-bullseye AS core-builder WORKDIR /builder COPY . . 
RUN cargo build -p komodo_core --release @@ -13,27 +13,27 @@ RUN cd client && yarn && yarn build && yarn link RUN cd frontend && yarn link @komodo/client && yarn && yarn build # Final Image -FROM debian:bookworm-slim +FROM debian:bullseye-slim # Install Deps -RUN apt update && apt install -y git curl unzip ca-certificates && \ - curl -SL https://github.com/docker/compose/releases/download/v2.29.1/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose && \ - chmod +x /usr/local/bin/docker-compose && \ - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ - unzip awscliv2.zip && \ - ./aws/install +RUN apt update && \ + apt install -y git ca-certificates && \ + rm -rf /var/lib/apt/lists/* + +# Setup an application directory +WORKDIR /app # Copy COPY ./config/core.config.toml /config/config.toml -COPY --from=core-builder /builder/target/release/core / -COPY --from=frontend-builder /builder/frontend/dist /frontend +COPY --from=core-builder /builder/target/release/core /app +COPY --from=frontend-builder /builder/frontend/dist /app/frontend # Hint at the port -EXPOSE 9000 +EXPOSE 9120 # Label for Ghcr LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo LABEL org.opencontainers.image.description="Komodo Core" LABEL org.opencontainers.image.licenses=GPL-3.0 -CMD ["./core"] \ No newline at end of file +ENTRYPOINT [ "/app/core" ] \ No newline at end of file diff --git a/bin/core/src/alert/discord.rs b/bin/core/src/alert/discord.rs new file mode 100644 index 000000000..23eca5272 --- /dev/null +++ b/bin/core/src/alert/discord.rs @@ -0,0 +1,169 @@ +use std::sync::OnceLock; + +use serde::Serialize; + +use super::*; + +#[instrument(level = "debug")] +pub async fn send_alert( + url: &str, + alert: &Alert, +) -> anyhow::Result<()> { + let level = fmt_level(alert.level); + let content = match &alert.data { + AlertData::ServerUnreachable { + id, + name, + region, + err, + } => { + let region = fmt_region(region); + let 
link = resource_link(ResourceTargetVariant::Server, id); + match alert.level { + SeverityLevel::Ok => { + format!( + "{level} | *{name}*{region} is now *reachable*\n{link}" + ) + } + SeverityLevel::Critical => { + let err = err + .as_ref() + .map(|e| format!("\n**error**: {e:#?}")) + .unwrap_or_default(); + format!( + "{level} | *{name}*{region} is *unreachable* ❌\n{link}{err}" + ) + } + _ => unreachable!(), + } + } + AlertData::ServerCpu { + id, + name, + region, + percentage, + } => { + let region = fmt_region(region); + let link = resource_link(ResourceTargetVariant::Server, id); + format!( + "{level} | *{name}*{region} cpu usage at *{percentage:.1}%*\n{link}" + ) + } + AlertData::ServerMem { + id, + name, + region, + used_gb, + total_gb, + } => { + let region = fmt_region(region); + let link = resource_link(ResourceTargetVariant::Server, id); + let percentage = 100.0 * used_gb / total_gb; + format!( + "{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾\n\nUsing *{used_gb:.1} GiB* / *{total_gb:.1} GiB*\n{link}" + ) + } + AlertData::ServerDisk { + id, + name, + region, + path, + used_gb, + total_gb, + } => { + let region = fmt_region(region); + let link = resource_link(ResourceTargetVariant::Server, id); + let percentage = 100.0 * used_gb / total_gb; + format!( + "{level} | *{name}*{region} disk usage at *{percentage:.1}%* 💿\nmount point: `{path:?}`\nusing *{used_gb:.1} GiB* / *{total_gb:.1} GiB*\n{link}" + ) + } + AlertData::ContainerStateChange { + id, + name, + server_id: _server_id, + server_name, + from, + to, + } => { + let link = resource_link(ResourceTargetVariant::Deployment, id); + let to = fmt_docker_container_state(to); + format!("📦 Deployment *{name}* is now {to}\nserver: {server_name}\nprevious: {from}\n{link}") + } + AlertData::StackStateChange { + id, + name, + server_id: _server_id, + server_name, + from, + to, + } => { + let link = resource_link(ResourceTargetVariant::Stack, id); + let to = fmt_stack_state(to); + format!("🥞 Stack 
*{name}* is now {to}\nserver: {server_name}\nprevious: {from}\n{link}") + } + AlertData::AwsBuilderTerminationFailed { + instance_id, + message, + } => { + format!("{level} | Failed to terminated AWS builder instance\ninstance id: *{instance_id}*\n{message}") + } + AlertData::ResourceSyncPendingUpdates { id, name } => { + let link = + resource_link(ResourceTargetVariant::ResourceSync, id); + format!( + "{level} | Pending resource sync updates on *{name}*\n{link}" + ) + } + AlertData::BuildFailed { id, name, version } => { + let link = resource_link(ResourceTargetVariant::Build, id); + format!("{level} | Build *{name}* failed\nversion: v{version}\n{link}") + } + AlertData::RepoBuildFailed { id, name } => { + let link = resource_link(ResourceTargetVariant::Repo, id); + format!("{level} | Repo build for *{name}* failed\n{link}") + } + AlertData::None {} => Default::default(), + }; + if !content.is_empty() { + send_message(url, &content).await?; + } + Ok(()) +} + +async fn send_message( + url: &str, + content: &str, +) -> anyhow::Result<()> { + let body = DiscordMessageBody { content }; + + let response = http_client() + .post(url) + .json(&body) + .send() + .await + .context("Failed to send message")?; + + let status = response.status(); + + if status.is_success() { + Ok(()) + } else { + let text = response.text().await.with_context(|| { + format!("Failed to send message to Discord | {status} | failed to get response text") + })?; + Err(anyhow::anyhow!( + "Failed to send message to Discord | {status} | {text}" + )) + } +} + +fn http_client() -> &'static reqwest::Client { + static CLIENT: OnceLock = OnceLock::new(); + CLIENT.get_or_init(reqwest::Client::new) +} + +#[derive(Serialize)] +struct DiscordMessageBody<'a> { + content: &'a str, +} diff --git a/bin/core/src/alert/mod.rs b/bin/core/src/alert/mod.rs new file mode 100644 index 000000000..11a1ecaef --- /dev/null +++ b/bin/core/src/alert/mod.rs @@ -0,0 +1,207 @@ +use ::slack::types::Block; +use anyhow::{anyhow, 
Context}; +use derive_variants::ExtractVariant; +use futures::future::join_all; +use komodo_client::entities::{ + alert::{Alert, AlertData, SeverityLevel}, + alerter::*, + deployment::DeploymentState, + stack::StackState, + ResourceTargetVariant, +}; +use mungos::{find::find_collect, mongodb::bson::doc}; + +use crate::{config::core_config, state::db_client}; + +mod discord; +mod slack; + +#[instrument] +pub async fn send_alerts(alerts: &[Alert]) { + if alerts.is_empty() { + return; + } + + let Ok(alerters) = find_collect( + &db_client().alerters, + doc! { "config.enabled": true }, + None, + ) + .await + .inspect_err(|e| { + error!( + "ERROR sending alerts | failed to get alerters from db | {e:#}" + ) + }) else { + return; + }; + + let handles = + alerts.iter().map(|alert| send_alert(&alerters, alert)); + + join_all(handles).await; +} + +#[instrument(level = "debug")] +async fn send_alert(alerters: &[Alerter], alert: &Alert) { + if alerters.is_empty() { + return; + } + + let alert_type = alert.data.extract_variant(); + + let handles = alerters.iter().map(|alerter| async { + // Don't send if not enabled + if !alerter.config.enabled { + return Ok(()); + } + + // Don't send if alert type not configured on the alerter + if !alerter.config.alert_types.is_empty() + && !alerter.config.alert_types.contains(&alert_type) + { + return Ok(()); + } + + // Don't send if resource is in the blacklist + if alerter.config.except_resources.contains(&alert.target) { + return Ok(()); + } + + // Don't send if whitelist configured and target is not included + if !alerter.config.resources.is_empty() + && !alerter.config.resources.contains(&alert.target) + { + return Ok(()); + } + + match &alerter.config.endpoint { + AlerterEndpoint::Custom(CustomAlerterEndpoint { url }) => { + send_custom_alert(url, alert).await.with_context(|| { + format!( + "failed to send alert to custom alerter {}", + alerter.name + ) + }) + } + AlerterEndpoint::Slack(SlackAlerterEndpoint { url }) => { + 
slack::send_alert(url, alert).await.with_context(|| { + format!( + "failed to send alert to slack alerter {}", + alerter.name + ) + }) + } + AlerterEndpoint::Discord(DiscordAlerterEndpoint { url }) => { + discord::send_alert(url, alert).await.with_context(|| { + format!( + "failed to send alert to Discord alerter {}", + alerter.name + ) + }) + } + } + }); + + join_all(handles) + .await + .into_iter() + .filter_map(|res| res.err()) + .for_each(|e| error!("{e:#}")); +} + +#[instrument(level = "debug")] +async fn send_custom_alert( + url: &str, + alert: &Alert, +) -> anyhow::Result<()> { + let res = reqwest::Client::new() + .post(url) + .json(alert) + .send() + .await + .context("failed at post request to alerter")?; + let status = res.status(); + if !status.is_success() { + let text = res + .text() + .await + .context("failed to get response text on alerter response")?; + return Err(anyhow!( + "post to alerter failed | {status} | {text}" + )); + } + Ok(()) +} + +fn fmt_region(region: &Option) -> String { + match region { + Some(region) => format!(" ({region})"), + None => String::new(), + } +} + +fn fmt_docker_container_state(state: &DeploymentState) -> String { + match state { + DeploymentState::Running => String::from("Running ▶️"), + DeploymentState::Exited => String::from("Exited 🛑"), + DeploymentState::Restarting => String::from("Restarting 🔄"), + DeploymentState::NotDeployed => String::from("Not Deployed"), + _ => state.to_string(), + } +} + +fn fmt_stack_state(state: &StackState) -> String { + match state { + StackState::Running => String::from("Running ▶️"), + StackState::Stopped => String::from("Stopped 🛑"), + StackState::Restarting => String::from("Restarting 🔄"), + StackState::Down => String::from("Down ⬇️"), + _ => state.to_string(), + } +} + +fn fmt_level(level: SeverityLevel) -> &'static str { + match level { + SeverityLevel::Critical => "CRITICAL 🚨", + SeverityLevel::Warning => "WARNING ‼️", + SeverityLevel::Ok => "OK ✅", + } +} + +fn resource_link( + 
resource_type: ResourceTargetVariant, + id: &str, +) -> String { + let path = match resource_type { + ResourceTargetVariant::System => unreachable!(), + ResourceTargetVariant::Build => format!("/builds/{id}"), + ResourceTargetVariant::Builder => { + format!("/builders/{id}") + } + ResourceTargetVariant::Deployment => { + format!("/deployments/{id}") + } + ResourceTargetVariant::Stack => { + format!("/stacks/{id}") + } + ResourceTargetVariant::Server => { + format!("/servers/{id}") + } + ResourceTargetVariant::Repo => format!("/repos/{id}"), + ResourceTargetVariant::Alerter => { + format!("/alerters/{id}") + } + ResourceTargetVariant::Procedure => { + format!("/procedures/{id}") + } + ResourceTargetVariant::ServerTemplate => { + format!("/server-templates/{id}") + } + ResourceTargetVariant::ResourceSync => { + format!("/resource-syncs/{id}") + } + }; + + format!("{}{path}", core_config().host) +} diff --git a/bin/core/src/helpers/alert.rs b/bin/core/src/alert/slack.rs similarity index 61% rename from bin/core/src/helpers/alert.rs rename to bin/core/src/alert/slack.rs index 4e52758dc..a8071a841 100644 --- a/bin/core/src/helpers/alert.rs +++ b/bin/core/src/alert/slack.rs @@ -1,130 +1,7 @@ -use anyhow::{anyhow, Context}; -use derive_variants::ExtractVariant; -use futures::future::join_all; -use komodo_client::entities::{ - alert::{Alert, AlertData, SeverityLevel}, - alerter::*, - deployment::DeploymentState, - stack::StackState, - ResourceTargetVariant, -}; -use mungos::{find::find_collect, mongodb::bson::doc}; -use slack::types::Block; - -use crate::{config::core_config, state::db_client}; - -#[instrument] -pub async fn send_alerts(alerts: &[Alert]) { - if alerts.is_empty() { - return; - } - - let Ok(alerters) = find_collect( - &db_client().await.alerters, - doc! 
{ "config.enabled": true }, - None, - ) - .await - .inspect_err(|e| { - error!( - "ERROR sending alerts | failed to get alerters from db | {e:#}" - ) - }) else { - return; - }; - - let handles = - alerts.iter().map(|alert| send_alert(&alerters, alert)); - - join_all(handles).await; -} +use super::*; #[instrument(level = "debug")] -async fn send_alert(alerters: &[Alerter], alert: &Alert) { - if alerters.is_empty() { - return; - } - - let alert_type = alert.data.extract_variant(); - - let handles = alerters.iter().map(|alerter| async { - // Don't send if not enabled - if !alerter.config.enabled { - return Ok(()); - } - - // Don't send if alert type not configured on the alerter - if !alerter.config.alert_types.is_empty() - && !alerter.config.alert_types.contains(&alert_type) - { - return Ok(()); - } - - // Don't send if resource is in the blacklist - if alerter.config.except_resources.contains(&alert.target) { - return Ok(()); - } - - // Don't send if whitelist configured and target is not included - if !alerter.config.resources.is_empty() - && !alerter.config.resources.contains(&alert.target) - { - return Ok(()); - } - - match &alerter.config.endpoint { - AlerterEndpoint::Slack(SlackAlerterEndpoint { url }) => { - send_slack_alert(url, alert).await.with_context(|| { - format!( - "failed to send alert to slack alerter {}", - alerter.name - ) - }) - } - AlerterEndpoint::Custom(CustomAlerterEndpoint { url }) => { - send_custom_alert(url, alert).await.with_context(|| { - format!( - "failed to send alert to custom alerter {}", - alerter.name - ) - }) - } - } - }); - - join_all(handles) - .await - .into_iter() - .filter_map(|res| res.err()) - .for_each(|e| error!("{e:#}")); -} - -#[instrument(level = "debug")] -async fn send_custom_alert( - url: &str, - alert: &Alert, -) -> anyhow::Result<()> { - let res = reqwest::Client::new() - .post(url) - .json(alert) - .send() - .await - .context("failed at post request to alerter")?; - let status = res.status(); - if 
!status.is_success() { - let text = res - .text() - .await - .context("failed to get response text on alerter response")?; - return Err(anyhow!( - "post to alerter failed | {status} | {text}" - )); - } - Ok(()) -} - -#[instrument(level = "debug")] -async fn send_slack_alert( +pub async fn send_alert( url: &str, alert: &Alert, ) -> anyhow::Result<()> { @@ -399,80 +276,8 @@ async fn send_slack_alert( AlertData::None {} => Default::default(), }; if !text.is_empty() { - let slack = slack::Client::new(url); + let slack = ::slack::Client::new(url); slack.send_message(text, blocks).await?; } Ok(()) } - -fn fmt_region(region: &Option) -> String { - match region { - Some(region) => format!(" ({region})"), - None => String::new(), - } -} - -fn fmt_docker_container_state(state: &DeploymentState) -> String { - match state { - DeploymentState::Running => String::from("Running ▶️"), - DeploymentState::Exited => String::from("Exited 🛑"), - DeploymentState::Restarting => String::from("Restarting 🔄"), - DeploymentState::NotDeployed => String::from("Not Deployed"), - _ => state.to_string(), - } -} - -fn fmt_stack_state(state: &StackState) -> String { - match state { - StackState::Running => String::from("Running ▶️"), - StackState::Stopped => String::from("Stopped 🛑"), - StackState::Restarting => String::from("Restarting 🔄"), - StackState::Down => String::from("Down ⬇️"), - _ => state.to_string(), - } -} - -fn fmt_level(level: SeverityLevel) -> &'static str { - match level { - SeverityLevel::Critical => "CRITICAL 🚨", - SeverityLevel::Warning => "WARNING ‼️", - SeverityLevel::Ok => "OK ✅", - } -} - -fn resource_link( - resource_type: ResourceTargetVariant, - id: &str, -) -> String { - let path = match resource_type { - ResourceTargetVariant::System => unreachable!(), - ResourceTargetVariant::Build => format!("/builds/{id}"), - ResourceTargetVariant::Builder => { - format!("/builders/{id}") - } - ResourceTargetVariant::Deployment => { - format!("/deployments/{id}") - } - 
ResourceTargetVariant::Stack => { - format!("/stacks/{id}") - } - ResourceTargetVariant::Server => { - format!("/servers/{id}") - } - ResourceTargetVariant::Repo => format!("/repos/{id}"), - ResourceTargetVariant::Alerter => { - format!("/alerters/{id}") - } - ResourceTargetVariant::Procedure => { - format!("/procedures/{id}") - } - ResourceTargetVariant::ServerTemplate => { - format!("/server-templates/{id}") - } - ResourceTargetVariant::ResourceSync => { - format!("/resource-syncs/{id}") - } - }; - - format!("{}{path}", core_config().host) -} diff --git a/bin/core/src/api/auth.rs b/bin/core/src/api/auth.rs index 50f9c89ba..91ca0ef4e 100644 --- a/bin/core/src/api/auth.rs +++ b/bin/core/src/api/auth.rs @@ -16,6 +16,7 @@ use crate::{ get_user_id_from_headers, github::{self, client::github_oauth_client}, google::{self, client::google_oauth_client}, + oidc, }, config::core_config, helpers::query::get_user, @@ -39,14 +40,25 @@ pub enum AuthRequest { pub fn router() -> Router { let mut router = Router::new().route("/", post(handler)); + if core_config().local_auth { + info!("🔑 Local Login Enabled"); + } + if github_oauth_client().is_some() { + info!("🔑 Github Login Enabled"); router = router.nest("/github", github::router()) } if google_oauth_client().is_some() { + info!("🔑 Github Login Enabled"); router = router.nest("/google", google::router()) } + if core_config().oidc_enabled { + info!("🔑 OIDC Login Enabled"); + router = router.nest("/oidc", oidc::router()) + } + router } @@ -91,6 +103,10 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse { google: config.google_oauth.enabled && !config.google_oauth.id.is_empty() && !config.google_oauth.secret.is_empty(), + oidc: config.oidc_enabled + && !config.oidc_provider.is_empty() + && !config.oidc_client_id.is_empty() + && !config.oidc_client_secret.is_empty(), registration_disabled: config.disable_user_registration, } }) diff --git a/bin/core/src/api/execute/build.rs b/bin/core/src/api/execute/build.rs index 
f478e2889..9ece875a9 100644 --- a/bin/core/src/api/execute/build.rs +++ b/bin/core/src/api/execute/build.rs @@ -8,13 +8,11 @@ use komodo_client::{ entities::{ alert::{Alert, AlertData, SeverityLevel}, all_logs_success, - build::{Build, ImageRegistry, StandardRegistryConfig}, + build::{Build, BuildConfig, ImageRegistryConfig}, builder::{Builder, BuilderConfig}, - config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials}, deployment::DeploymentState, komodo_timestamp, permission::PermissionLevel, - to_komodo_name, update::{Log, Update}, user::{auto_redeploy_user, User}, }, @@ -32,17 +30,15 @@ use resolver_api::Resolve; use tokio_util::sync::CancellationToken; use crate::{ - cloud::aws::ecr, - config::core_config, + alert::send_alerts, helpers::{ - alert::send_alerts, builder::{cleanup_builder_instance, get_builder_periphery}, channel::build_cancel_channel, git_token, interpolate::{ add_interp_update_log, - interpolate_variables_secrets_into_environment, interpolate_variables_secrets_into_extra_args, + interpolate_variables_secrets_into_string, interpolate_variables_secrets_into_system_command, }, query::{get_deployment_state, get_variables_and_secrets}, @@ -99,8 +95,8 @@ impl Resolve for State { || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. 
| {} | {}", build.config.git_provider, build.config.git_account), )?; - let (registry_token, aws_ecr) = - validate_account_extract_registry_token_aws_ecr(&build).await?; + let registry_token = + validate_account_extract_registry_token(&build).await?; let cancel = CancellationToken::new(); let cancel_clone = cancel.clone(); @@ -245,14 +241,14 @@ impl Resolve for State { let mut global_replacers = HashSet::new(); let mut secret_replacers = HashSet::new(); - interpolate_variables_secrets_into_environment( + interpolate_variables_secrets_into_string( &vars_and_secrets, &mut build.config.build_args, &mut global_replacers, &mut secret_replacers, )?; - interpolate_variables_secrets_into_environment( + interpolate_variables_secrets_into_string( &vars_and_secrets, &mut build.config.secret_args, &mut global_replacers, @@ -282,7 +278,6 @@ impl Resolve for State { .request(api::build::Build { build: build.clone(), registry_token, - aws_ecr, replacers: secret_replacers.into_iter().collect(), // Push a commit hash tagged image additional_tags: if update.commit_hash.is_empty() { @@ -317,7 +312,7 @@ impl Resolve for State { update.finalize(); - let db = db_client().await; + let db = db_client(); if update.success { let _ = db @@ -403,7 +398,7 @@ async fn handle_early_return( // but will fail to update cache in that case. 
if let Ok(update_doc) = to_document(&update) { let _ = update_one_by_id( - &db_client().await.updates, + &db_client().updates, &update.id, mungos::update::Update::Set(update_doc), None, @@ -443,7 +438,7 @@ pub async fn validate_cancel_build( if let ExecuteRequest::CancelBuild(req) = request { let build = resource::get::(&req.build).await?; - let db = db_client().await; + let db = db_client(); let (latest_build, latest_cancel) = tokio::try_join!( db.updates @@ -526,7 +521,7 @@ impl Resolve for State { tokio::spawn(async move { tokio::time::sleep(Duration::from_secs(60)).await; if let Err(e) = update_one_by_id( - &db_client().await.updates, + &db_client().updates, &update_id, doc! { "$set": { "status": "Complete" } }, None, @@ -544,7 +539,7 @@ impl Resolve for State { #[instrument] async fn handle_post_build_redeploy(build_id: &str) { let Ok(redeploy_deployments) = find_collect( - &db_client().await.deployments, + &db_client().deployments, doc! { "config.image.params.build_id": build_id, "config.redeploy_on_build": true @@ -600,56 +595,24 @@ async fn handle_post_build_redeploy(build_id: &str) { } /// This will make sure that a build with non-none image registry has an account attached, -/// and will check the core config for a token / aws ecr config matching requirements. +/// and will check the core config for a token matching requirements. /// Otherwise it is left to periphery. 
-async fn validate_account_extract_registry_token_aws_ecr( - build: &Build, -) -> anyhow::Result<(Option, Option)> { - let (domain, account) = match &build.config.image_registry { - // Early return for None - ImageRegistry::None(_) => return Ok((None, None)), - // Early return for AwsEcr - ImageRegistry::AwsEcr(label) => { - // Note that aws ecr config still only lives in config file - let config = core_config() - .aws_ecr_registries - .iter() - .find(|reg| ®.label == label); - let token = match config { - Some(AwsEcrConfigWithCredentials { - region, - access_key_id, - secret_access_key, - .. - }) => { - let token = ecr::get_ecr_token( - region, - access_key_id, - secret_access_key, - ) - .await - .context("failed to get aws ecr token")?; - ecr::maybe_create_repo( - &to_komodo_name(&build.name), - region.to_string(), - access_key_id, - secret_access_key, - ) - .await - .context("failed to create aws ecr repo")?; - Some(token) - } - None => None, - }; - return Ok((token, config.map(AwsEcrConfig::from))); - } - ImageRegistry::Standard(StandardRegistryConfig { - domain, - account, - .. - }) => (domain.as_str(), account), - }; - +async fn validate_account_extract_registry_token( + Build { + config: + BuildConfig { + image_registry: + ImageRegistryConfig { + domain, account, .. + }, + .. + }, + .. + }: &Build, +) -> anyhow::Result> { + if domain.is_empty() { + return Ok(None); + } if account.is_empty() { return Err(anyhow!( "Must attach account to use registry provider {domain}" @@ -660,5 +623,5 @@ async fn validate_account_extract_registry_token_aws_ecr( || format!("Failed to get registry token in call to db. Stopping run. 
| {domain} | {account}"), )?; - Ok((registry_token, None)) + Ok(registry_token) } diff --git a/bin/core/src/api/execute/deployment.rs b/bin/core/src/api/execute/deployment.rs index 23caa87ea..4e8193677 100644 --- a/bin/core/src/api/execute/deployment.rs +++ b/bin/core/src/api/execute/deployment.rs @@ -5,8 +5,7 @@ use formatting::format_serror; use komodo_client::{ api::execute::*, entities::{ - build::{Build, ImageRegistry}, - config::core::AwsEcrConfig, + build::{Build, ImageRegistryConfig}, deployment::{ extract_registry_domain, Deployment, DeploymentImage, }, @@ -22,14 +21,11 @@ use periphery_client::api; use resolver_api::Resolve; use crate::{ - cloud::aws::ecr, - config::core_config, helpers::{ interpolate::{ add_interp_update_log, - interpolate_variables_secrets_into_container_command, - interpolate_variables_secrets_into_environment, interpolate_variables_secrets_into_extra_args, + interpolate_variables_secrets_into_string, }, periphery_client, query::get_variables_and_secrets, @@ -98,20 +94,11 @@ impl Resolve for State { .context("Failed server health check, stopping run.")?; // This block resolves the attached Build to an actual versioned image - let (version, registry_token, aws_ecr) = match &deployment - .config - .image - { + let (version, registry_token) = match &deployment.config.image { DeploymentImage::Build { build_id, version } => { let build = resource::get::(build_id).await?; - let image_name = get_image_name(&build, |label| { - core_config() - .aws_ecr_registries - .iter() - .find(|reg| ®.label == label) - .map(AwsEcrConfig::from) - }) - .context("failed to create image name")?; + let image_name = get_image_name(&build) + .context("failed to create image name")?; let version = if version.is_none() { build.config.version } else { @@ -133,45 +120,27 @@ impl Resolve for State { deployment.config.image = DeploymentImage::Image { image: format!("{image_name}:{version_str}"), }; - match build.config.image_registry { - ImageRegistry::None(_) => 
(version, None, None), - ImageRegistry::AwsEcr(label) => { - let config = core_config() - .aws_ecr_registries - .iter() - .find(|reg| reg.label == label) - .with_context(|| { - format!( - "did not find config for aws ecr registry {label}" - ) - })?; - let token = ecr::get_ecr_token( - &config.region, - &config.access_key_id, - &config.secret_access_key, - ) - .await - .context("failed to create aws ecr login token")?; - (version, Some(token), Some(AwsEcrConfig::from(config))) - } - ImageRegistry::Standard(params) => { - if deployment.config.image_registry_account.is_empty() { - deployment.config.image_registry_account = - params.account - } - let token = if !deployment - .config - .image_registry_account - .is_empty() - { - registry_token(¶ms.domain, &deployment.config.image_registry_account).await.with_context( - || format!("Failed to get git token in call to db. Stopping run. | {} | {}", params.domain, deployment.config.image_registry_account), - )? - } else { - None - }; - (version, token, None) + if build.config.image_registry.domain.is_empty() { + (version, None) + } else { + let ImageRegistryConfig { + domain, account, .. + } = build.config.image_registry; + if deployment.config.image_registry_account.is_empty() { + deployment.config.image_registry_account = account } + let token = if !deployment + .config + .image_registry_account + .is_empty() + { + registry_token(&domain, &deployment.config.image_registry_account).await.with_context( + || format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account), + )? 
+ } else { + None + }; + (version, token) } } DeploymentImage::Image { image } => { @@ -187,7 +156,7 @@ impl Resolve for State { } else { None }; - (Version::default(), token, None) + (Version::default(), token) } }; @@ -199,13 +168,27 @@ impl Resolve for State { let mut global_replacers = HashSet::new(); let mut secret_replacers = HashSet::new(); - interpolate_variables_secrets_into_environment( + interpolate_variables_secrets_into_string( &vars_and_secrets, &mut deployment.config.environment, &mut global_replacers, &mut secret_replacers, )?; + interpolate_variables_secrets_into_string( + &vars_and_secrets, + &mut deployment.config.ports, + &mut global_replacers, + &mut secret_replacers, + )?; + + interpolate_variables_secrets_into_string( + &vars_and_secrets, + &mut deployment.config.volumes, + &mut global_replacers, + &mut secret_replacers, + )?; + interpolate_variables_secrets_into_extra_args( &vars_and_secrets, &mut deployment.config.extra_args, @@ -213,7 +196,7 @@ impl Resolve for State { &mut secret_replacers, )?; - interpolate_variables_secrets_into_container_command( + interpolate_variables_secrets_into_string( &vars_and_secrets, &mut deployment.config.command, &mut global_replacers, @@ -240,7 +223,6 @@ impl Resolve for State { stop_signal, stop_time, registry_token, - aws_ecr, replacers: secret_replacers.into_iter().collect(), }) .await diff --git a/bin/core/src/api/execute/mod.rs b/bin/core/src/api/execute/mod.rs index 5fcf50095..d319b78ab 100644 --- a/bin/core/src/api/execute/mod.rs +++ b/bin/core/src/api/execute/mod.rs @@ -135,7 +135,7 @@ async fn handler( }; let res = async { let mut update = - find_one_by_id(&db_client().await.updates, &update_id) + find_one_by_id(&db_client().updates, &update_id) .await .context("failed to query to db")? 
.context("no update exists with given id")?; diff --git a/bin/core/src/api/execute/procedure.rs b/bin/core/src/api/execute/procedure.rs index 075924c2e..5019aba78 100644 --- a/bin/core/src/api/execute/procedure.rs +++ b/bin/core/src/api/execute/procedure.rs @@ -50,7 +50,7 @@ fn resolve_inner( // assumes first log is already created // and will panic otherwise. update.push_simple_log( - "execute_procedure", + "Execute procedure", format!( "{}: executing procedure '{}'", muted("INFO"), @@ -80,9 +80,9 @@ fn resolve_inner( match res { Ok(_) => { update.push_simple_log( - "execution ok", + "Execution ok", format!( - "{}: the procedure has {} with no errors", + "{}: The procedure has {} with no errors", muted("INFO"), colored("completed", Color::Green) ), @@ -100,7 +100,7 @@ fn resolve_inner( // but will fail to update cache in that case. if let Ok(update_doc) = to_document(&update) { let _ = update_one_by_id( - &db_client().await.updates, + &db_client().updates, &update.id, mungos::update::Update::Set(update_doc), None, diff --git a/bin/core/src/api/execute/repo.rs b/bin/core/src/api/execute/repo.rs index 054977f1b..03e150612 100644 --- a/bin/core/src/api/execute/repo.rs +++ b/bin/core/src/api/execute/repo.rs @@ -7,7 +7,7 @@ use komodo_client::{ entities::{ alert::{Alert, AlertData, SeverityLevel}, builder::{Builder, BuilderConfig}, - komodo_timestamp, optional_string, + komodo_timestamp, permission::PermissionLevel, repo::Repo, server::Server, @@ -27,14 +27,14 @@ use resolver_api::Resolve; use tokio_util::sync::CancellationToken; use crate::{ + alert::send_alerts, helpers::{ - alert::send_alerts, builder::{cleanup_builder_instance, get_builder_periphery}, channel::repo_cancel_channel, git_token, interpolate::{ add_interp_update_log, - interpolate_variables_secrets_into_environment, + interpolate_variables_secrets_into_string, interpolate_variables_secrets_into_system_command, }, periphery_client, @@ -72,6 +72,10 @@ impl Resolve for State { 
update_update(update.clone()).await?; + if repo.config.server_id.is_empty() { + return Err(anyhow!("repo has no server attached")); + } + let git_token = git_token( &repo.config.git_provider, &repo.config.git_account, @@ -82,10 +86,6 @@ impl Resolve for State { || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account), )?; - if repo.config.server_id.is_empty() { - return Err(anyhow!("repo has no server attached")); - } - let server = resource::get::(&repo.config.server_id).await?; @@ -100,7 +100,7 @@ impl Resolve for State { .request(api::git::CloneRepo { args: (&repo).into(), git_token, - environment: repo.config.environment, + environment: repo.config.env_vars()?, env_file_path: repo.config.env_file_path, skip_secret_interp: repo.config.skip_secret_interp, replacers: secret_replacers.into_iter().collect(), @@ -156,6 +156,16 @@ impl Resolve for State { return Err(anyhow!("repo has no server attached")); } + let git_token = git_token( + &repo.config.git_provider, + &repo.config.git_account, + |https| repo.config.git_https = https, + ) + .await + .with_context( + || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. 
| {} | {}", repo.config.git_provider, repo.config.git_account), + )?; + let server = resource::get::(&repo.config.server_id).await?; @@ -168,12 +178,9 @@ impl Resolve for State { let logs = match periphery .request(api::git::PullRepo { - name: repo.name.clone(), - branch: optional_string(&repo.config.branch), - commit: optional_string(&repo.config.commit), - path: optional_string(&repo.config.path), - on_pull: repo.config.on_pull.into_option(), - environment: repo.config.environment, + args: (&repo).into(), + git_token, + environment: repo.config.env_vars()?, env_file_path: repo.config.env_file_path, skip_secret_interp: repo.config.skip_secret_interp, replacers: secret_replacers.into_iter().collect(), @@ -214,7 +221,7 @@ async fn handle_server_update_return( // but will fail to update cache in that case. if let Ok(update_doc) = to_document(&update) { let _ = update_one_by_id( - &db_client().await.updates, + &db_client().updates, &update.id, mungos::update::Update::Set(update_doc), None, @@ -229,7 +236,6 @@ async fn handle_server_update_return( #[instrument] async fn update_last_pulled_time(repo_name: &str) { let res = db_client() - .await .repos .update_one( doc! { "name": repo_name }, @@ -363,7 +369,7 @@ impl Resolve for State { .request(api::git::CloneRepo { args: (&repo).into(), git_token, - environment: repo.config.environment, + environment: repo.config.env_vars()?, env_file_path: repo.config.env_file_path, skip_secret_interp: repo.config.skip_secret_interp, replacers: secret_replacers.into_iter().collect() @@ -396,7 +402,7 @@ impl Resolve for State { update.finalize(); - let db = db_client().await; + let db = db_client(); if update.success { let _ = db @@ -473,7 +479,7 @@ async fn handle_builder_early_return( // but will fail to update cache in that case. 
if let Ok(update_doc) = to_document(&update) { let _ = update_one_by_id( - &db_client().await.updates, + &db_client().updates, &update.id, mungos::update::Update::Set(update_doc), None, @@ -511,7 +517,7 @@ pub async fn validate_cancel_repo_build( if let ExecuteRequest::CancelRepoBuild(req) = request { let repo = resource::get::(&req.repo).await?; - let db = db_client().await; + let db = db_client(); let (latest_build, latest_cancel) = tokio::try_join!( db.updates @@ -596,7 +602,7 @@ impl Resolve for State { tokio::spawn(async move { tokio::time::sleep(Duration::from_secs(60)).await; if let Err(e) = update_one_by_id( - &db_client().await.updates, + &db_client().updates, &update_id, doc! { "$set": { "status": "Complete" } }, None, @@ -621,7 +627,7 @@ async fn interpolate( let mut global_replacers = HashSet::new(); let mut secret_replacers = HashSet::new(); - interpolate_variables_secrets_into_environment( + interpolate_variables_secrets_into_string( &vars_and_secrets, &mut repo.config.environment, &mut global_replacers, diff --git a/bin/core/src/api/execute/server.rs b/bin/core/src/api/execute/server.rs index 155d2f096..33e21deb0 100644 --- a/bin/core/src/api/execute/server.rs +++ b/bin/core/src/api/execute/server.rs @@ -970,16 +970,17 @@ impl Resolve for State { let periphery = periphery_client(&server)?; - let log = match periphery.request(api::build::PruneBuilders {}).await { - Ok(log) => log, - Err(e) => Log::error( - "prune builders", - format!( - "failed to docker builder prune on server {} | {e:#?}", - server.name + let log = + match periphery.request(api::build::PruneBuilders {}).await { + Ok(log) => log, + Err(e) => Log::error( + "prune builders", + format!( + "failed to docker builder prune on server {} | {e:#?}", + server.name + ), ), - ), - }; + }; update.logs.push(log); update_cache_for_server(&server).await; @@ -1020,16 +1021,17 @@ impl Resolve for State { let periphery = periphery_client(&server)?; - let log = match 
periphery.request(api::build::PruneBuildx {}).await { - Ok(log) => log, - Err(e) => Log::error( - "prune buildx", - format!( - "failed to docker buildx prune on server {} | {e:#?}", - server.name + let log = + match periphery.request(api::build::PruneBuildx {}).await { + Ok(log) => log, + Err(e) => Log::error( + "prune buildx", + format!( + "failed to docker buildx prune on server {} | {e:#?}", + server.name + ), ), - ), - }; + }; update.logs.push(log); update_cache_for_server(&server).await; diff --git a/bin/core/src/api/execute/server_template.rs b/bin/core/src/api/execute/server_template.rs index 8d8e1fa59..5ef9fadbe 100644 --- a/bin/core/src/api/execute/server_template.rs +++ b/bin/core/src/api/execute/server_template.rs @@ -34,7 +34,6 @@ impl Resolve for State { ) -> anyhow::Result { // validate name isn't already taken by another server if db_client() - .await .servers .find_one(doc! { "name": &name @@ -62,6 +61,8 @@ impl Resolve for State { let config = match template.config { ServerTemplateConfig::Aws(config) => { let region = config.region.clone(); + let use_https = config.use_https; + let port = config.port; let instance = match launch_ec2_instance(&name, config).await { Ok(instance) => instance, @@ -82,14 +83,18 @@ impl Resolve for State { instance.ip ), ); + let protocol = if use_https { "https" } else { "http" }; PartialServerConfig { - address: format!("http://{}:8120", instance.ip).into(), + address: format!("{protocol}://{}:{port}", instance.ip) + .into(), region: region.into(), ..Default::default() } } ServerTemplateConfig::Hetzner(config) => { let datacenter = config.datacenter; + let use_https = config.use_https; + let port = config.port; let server = match launch_hetzner_server(&name, config).await { Ok(server) => server, @@ -110,8 +115,10 @@ impl Resolve for State { server.ip ), ); + let protocol = if use_https { "https" } else { "http" }; PartialServerConfig { - address: format!("http://{}:8120", server.ip).into(), + address: 
format!("{protocol}://{}:{port}", server.ip) + .into(), region: datacenter.as_ref().to_string().into(), ..Default::default() } diff --git a/bin/core/src/api/execute/stack.rs b/bin/core/src/api/execute/stack.rs index 16b1924c8..13c57d646 100644 --- a/bin/core/src/api/execute/stack.rs +++ b/bin/core/src/api/execute/stack.rs @@ -17,18 +17,18 @@ use crate::{ helpers::{ interpolate::{ add_interp_update_log, - interpolate_variables_secrets_into_environment, interpolate_variables_secrets_into_extra_args, + interpolate_variables_secrets_into_string, }, periphery_client, query::get_variables_and_secrets, - stack::{ - execute::execute_compose, get_stack_and_server, - services::extract_services_into_res, - }, update::update_update, }, monitor::update_cache_for_server, + stack::{ + execute::execute_compose, get_stack_and_server, + services::extract_services_into_res, + }, state::{action_states, db_client, State}, }; @@ -81,7 +81,7 @@ impl Resolve for State { let mut global_replacers = HashSet::new(); let mut secret_replacers = HashSet::new(); - interpolate_variables_secrets_into_environment( + interpolate_variables_secrets_into_string( &vars_and_secrets, &mut stack.config.environment, &mut global_replacers, @@ -154,6 +154,8 @@ impl Resolve for State { stack.info.latest_services.clone() }; + // This ensures to get the latest project name, + // as it may have changed since the last deploy. let project_name = stack.project_name(true); let ( @@ -203,7 +205,6 @@ impl Resolve for State { .context("failed to serialize stack info to bson")?; db_client() - .await .stacks .update_one( doc! 
{ "name": &stack.name }, diff --git a/bin/core/src/api/execute/sync.rs b/bin/core/src/api/execute/sync.rs index f7a118584..9d6136c44 100644 --- a/bin/core/src/api/execute/sync.rs +++ b/bin/core/src/api/execute/sync.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use anyhow::{anyhow, Context}; use formatting::{colored, format_serror, Color}; -use mongo_indexed::doc; use komodo_client::{ api::{execute::RunSync, write::RefreshResourceSyncPending}, entities::{ @@ -18,28 +17,27 @@ use komodo_client::{ server::Server, server_template::ServerTemplate, stack::Stack, + sync::ResourceSync, update::{Log, Update}, user::{sync_user, User}, }, }; +use mongo_indexed::doc; use mungos::{by_id::update_one_by_id, mongodb::bson::to_document}; use resolver_api::Resolve; use crate::{ - helpers::{ - query::get_id_to_tags, - sync::{ - deploy::{ - build_deploy_cache, deploy_from_cache, SyncDeployParams, - }, - resource::{ - get_updates_for_execution, AllResourcesById, ResourceSync, - }, - }, - update::update_update, - }, + helpers::{query::get_id_to_tags, update::update_update}, resource::{self, refresh_resource_sync_state_cache}, state::{db_client, State}, + sync::{ + deploy::{ + build_deploy_cache, deploy_from_cache, SyncDeployParams, + }, + execute::{get_updates_for_execution, ExecuteResourceSync}, + remote::RemoteResources, + AllResourcesById, + }, }; impl Resolve for State { @@ -54,22 +52,28 @@ impl Resolve for State { >(&sync, &user, PermissionLevel::Execute) .await?; - if sync.config.repo.is_empty() { - return Err(anyhow!("resource sync repo not configured")); - } - // Send update here for FE to recheck action state update_update(update.clone()).await?; - let (res, logs, hash, message) = - crate::helpers::sync::remote::get_remote_resources(&sync) - .await - .context("failed to get remote resources")?; + let RemoteResources { + resources, + logs, + hash, + message, + file_errors, + .. 
+ } = crate::sync::remote::get_remote_resources(&sync) + .await + .context("failed to get remote resources")?; update.logs.extend(logs); update_update(update.clone()).await?; - let resources = res?; + if !file_errors.is_empty() { + return Err(anyhow!("Found file errors. Cannot execute sync.")) + } + + let resources = resources?; let id_to_tags = get_id_to_tags(None).await?; let all_resources = AllResourcesById::load().await?; @@ -94,12 +98,15 @@ impl Resolve for State { }) .await?; + let delete = sync.config.managed || sync.config.delete; + let (servers_to_create, servers_to_update, servers_to_delete) = get_updates_for_execution::( resources.servers, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let ( @@ -108,33 +115,37 @@ impl Resolve for State { deployments_to_delete, ) = get_updates_for_execution::( resources.deployments, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let (stacks_to_create, stacks_to_update, stacks_to_delete) = get_updates_for_execution::( resources.stacks, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let (builds_to_create, builds_to_update, builds_to_delete) = get_updates_for_execution::( resources.builds, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let (repos_to_create, repos_to_update, repos_to_delete) = get_updates_for_execution::( resources.repos, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let ( @@ -143,25 +154,28 @@ impl Resolve for State { procedures_to_delete, ) = get_updates_for_execution::( resources.procedures, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let (builders_to_create, builders_to_update, builders_to_delete) = get_updates_for_execution::( resources.builders, - sync.config.delete, + delete, &all_resources, 
&id_to_tags, + &sync.config.match_tags, ) .await?; let (alerters_to_create, alerters_to_update, alerters_to_delete) = get_updates_for_execution::( resources.alerters, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let ( @@ -170,9 +184,10 @@ impl Resolve for State { server_templates_to_delete, ) = get_updates_for_execution::( resources.server_templates, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let ( @@ -181,27 +196,30 @@ impl Resolve for State { resource_syncs_to_delete, ) = get_updates_for_execution::( resources.resource_syncs, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, ) .await?; let ( variables_to_create, variables_to_update, variables_to_delete, - ) = crate::helpers::sync::variables::get_updates_for_execution( + ) = crate::sync::variables::get_updates_for_execution( resources.variables, - sync.config.delete, + // Delete doesn't work with variables when match tags are set + sync.config.match_tags.is_empty() && delete, ) .await?; let ( user_groups_to_create, user_groups_to_update, user_groups_to_delete, - ) = crate::helpers::sync::user_groups::get_updates_for_execution( + ) = crate::sync::user_groups::get_updates_for_execution( resources.user_groups, - sync.config.delete, + // Delete doesn't work with user groups when match tags are set + sync.config.match_tags.is_empty() && delete, &all_resources, ) .await?; @@ -261,7 +279,7 @@ impl Resolve for State { // No deps maybe_extend( &mut update.logs, - crate::helpers::sync::variables::run_updates( + crate::sync::variables::run_updates( variables_to_create, variables_to_update, variables_to_delete, @@ -270,7 +288,7 @@ impl Resolve for State { ); maybe_extend( &mut update.logs, - crate::helpers::sync::user_groups::run_updates( + crate::sync::user_groups::run_updates( user_groups_to_create, user_groups_to_update, user_groups_to_delete, @@ -279,7 +297,7 @@ impl Resolve 
for State { ); maybe_extend( &mut update.logs, - entities::sync::ResourceSync::run_updates( + ResourceSync::execute_sync_updates( resource_syncs_to_create, resource_syncs_to_update, resource_syncs_to_delete, @@ -288,7 +306,7 @@ impl Resolve for State { ); maybe_extend( &mut update.logs, - ServerTemplate::run_updates( + ServerTemplate::execute_sync_updates( server_templates_to_create, server_templates_to_update, server_templates_to_delete, @@ -297,7 +315,7 @@ impl Resolve for State { ); maybe_extend( &mut update.logs, - Server::run_updates( + Server::execute_sync_updates( servers_to_create, servers_to_update, servers_to_delete, @@ -306,7 +324,7 @@ impl Resolve for State { ); maybe_extend( &mut update.logs, - Alerter::run_updates( + Alerter::execute_sync_updates( alerters_to_create, alerters_to_update, alerters_to_delete, @@ -317,7 +335,7 @@ impl Resolve for State { // Dependent on server maybe_extend( &mut update.logs, - Builder::run_updates( + Builder::execute_sync_updates( builders_to_create, builders_to_update, builders_to_delete, @@ -326,7 +344,7 @@ impl Resolve for State { ); maybe_extend( &mut update.logs, - Repo::run_updates( + Repo::execute_sync_updates( repos_to_create, repos_to_update, repos_to_delete, @@ -337,7 +355,7 @@ impl Resolve for State { // Dependant on builder maybe_extend( &mut update.logs, - Build::run_updates( + Build::execute_sync_updates( builds_to_create, builds_to_update, builds_to_delete, @@ -348,7 +366,7 @@ impl Resolve for State { // Dependant on server / build maybe_extend( &mut update.logs, - Deployment::run_updates( + Deployment::execute_sync_updates( deployments_to_create, deployments_to_update, deployments_to_delete, @@ -358,7 +376,7 @@ impl Resolve for State { // stack only depends on server, but maybe will depend on build later. 
maybe_extend( &mut update.logs, - Stack::run_updates( + Stack::execute_sync_updates( stacks_to_create, stacks_to_update, stacks_to_delete, @@ -369,7 +387,7 @@ impl Resolve for State { // Dependant on everything maybe_extend( &mut update.logs, - Procedure::run_updates( + Procedure::execute_sync_updates( procedures_to_create, procedures_to_update, procedures_to_delete, @@ -380,7 +398,7 @@ impl Resolve for State { // Execute the deploy cache deploy_from_cache(deploy_cache, &mut update.logs).await; - let db = db_client().await; + let db = db_client(); if let Err(e) = update_one_by_id( &db.resource_syncs, diff --git a/bin/core/src/api/read/alert.rs b/bin/core/src/api/read/alert.rs index 67bec5e66..aaa059767 100644 --- a/bin/core/src/api/read/alert.rs +++ b/bin/core/src/api/read/alert.rs @@ -41,7 +41,7 @@ impl Resolve for State { } let alerts = find_collect( - &db_client().await.alerts, + &db_client().alerts, query, FindOptions::builder() .sort(doc! { "ts": -1 }) @@ -70,7 +70,7 @@ impl Resolve for State { GetAlert { id }: GetAlert, _: User, ) -> anyhow::Result { - find_one_by_id(&db_client().await.alerts, &id) + find_one_by_id(&db_client().alerts, &id) .await .context("failed to query db for alert")? 
.context("no alert found with given id") diff --git a/bin/core/src/api/read/alerter.rs b/bin/core/src/api/read/alerter.rs index c631df557..02bcef8cd 100644 --- a/bin/core/src/api/read/alerter.rs +++ b/bin/core/src/api/read/alerter.rs @@ -1,5 +1,4 @@ use anyhow::Context; -use mongo_indexed::Document; use komodo_client::{ api::read::*, entities::{ @@ -8,6 +7,7 @@ use komodo_client::{ user::User, }, }; +use mongo_indexed::Document; use mungos::mongodb::bson::doc; use resolver_api::Resolve; @@ -67,7 +67,6 @@ impl Resolve for State { None => Document::new(), }; let total = db_client() - .await .alerters .count_documents(query) .await diff --git a/bin/core/src/api/read/build.rs b/bin/core/src/api/read/build.rs index fc20c1242..bf80e1aec 100644 --- a/bin/core/src/api/read/build.rs +++ b/bin/core/src/api/read/build.rs @@ -145,7 +145,6 @@ impl Resolve for State { let open_ts = close_ts - 30 * ONE_DAY_MS; let mut build_updates = db_client() - .await .updates .find(doc! { "start_ts": { @@ -229,7 +228,7 @@ impl Resolve for State { } let versions = find_collect( - &db_client().await.updates, + &db_client().updates, filter, FindOptions::builder() .sort(doc! { "_id": -1 }) @@ -328,7 +327,11 @@ impl Resolve for State { .. 
} = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = format!("{host}/listener/github/build/{}", build.id); for webhook in webhooks { diff --git a/bin/core/src/api/read/builder.rs b/bin/core/src/api/read/builder.rs index 02d8d2e1f..141a1a26b 100644 --- a/bin/core/src/api/read/builder.rs +++ b/bin/core/src/api/read/builder.rs @@ -1,5 +1,4 @@ use anyhow::Context; -use mongo_indexed::Document; use komodo_client::{ api::read::*, entities::{ @@ -8,6 +7,7 @@ use komodo_client::{ user::User, }, }; +use mongo_indexed::Document; use mungos::mongodb::bson::doc; use resolver_api::Resolve; @@ -67,7 +67,6 @@ impl Resolve for State { None => Document::new(), }; let total = db_client() - .await .builders .count_documents(query) .await diff --git a/bin/core/src/api/read/deployment.rs b/bin/core/src/api/read/deployment.rs index b8cc703b0..6a4712435 100644 --- a/bin/core/src/api/read/deployment.rs +++ b/bin/core/src/api/read/deployment.rs @@ -223,14 +223,17 @@ impl Resolve for State { DeploymentState::Running => { res.running += 1; } - DeploymentState::Unknown => { - res.unknown += 1; + DeploymentState::Exited | DeploymentState::Paused => { + res.stopped += 1; } DeploymentState::NotDeployed => { res.not_deployed += 1; } + DeploymentState::Unknown => { + res.unknown += 1; + } _ => { - res.stopped += 1; + res.unhealthy += 1; } } } diff --git a/bin/core/src/api/read/mod.rs b/bin/core/src/api/read/mod.rs index 914c3aac4..edecf62b6 100644 --- a/bin/core/src/api/read/mod.rs +++ b/bin/core/src/api/read/mod.rs @@ -60,8 +60,6 @@ enum ReadRequest { GetVersion(GetVersion), #[to_string_resolver] GetCoreInfo(GetCoreInfo), - #[to_string_resolver] - ListAwsEcrLabels(ListAwsEcrLabels), ListSecrets(ListSecrets), ListGitProvidersFromConfig(ListGitProvidersFromConfig), ListDockerRegistriesFromConfig(ListDockerRegistriesFromConfig), @@ -283,12 +281,15 @@ fn core_info() -> &'static 
String { let info = GetCoreInfoResponse { title: config.title.clone(), monitoring_interval: config.monitoring_interval, - webhook_base_url: config - .webhook_base_url - .clone() - .unwrap_or_else(|| config.host.clone()), + webhook_base_url: if config.webhook_base_url.is_empty() { + config.host.clone() + } else { + config.webhook_base_url.clone() + }, transparent_mode: config.transparent_mode, ui_write_disabled: config.ui_write_disabled, + disable_confirm_dialog: config.disable_confirm_dialog, + disable_non_admin_create: config.disable_non_admin_create, github_webhook_owners: config .github_webhook_app .installations @@ -312,31 +313,6 @@ impl ResolveToString for State { } } -fn ecr_labels() -> &'static String { - static ECR_LABELS: OnceLock = OnceLock::new(); - ECR_LABELS.get_or_init(|| { - serde_json::to_string( - &core_config() - .aws_ecr_registries - .iter() - .map(|reg| reg.label.clone()) - .collect::>(), - ) - .context("failed to serialize ecr registries") - .unwrap() - }) -} - -impl ResolveToString for State { - async fn resolve_to_string( - &self, - ListAwsEcrLabels {}: ListAwsEcrLabels, - _: User, - ) -> anyhow::Result { - Ok(ecr_labels().to_string()) - } -} - impl Resolve for State { async fn resolve( &self, diff --git a/bin/core/src/api/read/permission.rs b/bin/core/src/api/read/permission.rs index 84c9d88c7..e3e493e28 100644 --- a/bin/core/src/api/read/permission.rs +++ b/bin/core/src/api/read/permission.rs @@ -22,7 +22,7 @@ impl Resolve for State { user: User, ) -> anyhow::Result { find_collect( - &db_client().await.permissions, + &db_client().permissions, doc! { "user_target.type": "User", "user_target.id": &user.id @@ -58,7 +58,7 @@ impl Resolve for State { } let (variant, id) = user_target.extract_variant_id(); find_collect( - &db_client().await.permissions, + &db_client().permissions, doc! 
{ "user_target.type": variant.as_ref(), "user_target.id": id diff --git a/bin/core/src/api/read/provider.rs b/bin/core/src/api/read/provider.rs index e808d9627..28d3735df 100644 --- a/bin/core/src/api/read/provider.rs +++ b/bin/core/src/api/read/provider.rs @@ -1,5 +1,4 @@ use anyhow::{anyhow, Context}; -use mongo_indexed::{doc, Document}; use komodo_client::{ api::read::{ GetDockerRegistryAccount, GetDockerRegistryAccountResponse, @@ -9,6 +8,7 @@ use komodo_client::{ }, entities::user::User, }; +use mongo_indexed::{doc, Document}; use mungos::{ by_id::find_one_by_id, find::find_collect, mongodb::options::FindOptions, @@ -28,7 +28,7 @@ impl Resolve for State { "Only admins can read git provider accounts" )); } - find_one_by_id(&db_client().await.git_accounts, &id) + find_one_by_id(&db_client().git_accounts, &id) .await .context("failed to query db for git provider accounts")? .context("did not find git provider account with the given id") @@ -54,7 +54,7 @@ impl Resolve for State { filter.insert("username", username); } find_collect( - &db_client().await.git_accounts, + &db_client().git_accounts, filter, FindOptions::builder() .sort(doc! { "domain": 1, "username": 1 }) @@ -76,7 +76,7 @@ impl Resolve for State { "Only admins can read docker registry accounts" )); } - find_one_by_id(&db_client().await.registry_accounts, &id) + find_one_by_id(&db_client().registry_accounts, &id) .await .context("failed to query db for docker registry accounts")? .context( @@ -104,7 +104,7 @@ impl Resolve for State { filter.insert("username", username); } find_collect( - &db_client().await.registry_accounts, + &db_client().registry_accounts, filter, FindOptions::builder() .sort(doc! { "domain": 1, "username": 1 }) diff --git a/bin/core/src/api/read/repo.rs b/bin/core/src/api/read/repo.rs index 76b284a4b..86b0d2b6a 100644 --- a/bin/core/src/api/read/repo.rs +++ b/bin/core/src/api/read/repo.rs @@ -188,7 +188,11 @@ impl Resolve for State { .. 
} = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let clone_url = format!("{host}/listener/github/repo/{}/clone", repo.id); let pull_url = diff --git a/bin/core/src/api/read/server.rs b/bin/core/src/api/read/server.rs index f8bb61497..f7ed1a8ab 100644 --- a/bin/core/src/api/read/server.rs +++ b/bin/core/src/api/read/server.rs @@ -43,8 +43,9 @@ use resolver_api::{Resolve, ResolveToString}; use tokio::sync::Mutex; use crate::{ - helpers::{periphery_client, stack::compose_container_match_regex}, + helpers::periphery_client, resource, + stack::compose_container_match_regex, state::{action_states, db_client, server_status_cache, State}, }; @@ -320,7 +321,7 @@ impl Resolve for State { } let stats = find_collect( - &db_client().await.stats, + &db_client().stats, doc! { "sid": server.id, "ts": { "$in": ts_vec }, diff --git a/bin/core/src/api/read/server_template.rs b/bin/core/src/api/read/server_template.rs index da0830bf9..15489a9e1 100644 --- a/bin/core/src/api/read/server_template.rs +++ b/bin/core/src/api/read/server_template.rs @@ -1,5 +1,4 @@ use anyhow::Context; -use mongo_indexed::Document; use komodo_client::{ api::read::*, entities::{ @@ -7,6 +6,7 @@ use komodo_client::{ user::User, }, }; +use mongo_indexed::Document; use mungos::mongodb::bson::doc; use resolver_api::Resolve; @@ -67,7 +67,6 @@ impl Resolve for State { None => Document::new(), }; let total = db_client() - .await .server_templates .count_documents(query) .await diff --git a/bin/core/src/api/read/stack.rs b/bin/core/src/api/read/stack.rs index 574bb2e2f..914cf1a27 100644 --- a/bin/core/src/api/read/stack.rs +++ b/bin/core/src/api/read/stack.rs @@ -17,8 +17,9 @@ use resolver_api::Resolve; use crate::{ config::core_config, - helpers::{periphery_client, stack::get_stack_and_server}, + helpers::periphery_client, resource, + stack::get_stack_and_server, state::{action_states, github_client, 
stack_status_cache, State}, }; @@ -236,15 +237,10 @@ impl Resolve for State { match cache.get(&stack.id).await.unwrap_or_default().curr.state { StackState::Running => res.running += 1, - StackState::Paused => res.paused += 1, - StackState::Stopped => res.stopped += 1, - StackState::Restarting => res.restarting += 1, - StackState::Created => res.created += 1, - StackState::Removing => res.removing += 1, - StackState::Dead => res.dead += 1, - StackState::Unhealthy => res.unhealthy += 1, + StackState::Stopped | StackState::Paused => res.stopped += 1, StackState::Down => res.down += 1, StackState::Unknown => res.unknown += 1, + _ => res.unhealthy += 1, } } @@ -311,7 +307,11 @@ impl Resolve for State { .. } = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let refresh_url = format!("{host}/listener/github/stack/{}/refresh", stack.id); let deploy_url = diff --git a/bin/core/src/api/read/sync.rs b/bin/core/src/api/read/sync.rs index 80a7c9c6e..c35c00948 100644 --- a/bin/core/src/api/read/sync.rs +++ b/bin/core/src/api/read/sync.rs @@ -5,8 +5,8 @@ use komodo_client::{ config::core::CoreConfig, permission::PermissionLevel, sync::{ - PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState, - ResourceSyncListItem, ResourceSyncState, + ResourceSync, ResourceSyncActionState, ResourceSyncListItem, + ResourceSyncState, }, user::User, }, @@ -100,17 +100,18 @@ impl Resolve for State { for resource_sync in resource_syncs { res.total += 1; - match resource_sync.info.pending.data { - PendingSyncUpdatesData::Ok(data) => { - if !data.no_updates() { - res.pending += 1; - continue; - } - } - PendingSyncUpdatesData::Err(_) => { - res.failed += 1; - continue; - } + if !(resource_sync.info.pending_deploy.to_deploy == 0 + && resource_sync.info.resource_updates.is_empty() + && resource_sync.info.variable_updates.is_empty() + && resource_sync.info.user_group_updates.is_empty()) + 
{ + res.pending += 1; + continue; + } else if resource_sync.info.pending_error.is_some() + || !resource_sync.info.remote_errors.is_empty() + { + res.failed += 1; + continue; } match ( @@ -201,7 +202,11 @@ impl Resolve for State { .. } = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let refresh_url = format!("{host}/listener/github/sync/{}/refresh", sync.id); let sync_url = diff --git a/bin/core/src/api/read/tag.rs b/bin/core/src/api/read/tag.rs index ba4fc3e2a..be1d032da 100644 --- a/bin/core/src/api/read/tag.rs +++ b/bin/core/src/api/read/tag.rs @@ -1,9 +1,9 @@ use anyhow::Context; -use mongo_indexed::doc; use komodo_client::{ api::read::{GetTag, ListTags}, entities::{tag::Tag, user::User}, }; +use mongo_indexed::doc; use mungos::{find::find_collect, mongodb::options::FindOptions}; use resolver_api::Resolve; @@ -29,7 +29,7 @@ impl Resolve for State { _: User, ) -> anyhow::Result> { find_collect( - &db_client().await.tags, + &db_client().tags, query, FindOptions::builder().sort(doc! 
{ "name": 1 }).build(), ) diff --git a/bin/core/src/api/read/toml.rs b/bin/core/src/api/read/toml.rs index f58d4b0fb..312a6c1d4 100644 --- a/bin/core/src/api/read/toml.rs +++ b/bin/core/src/api/read/toml.rs @@ -2,48 +2,40 @@ use std::collections::HashMap; use anyhow::Context; use komodo_client::{ - api::{ - execute::Execution, - read::{ - ExportAllResourcesToToml, ExportAllResourcesToTomlResponse, - ExportResourcesToToml, ExportResourcesToTomlResponse, - GetUserGroup, ListUserTargetPermissions, - }, + api::read::{ + ExportAllResourcesToToml, ExportAllResourcesToTomlResponse, + ExportResourcesToToml, ExportResourcesToTomlResponse, + GetUserGroup, ListUserTargetPermissions, }, entities::{ alerter::Alerter, build::Build, - builder::{Builder, BuilderConfig}, - deployment::{ - conversions_to_string, term_signal_labels_to_string, - Deployment, DeploymentImage, - }, - environment_vars_to_string, + builder::Builder, + deployment::Deployment, permission::{PermissionLevel, UserTarget}, procedure::Procedure, repo::Repo, - resource::{Resource, ResourceQuery}, + resource::ResourceQuery, server::Server, server_template::ServerTemplate, stack::Stack, sync::ResourceSync, - toml::{ - PermissionToml, ResourceToml, ResourcesToml, UserGroupToml, - }, + toml::{PermissionToml, ResourcesToml, UserGroupToml}, user::User, ResourceTarget, }, }; use mungos::find::find_collect; -use ordered_hash_map::OrderedHashMap; -use partial_derive2::PartialDiff; use resolver_api::Resolve; -use serde_json::Value; use crate::{ - helpers::query::get_user_user_group_ids, - resource::{self, KomodoResource}, + helpers::query::{get_id_to_tags, get_user_user_group_ids}, + resource, state::{db_client, State}, + sync::{ + toml::{convert_resource, ToToml, TOML_PRETTY_OPTIONS}, + AllResourcesById, + }, }; impl Resolve for State { @@ -136,17 +128,18 @@ impl Resolve for State { .map(|resource| ResourceTarget::ServerTemplate(resource.id)), ); targets.extend( - resource::list_for_user::( - 
ResourceQuery::builder().tags(tags).build(), + resource::list_full_for_user::( + ResourceQuery::builder().tags(tags.clone()).build(), &user, ) .await? .into_iter() + // These will already be filtered by [ExportResourcesToToml] .map(|resource| ResourceTarget::ResourceSync(resource.id)), ); - let user_groups = if user.admin { - find_collect(&db_client().await.user_groups, None, None) + let user_groups = if user.admin && tags.is_empty() { + find_collect(&db_client().user_groups, None, None) .await .context("failed to query db for user groups")? .into_iter() @@ -161,7 +154,7 @@ impl Resolve for State { ExportResourcesToToml { targets, user_groups, - include_variables: true, + include_variables: tags.is_empty(), }, user, ) @@ -180,9 +173,8 @@ impl Resolve for State { user: User, ) -> anyhow::Result { let mut res = ResourcesToml::default(); - let names = ResourceNames::new() - .await - .context("failed to init resource name maps")?; + let all = AllResourcesById::load().await?; + let id_to_tags = get_id_to_tags(None).await?; for target in targets { match target { ResourceTarget::Alerter(id) => { @@ -194,7 +186,7 @@ impl Resolve for State { .await?; res .alerters - .push(convert_resource::(alerter, &names.tags)) + .push(convert_resource::(alerter, &id_to_tags)) } ResourceTarget::ResourceSync(id) => { let sync = resource::get_check_permissions::( @@ -203,9 +195,15 @@ impl Resolve for State { PermissionLevel::Read, ) .await?; - res - .resource_syncs - .push(convert_resource::(sync, &names.tags)) + if sync.config.file_contents.is_empty() + && (sync.config.files_on_host + || !sync.config.repo.is_empty()) + { + res.resource_syncs.push(convert_resource::( + sync, + &id_to_tags, + )) + } } ResourceTarget::ServerTemplate(id) => { let template = resource::get_check_permissions::< @@ -215,7 +213,7 @@ impl Resolve for State { ) .await?; res.server_templates.push( - convert_resource::(template, &names.tags), + convert_resource::(template, &id_to_tags), ) } ResourceTarget::Server(id) 
=> { @@ -227,7 +225,7 @@ impl Resolve for State { .await?; res .servers - .push(convert_resource::(server, &names.tags)) + .push(convert_resource::(server, &id_to_tags)) } ResourceTarget::Builder(id) => { let mut builder = @@ -237,18 +235,10 @@ impl Resolve for State { PermissionLevel::Read, ) .await?; - // replace server id of builder - if let BuilderConfig::Server(config) = &mut builder.config { - config.server_id.clone_from( - names - .servers - .get(&config.server_id) - .unwrap_or(&String::new()), - ) - } + Builder::replace_ids(&mut builder, &all); res .builders - .push(convert_resource::(builder, &names.tags)) + .push(convert_resource::(builder, &id_to_tags)) } ResourceTarget::Build(id) => { let mut build = resource::get_check_permissions::( @@ -257,16 +247,10 @@ impl Resolve for State { PermissionLevel::Read, ) .await?; - // replace builder id of build - build.config.builder_id.clone_from( - names - .builders - .get(&build.config.builder_id) - .unwrap_or(&String::new()), - ); + Build::replace_ids(&mut build, &all); res .builds - .push(convert_resource::(build, &names.tags)) + .push(convert_resource::(build, &id_to_tags)) } ResourceTarget::Deployment(id) => { let mut deployment = resource::get_check_permissions::< @@ -275,24 +259,10 @@ impl Resolve for State { &id, &user, PermissionLevel::Read ) .await?; - // replace deployment server with name - deployment.config.server_id.clone_from( - names - .servers - .get(&deployment.config.server_id) - .unwrap_or(&String::new()), - ); - // replace deployment build id with name - if let DeploymentImage::Build { build_id, .. 
} = - &mut deployment.config.image - { - build_id.clone_from( - names.builds.get(build_id).unwrap_or(&String::new()), - ); - } + Deployment::replace_ids(&mut deployment, &all); res.deployments.push(convert_resource::( deployment, - &names.tags, + &id_to_tags, )) } ResourceTarget::Repo(id) => { @@ -302,21 +272,8 @@ impl Resolve for State { PermissionLevel::Read, ) .await?; - // replace repo server with name - repo.config.server_id.clone_from( - names - .servers - .get(&repo.config.server_id) - .unwrap_or(&String::new()), - ); - // replace repo builder with name - repo.config.builder_id.clone_from( - names - .builders - .get(&repo.config.builder_id) - .unwrap_or(&String::new()), - ); - res.repos.push(convert_resource::(repo, &names.tags)) + Repo::replace_ids(&mut repo, &all); + res.repos.push(convert_resource::(repo, &id_to_tags)) } ResourceTarget::Stack(id) => { let mut stack = resource::get_check_permissions::( @@ -325,35 +282,35 @@ impl Resolve for State { PermissionLevel::Read, ) .await?; - // replace stack server with name - stack.config.server_id.clone_from( - names - .servers - .get(&stack.config.server_id) - .unwrap_or(&String::new()), - ); + Stack::replace_ids(&mut stack, &all); res .stacks - .push(convert_resource::(stack, &names.tags)) + .push(convert_resource::(stack, &id_to_tags)) } ResourceTarget::Procedure(id) => { - add_procedure(&id, &mut res, &user, &names) - .await - .with_context(|| { - format!("failed to add procedure {id}") - })?; + let mut procedure = resource::get_check_permissions::< + Procedure, + >( + &id, &user, PermissionLevel::Read + ) + .await?; + Procedure::replace_ids(&mut procedure, &all); + res.procedures.push(convert_resource::( + procedure, + &id_to_tags, + )); } ResourceTarget::System(_) => continue, }; } - add_user_groups(user_groups, &mut res, &names, &user) + add_user_groups(user_groups, &mut res, &all, &user) .await .context("failed to add user groups")?; if include_variables { res.variables = - 
find_collect(&db_client().await.variables, None, None) + find_collect(&db_client().variables, None, None) .await .context("failed to get variables from db")? .into_iter() @@ -366,310 +323,20 @@ impl Resolve for State { .collect(); } - let toml = serialize_resources_toml(&res) + let toml = serialize_resources_toml(res) .context("failed to serialize resources to toml")?; Ok(ExportResourcesToTomlResponse { toml }) } } -async fn add_procedure( - id: &str, - res: &mut ResourcesToml, - user: &User, - names: &ResourceNames, -) -> anyhow::Result<()> { - let mut procedure = resource::get_check_permissions::( - id, - user, - PermissionLevel::Read, - ) - .await?; - - for stage in &mut procedure.config.stages { - for execution in &mut stage.executions { - match &mut execution.execution { - Execution::RunProcedure(exec) => exec.procedure.clone_from( - names - .procedures - .get(&exec.procedure) - .unwrap_or(&String::new()), - ), - Execution::RunBuild(exec) => exec.build.clone_from( - names.builds.get(&exec.build).unwrap_or(&String::new()), - ), - Execution::CancelBuild(exec) => exec.build.clone_from( - names.builds.get(&exec.build).unwrap_or(&String::new()), - ), - Execution::Deploy(exec) => exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ), - Execution::StartDeployment(exec) => { - exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ) - } - Execution::RestartDeployment(exec) => { - exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ) - } - Execution::PauseDeployment(exec) => { - exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ) - } - Execution::UnpauseDeployment(exec) => { - exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ) - } - Execution::StopDeployment(exec) => { - 
exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ) - } - Execution::DestroyDeployment(exec) => { - exec.deployment.clone_from( - names - .deployments - .get(&exec.deployment) - .unwrap_or(&String::new()), - ) - } - Execution::CloneRepo(exec) => exec.repo.clone_from( - names.repos.get(&exec.repo).unwrap_or(&String::new()), - ), - Execution::PullRepo(exec) => exec.repo.clone_from( - names.repos.get(&exec.repo).unwrap_or(&String::new()), - ), - Execution::BuildRepo(exec) => exec.repo.clone_from( - names.repos.get(&exec.repo).unwrap_or(&String::new()), - ), - Execution::CancelRepoBuild(exec) => exec.repo.clone_from( - names.repos.get(&exec.repo).unwrap_or(&String::new()), - ), - Execution::StartContainer(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::RestartContainer(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::PauseContainer(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::UnpauseContainer(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::StopContainer(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::DestroyContainer(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::StartAllContainers(exec) => { - exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ) - } - Execution::RestartAllContainers(exec) => { - exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ) - } - Execution::PauseAllContainers(exec) => { - exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ) - } - Execution::UnpauseAllContainers(exec) => { - exec.server.clone_from( - 
names.servers.get(&exec.server).unwrap_or(&String::new()), - ) - } - Execution::StopAllContainers(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::PruneContainers(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::DeleteNetwork(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::PruneNetworks(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::DeleteImage(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::PruneImages(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::DeleteVolume(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::PruneVolumes(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::PruneDockerBuilders(exec) => { - exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ) - } - Execution::PruneBuildx(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::PruneSystem(exec) => exec.server.clone_from( - names.servers.get(&exec.server).unwrap_or(&String::new()), - ), - Execution::RunSync(exec) => exec.sync.clone_from( - names.syncs.get(&exec.sync).unwrap_or(&String::new()), - ), - Execution::DeployStack(exec) => exec.stack.clone_from( - names.stacks.get(&exec.stack).unwrap_or(&String::new()), - ), - Execution::StartStack(exec) => exec.stack.clone_from( - names.stacks.get(&exec.stack).unwrap_or(&String::new()), - ), - Execution::RestartStack(exec) => exec.stack.clone_from( - names.stacks.get(&exec.stack).unwrap_or(&String::new()), - ), - Execution::PauseStack(exec) => exec.stack.clone_from( - 
names.stacks.get(&exec.stack).unwrap_or(&String::new()), - ), - Execution::UnpauseStack(exec) => exec.stack.clone_from( - names.stacks.get(&exec.stack).unwrap_or(&String::new()), - ), - Execution::StopStack(exec) => exec.stack.clone_from( - names.stacks.get(&exec.stack).unwrap_or(&String::new()), - ), - Execution::DestroyStack(exec) => exec.stack.clone_from( - names.stacks.get(&exec.stack).unwrap_or(&String::new()), - ), - Execution::Sleep(_) | Execution::None(_) => {} - } - } - } - - res - .procedures - .push(convert_resource::(procedure, &names.tags)); - Ok(()) -} - -struct ResourceNames { - tags: HashMap, - servers: HashMap, - builders: HashMap, - builds: HashMap, - repos: HashMap, - deployments: HashMap, - procedures: HashMap, - syncs: HashMap, - stacks: HashMap, - alerters: HashMap, - templates: HashMap, -} - -impl ResourceNames { - async fn new() -> anyhow::Result { - let db = db_client().await; - Ok(ResourceNames { - tags: find_collect(&db.tags, None, None) - .await - .context("failed to get all tags")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - servers: find_collect(&db.servers, None, None) - .await - .context("failed to get all servers")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - builders: find_collect(&db.builders, None, None) - .await - .context("failed to get all builders")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - builds: find_collect(&db.builds, None, None) - .await - .context("failed to get all builds")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - repos: find_collect(&db.repos, None, None) - .await - .context("failed to get all repos")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - deployments: find_collect(&db.deployments, None, None) - .await - .context("failed to get all deployments")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - procedures: find_collect(&db.procedures, None, None) - .await - .context("failed to get all procedures")? 
- .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - syncs: find_collect(&db.resource_syncs, None, None) - .await - .context("failed to get all resource syncs")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - stacks: find_collect(&db.stacks, None, None) - .await - .context("failed to get all stacks")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - alerters: find_collect(&db.alerters, None, None) - .await - .context("failed to get all alerters")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - templates: find_collect(&db.server_templates, None, None) - .await - .context("failed to get all server templates")? - .into_iter() - .map(|t| (t.id, t.name)) - .collect::>(), - }) - } -} - async fn add_user_groups( user_groups: Vec, res: &mut ResourcesToml, - names: &ResourceNames, + all: &AllResourcesById, user: &User, ) -> anyhow::Result<()> { - let db = db_client().await; + let db = db_client(); let usernames = find_collect(&db.users, None, None) .await? 
@@ -697,36 +364,74 @@ async fn add_user_groups( .map(|mut permission| { match &mut permission.resource_target { ResourceTarget::Build(id) => { - *id = names.builds.get(id).cloned().unwrap_or_default() + *id = all + .builds + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::Builder(id) => { - *id = names.builders.get(id).cloned().unwrap_or_default() + *id = all + .builders + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::Deployment(id) => { - *id = - names.deployments.get(id).cloned().unwrap_or_default() + *id = all + .deployments + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::Server(id) => { - *id = names.servers.get(id).cloned().unwrap_or_default() + *id = all + .servers + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::Repo(id) => { - *id = names.repos.get(id).cloned().unwrap_or_default() + *id = all + .repos + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::Alerter(id) => { - *id = names.alerters.get(id).cloned().unwrap_or_default() + *id = all + .alerters + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::Procedure(id) => { - *id = - names.procedures.get(id).cloned().unwrap_or_default() + *id = all + .procedures + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::ServerTemplate(id) => { - *id = names.templates.get(id).cloned().unwrap_or_default() + *id = all + .templates + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::ResourceSync(id) => { - *id = names.syncs.get(id).cloned().unwrap_or_default() + *id = all + .syncs + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::Stack(id) => { - *id = names.stacks.get(id).cloned().unwrap_or_default() + *id = all + .stacks + .get(id) + .map(|r| r.name.clone()) + .unwrap_or_default() } ResourceTarget::System(_) => {} } @@ -750,288 +455,112 @@ async fn add_user_groups( 
Ok(()) } -fn convert_resource( - resource: Resource, - tag_names: &HashMap, -) -> ResourceToml { - // This makes sure all non-necessary (defaulted) fields don't make it into final toml - let partial: R::PartialConfig = resource.config.into(); - let config = R::Config::default().minimize_partial(partial); - ResourceToml { - name: resource.name, - tags: resource - .tags - .iter() - .filter_map(|t| tag_names.get(t).cloned()) - .collect(), - description: resource.description, - deploy: false, - after: Default::default(), - latest_hash: false, - config, - } -} - fn serialize_resources_toml( - resources: &ResourcesToml, + resources: ResourcesToml, ) -> anyhow::Result { - let mut res = String::new(); + let mut toml = String::new(); - let options = toml_pretty::Options::default() - .tab(" ") - .skip_empty_string(true) - .max_inline_array_length(30); - - for server in &resources.servers { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for server in resources.servers { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[server]]\n"); - res.push_str( - &toml_pretty::to_string(&server, options) - .context("failed to serialize servers to toml")?, - ); + toml.push_str("[[server]]\n"); + Server::push_to_toml_string(server, &mut toml)?; } - for deployment in &resources.deployments { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for stack in resources.stacks { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[deployment]]\n"); - let mut parsed: OrderedHashMap = - serde_json::from_str(&serde_json::to_string(&deployment)?)?; - let config = parsed - .get_mut("config") - .context("deployment has no config?")? - .as_object_mut() - .context("config is not object?")?; - if let Some(DeploymentImage::Build { version, .. }) = - &deployment.config.image - { - let image = config - .get_mut("image") - .context("deployment has no image")? - .get_mut("params") - .context("deployment image has no params")? 
- .as_object_mut() - .context("deployment image params is not object")?; - if version.is_none() { - image.remove("version"); - } else { - image.insert( - "version".to_string(), - Value::String(version.to_string()), - ); - } - } - if let Some(term_signal_labels) = - &deployment.config.term_signal_labels - { - config.insert( - "term_signal_labels".to_string(), - Value::String(term_signal_labels_to_string( - term_signal_labels, - )), - ); - } - if let Some(ports) = &deployment.config.ports { - config.insert( - "ports".to_string(), - Value::String(conversions_to_string(ports)), - ); - } - if let Some(volumes) = &deployment.config.volumes { - config.insert( - "volumes".to_string(), - Value::String(conversions_to_string(volumes)), - ); - } - if let Some(environment) = &deployment.config.environment { - config.insert( - "environment".to_string(), - Value::String(environment_vars_to_string(environment)), - ); - } - if let Some(labels) = &deployment.config.labels { - config.insert( - "labels".to_string(), - Value::String(environment_vars_to_string(labels)), - ); - } - res.push_str( - &toml_pretty::to_string(&parsed, options) - .context("failed to serialize deployments to toml")?, - ); + toml.push_str("[[stack]]\n"); + Stack::push_to_toml_string(stack, &mut toml)?; } - for stack in &resources.stacks { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for deployment in resources.deployments { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[stack]]\n"); - let mut parsed: OrderedHashMap = - serde_json::from_str(&serde_json::to_string(&stack)?)?; - let config = parsed - .get_mut("config") - .context("stack has no config?")? 
- .as_object_mut() - .context("config is not object?")?; - if let Some(environment) = &stack.config.environment { - config.insert( - "environment".to_string(), - Value::String(environment_vars_to_string(environment)), - ); - } - res.push_str( - &toml_pretty::to_string(&parsed, options) - .context("failed to serialize stacks to toml")?, - ); + toml.push_str("[[deployment]]\n"); + Deployment::push_to_toml_string(deployment, &mut toml)?; } - for build in &resources.builds { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for build in resources.builds { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - let mut parsed: OrderedHashMap = - serde_json::from_str(&serde_json::to_string(&build)?)?; - let config = parsed - .get_mut("config") - .context("build has no config?")? - .as_object_mut() - .context("config is not object?")?; - if let Some(version) = &build.config.version { - config.insert( - "version".to_string(), - Value::String(version.to_string()), - ); - } - if let Some(build_args) = &build.config.build_args { - config.insert( - "build_args".to_string(), - Value::String(environment_vars_to_string(build_args)), - ); - } - if let Some(labels) = &build.config.labels { - config.insert( - "labels".to_string(), - Value::String(environment_vars_to_string(labels)), - ); - } - res.push_str("[[build]]\n"); - res.push_str( - &toml_pretty::to_string(&parsed, options) - .context("failed to serialize builds to toml")?, - ); + toml.push_str("[[build]]\n"); + Build::push_to_toml_string(build, &mut toml)?; } - for repo in &resources.repos { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for repo in resources.repos { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[repo]]\n"); - res.push_str( - &toml_pretty::to_string(&repo, options) - .context("failed to serialize repos to toml")?, - ); + toml.push_str("[[repo]]\n"); + Repo::push_to_toml_string(repo, &mut toml)?; } - for procedure in &resources.procedures { - if !res.is_empty() { - 
res.push_str("\n\n##\n\n"); - } - let mut parsed: OrderedHashMap = - serde_json::from_str(&serde_json::to_string(&procedure)?)?; - let config = parsed - .get_mut("config") - .context("procedure has no config?")? - .as_object_mut() - .context("config is not object?")?; - - let stages = config - .remove("stages") - .context("procedure config has no stages")?; - let stages = stages.as_array().context("stages is not array")?; - - res.push_str("[[procedure]]\n"); - res.push_str( - &toml_pretty::to_string(&parsed, options) - .context("failed to serialize procedures to toml")?, - ); - - for stage in stages { - res.push_str("\n\n[[procedure.config.stage]]\n"); - res.push_str( - &toml_pretty::to_string(stage, options) - .context("failed to serialize procedures to toml")?, - ); + for procedure in resources.procedures { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } + toml.push_str("[[procedure]]\n"); + Procedure::push_to_toml_string(procedure, &mut toml)?; } - for alerter in &resources.alerters { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for alerter in resources.alerters { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[alerter]]\n"); - res.push_str( - &toml_pretty::to_string(&alerter, options) - .context("failed to serialize alerters to toml")?, - ); + toml.push_str("[[alerter]]\n"); + Alerter::push_to_toml_string(alerter, &mut toml)?; } - for builder in &resources.builders { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for builder in resources.builders { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[builder]]\n"); - res.push_str( - &toml_pretty::to_string(&builder, options) - .context("failed to serialize builders to toml")?, - ); + toml.push_str("[[builder]]\n"); + Builder::push_to_toml_string(builder, &mut toml)?; } - for server_template in &resources.server_templates { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for server_template in resources.server_templates { + if 
!toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[server_template]]\n"); - res.push_str( - &toml_pretty::to_string(&server_template, options) - .context("failed to serialize server_templates to toml")?, - ); + toml.push_str("[[server_template]]\n"); + ServerTemplate::push_to_toml_string(server_template, &mut toml)?; } - for resource_sync in &resources.resource_syncs { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + for resource_sync in resources.resource_syncs { + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[resource_sync]]\n"); - res.push_str( - &toml_pretty::to_string(&resource_sync, options) - .context("failed to serialize resource_syncs to toml")?, - ); + toml.push_str("[[resource_sync]]\n"); + ResourceSync::push_to_toml_string(resource_sync, &mut toml)?; } for variable in &resources.variables { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[variable]]\n"); - res.push_str( - &toml_pretty::to_string(&variable, options) + toml.push_str("[[variable]]\n"); + toml.push_str( + &toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS) .context("failed to serialize variables to toml")?, ); } for user_group in &resources.user_groups { - if !res.is_empty() { - res.push_str("\n\n##\n\n"); + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); } - res.push_str("[[user_group]]\n"); - res.push_str( - &toml_pretty::to_string(&user_group, options) + toml.push_str("[[user_group]]\n"); + toml.push_str( + &toml_pretty::to_string(user_group, TOML_PRETTY_OPTIONS) .context("failed to serialize user_groups to toml")?, ); } - Ok(res) + Ok(toml) } diff --git a/bin/core/src/api/read/update.rs b/bin/core/src/api/read/update.rs index 9aa308246..4cf949509 100644 --- a/bin/core/src/api/read/update.rs +++ b/bin/core/src/api/read/update.rs @@ -164,16 +164,15 @@ impl Resolve for State { query.into() }; - let usernames = - find_collect(&db_client().await.users, 
None, None) - .await - .context("failed to pull users from db")? - .into_iter() - .map(|u| (u.id, u.username)) - .collect::>(); + let usernames = find_collect(&db_client().users, None, None) + .await + .context("failed to pull users from db")? + .into_iter() + .map(|u| (u.id, u.username)) + .collect::>(); let updates = find_collect( - &db_client().await.updates, + &db_client().updates, query, FindOptions::builder() .sort(doc! { "start_ts": -1 }) @@ -224,7 +223,7 @@ impl Resolve for State { GetUpdate { id }: GetUpdate, user: User, ) -> anyhow::Result { - let update = find_one_by_id(&db_client().await.updates, &id) + let update = find_one_by_id(&db_client().updates, &id) .await .context("failed to query to db")? .context("no update exists with given id")?; diff --git a/bin/core/src/api/read/user.rs b/bin/core/src/api/read/user.rs index f38c69cb5..3bb944359 100644 --- a/bin/core/src/api/read/user.rs +++ b/bin/core/src/api/read/user.rs @@ -26,7 +26,7 @@ impl Resolve for State { GetUsername { user_id }: GetUsername, _: User, ) -> anyhow::Result { - let user = find_one_by_id(&db_client().await.users, &user_id) + let user = find_one_by_id(&db_client().users, &user_id) .await .context("failed at mongo query for user")? .context("no user found with id")?; @@ -67,7 +67,7 @@ impl Resolve for State { return Err(anyhow!("this route is only accessable by admins")); } let mut users = find_collect( - &db_client().await.users, + &db_client().users, None, FindOptions::builder().sort(doc! { "username": 1 }).build(), ) @@ -85,7 +85,7 @@ impl Resolve for State { user: User, ) -> anyhow::Result { let api_keys = find_collect( - &db_client().await.api_keys, + &db_client().api_keys, doc! { "user_id": &user.id }, FindOptions::builder().sort(doc! { "name": 1 }).build(), ) @@ -117,7 +117,7 @@ impl Resolve for State { return Err(anyhow!("Given user is not service user")); }; let api_keys = find_collect( - &db_client().await.api_keys, + &db_client().api_keys, doc! 
{ "user_id": &user.id }, None, ) diff --git a/bin/core/src/api/read/user_group.rs b/bin/core/src/api/read/user_group.rs index 4cb3aabde..363332eaa 100644 --- a/bin/core/src/api/read/user_group.rs +++ b/bin/core/src/api/read/user_group.rs @@ -35,7 +35,6 @@ impl Resolve for State { filter.insert("users", &user.id); } db_client() - .await .user_groups .find_one(filter) .await @@ -55,7 +54,7 @@ impl Resolve for State { filter.insert("users", &user.id); } find_collect( - &db_client().await.user_groups, + &db_client().user_groups, filter, FindOptions::builder().sort(doc! { "name": 1 }).build(), ) diff --git a/bin/core/src/api/read/variable.rs b/bin/core/src/api/read/variable.rs index 90acfc77b..9fa42c517 100644 --- a/bin/core/src/api/read/variable.rs +++ b/bin/core/src/api/read/variable.rs @@ -1,5 +1,4 @@ use anyhow::Context; -use mongo_indexed::doc; use komodo_client::{ api::read::{ GetVariable, GetVariableResponse, ListVariables, @@ -7,6 +6,7 @@ use komodo_client::{ }, entities::user::User, }; +use mongo_indexed::doc; use mungos::{find::find_collect, mongodb::options::FindOptions}; use resolver_api::Resolve; @@ -37,7 +37,7 @@ impl Resolve for State { user: User, ) -> anyhow::Result { let variables = find_collect( - &db_client().await.variables, + &db_client().variables, None, FindOptions::builder().sort(doc! { "name": 1 }).build(), ) diff --git a/bin/core/src/api/user.rs b/bin/core/src/api/user.rs index c6372dfde..8d6bd527c 100644 --- a/bin/core/src/api/user.rs +++ b/bin/core/src/api/user.rs @@ -103,7 +103,7 @@ impl Resolve for State { } }; update_one_by_id( - &db_client().await.users, + &db_client().users, &user.id, mungos::update::Update::Set(update), None, @@ -129,7 +129,7 @@ impl Resolve for State { user: User, ) -> anyhow::Result { update_one_by_id( - &db_client().await.users, + &db_client().users, &user.id, mungos::update::Update::Set(doc! 
{ "last_update_view": komodo_timestamp() @@ -172,7 +172,6 @@ impl Resolve for State { expires, }; db_client() - .await .api_keys .insert_one(api_key) .await @@ -192,7 +191,7 @@ impl Resolve for State { DeleteApiKey { key }: DeleteApiKey, user: User, ) -> anyhow::Result { - let client = db_client().await; + let client = db_client(); let key = client .api_keys .find_one(doc! { "key": &key }) diff --git a/bin/core/src/api/write/build.rs b/bin/core/src/api/write/build.rs index 397004ca5..72a83857c 100644 --- a/bin/core/src/api/write/build.rs +++ b/bin/core/src/api/write/build.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, Context}; -use mongo_indexed::doc; +use git::GitRes; use komodo_client::{ api::write::*, entities::{ @@ -10,6 +10,7 @@ use komodo_client::{ CloneArgs, NoData, }, }; +use mongo_indexed::doc; use mungos::mongodb::bson::to_document; use octorust::types::{ ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig, @@ -18,7 +19,7 @@ use resolver_api::Resolve; use crate::{ config::core_config, - helpers::{git_token, random_string}, + helpers::git_token, resource, state::{db_client, github_client, State}, }; @@ -96,40 +97,40 @@ impl Resolve for State { ) .await?; - if build.config.repo.is_empty() { + if build.config.repo.is_empty() + || build.config.git_provider.is_empty() + { // Nothing to do here - return Ok(NoData {}) + return Ok(NoData {}); } let config = core_config(); - let repo_dir = config.repo_directory.join(random_string(10)); let mut clone_args: CloneArgs = (&build).into(); + let repo_path = + clone_args.unique_path(&core_config().repo_directory)?; + clone_args.destination = Some(repo_path.display().to_string()); // Don't want to run these on core. 
clone_args.on_clone = None; clone_args.on_pull = None; - clone_args.destination = Some(repo_dir.display().to_string()); - let access_token = match (&clone_args.account, &clone_args.provider) - { - (None, _) => None, - (Some(_), None) => { - return Err(anyhow!( - "Account is configured, but provider is empty" - )) - } - (Some(username), Some(provider)) => { - git_token(provider, username, |https| { + let access_token = if let Some(username) = &clone_args.account { + git_token(&clone_args.provider, username, |https| { clone_args.https = https }) .await .with_context( - || format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"), + || format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider), )? - } + } else { + None }; - let (_, latest_hash, latest_message, _) = git::clone( + let GitRes { + hash: latest_hash, + message: latest_message, + .. + } = git::pull_or_clone( clone_args, &config.repo_directory, access_token, @@ -153,7 +154,6 @@ impl Resolve for State { .context("failed to serialize build info to bson")?; db_client() - .await .builds .update_one( doc! { "name": &build.name }, @@ -162,12 +162,6 @@ impl Resolve for State { .await .context("failed to update build info on db")?; - if repo_dir.exists() { - if let Err(e) = std::fs::remove_dir_all(&repo_dir) { - warn!("failed to remove build cache update repo directory | {e:?}") - } - } - Ok(NoData {}) } } @@ -232,7 +226,11 @@ impl Resolve for State { &build.config.webhook_secret }; - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = format!("{host}/listener/github/build/{}", build.id); for webhook in webhooks { @@ -338,7 +336,11 @@ impl Resolve for State { .. 
} = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = format!("{host}/listener/github/build/{}", build.id); for webhook in webhooks { diff --git a/bin/core/src/api/write/deployment.rs b/bin/core/src/api/write/deployment.rs index c7dea9cca..a6e94ba1b 100644 --- a/bin/core/src/api/write/deployment.rs +++ b/bin/core/src/api/write/deployment.rs @@ -116,7 +116,7 @@ impl Resolve for State { make_update(&deployment, Operation::RenameDeployment, &user); update_one_by_id( - &db_client().await.deployments, + &db_client().deployments, &deployment.id, mungos::update::Update::Set( doc! { "name": &name, "updated_at": komodo_timestamp() }, diff --git a/bin/core/src/api/write/mod.rs b/bin/core/src/api/write/mod.rs index ab22bece6..62a4dcc1a 100644 --- a/bin/core/src/api/write/mod.rs +++ b/bin/core/src/api/write/mod.rs @@ -55,6 +55,7 @@ pub enum WriteRequest { SetUsersInUserGroup(SetUsersInUserGroup), // ==== PERMISSIONS ==== + UpdateUserAdmin(UpdateUserAdmin), UpdateUserBasePermissions(UpdateUserBasePermissions), UpdatePermissionOnResourceType(UpdatePermissionOnResourceType), UpdatePermissionOnTarget(UpdatePermissionOnTarget), @@ -124,6 +125,7 @@ pub enum WriteRequest { DeleteResourceSync(DeleteResourceSync), UpdateResourceSync(UpdateResourceSync), RefreshResourceSyncPending(RefreshResourceSyncPending), + CommitSync(CommitSync), CreateSyncWebhook(CreateSyncWebhook), DeleteSyncWebhook(DeleteSyncWebhook), @@ -133,6 +135,7 @@ pub enum WriteRequest { DeleteStack(DeleteStack), UpdateStack(UpdateStack), RenameStack(RenameStack), + WriteStackFileContents(WriteStackFileContents), RefreshStackCache(RefreshStackCache), CreateStackWebhook(CreateStackWebhook), DeleteStackWebhook(DeleteStackWebhook), diff --git a/bin/core/src/api/write/permissions.rs b/bin/core/src/api/write/permissions.rs index 2614b7d3f..466adcc29 100644 --- a/bin/core/src/api/write/permissions.rs +++ 
b/bin/core/src/api/write/permissions.rs @@ -5,7 +5,8 @@ use komodo_client::{ api::write::{ UpdatePermissionOnResourceType, UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget, - UpdatePermissionOnTargetResponse, UpdateUserBasePermissions, + UpdatePermissionOnTargetResponse, UpdateUserAdmin, + UpdateUserAdminResponse, UpdateUserBasePermissions, UpdateUserBasePermissionsResponse, }, entities::{ @@ -28,6 +29,40 @@ use crate::{ state::{db_client, State}, }; +impl Resolve for State { + async fn resolve( + &self, + UpdateUserAdmin { user_id, admin }: UpdateUserAdmin, + super_admin: User, + ) -> anyhow::Result { + if !super_admin.super_admin { + return Err(anyhow!("Only super admins can call this method.")); + } + let user = find_one_by_id(&db_client().users, &user_id) + .await + .context("failed to query mongo for user")? + .context("did not find user with given id")?; + + if !user.enabled { + return Err(anyhow!("User is disabled. Enable user first.")); + } + + if user.super_admin { + return Err(anyhow!("Cannot update other super admins")); + } + + update_one_by_id( + &db_client().users, + &user_id, + doc! { "$set": { "admin": admin } }, + None, + ) + .await?; + + Ok(UpdateUserAdminResponse {}) + } +} + impl Resolve for State { #[instrument(name = "UpdateUserBasePermissions", skip(self, admin))] async fn resolve( @@ -44,13 +79,18 @@ impl Resolve for State { return Err(anyhow!("this method is admin only")); } - let user = find_one_by_id(&db_client().await.users, &user_id) + let user = find_one_by_id(&db_client().users, &user_id) .await .context("failed to query mongo for user")? 
.context("did not find user with given id")?; - if user.admin { + if user.super_admin { return Err(anyhow!( - "cannot use this method to update other admins permissions" + "Cannot use this method to update super admins permissions" + )); + } + if user.admin && !admin.super_admin { + return Err(anyhow!( + "Only super admins can use this method to update other admins permissions" )); } let mut update_doc = Document::new(); @@ -65,7 +105,7 @@ impl Resolve for State { } update_one_by_id( - &db_client().await.users, + &db_client().users, &user_id, mungos::update::Update::Set(update_doc), None, @@ -119,7 +159,6 @@ impl Resolve for State { match user_target_variant { UserTargetVariant::User => { db_client() - .await .users .update_one(filter, update) .await @@ -129,7 +168,6 @@ impl Resolve for State { } UserTargetVariant::UserGroup => { db_client() - .await .user_groups .update_one(filter, update) .await @@ -181,7 +219,6 @@ impl Resolve for State { (user_target_variant.as_ref(), resource_variant.as_ref()); db_client() - .await .permissions .update_one( doc! { @@ -218,7 +255,6 @@ async fn extract_user_target_with_validation( Err(_) => doc! { "username": ident }, }; let id = db_client() - .await .users .find_one(filter) .await @@ -233,7 +269,6 @@ async fn extract_user_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .user_groups .find_one(filter) .await @@ -260,7 +295,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .builds .find_one(filter) .await @@ -275,7 +309,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .builders .find_one(filter) .await @@ -290,7 +323,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! 
{ "name": ident }, }; let id = db_client() - .await .deployments .find_one(filter) .await @@ -305,7 +337,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .servers .find_one(filter) .await @@ -320,7 +351,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .repos .find_one(filter) .await @@ -335,7 +365,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .alerters .find_one(filter) .await @@ -350,7 +379,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .procedures .find_one(filter) .await @@ -365,7 +393,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .server_templates .find_one(filter) .await @@ -380,7 +407,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! { "name": ident }, }; let id = db_client() - .await .resource_syncs .find_one(filter) .await @@ -395,7 +421,6 @@ async fn extract_resource_target_with_validation( Err(_) => doc! 
{ "name": ident }, }; let id = db_client() - .await .stacks .find_one(filter) .await diff --git a/bin/core/src/api/write/provider.rs b/bin/core/src/api/write/provider.rs index 5b07ebf99..1b5f20f19 100644 --- a/bin/core/src/api/write/provider.rs +++ b/bin/core/src/api/write/provider.rs @@ -47,7 +47,6 @@ impl Resolve for State { ); account.id = db_client() - .await .git_accounts .insert_one(&account) .await @@ -118,7 +117,7 @@ impl Resolve for State { let account = to_document(&account).context( "failed to serialize partial git provider account to bson", )?; - let db = db_client().await; + let db = db_client(); update_one_by_id( &db.git_accounts, &id, @@ -175,7 +174,7 @@ impl Resolve for State { &user, ); - let db = db_client().await; + let db = db_client(); let Some(account) = find_one_by_id(&db.git_accounts, &id) .await @@ -237,7 +236,6 @@ impl Resolve for State { ); account.id = db_client() - .await .registry_accounts .insert_one(&account) .await @@ -310,7 +308,7 @@ impl Resolve for State { "failed to serialize partial docker registry account account to bson", )?; - let db = db_client().await; + let db = db_client(); update_one_by_id( &db.registry_accounts, &id, @@ -368,7 +366,7 @@ impl Resolve for State { &user, ); - let db = db_client().await; + let db = db_client(); let Some(account) = find_one_by_id(&db.registry_accounts, &id) .await .context("failed to query db for git accounts")? 
diff --git a/bin/core/src/api/write/repo.rs b/bin/core/src/api/write/repo.rs index 546679447..33d2790ab 100644 --- a/bin/core/src/api/write/repo.rs +++ b/bin/core/src/api/write/repo.rs @@ -1,5 +1,5 @@ use anyhow::{anyhow, Context}; -use mongo_indexed::doc; +use git::GitRes; use komodo_client::{ api::write::*, entities::{ @@ -10,6 +10,7 @@ use komodo_client::{ CloneArgs, NoData, }, }; +use mongo_indexed::doc; use mungos::mongodb::bson::to_document; use octorust::types::{ ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig, @@ -18,7 +19,7 @@ use resolver_api::Resolve; use crate::{ config::core_config, - helpers::{git_token, random_string}, + helpers::git_token, resource, state::{db_client, github_client, State}, }; @@ -94,42 +95,36 @@ impl Resolve for State { ) .await?; - if repo.config.repo.is_empty() { + if repo.config.git_provider.is_empty() + || repo.config.repo.is_empty() + { // Nothing to do return Ok(NoData {}); } - let config = core_config(); - - let repo_dir = config.repo_directory.join(random_string(10)); let mut clone_args: CloneArgs = (&repo).into(); - // No reason to to the commands here. + let repo_path = + clone_args.unique_path(&core_config().repo_directory)?; + clone_args.destination = Some(repo_path.display().to_string()); + // Don't want to run these on core. clone_args.on_clone = None; clone_args.on_pull = None; - clone_args.destination = Some(repo_dir.display().to_string()); - let access_token = match (&clone_args.account, &clone_args.provider) - { - (None, _) => None, - (Some(_), None) => { - return Err(anyhow!( - "Account is configured, but provider is empty" - )) - } - (Some(username), Some(provider)) => { - git_token(provider, username, |https| { + let access_token = if let Some(username) = &clone_args.account { + git_token(&clone_args.provider, username, |https| { clone_args.https = https }) .await .with_context( - || format!("Failed to get git token in call to db. Stopping run. 
| {provider} | {username}"), + || format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider), )? - } + } else { + None }; - let (_, latest_hash, latest_message, _) = git::clone( + let GitRes { hash, message, .. } = git::pull_or_clone( clone_args, - &config.repo_directory, + &core_config().repo_directory, access_token, &[], "", @@ -137,22 +132,23 @@ impl Resolve for State { &[], ) .await - .context("failed to clone repo (the resource) repo")?; + .with_context(|| { + format!("Failed to update repo at {repo_path:?}") + })?; let info = RepoInfo { last_pulled_at: repo.info.last_pulled_at, last_built_at: repo.info.last_built_at, built_hash: repo.info.built_hash, built_message: repo.info.built_message, - latest_hash, - latest_message, + latest_hash: hash, + latest_message: message, }; let info = to_document(&info) .context("failed to serialize repo info to bson")?; db_client() - .await .repos .update_one( doc! { "name": &repo.name }, @@ -161,14 +157,6 @@ impl Resolve for State { .await .context("failed to update repo info on db")?; - if repo_dir.exists() { - if let Err(e) = std::fs::remove_dir_all(&repo_dir) { - warn!( - "failed to remove repo (resource) cache update repo directory | {e:?}" - ) - } - } - Ok(NoData {}) } } @@ -233,7 +221,11 @@ impl Resolve for State { &repo.config.webhook_secret }; - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = match action { RepoWebhookAction::Clone => { format!("{host}/listener/github/repo/{}/clone", repo.id) @@ -350,7 +342,11 @@ impl Resolve for State { .. 
} = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = match action { RepoWebhookAction::Clone => { format!("{host}/listener/github/repo/{}/clone", repo.id) diff --git a/bin/core/src/api/write/server.rs b/bin/core/src/api/write/server.rs index c6422989d..797833adb 100644 --- a/bin/core/src/api/write/server.rs +++ b/bin/core/src/api/write/server.rs @@ -73,7 +73,7 @@ impl Resolve for State { let mut update = make_update(&server, Operation::RenameServer, &user); - update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": komodo_timestamp() }), None) + update_one_by_id(&db_client().servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": komodo_timestamp() }), None) .await .context("failed to update server on db. this name may already be taken.")?; update.push_simple_log( diff --git a/bin/core/src/api/write/service_user.rs b/bin/core/src/api/write/service_user.rs index adfeeb050..e23884656 100644 --- a/bin/core/src/api/write/service_user.rs +++ b/bin/core/src/api/write/service_user.rs @@ -48,6 +48,7 @@ impl Resolve for State { config, enabled: true, admin: false, + super_admin: false, create_server_permissions: false, create_build_permissions: false, last_update_view: 0, @@ -56,7 +57,6 @@ impl Resolve for State { updated_at: komodo_timestamp(), }; user.id = db_client() - .await .users .insert_one(&user) .await @@ -85,7 +85,7 @@ impl Resolve for State { if !user.admin { return Err(anyhow!("user not admin")); } - let db = db_client().await; + let db = db_client(); let service_user = db .users .find_one(doc! { "username": &username }) @@ -124,11 +124,10 @@ impl Resolve for State { if !user.admin { return Err(anyhow!("user not admin")); } - let service_user = - find_one_by_id(&db_client().await.users, &user_id) - .await - .context("failed to query db for user")? 
- .context("no user found with id")?; + let service_user = find_one_by_id(&db_client().users, &user_id) + .await + .context("failed to query db for user")? + .context("no user found with id")?; let UserConfig::Service { .. } = &service_user.config else { return Err(anyhow!("user is not service user")); }; @@ -148,7 +147,7 @@ impl Resolve for State { if !user.admin { return Err(anyhow!("user not admin")); } - let db = db_client().await; + let db = db_client(); let api_key = db .api_keys .find_one(doc! { "key": &key }) @@ -156,7 +155,7 @@ impl Resolve for State { .context("failed to query db for api key")? .context("did not find matching api key")?; let service_user = - find_one_by_id(&db_client().await.users, &api_key.user_id) + find_one_by_id(&db_client().users, &api_key.user_id) .await .context("failed to query db for user")? .context("no user found with id")?; diff --git a/bin/core/src/api/write/stack.rs b/bin/core/src/api/write/stack.rs index c5c62ecd9..5ad0610af 100644 --- a/bin/core/src/api/write/stack.rs +++ b/bin/core/src/api/write/stack.rs @@ -7,10 +7,10 @@ use komodo_client::{ komodo_timestamp, permission::PermissionLevel, server::ServerState, - stack::{ComposeContents, PartialStackConfig, Stack, StackInfo}, + stack::{PartialStackConfig, Stack, StackInfo}, update::Update, - user::User, - NoData, Operation, + user::{stack_user, User}, + FileContents, NoData, Operation, }, }; use mungos::{ @@ -22,6 +22,7 @@ use octorust::types::{ }; use periphery_client::api::compose::{ GetComposeContentsOnHost, GetComposeContentsOnHostResponse, + WriteComposeContentsToHost, }; use resolver_api::Resolve; @@ -30,13 +31,14 @@ use crate::{ helpers::{ periphery_client, query::get_server_with_state, - stack::{ - remote::get_remote_compose_contents, - services::extract_services_into_res, - }, update::{add_update, make_update}, }, resource, + stack::{ + get_stack_and_server, + remote::{get_remote_compose_contents, RemoteComposeContents}, + services::extract_services_into_res, + }, 
state::{db_client, github_client, State}, }; @@ -109,7 +111,7 @@ impl Resolve for State { make_update(&stack, Operation::RenameStack, &user); update_one_by_id( - &db_client().await.stacks, + &db_client().stacks, &stack.id, mungos::update::Update::Set( doc! { "name": &name, "updated_at": komodo_timestamp() }, @@ -131,6 +133,79 @@ impl Resolve for State { } } +impl Resolve for State { + async fn resolve( + &self, + WriteStackFileContents { + stack, + file_path, + contents, + }: WriteStackFileContents, + user: User, + ) -> anyhow::Result { + let (stack, server) = get_stack_and_server( + &stack, + &user, + PermissionLevel::Write, + true, + ) + .await?; + + if !stack.config.files_on_host { + return Err(anyhow!( + "Stack is not configured to use files on host, can't write file contents" + )); + } + + let mut update = + make_update(&stack, Operation::WriteStackContents, &user); + + update.push_simple_log("File contents to write", &contents); + + match periphery_client(&server)? + .request(WriteComposeContentsToHost { + name: stack.name, + run_directory: stack.config.run_directory, + file_path, + contents, + }) + .await + .context("Failed to write contents to host") + { + Ok(log) => { + update.logs.push(log); + } + Err(e) => { + update.push_error_log( + "Write file contents", + format_serror(&e.into()), + ); + } + }; + + if let Err(e) = State + .resolve( + RefreshStackCache { stack: stack.id }, + stack_user().to_owned(), + ) + .await + .context( + "Failed to refresh stack cache after writing file contents", + ) + { + update.push_error_log( + "Refresh stack cache", + format_serror(&e.into()), + ); + } + + update.finalize(); + add_update(update.clone()).await?; + + Ok(update) + } +} + impl Resolve for State { #[instrument( name = "RefreshStackCache", @@ -195,7 +270,7 @@ impl Resolve for State { Ok(res) => res, Err(e) => GetComposeContentsOnHostResponse { contents: Default::default(), - errors: vec![ComposeContents { + errors: vec![FileContents { path: 
stack.config.run_directory.clone(), contents: format_serror(&e.into()), }], @@ -226,16 +301,16 @@ impl Resolve for State { // ================ // REPO BASED STACK // ================ - let ( - remote_contents, - remote_errors, - _, - latest_hash, - latest_message, - ) = + let RemoteComposeContents { + successful: remote_contents, + errored: remote_errors, + hash: latest_hash, + message: latest_message, + .. + } = get_remote_compose_contents(&stack, Some(&mut missing_files)) - .await - .context("failed to clone remote compose file")?; + .await?; + let project_name = stack.project_name(true); let mut services = Vec::new(); @@ -298,7 +373,6 @@ impl Resolve for State { .context("failed to serialize stack info to bson")?; db_client() - .await .stacks .update_one( doc! { "name": &stack.name }, @@ -371,7 +445,11 @@ impl Resolve for State { &stack.config.webhook_secret }; - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = match action { StackWebhookAction::Refresh => { format!("{host}/listener/github/stack/{}/refresh", stack.id) @@ -485,7 +563,11 @@ impl Resolve for State { .. 
} = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = match action { StackWebhookAction::Refresh => { format!("{host}/listener/github/stack/{}/refresh", stack.id) diff --git a/bin/core/src/api/write/sync.rs b/bin/core/src/api/write/sync.rs index e9d2ac0cc..6e7b3a067 100644 --- a/bin/core/src/api/write/sync.rs +++ b/bin/core/src/api/write/sync.rs @@ -1,9 +1,9 @@ -use std::collections::HashMap; +use std::{collections::HashMap, path::PathBuf}; use anyhow::{anyhow, Context}; use formatting::format_serror; use komodo_client::{ - api::write::*, + api::{read::ExportAllResourcesToToml, write::*}, entities::{ self, alert::{Alert, AlertData, SeverityLevel}, @@ -20,13 +20,11 @@ use komodo_client::{ server_template::ServerTemplate, stack::Stack, sync::{ - PartialResourceSyncConfig, PendingSyncUpdates, - PendingSyncUpdatesData, PendingSyncUpdatesDataErr, - PendingSyncUpdatesDataOk, ResourceSync, + PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo, }, - ResourceTarget, - user::User, - NoData, + update::Log, + user::{sync_user, User}, + NoData, Operation, ResourceTarget, }, }; use mungos::{ @@ -39,17 +37,18 @@ use octorust::types::{ use resolver_api::Resolve; use crate::{ + alert::send_alerts, config::core_config, helpers::{ - alert::send_alerts, query::get_id_to_tags, - sync::{ - deploy::SyncDeployParams, - resource::{get_updates_for_view, AllResourcesById}, - }, + update::{add_update, make_update, update_update}, }, - resource, + resource::{self, refresh_resource_sync_state_cache}, state::{db_client, github_client, State}, + sync::{ + deploy::SyncDeployParams, remote::RemoteResources, + view::push_updates_for_view, AllResourcesById, + }, }; impl Resolve for State { @@ -117,21 +116,44 @@ impl Resolve for State { ) -> anyhow::Result { // Even though this is a write request, this doesn't change any config. 
Anyone that can execute the // sync should be able to do this. - let sync = resource::get_check_permissions::< + let mut sync = resource::get_check_permissions::< entities::sync::ResourceSync, >(&sync, &user, PermissionLevel::Execute) .await?; - if sync.config.repo.is_empty() { - return Err(anyhow!("resource sync repo not configured")); + if !sync.config.managed + && !sync.config.files_on_host + && sync.config.file_contents.is_empty() + && sync.config.repo.is_empty() + { + // Sync not configured, nothing to refresh + return Ok(sync); } let res = async { - let (res, _, hash, message) = - crate::helpers::sync::remote::get_remote_resources(&sync) - .await - .context("failed to get remote resources")?; - let resources = res?; + let RemoteResources { + resources, + files, + file_errors, + hash, + message, + .. + } = crate::sync::remote::get_remote_resources(&sync) + .await + .context("failed to get remote resources")?; + + sync.info.remote_contents = files; + sync.info.remote_errors = file_errors; + sync.info.pending_hash = hash; + sync.info.pending_message = message; + + if !sync.info.remote_errors.is_empty() { + return Err(anyhow!( + "Remote resources have errors. Cannot compute diffs." 
+ )); + } + + let resources = resources?; let id_to_tags = get_id_to_tags(None).await?; let all_resources = AllResourcesById::load().await?; @@ -150,155 +172,182 @@ impl Resolve for State { .collect::>(); let deploy_updates = - crate::helpers::sync::deploy::get_updates_for_view( - SyncDeployParams { - deployments: &resources.deployments, - deployment_map: &deployments_by_name, - stacks: &resources.stacks, - stack_map: &stacks_by_name, - all_resources: &all_resources, - }, - ) + crate::sync::deploy::get_updates_for_view(SyncDeployParams { + deployments: &resources.deployments, + deployment_map: &deployments_by_name, + stacks: &resources.stacks, + stack_map: &stacks_by_name, + all_resources: &all_resources, + }) .await; - let data = PendingSyncUpdatesDataOk { - server_updates: get_updates_for_view::( + let delete = sync.config.managed || sync.config.delete; + + let mut diffs = Vec::new(); + + { + push_updates_for_view::( resources.servers, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, + &mut diffs, ) - .await - .context("failed to get server updates")?, - deployment_updates: get_updates_for_view::( - resources.deployments, - sync.config.delete, - &all_resources, - &id_to_tags, - ) - .await - .context("failed to get deployment updates")?, - stack_updates: get_updates_for_view::( + .await?; + push_updates_for_view::( resources.stacks, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, + &mut diffs, ) - .await - .context("failed to get stack updates")?, - build_updates: get_updates_for_view::( + .await?; + push_updates_for_view::( + resources.deployments, + delete, + &all_resources, + &id_to_tags, + &sync.config.match_tags, + &mut diffs, + ) + .await?; + push_updates_for_view::( resources.builds, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, + &mut diffs, ) - .await - .context("failed to get build updates")?, - repo_updates: get_updates_for_view::( + 
.await?; + push_updates_for_view::( resources.repos, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, + &mut diffs, ) - .await - .context("failed to get repo updates")?, - procedure_updates: get_updates_for_view::( + .await?; + push_updates_for_view::( resources.procedures, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, + &mut diffs, ) - .await - .context("failed to get procedure updates")?, - alerter_updates: get_updates_for_view::( - resources.alerters, - sync.config.delete, - &all_resources, - &id_to_tags, - ) - .await - .context("failed to get alerter updates")?, - builder_updates: get_updates_for_view::( + .await?; + push_updates_for_view::( resources.builders, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, + &mut diffs, ) - .await - .context("failed to get builder updates")?, - server_template_updates: - get_updates_for_view::( - resources.server_templates, - sync.config.delete, - &all_resources, - &id_to_tags, - ) - .await - .context("failed to get server template updates")?, - resource_sync_updates: get_updates_for_view::< - entities::sync::ResourceSync, - >( + .await?; + push_updates_for_view::( + resources.alerters, + delete, + &all_resources, + &id_to_tags, + &sync.config.match_tags, + &mut diffs, + ) + .await?; + push_updates_for_view::( + resources.server_templates, + delete, + &all_resources, + &id_to_tags, + &sync.config.match_tags, + &mut diffs, + ) + .await?; + push_updates_for_view::( resources.resource_syncs, - sync.config.delete, + delete, &all_resources, &id_to_tags, + &sync.config.match_tags, + &mut diffs, ) - .await - .context("failed to get resource sync updates")?, - variable_updates: - crate::helpers::sync::variables::get_updates_for_view( - resources.variables, - sync.config.delete, - ) - .await - .context("failed to get variable updates")?, - user_group_updates: - 
crate::helpers::sync::user_groups::get_updates_for_view( - resources.user_groups, - sync.config.delete, - &all_resources, - ) - .await - .context("failed to get user group updates")?, + .await?; + } + + let variable_updates = + crate::sync::variables::get_updates_for_view( + &resources.variables, + // Delete doesn't work with variables when match tags are set + sync.config.match_tags.is_empty() && delete, + ) + .await?; + + let user_group_updates = + crate::sync::user_groups::get_updates_for_view( + resources.user_groups, + // Delete doesn't work with user groups when match tags are set + sync.config.match_tags.is_empty() && delete, + &all_resources, + ) + .await?; + + anyhow::Ok(( + diffs, deploy_updates, - }; - anyhow::Ok((hash, message, data)) + variable_updates, + user_group_updates, + )) } .await; - let (pending, has_updates) = match res { - Ok((hash, message, data)) => { - let has_updates = !data.no_updates(); - ( - PendingSyncUpdates { - hash: Some(hash), - message: Some(message), - data: PendingSyncUpdatesData::Ok(data), - }, - has_updates, - ) - } + let ( + resource_updates, + deploy_updates, + variable_updates, + user_group_updates, + pending_error, + ) = match res { + Ok(res) => (res.0, res.1, res.2, res.3, None), Err(e) => ( - PendingSyncUpdates { - hash: None, - message: None, - data: PendingSyncUpdatesData::Err( - PendingSyncUpdatesDataErr { - message: format_serror(&e.into()), - }, - ), - }, - false, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Some(format_serror(&e.into())), ), }; - let pending = to_document(&pending) + let has_updates = !resource_updates.is_empty() + || !deploy_updates.to_deploy == 0 + || !variable_updates.is_empty() + || !user_group_updates.is_empty(); + + let info = ResourceSyncInfo { + last_sync_ts: sync.info.last_sync_ts, + last_sync_hash: sync.info.last_sync_hash, + last_sync_message: sync.info.last_sync_message, + remote_contents: sync.info.remote_contents, + remote_errors: 
sync.info.remote_errors, + pending_hash: sync.info.pending_hash, + pending_message: sync.info.pending_message, + pending_deploy: deploy_updates, + resource_updates, + variable_updates, + user_group_updates, + pending_error, + }; + + let info = to_document(&info) .context("failed to serialize pending to document")?; update_one_by_id( - &db_client().await.resource_syncs, + &db_client().resource_syncs, &sync.id, - doc! { "$set": { "info.pending": pending } }, + doc! { "$set": { "info": info } }, None, ) .await?; @@ -307,9 +356,8 @@ impl Resolve for State { let id = sync.id.clone(); let name = sync.name.clone(); tokio::task::spawn(async move { - let db = db_client().await; + let db = db_client(); let Some(existing) = db_client() - .await .alerts .find_one(doc! { "resolved": false, @@ -370,6 +418,135 @@ impl Resolve for State { } } +impl Resolve for State { + #[instrument(name = "CommitSync", skip(self, user))] + async fn resolve( + &self, + CommitSync { sync }: CommitSync, + user: User, + ) -> anyhow::Result { + let sync = resource::get_check_permissions::< + entities::sync::ResourceSync, + >(&sync, &user, PermissionLevel::Write) + .await?; + + let fresh_sync = !sync.config.files_on_host + && sync.config.file_contents.is_empty() + && sync.config.repo.is_empty(); + + if !sync.config.managed && !fresh_sync { + return Err(anyhow!( + "Cannot commit to sync. Enabled 'managed' mode." 
+ )); + } + + let res = State + .resolve( + ExportAllResourcesToToml { + tags: sync.config.match_tags, + }, + sync_user().to_owned(), + ) + .await?; + + let mut update = make_update( + ResourceTarget::ResourceSync(sync.id), + Operation::CommitSync, + &user, + ); + update.id = add_update(update.clone()).await?; + + if sync.config.files_on_host { + let path = sync + .config + .resource_path + .parse::() + .context("Resource path is not valid file path")?; + let extension = path + .extension() + .context("Resource path missing '.toml' extension")?; + if extension != "toml" { + return Err(anyhow!("Wrong file extension. Expected '.toml', got '.{extension:?}'")); + } + if let Some(parent) = path.parent() { + let _ = tokio::fs::create_dir_all(&parent).await; + }; + if let Err(e) = + tokio::fs::write(&sync.config.resource_path, &res.toml) + .await + .with_context(|| { + format!( + "Failed to write resource file to {}", + sync.config.resource_path + ) + }) + { + update.push_error_log( + "Write resource file", + format_serror(&e.into()), + ); + update.finalize(); + add_update(update).await?; + return resource::get::(&sync.name).await; + } + } else if let Err(e) = db_client() + .resource_syncs + .update_one( + doc! { "name": &sync.name }, + doc! 
{ "$set": { "config.file_contents": &res.toml } }, + ) + .await + .context("failed to update file_contents on db") + { + update.push_error_log( + "Write resource to database", + format_serror(&e.into()), + ); + update.finalize(); + add_update(update).await?; + return resource::get::(&sync.name).await; + } + + update + .logs + .push(Log::simple("Committed resources", res.toml)); + + let res = match State + .resolve(RefreshResourceSyncPending { sync: sync.name }, user) + .await + { + Ok(sync) => Ok(sync), + Err(e) => { + update.push_error_log( + "Refresh sync pending", + format_serror(&(&e).into()), + ); + Err(e) + } + }; + + update.finalize(); + + // Need to manually update the update before cache refresh, + // and before broadcast with add_update. + // The Err case of to_document should be unreachable, + // but will fail to update cache in that case. + if let Ok(update_doc) = to_document(&update) { + let _ = update_one_by_id( + &db_client().updates, + &update.id, + mungos::update::Update::Set(update_doc), + None, + ) + .await; + refresh_resource_sync_state_cache().await; + } + update_update(update).await?; + + res + } +} + impl Resolve for State { #[instrument(name = "CreateSyncWebhook", skip(self, user))] async fn resolve( @@ -430,7 +607,11 @@ impl Resolve for State { &sync.config.webhook_secret }; - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = match action { SyncWebhookAction::Refresh => { format!("{host}/listener/github/sync/{}/refresh", sync.id) @@ -544,7 +725,11 @@ impl Resolve for State { .. 
} = core_config(); - let host = webhook_base_url.as_ref().unwrap_or(host); + let host = if webhook_base_url.is_empty() { + host + } else { + webhook_base_url + }; let url = match action { SyncWebhookAction::Refresh => { format!("{host}/listener/github/sync/{}/refresh", sync.id) diff --git a/bin/core/src/api/write/tag.rs b/bin/core/src/api/write/tag.rs index 0389eccc0..071884e36 100644 --- a/bin/core/src/api/write/tag.rs +++ b/bin/core/src/api/write/tag.rs @@ -44,7 +44,6 @@ impl Resolve for State { }; tag.id = db_client() - .await .tags .insert_one(&tag) .await @@ -72,7 +71,7 @@ impl Resolve for State { get_tag_check_owner(&id, &user).await?; update_one_by_id( - &db_client().await.tags, + &db_client().tags, &id, doc! { "$set": { "name": name } }, None, @@ -105,7 +104,7 @@ impl Resolve for State { resource::remove_tag_from_all::(&id), )?; - delete_one_by_id(&db_client().await.tags, &id, None).await?; + delete_one_by_id(&db_client().tags, &id, None).await?; Ok(tag) } diff --git a/bin/core/src/api/write/user_group.rs b/bin/core/src/api/write/user_group.rs index 3d886d18c..95f34170a 100644 --- a/bin/core/src/api/write/user_group.rs +++ b/bin/core/src/api/write/user_group.rs @@ -33,7 +33,7 @@ impl Resolve for State { updated_at: komodo_timestamp(), name, }; - let db = db_client().await; + let db = db_client(); let id = db .user_groups .insert_one(user_group) @@ -59,7 +59,7 @@ impl Resolve for State { if !admin.admin { return Err(anyhow!("This call is admin-only")); } - let db = db_client().await; + let db = db_client(); update_one_by_id( &db.user_groups, &id, @@ -85,7 +85,7 @@ impl Resolve for State { return Err(anyhow!("This call is admin-only")); } - let db = db_client().await; + let db = db_client(); let ug = find_one_by_id(&db.user_groups, &id) .await @@ -118,7 +118,7 @@ impl Resolve for State { return Err(anyhow!("This call is admin-only")); } - let db = db_client().await; + let db = db_client(); let filter = match ObjectId::from_str(&user) { Ok(id) => doc! 
{ "_id": id }, @@ -163,7 +163,7 @@ impl Resolve for State { return Err(anyhow!("This call is admin-only")); } - let db = db_client().await; + let db = db_client(); let filter = match ObjectId::from_str(&user) { Ok(id) => doc! { "_id": id }, @@ -205,7 +205,7 @@ impl Resolve for State { return Err(anyhow!("This call is admin-only")); } - let db = db_client().await; + let db = db_client(); let all_users = find_collect(&db.users, None, None) .await diff --git a/bin/core/src/api/write/variable.rs b/bin/core/src/api/write/variable.rs index 51eb269b3..fa47404fc 100644 --- a/bin/core/src/api/write/variable.rs +++ b/bin/core/src/api/write/variable.rs @@ -46,7 +46,6 @@ impl Resolve for State { }; db_client() - .await .variables .insert_one(&variable) .await @@ -86,7 +85,6 @@ impl Resolve for State { } db_client() - .await .variables .update_one( doc! { "name": &name }, @@ -133,7 +131,6 @@ impl Resolve for State { return Err(anyhow!("only admins can update variables")); } db_client() - .await .variables .update_one( doc! { "name": &name }, @@ -156,7 +153,6 @@ impl Resolve for State { return Err(anyhow!("only admins can update variables")); } db_client() - .await .variables .update_one( doc! { "name": &name }, @@ -179,7 +175,6 @@ impl Resolve for State { } let variable = get_variable(&name).await?; db_client() - .await .variables .delete_one(doc! { "name": &name }) .await diff --git a/bin/core/src/auth/github/mod.rs b/bin/core/src/auth/github/mod.rs index 1c638d792..0fa619c30 100644 --- a/bin/core/src/auth/github/mod.rs +++ b/bin/core/src/auth/github/mod.rs @@ -64,12 +64,12 @@ async fn callback( let github_user = client.get_github_user(&token.access_token).await?; let github_id = github_user.id.to_string(); - let db_client = db_client().await; + let db_client = db_client(); let user = db_client .users .find_one(doc! 
{ "config.data.github_id": &github_id }) .await - .context("failed at find user query from mongo")?; + .context("failed at find user query from database")?; let jwt = match user { Some(user) => jwt_client() .generate(user.id) @@ -87,6 +87,7 @@ async fn callback( username: github_user.login, enabled: no_users_exist || core_config.enable_new_users, admin: no_users_exist, + super_admin: no_users_exist, create_server_permissions: no_users_exist, create_build_permissions: no_users_exist, updated_at: ts, diff --git a/bin/core/src/auth/google/mod.rs b/bin/core/src/auth/google/mod.rs index 24af24083..45f6f50df 100644 --- a/bin/core/src/auth/google/mod.rs +++ b/bin/core/src/auth/google/mod.rs @@ -73,7 +73,7 @@ async fn callback( .await?; let google_user = client.get_google_user(&token.id_token)?; let google_id = google_user.id.to_string(); - let db_client = db_client().await; + let db_client = db_client(); let user = db_client .users .find_one(doc! { "config.data.google_id": &google_id }) @@ -102,6 +102,7 @@ async fn callback( .to_string(), enabled: no_users_exist || core_config.enable_new_users, admin: no_users_exist, + super_admin: no_users_exist, create_server_permissions: no_users_exist, create_build_permissions: no_users_exist, updated_at: ts, diff --git a/bin/core/src/auth/local.rs b/bin/core/src/auth/local.rs index f6b52e379..84a53046b 100644 --- a/bin/core/src/auth/local.rs +++ b/bin/core/src/auth/local.rs @@ -3,7 +3,6 @@ use std::str::FromStr; use anyhow::{anyhow, Context}; use async_timing_util::unix_timestamp_ms; use axum::http::HeaderMap; -use mongo_indexed::Document; use komodo_client::{ api::auth::{ CreateLocalUser, CreateLocalUserResponse, LoginLocalUser, @@ -11,6 +10,7 @@ use komodo_client::{ }, entities::user::{User, UserConfig}, }; +use mongo_indexed::Document; use mungos::mongodb::bson::{doc, oid::ObjectId}; use resolver_api::Resolve; @@ -50,12 +50,8 @@ impl Resolve for State { let password = bcrypt::hash(password, BCRYPT_COST) .context("failed to hash 
password")?; - let no_users_exist = db_client() - .await - .users - .find_one(Document::new()) - .await? - .is_none(); + let no_users_exist = + db_client().users.find_one(Document::new()).await?.is_none(); if !no_users_exist && core_config.disable_user_registration { return Err(anyhow!("User registration is disabled")); @@ -68,6 +64,7 @@ impl Resolve for State { username, enabled: no_users_exist || core_config.enable_new_users, admin: no_users_exist, + super_admin: no_users_exist, create_server_permissions: no_users_exist, create_build_permissions: no_users_exist, updated_at: ts, @@ -78,7 +75,6 @@ impl Resolve for State { }; let user_id = db_client() - .await .users .insert_one(user) .await @@ -108,7 +104,6 @@ impl Resolve for State { } let user = db_client() - .await .users .find_one(doc! { "username": &username }) .await diff --git a/bin/core/src/auth/mod.rs b/bin/core/src/auth/mod.rs index 229368fcd..d7c6d3441 100644 --- a/bin/core/src/auth/mod.rs +++ b/bin/core/src/auth/mod.rs @@ -21,14 +21,15 @@ use self::jwt::JwtClaims; pub mod github; pub mod google; pub mod jwt; +pub mod oidc; mod local; const STATE_PREFIX_LENGTH: usize = 20; -#[derive(Deserialize)] -pub struct RedirectQuery { - pub redirect: Option, +#[derive(Debug, Deserialize)] +struct RedirectQuery { + redirect: Option, } #[instrument(level = "debug")] @@ -116,7 +117,6 @@ pub async fn auth_api_key_get_user_id( secret: &str, ) -> anyhow::Result { let key = db_client() - .await .api_keys .find_one(doc! 
{ "key": key }) .await diff --git a/bin/core/src/auth/oidc/client.rs b/bin/core/src/auth/oidc/client.rs new file mode 100644 index 000000000..63741558b --- /dev/null +++ b/bin/core/src/auth/oidc/client.rs @@ -0,0 +1,69 @@ +use std::sync::OnceLock; + +use anyhow::Context; +use openidconnect::{ + core::{CoreClient, CoreProviderMetadata}, + reqwest::async_http_client, + ClientId, ClientSecret, IssuerUrl, RedirectUrl, +}; + +use crate::config::core_config; + +static DEFAULT_OIDC_CLIENT: OnceLock> = + OnceLock::new(); + +pub fn default_oidc_client() -> Option<&'static CoreClient> { + DEFAULT_OIDC_CLIENT + .get() + .expect("OIDC client get before init") + .as_ref() +} + +pub async fn init_default_oidc_client() { + let config = core_config(); + if !config.oidc_enabled + || config.oidc_provider.is_empty() + || config.oidc_client_id.is_empty() + || config.oidc_client_secret.is_empty() + { + DEFAULT_OIDC_CLIENT + .set(None) + .expect("Default OIDC client initialized twice"); + return; + } + async { + let provider = config.oidc_provider.to_string(); + // Use OpenID Connect Discovery to fetch the provider metadata. + let provider_metadata = CoreProviderMetadata::discover_async( + IssuerUrl::new(if provider.ends_with('/') { + provider + } else { + provider + "/" + })?, + async_http_client, + ) + .await?; + + // Create an OpenID Connect client by specifying the client ID, client secret, authorization URL + // and token URL. + let client = CoreClient::from_provider_metadata( + provider_metadata, + ClientId::new(config.oidc_client_id.to_string()), + Some(ClientSecret::new(config.oidc_client_secret.to_string())), + ) + // Set the URL the user will be redirected to after the authorization process. 
+ .set_redirect_uri(RedirectUrl::new(format!( + "{}/auth/oidc/callback", + core_config().host + ))?); + + DEFAULT_OIDC_CLIENT + .set(Some(client)) + .expect("Default OIDC client initialized twice"); + + anyhow::Ok(()) + } + .await + .context("Failed to init default OIDC client") + .unwrap(); +} diff --git a/bin/core/src/auth/oidc/mod.rs b/bin/core/src/auth/oidc/mod.rs new file mode 100644 index 000000000..f574b9489 --- /dev/null +++ b/bin/core/src/auth/oidc/mod.rs @@ -0,0 +1,247 @@ +use std::sync::OnceLock; + +use anyhow::{anyhow, Context}; +use axum::{ + extract::Query, response::Redirect, routing::get, Router, +}; +use client::default_oidc_client; +use dashmap::DashMap; +use komodo_client::entities::{ + komodo_timestamp, + user::{User, UserConfig}, +}; +use mungos::mongodb::bson::{doc, Document}; +use openidconnect::{ + core::CoreAuthenticationFlow, AccessTokenHash, AuthorizationCode, + CsrfToken, Nonce, OAuth2TokenResponse, PkceCodeChallenge, + PkceCodeVerifier, Scope, TokenResponse, +}; +use reqwest::StatusCode; +use serde::Deserialize; +use serror::AddStatusCode; + +use crate::{ + config::core_config, + state::{db_client, jwt_client}, +}; + +use super::RedirectQuery; + +pub mod client; + +/// CSRF tokens can only be used once from the callback, +/// and must be used within this timeframe +const CSRF_VALID_FOR_MS: i64 = 120_000; // 2 minutes for user to log in. 
+ +type RedirectUrl = Option; +type CsrfMap = + DashMap; +fn csrf_verifier_tokens() -> &'static CsrfMap { + static CSRF: OnceLock = OnceLock::new(); + CSRF.get_or_init(Default::default) +} + +pub fn router() -> Router { + Router::new() + .route( + "/login", + get(|query| async { + login(query).await.status_code(StatusCode::UNAUTHORIZED) + }), + ) + .route( + "/callback", + get(|query| async { + callback(query).await.status_code(StatusCode::UNAUTHORIZED) + }), + ) +} + +#[instrument(name = "OidcRedirect", level = "debug")] +async fn login( + Query(RedirectQuery { redirect }): Query, +) -> anyhow::Result { + let client = + default_oidc_client().context("OIDC Client not configured")?; + + // Generate a PKCE challenge. + let (pkce_challenge, pkce_verifier) = + PkceCodeChallenge::new_random_sha256(); + + // Generate the authorization URL. + let (auth_url, csrf_token, nonce) = client + .authorize_url( + CoreAuthenticationFlow::AuthorizationCode, + CsrfToken::new_random, + Nonce::new_random, + ) + .add_scope(Scope::new("openid".to_string())) + .add_scope(Scope::new("email".to_string())) + .set_pkce_challenge(pkce_challenge) + .url(); + + // Data inserted here will be matched on callback side for csrf protection. 
+ csrf_verifier_tokens().insert( + csrf_token.secret().clone(), + ( + pkce_verifier, + nonce, + redirect, + komodo_timestamp() + CSRF_VALID_FOR_MS, + ), + ); + + let config = core_config(); + let redirect = if !config.oidc_redirect.is_empty() { + Redirect::to( + auth_url + .as_str() + .replace(&config.oidc_provider, &config.oidc_redirect) + .as_str(), + ) + } else { + Redirect::to(auth_url.as_str()) + }; + + Ok(redirect) +} + +#[derive(Debug, Deserialize)] +struct CallbackQuery { + state: Option, + code: Option, + error: Option, +} + +#[instrument(name = "OidcCallback", level = "debug")] +async fn callback( + Query(query): Query, +) -> anyhow::Result { + let client = + default_oidc_client().context("OIDC Client not configured")?; + + if let Some(e) = query.error { + return Err(anyhow!("Provider returned error: {e}")); + } + + let code = query.code.context("Provider did not return code")?; + let state = CsrfToken::new( + query.state.context("Provider did not return state")?, + ); + + let (_, (pkce_verifier, nonce, redirect, valid_until)) = + csrf_verifier_tokens() + .remove(state.secret()) + .context("CSRF Token invalid")?; + + if komodo_timestamp() > valid_until { + return Err(anyhow!("CSRF token invalid (Timed out). The token must be ")); + } + + let token_response = client + .exchange_code(AuthorizationCode::new(code)) + // Set the PKCE code verifier. + .set_pkce_verifier(pkce_verifier) + .request_async(openidconnect::reqwest::async_http_client) + .await + .context("Failed to get Oauth token")?; + + // Extract the ID token claims after verifying its authenticity and nonce. + let id_token = token_response + .id_token() + .context("OIDC Server did not return an ID token")?; + let claims = id_token + .claims(&client.id_token_verifier(), &nonce) + .context("Failed to verify token claims")?; + + // Verify the access token hash to ensure that the access token hasn't been substituted for + // another user's. 
+ if let Some(expected_access_token_hash) = claims.access_token_hash() + { + let actual_access_token_hash = AccessTokenHash::from_token( + token_response.access_token(), + &id_token.signing_alg()?, + )?; + if actual_access_token_hash != *expected_access_token_hash { + return Err(anyhow!("Invalid access token")); + } + } + + let user_id = claims.subject().as_str(); + + let db_client = db_client(); + let user = db_client + .users + .find_one(doc! { + "config.data.provider": &core_config().oidc_provider, + "config.data.user_id": user_id + }) + .await + .context("failed at find user query from database")?; + + let jwt = match user { + Some(user) => jwt_client() + .generate(user.id) + .context("failed to generate jwt")?, + None => { + let ts = komodo_timestamp(); + let no_users_exist = + db_client.users.find_one(Document::new()).await?.is_none(); + let core_config = core_config(); + if !no_users_exist && core_config.disable_user_registration { + return Err(anyhow!("User registration is disabled")); + } + // Email will use user_id if it isn't available. + let email = claims + .email() + .map(|email| email.as_str()) + .unwrap_or(user_id); + let username = if core_config.oidc_use_full_email { + email + } else { + email + .split_once('@') + .map(|(username, _)| username) + .unwrap_or(email) + } + .to_string(); + let user = User { + id: Default::default(), + username, + enabled: no_users_exist || core_config.enable_new_users, + admin: no_users_exist, + super_admin: no_users_exist, + create_server_permissions: no_users_exist, + create_build_permissions: no_users_exist, + updated_at: ts, + last_update_view: 0, + recents: Default::default(), + all: Default::default(), + config: UserConfig::Oidc { + provider: core_config.oidc_provider.clone(), + user_id: user_id.to_string(), + }, + }; + let user_id = db_client + .users + .insert_one(user) + .await + .context("failed to create user on database")? + .inserted_id + .as_object_id() + .context("inserted_id is not ObjectId")? 
+ .to_string(); + jwt_client() + .generate(user_id) + .context("failed to generate jwt")? + } + }; + let exchange_token = jwt_client().create_exchange_token(jwt).await; + let redirect_url = if let Some(redirect) = redirect { + let splitter = if redirect.contains('?') { '&' } else { '?' }; + format!("{}{splitter}token={exchange_token}", redirect) + } else { + format!("{}?token={exchange_token}", core_config().host) + }; + Ok(Redirect::to(&redirect_url)) +} diff --git a/bin/core/src/cloud/aws/ec2.rs b/bin/core/src/cloud/aws/ec2.rs index bcd91a88b..8c194b966 100644 --- a/bin/core/src/cloud/aws/ec2.rs +++ b/bin/core/src/cloud/aws/ec2.rs @@ -19,7 +19,7 @@ use komodo_client::entities::{ ResourceTarget, }; -use crate::{config::core_config, helpers::alert::send_alerts}; +use crate::{alert::send_alerts, config::core_config}; const POLL_RATE_SECS: u64 = 2; const MAX_POLL_TRIES: usize = 30; @@ -65,6 +65,7 @@ pub async fn launch_ec2_instance( use_public_ip, user_data, port: _, + use_https: _, } = config; let instance_type = handle_unknown_instance_type( InstanceType::from(instance_type.as_str()), diff --git a/bin/core/src/cloud/aws/ecr.rs b/bin/core/src/cloud/aws/ecr.rs deleted file mode 100644 index ec5310d47..000000000 --- a/bin/core/src/cloud/aws/ecr.rs +++ /dev/null @@ -1,82 +0,0 @@ -use anyhow::{anyhow, Context}; -use aws_config::{BehaviorVersion, Region}; -use aws_sdk_ecr::Client as EcrClient; -use run_command::async_run_command; - -#[tracing::instrument(skip(access_key_id, secret_access_key))] -async fn make_ecr_client( - region: String, - access_key_id: &str, - secret_access_key: &str, -) -> EcrClient { - std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id); - std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key); - let region = Region::new(region); - let config = aws_config::defaults(BehaviorVersion::v2024_03_28()) - .region(region) - .load() - .await; - EcrClient::new(&config) -} - -#[tracing::instrument(skip(access_key_id, secret_access_key))] -pub async fn 
maybe_create_repo( - repo: &str, - region: String, - access_key_id: &str, - secret_access_key: &str, -) -> anyhow::Result<()> { - let client = - make_ecr_client(region, access_key_id, secret_access_key).await; - - let existing = client - .describe_repositories() - .send() - .await - .context("failed to describe existing repositories")? - .repositories - .unwrap_or_default(); - - if existing.iter().any(|r| { - if let Some(name) = r.repository_name() { - name == repo - } else { - false - } - }) { - return Ok(()); - }; - - client - .create_repository() - .repository_name(repo) - .send() - .await - .context("failed to create repository")?; - - Ok(()) -} - -/// Gets a token docker login. -/// -/// Requires the aws cli be installed on the host -#[tracing::instrument(skip(access_key_id, secret_access_key))] -pub async fn get_ecr_token( - region: &str, - access_key_id: &str, - secret_access_key: &str, -) -> anyhow::Result { - let log = async_run_command(&format!( - "AWS_ACCESS_KEY_ID={access_key_id} AWS_SECRET_ACCESS_KEY={secret_access_key} aws ecr get-login-password --region {region}" - )) - .await; - - if log.success() { - Ok(log.stdout) - } else { - Err( - anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr) - .context("failed to get aws ecr login token"), - ) - } -} diff --git a/bin/core/src/cloud/aws/mod.rs b/bin/core/src/cloud/aws/mod.rs index ca62b43c6..99846274e 100644 --- a/bin/core/src/cloud/aws/mod.rs +++ b/bin/core/src/cloud/aws/mod.rs @@ -1,2 +1 @@ pub mod ec2; -pub mod ecr; diff --git a/bin/core/src/cloud/hetzner/mod.rs b/bin/core/src/cloud/hetzner/mod.rs index 7932938ec..aed19e0b5 100644 --- a/bin/core/src/cloud/hetzner/mod.rs +++ b/bin/core/src/cloud/hetzner/mod.rs @@ -66,6 +66,7 @@ pub async fn launch_hetzner_server( labels, volumes, port: _, + use_https: _, } = config; let datacenter = hetzner_datacenter(datacenter); diff --git a/bin/core/src/config.rs b/bin/core/src/config.rs index 7db3ee0a4..cdeaf5eab 100644 --- a/bin/core/src/config.rs +++ 
b/bin/core/src/config.rs @@ -1,38 +1,18 @@ use std::sync::OnceLock; use anyhow::Context; -use merge_config_files::parse_config_file; +use environment_file::{ + maybe_read_item_from_file, maybe_read_list_from_file, +}; use komodo_client::entities::{ config::core::{ - AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig, - GithubWebhookAppInstallationConfig, HetznerCredentials, - MongoConfig, OauthCredentials, + AwsCredentials, CoreConfig, DatabaseConfig, Env, + GithubWebhookAppConfig, GithubWebhookAppInstallationConfig, + HetznerCredentials, OauthCredentials, }, logger::LogConfig, }; -use serde::Deserialize; - -pub fn frontend_path() -> &'static String { - #[derive(Deserialize)] - struct FrontendEnv { - #[serde(default = "default_frontend_path")] - komodo_frontend_path: String, - } - - fn default_frontend_path() -> String { - "/frontend".to_string() - } - - static FRONTEND_PATH: OnceLock = OnceLock::new(); - FRONTEND_PATH.get_or_init(|| { - let FrontendEnv { - komodo_frontend_path, - } = envy::from_env() - .context("failed to parse FrontendEnv") - .unwrap(); - komodo_frontend_path - }) -} +use merge_config_files::parse_config_file; pub fn core_config() -> &'static CoreConfig { static CORE_CONFIG: OnceLock = OnceLock::new(); @@ -50,7 +30,7 @@ pub fn core_config() -> &'static CoreConfig { .unwrap_or_else(|e| { panic!("failed at parsing config at {config_path} | {e:#}") }); - let installations = match (env.komodo_github_webhook_app_installations_ids, env.komodo_github_webhook_app_installations_namespaces) { + let installations = match (maybe_read_list_from_file(env.komodo_github_webhook_app_installations_ids_file,env.komodo_github_webhook_app_installations_ids), env.komodo_github_webhook_app_installations_namespaces) { (Some(ids), Some(namespaces)) => { if ids.len() != namespaces.len() { panic!("KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. 
Got {ids:?} and {namespaces:?}") @@ -71,35 +51,102 @@ pub fn core_config() -> &'static CoreConfig { config.github_webhook_app.installations } }; - // recreating CoreConfig here makes sure we apply all env overrides. + + // recreating CoreConfig here makes sure apply all env overrides applied. CoreConfig { + // Secret things overridden with file + jwt_secret: maybe_read_item_from_file(env.komodo_jwt_secret_file, env.komodo_jwt_secret).unwrap_or(config.jwt_secret), + passkey: maybe_read_item_from_file(env.komodo_passkey_file, env.komodo_passkey) + .unwrap_or(config.passkey), + webhook_secret: maybe_read_item_from_file(env.komodo_webhook_secret_file, env.komodo_webhook_secret) + .unwrap_or(config.webhook_secret), + database: DatabaseConfig { + uri: maybe_read_item_from_file(env.komodo_database_uri_file,env.komodo_database_uri).unwrap_or(config.database.uri), + address: env.komodo_database_address.unwrap_or(config.database.address), + username: maybe_read_item_from_file(env.komodo_database_username_file,env + .komodo_database_username) + .unwrap_or(config.database.username), + password: maybe_read_item_from_file(env.komodo_database_password_file,env + .komodo_database_password) + .unwrap_or(config.database.password), + app_name: env + .komodo_database_app_name + .unwrap_or(config.database.app_name), + db_name: env + .komodo_database_db_name + .unwrap_or(config.database.db_name), + }, + oidc_enabled: env.komodo_oidc_enabled.unwrap_or(config.oidc_enabled), + oidc_provider: env.komodo_oidc_provider.unwrap_or(config.oidc_provider), + oidc_redirect: env.komodo_oidc_redirect.unwrap_or(config.oidc_redirect), + oidc_client_id: maybe_read_item_from_file(env.komodo_oidc_client_id_file,env + .komodo_oidc_client_id) + .unwrap_or(config.oidc_client_id), + oidc_client_secret: maybe_read_item_from_file(env.komodo_oidc_client_secret_file,env + .komodo_oidc_client_secret) + .unwrap_or(config.oidc_client_secret), + oidc_use_full_email: env.komodo_oidc_use_full_email + 
.unwrap_or(config.oidc_use_full_email), + google_oauth: OauthCredentials { + enabled: env + .komodo_google_oauth_enabled + .unwrap_or(config.google_oauth.enabled), + id: maybe_read_item_from_file(env.komodo_google_oauth_id_file,env + .komodo_google_oauth_id) + .unwrap_or(config.google_oauth.id), + secret: maybe_read_item_from_file(env.komodo_google_oauth_secret_file,env + .komodo_google_oauth_secret) + .unwrap_or(config.google_oauth.secret), + }, + github_oauth: OauthCredentials { + enabled: env + .komodo_github_oauth_enabled + .unwrap_or(config.github_oauth.enabled), + id: maybe_read_item_from_file(env.komodo_github_oauth_id_file,env + .komodo_github_oauth_id) + .unwrap_or(config.github_oauth.id), + secret: maybe_read_item_from_file(env.komodo_github_oauth_secret_file,env + .komodo_github_oauth_secret) + .unwrap_or(config.github_oauth.secret), + }, + aws: AwsCredentials { + access_key_id: maybe_read_item_from_file(env.komodo_aws_access_key_id_file, env + .komodo_aws_access_key_id) + .unwrap_or(config.aws.access_key_id), + secret_access_key: maybe_read_item_from_file(env.komodo_aws_secret_access_key_file, env + .komodo_aws_secret_access_key) + .unwrap_or(config.aws.secret_access_key), + }, + hetzner: HetznerCredentials { + token: maybe_read_item_from_file(env.komodo_hetzner_token_file, env + .komodo_hetzner_token) + .unwrap_or(config.hetzner.token), + }, + github_webhook_app: GithubWebhookAppConfig { + app_id: maybe_read_item_from_file(env.komodo_github_webhook_app_app_id_file, env + .komodo_github_webhook_app_app_id) + .unwrap_or(config.github_webhook_app.app_id), + pk_path: env + .komodo_github_webhook_app_pk_path + .unwrap_or(config.github_webhook_app.pk_path), + installations, + }, + + // Non secrets title: env.komodo_title.unwrap_or(config.title), host: env.komodo_host.unwrap_or(config.host), port: env.komodo_port.unwrap_or(config.port), - passkey: env.komodo_passkey.unwrap_or(config.passkey), - ensure_server: 
env.komodo_ensure_server.unwrap_or(config.ensure_server), - jwt_secret: env.komodo_jwt_secret.unwrap_or(config.jwt_secret), + first_server: env.komodo_first_server.unwrap_or(config.first_server), + frontend_path: env.komodo_frontend_path.unwrap_or(config.frontend_path), jwt_ttl: env .komodo_jwt_ttl .unwrap_or(config.jwt_ttl), repo_directory: env .komodo_repo_directory - .map(|dir| - dir.parse() - .context("failed to parse env komodo_REPO_DIRECTORY as valid path").unwrap()) .unwrap_or(config.repo_directory), - stack_poll_interval: env - .komodo_stack_poll_interval - .unwrap_or(config.stack_poll_interval), - sync_poll_interval: env - .komodo_sync_poll_interval - .unwrap_or(config.sync_poll_interval), - build_poll_interval: env - .komodo_build_poll_interval - .unwrap_or(config.build_poll_interval), - repo_poll_interval: env - .komodo_repo_poll_interval - .unwrap_or(config.repo_poll_interval), + resource_poll_interval: env + .komodo_resource_poll_interval + .unwrap_or(config.resource_poll_interval), monitoring_interval: env .komodo_monitoring_interval .unwrap_or(config.monitoring_interval), @@ -109,83 +156,25 @@ pub fn core_config() -> &'static CoreConfig { keep_alerts_for_days: env .komodo_keep_alerts_for_days .unwrap_or(config.keep_alerts_for_days), - webhook_secret: env - .komodo_webhook_secret - .unwrap_or(config.webhook_secret), webhook_base_url: env .komodo_webhook_base_url - .or(config.webhook_base_url), + .unwrap_or(config.webhook_base_url), transparent_mode: env .komodo_transparent_mode .unwrap_or(config.transparent_mode), ui_write_disabled: env .komodo_ui_write_disabled .unwrap_or(config.ui_write_disabled), + disable_confirm_dialog: env.komodo_disable_confirm_dialog + .unwrap_or(config.disable_confirm_dialog), enable_new_users: env.komodo_enable_new_users .unwrap_or(config.enable_new_users), disable_user_registration: env.komodo_disable_user_registration .unwrap_or(config.disable_user_registration), - local_auth: 
env.komodo_local_auth.unwrap_or(config.local_auth), - google_oauth: OauthCredentials { - enabled: env - .komodo_google_oauth_enabled - .unwrap_or(config.google_oauth.enabled), - id: env - .komodo_google_oauth_id - .unwrap_or(config.google_oauth.id), - secret: env - .komodo_google_oauth_secret - .unwrap_or(config.google_oauth.secret), - }, - github_oauth: OauthCredentials { - enabled: env - .komodo_github_oauth_enabled - .unwrap_or(config.github_oauth.enabled), - id: env - .komodo_github_oauth_id - .unwrap_or(config.github_oauth.id), - secret: env - .komodo_github_oauth_secret - .unwrap_or(config.github_oauth.secret), - }, - github_webhook_app: GithubWebhookAppConfig { - app_id: env - .komodo_github_webhook_app_app_id - .unwrap_or(config.github_webhook_app.app_id), - pk_path: env - .komodo_github_webhook_app_pk_path - .unwrap_or(config.github_webhook_app.pk_path), - installations, - }, - aws: AwsCredentials { - access_key_id: env - .komodo_aws_access_key_id - .unwrap_or(config.aws.access_key_id), - secret_access_key: env - .komodo_aws_secret_access_key - .unwrap_or(config.aws.secret_access_key), - }, - hetzner: HetznerCredentials { - token: env - .komodo_hetzner_token - .unwrap_or(config.hetzner.token), - }, - mongo: MongoConfig { - uri: env.komodo_mongo_uri.or(config.mongo.uri), - address: env.komodo_mongo_address.or(config.mongo.address), - username: env - .komodo_mongo_username - .or(config.mongo.username), - password: env - .komodo_mongo_password - .or(config.mongo.password), - app_name: env - .komodo_mongo_app_name - .unwrap_or(config.mongo.app_name), - db_name: env - .komodo_mongo_db_name - .unwrap_or(config.mongo.db_name), - }, + disable_non_admin_create: env.komodo_disable_non_admin_create + .unwrap_or(config.disable_non_admin_create), + local_auth: env.komodo_local_auth + .unwrap_or(config.local_auth), logging: LogConfig { level: env .komodo_logging_level @@ -195,17 +184,19 @@ pub fn core_config() -> &'static CoreConfig { .unwrap_or(config.logging.stdio), 
otlp_endpoint: env .komodo_logging_otlp_endpoint - .or(config.logging.otlp_endpoint), + .unwrap_or(config.logging.otlp_endpoint), opentelemetry_service_name: env .komodo_logging_opentelemetry_service_name .unwrap_or(config.logging.opentelemetry_service_name), }, + ssl_enabled: env.komodo_ssl_enabled.unwrap_or(config.ssl_enabled), + ssl_key_file: env.komodo_ssl_key_file.unwrap_or(config.ssl_key_file), + ssl_cert_file: env.komodo_ssl_cert_file.unwrap_or(config.ssl_cert_file), // These can't be overridden on env secrets: config.secrets, git_providers: config.git_providers, docker_registries: config.docker_registries, - aws_ecr_registries: config.aws_ecr_registries, } }) } diff --git a/bin/core/src/db.rs b/bin/core/src/db.rs index 34168f4dc..43ff70f3b 100644 --- a/bin/core/src/db.rs +++ b/bin/core/src/db.rs @@ -1,11 +1,10 @@ -use mongo_indexed::{create_index, create_unique_index}; use komodo_client::entities::{ alert::Alert, alerter::Alerter, api_key::ApiKey, build::Build, builder::Builder, - config::core::MongoConfig, + config::core::DatabaseConfig, deployment::Deployment, permission::Permission, procedure::Procedure, @@ -22,11 +21,13 @@ use komodo_client::entities::{ user_group::UserGroup, variable::Variable, }; +use mongo_indexed::{create_index, create_unique_index}; use mungos::{ init::MongoBuilder, mongodb::{Collection, Database}, }; +#[derive(Debug)] pub struct DbClient { pub users: Collection, pub user_groups: Collection, @@ -56,28 +57,33 @@ pub struct DbClient { impl DbClient { pub async fn new( - MongoConfig { + DatabaseConfig { uri, address, username, password, app_name, db_name, - }: &MongoConfig, + }: &DatabaseConfig, ) -> anyhow::Result { let mut client = MongoBuilder::default().app_name(app_name); - match (uri, address, username, password) { - (Some(uri), _, _, _) => { + match ( + !uri.is_empty(), + !address.is_empty(), + !username.is_empty(), + !password.is_empty(), + ) { + (true, _, _, _) => { client = client.uri(uri); } - (_, Some(address), 
Some(username), Some(password)) => { + (_, true, true, true) => { client = client .address(address) .username(username) .password(password); } - (_, Some(address), _, _) => { + (_, true, _, _) => { client = client.address(address); } _ => { diff --git a/bin/core/src/helpers/build.rs b/bin/core/src/helpers/build.rs deleted file mode 100644 index cae7a8080..000000000 --- a/bin/core/src/helpers/build.rs +++ /dev/null @@ -1,49 +0,0 @@ -use async_timing_util::{wait_until_timelength, Timelength}; -use komodo_client::{ - api::write::RefreshBuildCache, entities::user::build_user, -}; -use mungos::find::find_collect; -use resolver_api::Resolve; - -use crate::{ - config::core_config, - state::{db_client, State}, -}; - -pub fn spawn_build_refresh_loop() { - let interval: Timelength = core_config() - .build_poll_interval - .try_into() - .expect("Invalid build poll interval"); - tokio::spawn(async move { - refresh_builds().await; - loop { - wait_until_timelength(interval, 2000).await; - refresh_builds().await; - } - }); -} - -async fn refresh_builds() { - let Ok(builds) = - find_collect(&db_client().await.builds, None, None) - .await - .inspect_err(|e| { - warn!("failed to get builds from db in refresh task | {e:#}") - }) - else { - return; - }; - for build in builds { - State - .resolve( - RefreshBuildCache { build: build.id }, - build_user().clone(), - ) - .await - .inspect_err(|e| { - warn!("failed to refresh build cache in refresh task | build: {} | {e:#}", build.name) - }) - .ok(); - } -} diff --git a/bin/core/src/helpers/builder.rs b/bin/core/src/helpers/builder.rs index fc28c5d58..abd24fd8e 100644 --- a/bin/core/src/helpers/builder.rs +++ b/bin/core/src/helpers/builder.rs @@ -93,7 +93,9 @@ async fn get_aws_builder( update_update(update.clone()).await?; - let periphery_address = format!("http://{ip}:{}", config.port); + let protocol = if config.use_https { "https" } else { "http" }; + let periphery_address = + format!("{protocol}://{ip}:{}", config.port); let periphery = 
PeripheryClient::new(&periphery_address, &core_config().passkey); @@ -191,6 +193,7 @@ pub fn start_aws_builder_log( assign_public_ip, security_group_ids, use_public_ip, + use_https, .. } = config; @@ -206,6 +209,7 @@ pub fn start_aws_builder_log( format!("{}: {readable_sec_group_ids}", muted("security groups")), format!("{}: {assign_public_ip}", muted("assign public ip")), format!("{}: {use_public_ip}", muted("use public ip")), + format!("{}: {use_https}", muted("use https")), ] .join("\n") } diff --git a/bin/core/src/helpers/interpolate.rs b/bin/core/src/helpers/interpolate.rs index beed62cf6..7154ad90e 100644 --- a/bin/core/src/helpers/interpolate.rs +++ b/bin/core/src/helpers/interpolate.rs @@ -1,60 +1,10 @@ use std::collections::HashSet; use anyhow::Context; -use komodo_client::entities::{ - update::Update, EnvironmentVar, SystemCommand, -}; +use komodo_client::entities::{update::Update, SystemCommand}; use super::query::VariablesAndSecrets; -pub fn interpolate_variables_secrets_into_environment( - VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets, - environment: &mut Vec, - global_replacers: &mut HashSet<(String, String)>, - secret_replacers: &mut HashSet<(String, String)>, -) -> anyhow::Result<()> { - for env in environment { - if env.value.is_empty() { - continue; - } - - // first pass - global variables - let (res, more_replacers) = svi::interpolate_variables( - &env.value, - variables, - svi::Interpolator::DoubleBrackets, - false, - ) - .with_context(|| { - format!( - "failed to interpolate global variables into env var '{}'", - env.variable - ) - })?; - global_replacers.extend(more_replacers); - - // second pass - core secrets - let (res, more_replacers) = svi::interpolate_variables( - &res, - secrets, - svi::Interpolator::DoubleBrackets, - false, - ) - .with_context(|| { - format!( - "failed to interpolate core secrets into env var '{}'", - env.variable - ) - })?; - secret_replacers.extend(more_replacers); - - // set env value with the 
result - env.value = res; - } - - Ok(()) -} - pub fn interpolate_variables_secrets_into_extra_args( VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets, extra_args: &mut Vec, @@ -101,28 +51,24 @@ pub fn interpolate_variables_secrets_into_extra_args( Ok(()) } -pub fn interpolate_variables_secrets_into_container_command( +pub fn interpolate_variables_secrets_into_string( VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets, - command: &mut String, + target: &mut String, global_replacers: &mut HashSet<(String, String)>, secret_replacers: &mut HashSet<(String, String)>, ) -> anyhow::Result<()> { - if command.is_empty() { + if target.is_empty() { return Ok(()); } // first pass - global variables let (res, more_replacers) = svi::interpolate_variables( - command, + target, variables, svi::Interpolator::DoubleBrackets, false, ) - .with_context(|| { - format!( - "failed to interpolate global variables into command '{command}'", - ) - })?; + .context("Failed to interpolate core variables")?; global_replacers.extend(more_replacers); // second pass - core secrets @@ -132,15 +78,11 @@ pub fn interpolate_variables_secrets_into_container_command( svi::Interpolator::DoubleBrackets, false, ) - .with_context(|| { - format!( - "failed to interpolate core secrets into command '{command}'", - ) - })?; + .context("Failed to interpolate core secrets")?; secret_replacers.extend(more_replacers); // set command with the result - *command = res; + *target = res; Ok(()) } diff --git a/bin/core/src/helpers/mod.rs b/bin/core/src/helpers/mod.rs index 8f733634e..8e4776039 100644 --- a/bin/core/src/helpers/mod.rs +++ b/bin/core/src/helpers/mod.rs @@ -2,7 +2,6 @@ use std::{str::FromStr, time::Duration}; use anyhow::{anyhow, Context}; use futures::future::join_all; -use mongo_indexed::Document; use komodo_client::{ api::write::CreateServer, entities::{ @@ -15,6 +14,7 @@ use komodo_client::{ ResourceTarget, }, }; +use mongo_indexed::Document; use mungos::{ find::find_collect, 
mongodb::bson::{doc, oid::ObjectId, to_document, Bson}, @@ -30,8 +30,6 @@ use crate::{ }; pub mod action_state; -pub mod alert; -pub mod build; pub mod builder; pub mod cache; pub mod channel; @@ -39,9 +37,6 @@ pub mod interpolate; pub mod procedure; pub mod prune; pub mod query; -pub mod repo; -pub mod stack; -pub mod sync; pub mod update; // pub mod resource; @@ -79,7 +74,6 @@ pub async fn git_token( mut on_https_found: impl FnMut(bool), ) -> anyhow::Result> { let db_provider = db_client() - .await .git_accounts .find_one(doc! { "domain": provider_domain, "username": account_username }) .await @@ -111,7 +105,6 @@ pub async fn registry_token( account_username: &str, ) -> anyhow::Result> { let provider = db_client() - .await .registry_accounts .find_one(doc! { "domain": provider_domain, "username": account_username }) .await @@ -165,7 +158,6 @@ pub async fn create_permission( } let target: ResourceTarget = target.into(); if let Err(e) = db_client() - .await .permissions .insert_one(Permission { id: Default::default(), @@ -215,7 +207,6 @@ async fn startup_in_progress_update_cleanup() { // This static log won't fail to serialize, unwrap ok. let log = to_document(&log).unwrap(); if let Err(e) = db_client() - .await .updates .update_many( doc! { "status": "InProgress" }, @@ -237,7 +228,7 @@ async fn startup_in_progress_update_cleanup() { /// Run on startup, ensure open alerts pointing to invalid resources are closed. async fn startup_open_alert_cleanup() { - let db = db_client().await; + let db = db_client(); let Ok(alerts) = find_collect(&db.alerts, doc! 
{ "resolved": false }, None) .await @@ -290,30 +281,29 @@ async fn startup_open_alert_cleanup() { } /// Ensures a default server exists with the defined address -pub async fn ensure_server() { - let ensure_server = &core_config().ensure_server; - if ensure_server.is_empty() { +pub async fn ensure_first_server() { + let first_server = &core_config().first_server; + if first_server.is_empty() { return; } - let db = db_client().await; + let db = db_client(); let Ok(server) = db .servers - .find_one(doc! { "config.address": ensure_server }) + .find_one(Document::new()) .await - .inspect_err(|e| error!("Failed to initialize 'ensure_server'. Failed to query db. {e:?}")) + .inspect_err(|e| error!("Failed to initialize 'first_server'. Failed to query db. {e:?}")) else { return; }; if server.is_some() { return; } - if let Err(e) = State .resolve( CreateServer { name: format!("server-{}", random_string(5)), config: PartialServerConfig { - address: Some(ensure_server.to_string()), + address: Some(first_server.to_string()), enabled: Some(true), ..Default::default() }, @@ -322,6 +312,6 @@ pub async fn ensure_server() { ) .await { - error!("Failed to initialize 'ensure_server'. Failed to CreateServer. {e:?}"); + error!("Failed to initialize 'first_server'. Failed to CreateServer. 
{e:?}"); } } diff --git a/bin/core/src/helpers/procedure.rs b/bin/core/src/helpers/procedure.rs index a59a16f62..fbdf784a7 100644 --- a/bin/core/src/helpers/procedure.rs +++ b/bin/core/src/helpers/procedure.rs @@ -34,7 +34,7 @@ pub async fn execute_procedure( add_line_to_update( update, &format!( - "{}: executing stage: '{}'", + "{}: Executing stage: '{}'", muted("INFO"), bold(&stage.name) ), @@ -55,7 +55,7 @@ pub async fn execute_procedure( .await .with_context(|| { format!( - "failed stage '{}' execution after {:?}", + "Failed stage '{}' execution after {:?}", bold(&stage.name), timer.elapsed(), ) @@ -65,7 +65,7 @@ pub async fn execute_procedure( &format!( "{}: {} stage '{}' execution in {:?}", muted("INFO"), - colored("finished", Color::Green), + colored("Finished", Color::Green), bold(&stage.name), timer.elapsed() ), @@ -76,6 +76,7 @@ pub async fn execute_procedure( Ok(()) } +#[allow(dependency_on_unit_never_type_fallback)] #[instrument(skip(update))] async fn execute_stage( executions: Vec, @@ -87,11 +88,11 @@ async fn execute_stage( let now = Instant::now(); add_line_to_update( update, - &format!("{}: executing: {execution:?}", muted("INFO")), + &format!("{}: Executing: {execution:?}", muted("INFO")), ) .await; let fail_log = format!( - "{}: failed on {execution:?}", + "{}: Failed on {execution:?}", colored("ERROR", Color::Red) ); let res = @@ -103,7 +104,7 @@ async fn execute_stage( &format!( "{}: {} execution in {:?}: {execution:?}", muted("INFO"), - colored("finished", Color::Green), + colored("Finished", Color::Green), now.elapsed() ), ) @@ -140,7 +141,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RunProcedure"), + .context("Failed at RunProcedure"), &update_id, ) .await? @@ -156,7 +157,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RunBuild"), + .context("Failed at RunBuild"), &update_id, ) .await? 
@@ -172,7 +173,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at CancelBuild"), + .context("Failed at CancelBuild"), &update_id, ) .await? @@ -188,7 +189,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at Deploy"), + .context("Failed at Deploy"), &update_id, ) .await? @@ -204,7 +205,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StartDeployment"), + .context("Failed at StartDeployment"), &update_id, ) .await? @@ -220,7 +221,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RestartDeployment"), + .context("Failed at RestartDeployment"), &update_id, ) .await? @@ -236,7 +237,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PauseDeployment"), + .context("Failed at PauseDeployment"), &update_id, ) .await? @@ -252,7 +253,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at UnpauseDeployment"), + .context("Failed at UnpauseDeployment"), &update_id, ) .await? @@ -268,7 +269,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StopDeployment"), + .context("Failed at StopDeployment"), &update_id, ) .await? @@ -284,7 +285,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RemoveDeployment"), + .context("Failed at RemoveDeployment"), &update_id, ) .await? @@ -300,7 +301,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at CloneRepo"), + .context("Failed at CloneRepo"), &update_id, ) .await? @@ -316,7 +317,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PullRepo"), + .context("Failed at PullRepo"), &update_id, ) .await? 
@@ -332,7 +333,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at BuildRepo"), + .context("Failed at BuildRepo"), &update_id, ) .await? @@ -348,7 +349,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at CancelRepoBuild"), + .context("Failed at CancelRepoBuild"), &update_id, ) .await? @@ -364,7 +365,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StartContainer"), + .context("Failed at StartContainer"), &update_id, ) .await? @@ -380,7 +381,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RestartContainer"), + .context("Failed at RestartContainer"), &update_id, ) .await? @@ -396,7 +397,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PauseContainer"), + .context("Failed at PauseContainer"), &update_id, ) .await? @@ -412,7 +413,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at UnpauseContainer"), + .context("Failed at UnpauseContainer"), &update_id, ) .await? @@ -428,7 +429,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StopContainer"), + .context("Failed at StopContainer"), &update_id, ) .await? @@ -444,7 +445,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RemoveContainer"), + .context("Failed at RemoveContainer"), &update_id, ) .await? @@ -460,7 +461,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StartAllContainers"), + .context("Failed at StartAllContainers"), &update_id, ) .await? @@ -476,7 +477,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RestartAllContainers"), + .context("Failed at RestartAllContainers"), &update_id, ) .await? 
@@ -492,7 +493,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PauseAllContainers"), + .context("Failed at PauseAllContainers"), &update_id, ) .await? @@ -508,7 +509,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at UnpauseAllContainers"), + .context("Failed at UnpauseAllContainers"), &update_id, ) .await? @@ -524,7 +525,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StopAllContainers"), + .context("Failed at StopAllContainers"), &update_id, ) .await? @@ -540,7 +541,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneContainers"), + .context("Failed at PruneContainers"), &update_id, ) .await? @@ -556,7 +557,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at DeleteNetwork"), + .context("Failed at DeleteNetwork"), &update_id, ) .await? @@ -572,7 +573,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneNetworks"), + .context("Failed at PruneNetworks"), &update_id, ) .await? @@ -588,7 +589,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at DeleteImage"), + .context("Failed at DeleteImage"), &update_id, ) .await? @@ -604,7 +605,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneImages"), + .context("Failed at PruneImages"), &update_id, ) .await? @@ -620,7 +621,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at DeleteVolume"), + .context("Failed at DeleteVolume"), &update_id, ) .await? @@ -636,7 +637,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneVolumes"), + .context("Failed at PruneVolumes"), &update_id, ) .await? 
@@ -652,7 +653,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneDockerBuilders"), + .context("Failed at PruneDockerBuilders"), &update_id, ) .await? @@ -668,7 +669,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneBuildx"), + .context("Failed at PruneBuildx"), &update_id, ) .await? @@ -684,7 +685,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PruneSystem"), + .context("Failed at PruneSystem"), &update_id, ) .await? @@ -700,7 +701,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RunSync"), + .context("Failed at RunSync"), &update_id, ) .await? @@ -716,7 +717,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at DeployStack"), + .context("Failed at DeployStack"), &update_id, ) .await? @@ -732,7 +733,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StartStack"), + .context("Failed at StartStack"), &update_id, ) .await? @@ -748,7 +749,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at RestartStack"), + .context("Failed at RestartStack"), &update_id, ) .await? @@ -764,7 +765,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at PauseStack"), + .context("Failed at PauseStack"), &update_id, ) .await? @@ -780,7 +781,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at UnpauseStack"), + .context("Failed at UnpauseStack"), &update_id, ) .await? @@ -796,7 +797,7 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at StopStack"), + .context("Failed at StopStack"), &update_id, ) .await? 
@@ -812,16 +813,14 @@ async fn execute_execution( State .resolve(req, (user, update)) .await - .context("failed at DestroyStack"), + .context("Failed at DestroyStack"), &update_id, ) .await? } Execution::Sleep(req) => { - tokio::time::sleep(Duration::from_millis( - req.duration_ms as u64, - )) - .await; + let duration = Duration::from_millis(req.duration_ms as u64); + tokio::time::sleep(duration).await; Update { success: true, ..Default::default() @@ -851,9 +850,9 @@ async fn handle_resolve_result( let log = Log::error("execution error", format_serror(&e.into())); let mut update = - find_one_by_id(&db_client().await.updates, update_id) + find_one_by_id(&db_client().updates, update_id) .await - .context("failed to query to db")? + .context("Failed to query to db")? .context("no update exists with given id")?; update.logs.push(log); update.finalize(); @@ -873,6 +872,6 @@ async fn add_line_to_update(update: &Mutex, line: &str) { let update = lock.clone(); drop(lock); if let Err(e) = update_update(update).await { - error!("failed to update an update during procedure | {e:#}"); + error!("Failed to update an update during procedure | {e:#}"); }; } diff --git a/bin/core/src/helpers/prune.rs b/bin/core/src/helpers/prune.rs index 669f08ab5..fda502f3b 100644 --- a/bin/core/src/helpers/prune.rs +++ b/bin/core/src/helpers/prune.rs @@ -30,7 +30,7 @@ pub fn spawn_prune_loop() { } async fn prune_images() -> anyhow::Result<()> { - let futures = find_collect(&db_client().await.servers, None, None) + let futures = find_collect(&db_client().servers, None, None) .await .context("failed to get servers from db")? .into_iter() @@ -66,13 +66,14 @@ async fn prune_stats() -> anyhow::Result<()> { - core_config().keep_stats_for_days as u128 * ONE_DAY_MS) as i64; let res = db_client() - .await .stats .delete_many(doc! 
{ "ts": { "$lt": delete_before_ts } }) .await?; - info!("deleted {} stats from db", res.deleted_count); + if res.deleted_count > 0 { + info!("deleted {} stats from db", res.deleted_count); + } Ok(()) } @@ -84,12 +85,13 @@ async fn prune_alerts() -> anyhow::Result<()> { - core_config().keep_alerts_for_days as u128 * ONE_DAY_MS) as i64; let res = db_client() - .await .alerts .delete_many(doc! { "ts": { "$lt": delete_before_ts } }) .await?; - info!("deleted {} alerts from db", res.deleted_count); + if res.deleted_count > 0 { + info!("deleted {} alerts from db", res.deleted_count); + } Ok(()) } diff --git a/bin/core/src/helpers/query.rs b/bin/core/src/helpers/query.rs index 90f10fe1f..9913e75da 100644 --- a/bin/core/src/helpers/query.rs +++ b/bin/core/src/helpers/query.rs @@ -32,11 +32,10 @@ use mungos::{ use crate::{ config::core_config, resource::{self, get_user_permission_on_resource}, + stack::compose_container_match_regex, state::{db_client, deployment_status_cache, stack_status_cache}, }; -use super::stack::compose_container_match_regex; - // user: Id or username #[instrument(level = "debug")] pub async fn get_user(user: &str) -> anyhow::Result { @@ -44,7 +43,6 @@ pub async fn get_user(user: &str) -> anyhow::Result { return Ok(user); } db_client() - .await .users .find_one(id_or_username_filter(user)) .await @@ -184,7 +182,6 @@ pub async fn get_tag(id_or_name: &str) -> anyhow::Result { Err(_) => doc! { "name": id_or_name }, }; db_client() - .await .tags .find_one(query) .await @@ -207,7 +204,7 @@ pub async fn get_tag_check_owner( pub async fn get_id_to_tags( filter: impl Into>, ) -> anyhow::Result> { - let res = find_collect(&db_client().await.tags, filter, None) + let res = find_collect(&db_client().tags, filter, None) .await .context("failed to query db for tags")? .into_iter() @@ -221,7 +218,7 @@ pub async fn get_user_user_groups( user_id: &str, ) -> anyhow::Result> { find_collect( - &db_client().await.user_groups, + &db_client().user_groups, doc! 
{ "users": user_id }, @@ -315,7 +312,6 @@ pub fn id_or_username_filter(id_or_username: &str) -> Document { pub async fn get_variable(name: &str) -> anyhow::Result { db_client() - .await .variables .find_one(doc! { "name": &name }) .await @@ -331,7 +327,6 @@ pub async fn get_latest_update( operation: Operation, ) -> anyhow::Result> { db_client() - .await .updates .find_one(doc! { "target.type": resource_type.as_ref(), @@ -354,10 +349,9 @@ pub struct VariablesAndSecrets { pub async fn get_variables_and_secrets( ) -> anyhow::Result { - let variables = - find_collect(&db_client().await.variables, None, None) - .await - .context("failed to get all variables from db")?; + let variables = find_collect(&db_client().variables, None, None) + .await + .context("failed to get all variables from db")?; let mut secrets = core_config().secrets.clone(); // extend secrets with secret variables diff --git a/bin/core/src/helpers/repo.rs b/bin/core/src/helpers/repo.rs deleted file mode 100644 index 1382f8344..000000000 --- a/bin/core/src/helpers/repo.rs +++ /dev/null @@ -1,48 +0,0 @@ -use async_timing_util::{wait_until_timelength, Timelength}; -use komodo_client::{ - api::write::RefreshRepoCache, entities::user::repo_user, -}; -use mungos::find::find_collect; -use resolver_api::Resolve; - -use crate::{ - config::core_config, - state::{db_client, State}, -}; - -pub fn spawn_repo_refresh_loop() { - let interval: Timelength = core_config() - .repo_poll_interval - .try_into() - .expect("Invalid repo poll interval"); - tokio::spawn(async move { - refresh_repos().await; - loop { - wait_until_timelength(interval, 1000).await; - refresh_repos().await; - } - }); -} - -async fn refresh_repos() { - let Ok(repos) = find_collect(&db_client().await.repos, None, None) - .await - .inspect_err(|e| { - warn!("failed to get repos from db in refresh task | {e:#}") - }) - else { - return; - }; - for repo in repos { - State - .resolve( - RefreshRepoCache { repo: repo.id }, - repo_user().clone(), - ) - 
.await - .inspect_err(|e| { - warn!("failed to refresh repo cache in refresh task | repo: {} | {e:#}", repo.name) - }) - .ok(); - } -} diff --git a/bin/core/src/helpers/stack/mod.rs b/bin/core/src/helpers/stack/mod.rs deleted file mode 100644 index b37c62b53..000000000 --- a/bin/core/src/helpers/stack/mod.rs +++ /dev/null @@ -1,101 +0,0 @@ -use anyhow::{anyhow, Context}; -use async_timing_util::{wait_until_timelength, Timelength}; -use komodo_client::{ - api::write::RefreshStackCache, - entities::{ - permission::PermissionLevel, - server::{Server, ServerState}, - stack::Stack, - user::{stack_user, User}, - }, -}; -use mungos::find::find_collect; -use regex::Regex; -use resolver_api::Resolve; - -use crate::{ - config::core_config, - resource, - state::{db_client, State}, -}; - -use super::query::get_server_with_state; - -pub mod execute; -pub mod remote; -pub mod services; - -pub fn spawn_stack_refresh_loop() { - let interval: Timelength = core_config() - .stack_poll_interval - .try_into() - .expect("Invalid stack poll interval"); - tokio::spawn(async move { - refresh_stacks().await; - loop { - wait_until_timelength(interval, 3000).await; - refresh_stacks().await; - } - }); -} - -async fn refresh_stacks() { - let Ok(stacks) = - find_collect(&db_client().await.stacks, None, None) - .await - .inspect_err(|e| { - warn!("failed to get stacks from db in refresh task | {e:#}") - }) - else { - return; - }; - for stack in stacks { - State - .resolve( - RefreshStackCache { stack: stack.id }, - stack_user().clone(), - ) - .await - .inspect_err(|e| { - warn!("failed to refresh stack cache in refresh task | stack: {} | {e:#}", stack.name) - }) - .ok(); - } -} - -pub async fn get_stack_and_server( - stack: &str, - user: &User, - permission_level: PermissionLevel, - block_if_server_unreachable: bool, -) -> anyhow::Result<(Stack, Server)> { - let stack = resource::get_check_permissions::( - stack, - user, - permission_level, - ) - .await?; - - if stack.config.server_id.is_empty() 
{ - return Err(anyhow!("Stack has no server configured")); - } - - let (server, status) = - get_server_with_state(&stack.config.server_id).await?; - if block_if_server_unreachable && status != ServerState::Ok { - return Err(anyhow!( - "cannot send action when server is unreachable or disabled" - )); - } - - Ok((stack, server)) -} - -pub fn compose_container_match_regex( - container_name: &str, -) -> anyhow::Result { - let regex = format!("^{container_name}-?[0-9]*$"); - Regex::new(®ex).with_context(|| { - format!("failed to construct valid regex from {regex}") - }) -} diff --git a/bin/core/src/helpers/sync/file.rs b/bin/core/src/helpers/sync/file.rs deleted file mode 100644 index 35b386213..000000000 --- a/bin/core/src/helpers/sync/file.rs +++ /dev/null @@ -1,84 +0,0 @@ -use std::{fs, path::Path}; - -use anyhow::{anyhow, Context}; -use formatting::{colored, muted, Color}; -use komodo_client::entities::{toml::ResourcesToml, update::Log}; -use serde::de::DeserializeOwned; - -pub fn read_resources( - path: &Path, -) -> anyhow::Result<(ResourcesToml, Log)> { - let mut res = ResourcesToml::default(); - let mut log = - format!("{}: reading resources from {path:?}", muted("INFO")); - read_resources_recursive(path, &mut res, &mut log).with_context( - || format!("failed to read resources from {path:?}"), - )?; - Ok((res, Log::simple("read remote resources", log))) -} - -fn read_resources_recursive( - path: &Path, - resources: &mut ResourcesToml, - log: &mut String, -) -> anyhow::Result<()> { - let res = - fs::metadata(path).context("failed to get path metadata")?; - if res.is_file() { - if !path - .extension() - .map(|ext| ext == "toml") - .unwrap_or_default() - { - return Ok(()); - } - let more = parse_toml_file::(path) - .context("failed to parse resource file")?; - - log.push('\n'); - log.push_str(&format!( - "{}: {} from {}", - muted("INFO"), - colored("adding resources", Color::Green), - colored(path.display(), Color::Blue) - )); - - 
resources.servers.extend(more.servers); - resources.deployments.extend(more.deployments); - resources.stacks.extend(more.stacks); - resources.builds.extend(more.builds); - resources.repos.extend(more.repos); - resources.procedures.extend(more.procedures); - resources.alerters.extend(more.alerters); - resources.builders.extend(more.builders); - resources.server_templates.extend(more.server_templates); - resources.resource_syncs.extend(more.resource_syncs); - resources.user_groups.extend(more.user_groups); - resources.variables.extend(more.variables); - - Ok(()) - } else if res.is_dir() { - let directory = fs::read_dir(path) - .context("failed to read directory contents")?; - for entry in directory.into_iter().flatten() { - let path = entry.path(); - read_resources_recursive(&path, resources, log).with_context( - || format!("failed to read resources from {path:?}"), - )?; - } - Ok(()) - } else { - Err(anyhow!("resources path is neither file nor directory")) - } -} - -fn parse_toml_file( - path: impl AsRef, -) -> anyhow::Result { - let contents = std::fs::read_to_string(path) - .context("failed to read file contents")?; - toml::from_str(&contents) - // the error without this comes through with multiple lines (\n) and looks bad - .map_err(|e| anyhow!("{e:#}")) - .context("failed to parse toml contents") -} diff --git a/bin/core/src/helpers/sync/mod.rs b/bin/core/src/helpers/sync/mod.rs deleted file mode 100644 index 6f32ef033..000000000 --- a/bin/core/src/helpers/sync/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -use async_timing_util::{wait_until_timelength, Timelength}; -use komodo_client::{ - api::write::RefreshResourceSyncPending, entities::user::sync_user, -}; -use mungos::find::find_collect; -use resolver_api::Resolve; - -use crate::{ - config::core_config, - state::{db_client, State}, -}; - -// pub mod deployment; -pub mod deploy; -pub mod remote; -pub mod resource; -pub mod user_groups; -pub mod variables; - -mod file; -mod resources; - -pub fn 
spawn_sync_refresh_loop() { - let interval: Timelength = core_config() - .sync_poll_interval - .try_into() - .expect("Invalid sync poll interval"); - tokio::spawn(async move { - refresh_syncs().await; - loop { - wait_until_timelength(interval, 0).await; - refresh_syncs().await; - } - }); -} - -async fn refresh_syncs() { - let Ok(syncs) = - find_collect(&db_client().await.resource_syncs, None, None) - .await - .inspect_err(|e| { - warn!( - "failed to get resource syncs from db in refresh task | {e:#}" - ) - }) - else { - return; - }; - for sync in syncs { - if sync.config.repo.is_empty() { - continue; - } - State - .resolve( - RefreshResourceSyncPending { sync: sync.id }, - sync_user().clone(), - ) - .await - .inspect_err(|e| { - warn!("failed to refresh resource sync in refresh task | sync: {} | {e:#}", sync.name) - }) - .ok(); - } -} diff --git a/bin/core/src/helpers/sync/remote.rs b/bin/core/src/helpers/sync/remote.rs deleted file mode 100644 index 890aeb393..000000000 --- a/bin/core/src/helpers/sync/remote.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::fs; - -use anyhow::{anyhow, Context}; -use komodo_client::entities::{ - sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs, -}; - -use crate::{ - config::core_config, - helpers::{git_token, random_string}, - state::resource_sync_lock_cache, -}; - -pub async fn get_remote_resources( - sync: &ResourceSync, -) -> anyhow::Result<( - anyhow::Result, - Vec, - // commit short hash - String, - // commit message - String, -)> { - let mut clone_args: CloneArgs = sync.into(); - - let config = core_config(); - - let access_token = match (&clone_args.account, &clone_args.provider) - { - (None, _) => None, - (Some(_), None) => { - return Err(anyhow!( - "Account is configured, but provider is empty" - )) - } - (Some(username), Some(provider)) => { - git_token(provider, username, |https| clone_args.https = https) - .await - .with_context( - || format!("Failed to get git token in call to db. Stopping run. 
| {provider} | {username}"), - )? - } - }; - - fs::create_dir_all(&config.repo_directory) - .context("failed to create sync directory")?; - - // lock simultaneous access to same directory - let lock = resource_sync_lock_cache() - .get_or_insert_default(&sync.id) - .await; - let _lock = lock.lock().await; - - let repo_path = config.repo_directory.join(random_string(10)); - // This overrides any other method of determining clone path. - clone_args.destination = Some(repo_path.display().to_string()); - - // Don't want to run these on core. - clone_args.on_clone = None; - clone_args.on_pull = None; - - let (mut logs, hash, message, _) = git::clone( - clone_args, - &config.repo_directory, - access_token, - &[], - "", - None, - &[], - ) - .await - .context("failed to clone resource repo")?; - - let hash = hash.context("failed to get commit hash")?; - let message = - message.context("failed to get commit hash message")?; - - let resource_path = repo_path.join(&sync.config.resource_path); - - let res = super::file::read_resources(&resource_path).map( - |(resources, log)| { - logs.push(log); - resources - }, - ); - - if repo_path.exists() { - if let Err(e) = std::fs::remove_dir_all(&repo_path) { - warn!("failed to remove sync repo directory | {e:?}") - } - } - - Ok((res, logs, hash, message)) -} diff --git a/bin/core/src/helpers/update.rs b/bin/core/src/helpers/update.rs index a7965abd6..4b91bfa15 100644 --- a/bin/core/src/helpers/update.rs +++ b/bin/core/src/helpers/update.rs @@ -44,7 +44,6 @@ pub async fn add_update( mut update: Update, ) -> anyhow::Result { update.id = db_client() - .await .updates .insert_one(&update) .await @@ -64,7 +63,6 @@ pub async fn add_update_without_send( update: &Update, ) -> anyhow::Result { let id = db_client() - .await .updates .insert_one(update) .await @@ -78,7 +76,7 @@ pub async fn add_update_without_send( #[instrument(level = "debug")] pub async fn update_update(update: Update) -> anyhow::Result<()> { - 
update_one_by_id(&db_client().await.updates, &update.id, mungos::update::Update::Set(to_document(&update)?), None) + update_one_by_id(&db_client().updates, &update.id, mungos::update::Update::Set(to_document(&update)?), None) .await .context("failed to update the update on db. the update build process was deleted")?; let update = update_list_item(update).await?; @@ -93,7 +91,7 @@ async fn update_list_item( let username = if User::is_service_user(&update.operator) { update.operator.clone() } else { - find_one_by_id(&db_client().await.users, &update.operator) + find_one_by_id(&db_client().users, &update.operator) .await .context("failed to query mongo for user")? .with_context(|| { diff --git a/bin/core/src/main.rs b/bin/core/src/main.rs index ed3c11f9a..ea65c1504 100644 --- a/bin/core/src/main.rs +++ b/bin/core/src/main.rs @@ -5,13 +5,15 @@ use std::{net::SocketAddr, str::FromStr}; use anyhow::Context; use axum::Router; +use axum_server::tls_openssl::OpenSSLConfig; use tower_http::{ cors::{Any, CorsLayer}, services::{ServeDir, ServeFile}, }; -use crate::config::{core_config, frontend_path}; +use crate::config::core_config; +mod alert; mod api; mod auth; mod cloud; @@ -21,37 +23,45 @@ mod helpers; mod listener; mod monitor; mod resource; +mod stack; mod state; +mod sync; mod ws; async fn app() -> anyhow::Result<()> { dotenvy::dotenv().ok(); let config = core_config(); logger::init(&config.logging)?; - info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION")); - info!("config: {:?}", config.sanitized()); - // includes init db_client check to crash on db init failure - helpers::startup_cleanup().await; - // Maybe initialize default server in All In One deployment. 
- helpers::ensure_server().await; + info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION")); + info!("{:?}", config.sanitized()); + + tokio::join!( + // Init db_client check to crash on db init failure + state::init_db_client(), + // Init default OIDC client (defined in config / env vars / compose secret file) + auth::oidc::client::init_default_oidc_client() + ); + tokio::join!( + // Maybe initialize first server + helpers::ensure_first_server(), + // Cleanup open updates / invalid alerts + helpers::startup_cleanup(), + ); // init jwt client to crash on failure state::jwt_client(); // Spawn tasks monitor::spawn_monitor_loop(); - helpers::prune::spawn_prune_loop(); - helpers::stack::spawn_stack_refresh_loop(); - helpers::sync::spawn_sync_refresh_loop(); - helpers::build::spawn_build_refresh_loop(); - helpers::repo::spawn_repo_refresh_loop(); + resource::spawn_resource_refresh_loop(); resource::spawn_build_state_refresh_loop(); resource::spawn_repo_state_refresh_loop(); resource::spawn_procedure_state_refresh_loop(); resource::spawn_resource_sync_state_refresh_loop(); + helpers::prune::spawn_prune_loop(); // Setup static frontend services - let frontend_path = frontend_path(); + let frontend_path = &config.frontend_path; let frontend_index = ServeFile::new(format!("{frontend_path}/index.html")); let serve_dir = ServeDir::new(frontend_path) @@ -67,19 +77,29 @@ async fn app() -> anyhow::Result<()> { .nest("/ws", ws::router()) .nest_service("/", serve_dir) .fallback_service(frontend_index) - .layer(cors()?); + .layer(cors()?) 
+ .into_make_service(); let socket_addr = SocketAddr::from_str(&format!("0.0.0.0:{}", core_config().port)) .context("failed to parse socket addr")?; - let listener = tokio::net::TcpListener::bind(&socket_addr) - .await - .context("failed to bind to tcp listener")?; - - info!("Komodo Core listening on {socket_addr}"); - - axum::serve(listener, app).await.context("server crashed")?; + if config.ssl_enabled { + info!("🔒 Core SSL Enabled"); + info!("Komodo Core starting on https://{socket_addr}"); + let ssl_config = OpenSSLConfig::from_pem_file( + &config.ssl_cert_file, + &config.ssl_key_file, + ) + .context("Failed to parse ssl ")?; + axum_server::bind_openssl(socket_addr, ssl_config) + .serve(app) + .await? + } else { + info!("🔓 Core SSL Disabled"); + info!("Komodo Core starting on http://{socket_addr}"); + axum_server::bind(socket_addr).serve(app).await? + } Ok(()) } diff --git a/bin/core/src/monitor/alert/deployment.rs b/bin/core/src/monitor/alert/deployment.rs index 640a10fac..bf296a01d 100644 --- a/bin/core/src/monitor/alert/deployment.rs +++ b/bin/core/src/monitor/alert/deployment.rs @@ -7,8 +7,8 @@ use komodo_client::entities::{ }; use crate::{ - helpers::alert::send_alerts, monitor::deployment_status_cache, - resource, state::db_client, + alert::send_alerts, monitor::deployment_status_cache, resource, + state::db_client, }; #[instrument(level = "debug")] @@ -73,7 +73,7 @@ pub async fn alert_deployments( return; } send_alerts(&alerts).await; - let res = db_client().await.alerts.insert_many(alerts).await; + let res = db_client().alerts.insert_many(alerts).await; if let Err(e) = res { error!("failed to record deployment status alerts to db | {e:#}"); } diff --git a/bin/core/src/monitor/alert/server.rs b/bin/core/src/monitor/alert/server.rs index e5c193fc3..aa0210a17 100644 --- a/bin/core/src/monitor/alert/server.rs +++ b/bin/core/src/monitor/alert/server.rs @@ -2,13 +2,13 @@ use std::{collections::HashMap, path::PathBuf, str::FromStr}; use anyhow::Context; use 
derive_variants::ExtractVariant; -use mongo_indexed::Indexed; use komodo_client::entities::{ alert::{Alert, AlertData, AlertDataVariant, SeverityLevel}, komodo_timestamp, optional_string, server::{ServerListItem, ServerState}, ResourceTarget, }; +use mongo_indexed::Indexed; use mungos::{ bulk_update::{self, BulkUpdate}, find::find_collect, @@ -16,7 +16,7 @@ use mungos::{ }; use crate::{ - helpers::alert::send_alerts, + alert::send_alerts, state::{db_client, server_status_cache}, }; @@ -366,7 +366,7 @@ async fn open_alerts(alerts: &[(Alert, SendAlerts)]) { return; } - let db = db_client().await; + let db = db_client(); let open = || async { let ids = db @@ -431,7 +431,7 @@ async fn update_alerts(alerts: &[(Alert, SendAlerts)]) { }).collect::>(); bulk_update::bulk_update( - &db_client().await.db, + &db_client().db, Alert::default_collection_name(), &updates, false, @@ -472,7 +472,6 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) { .collect::>>()?; db_client() - .await .alerts .update_many( doc! { "_id": { "$in": &alert_ids } }, @@ -518,7 +517,7 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) { async fn get_open_alerts( ) -> anyhow::Result<(OpenAlertMap, OpenDiskAlertMap)> { let alerts = find_collect( - &db_client().await.alerts, + &db_client().alerts, doc! 
{ "resolved": false }, None, ) diff --git a/bin/core/src/monitor/alert/stack.rs b/bin/core/src/monitor/alert/stack.rs index 845f53f97..6fa488e4f 100644 --- a/bin/core/src/monitor/alert/stack.rs +++ b/bin/core/src/monitor/alert/stack.rs @@ -7,7 +7,7 @@ use komodo_client::entities::{ }; use crate::{ - helpers::alert::send_alerts, + alert::send_alerts, resource, state::{db_client, stack_status_cache}, }; @@ -72,7 +72,7 @@ pub async fn alert_stacks( return; } send_alerts(&alerts).await; - let res = db_client().await.alerts.insert_many(alerts).await; + let res = db_client().alerts.insert_many(alerts).await; if let Err(e) = res { error!("failed to record stack status alerts to db | {e:#}"); } diff --git a/bin/core/src/monitor/helpers.rs b/bin/core/src/monitor/helpers.rs index cc94d4a81..fcd0e0cc1 100644 --- a/bin/core/src/monitor/helpers.rs +++ b/bin/core/src/monitor/helpers.rs @@ -1,8 +1,14 @@ use komodo_client::entities::{ - alert::SeverityLevel, deployment::{Deployment, DeploymentState}, docker::{ + alert::SeverityLevel, + deployment::{Deployment, DeploymentState}, + docker::{ container::ContainerListItem, image::ImageListItem, network::NetworkListItem, volume::VolumeListItem, - }, repo::Repo, server::{Server, ServerConfig, ServerHealth, ServerState}, stack::{ComposeProject, Stack, StackState}, stats::{SingleDiskUsage, SystemStats} + }, + repo::Repo, + server::{Server, ServerConfig, ServerHealth, ServerState}, + stack::{ComposeProject, Stack, StackState}, + stats::{SingleDiskUsage, SystemStats}, }; use serror::Serror; diff --git a/bin/core/src/monitor/mod.rs b/bin/core/src/monitor/mod.rs index 75ad5393f..925b7e7df 100644 --- a/bin/core/src/monitor/mod.rs +++ b/bin/core/src/monitor/mod.rs @@ -98,21 +98,16 @@ pub fn spawn_monitor_loop() { } async fn refresh_server_cache(ts: i64) { - let servers = match find_collect( - &db_client().await.servers, - None, - None, - ) - .await - { - Ok(servers) => servers, - Err(e) => { - error!( - "failed to get server list (manage status 
cache) | {e:#}" - ); - return; - } - }; + let servers = + match find_collect(&db_client().servers, None, None).await { + Ok(servers) => servers, + Err(e) => { + error!( + "failed to get server list (manage status cache) | {e:#}" + ); + return; + } + }; let futures = servers.into_iter().map(|server| async move { update_cache_for_server(&server).await; }); @@ -124,17 +119,17 @@ async fn refresh_server_cache(ts: i64) { pub async fn update_cache_for_server(server: &Server) { let (deployments, repos, stacks) = tokio::join!( find_collect( - &db_client().await.deployments, + &db_client().deployments, doc! { "config.server_id": &server.id }, None, ), find_collect( - &db_client().await.repos, + &db_client().repos, doc! { "config.server_id": &server.id }, None, ), find_collect( - &db_client().await.stacks, + &db_client().stacks, doc! { "config.server_id": &server.id }, None, ) diff --git a/bin/core/src/monitor/record.rs b/bin/core/src/monitor/record.rs index 942be11d6..d8b7a5eda 100644 --- a/bin/core/src/monitor/record.rs +++ b/bin/core/src/monitor/record.rs @@ -30,7 +30,7 @@ pub async fn record_server_stats(ts: i64) { }) .collect::>(); if !records.is_empty() { - let res = db_client().await.stats.insert_many(records).await; + let res = db_client().stats.insert_many(records).await; if let Err(e) = res { error!("failed to record server stats | {e:#}"); } diff --git a/bin/core/src/monitor/resources.rs b/bin/core/src/monitor/resources.rs index d33cf7a93..c2e40ba9b 100644 --- a/bin/core/src/monitor/resources.rs +++ b/bin/core/src/monitor/resources.rs @@ -6,12 +6,10 @@ use komodo_client::entities::{ }; use crate::{ - helpers::{ - query::get_stack_state_from_containers, - stack::{ - compose_container_match_regex, - services::extract_services_from_stack, - }, + helpers::query::get_stack_state_from_containers, + stack::{ + compose_container_match_regex, + services::extract_services_from_stack, }, state::{deployment_status_cache, stack_status_cache}, }; diff --git 
a/bin/core/src/resource/alerter.rs b/bin/core/src/resource/alerter.rs index cd507a6e5..ca0f42475 100644 --- a/bin/core/src/resource/alerter.rs +++ b/bin/core/src/resource/alerter.rs @@ -27,7 +27,7 @@ impl super::KomodoResource for Alerter { async fn coll( ) -> &'static Collection> { - &db_client().await.alerters + &db_client().alerters } async fn to_list_item( diff --git a/bin/core/src/resource/build.rs b/bin/core/src/resource/build.rs index b4073db2e..6abdc0808 100644 --- a/bin/core/src/resource/build.rs +++ b/bin/core/src/resource/build.rs @@ -8,6 +8,7 @@ use komodo_client::entities::{ PartialBuildConfig, }, builder::Builder, + environment_vars_from_str, permission::PermissionLevel, resource::Resource, update::Update, @@ -20,6 +21,7 @@ use mungos::{ }; use crate::{ + config::core_config, helpers::{empty_or_only_spaces, query::get_latest_update}, state::{action_states, build_state_cache, db_client}, }; @@ -38,7 +40,7 @@ impl super::KomodoResource for Build { async fn coll( ) -> &'static Collection> { - &db_client().await.builds + &db_client().builds } async fn to_list_item( @@ -80,7 +82,9 @@ impl super::KomodoResource for Build { } fn user_can_create(user: &User) -> bool { - user.admin || user.create_build_permissions + user.admin + || (!core_config().disable_non_admin_create + && user.create_build_permissions) } async fn validate_create_config( @@ -152,7 +156,7 @@ pub fn spawn_build_state_refresh_loop() { pub async fn refresh_build_state_cache() { let _ = async { - let builds = find_collect(&db_client().await.builds, None, None) + let builds = find_collect(&db_client().builds, None, None) .await .context("failed to get builds from db")?; let cache = build_state_cache(); @@ -181,11 +185,13 @@ async fn validate_config( config.builder_id = Some(builder.id) } } - if let Some(build_args) = &mut config.build_args { - build_args.retain(|v| { - !empty_or_only_spaces(&v.variable) - && !empty_or_only_spaces(&v.value) - }) + if let Some(build_args) = &config.build_args { + 
environment_vars_from_str(build_args) + .context("Invalid build_args")?; + } + if let Some(secret_args) = &config.secret_args { + environment_vars_from_str(secret_args) + .context("Invalid secret_args")?; } if let Some(extra_args) = &mut config.extra_args { extra_args.retain(|v| !empty_or_only_spaces(v)) @@ -258,7 +264,7 @@ async fn latest_2_build_updates( id: &str, ) -> anyhow::Result<[Option; 2]> { let mut builds = find_collect( - &db_client().await.updates, + &db_client().updates, doc! { "target.type": "Build", "target.id": id, diff --git a/bin/core/src/resource/builder.rs b/bin/core/src/resource/builder.rs index e5c98d4e1..cd608fab6 100644 --- a/bin/core/src/resource/builder.rs +++ b/bin/core/src/resource/builder.rs @@ -33,7 +33,7 @@ impl super::KomodoResource for Builder { async fn coll( ) -> &'static Collection> { - &db_client().await.builders + &db_client().builders } async fn to_list_item( @@ -130,7 +130,6 @@ impl super::KomodoResource for Builder { ) -> anyhow::Result<()> { // remove the builder from any attached builds db_client() - .await .builds .update_many( doc! 
{ "config.builder.params.builder_id": &resource.id }, diff --git a/bin/core/src/resource/deployment.rs b/bin/core/src/resource/deployment.rs index b9bb4ddf6..2397ebbfc 100644 --- a/bin/core/src/resource/deployment.rs +++ b/bin/core/src/resource/deployment.rs @@ -3,11 +3,12 @@ use formatting::format_serror; use komodo_client::entities::{ build::Build, deployment::{ - Deployment, DeploymentConfig, DeploymentConfigDiff, - DeploymentImage, DeploymentListItem, DeploymentListItemInfo, - DeploymentQuerySpecifics, DeploymentState, - PartialDeploymentConfig, + conversions_from_str, Deployment, DeploymentConfig, + DeploymentConfigDiff, DeploymentImage, DeploymentListItem, + DeploymentListItemInfo, DeploymentQuerySpecifics, + DeploymentState, PartialDeploymentConfig, }, + environment_vars_from_str, permission::PermissionLevel, resource::Resource, server::Server, @@ -19,6 +20,7 @@ use mungos::mongodb::Collection; use periphery_client::api::container::RemoveContainer; use crate::{ + config::core_config, helpers::{ empty_or_only_spaces, periphery_client, query::get_deployment_state, @@ -44,7 +46,7 @@ impl super::KomodoResource for Deployment { async fn coll( ) -> &'static Collection> { - &db_client().await.deployments + &db_client().deployments } async fn to_list_item( @@ -86,12 +88,12 @@ impl super::KomodoResource for Deployment { }), image: status .as_ref() - .map(|s| { - s.curr - .container - .as_ref() - .and_then(|c| c.image.clone()) - .unwrap_or_else(|| String::from("Unknown")) + .and_then(|s| { + s.curr.container.as_ref().map(|c| { + c.image + .clone() + .unwrap_or_else(|| String::from("Unknown")) + }) }) .unwrap_or(build_image), server_id: deployment.config.server_id, @@ -115,8 +117,8 @@ impl super::KomodoResource for Deployment { Operation::CreateDeployment } - fn user_can_create(_user: &User) -> bool { - true + fn user_can_create(user: &User) -> bool { + user.admin || !core_config().disable_non_admin_create } async fn validate_create_config( @@ -279,23 +281,15 @@ async 
fn validate_config( }); } } - if let Some(volumes) = &mut config.volumes { - volumes.retain(|v| { - !empty_or_only_spaces(&v.local) - && !empty_or_only_spaces(&v.container) - }) + if let Some(volumes) = &config.volumes { + conversions_from_str(volumes).context("Invalid volumes")?; } - if let Some(ports) = &mut config.ports { - ports.retain(|v| { - !empty_or_only_spaces(&v.local) - && !empty_or_only_spaces(&v.container) - }) + if let Some(ports) = &config.ports { + conversions_from_str(ports).context("Invalid ports")?; } - if let Some(environment) = &mut config.environment { - environment.retain(|v| { - !empty_or_only_spaces(&v.variable) - && !empty_or_only_spaces(&v.value) - }) + if let Some(environment) = &config.environment { + environment_vars_from_str(environment) + .context("Invalid environment")?; } if let Some(extra_args) = &mut config.extra_args { extra_args.retain(|v| !empty_or_only_spaces(v)) diff --git a/bin/core/src/resource/mod.rs b/bin/core/src/resource/mod.rs index da32624b7..5eeca2b5d 100644 --- a/bin/core/src/resource/mod.rs +++ b/bin/core/src/resource/mod.rs @@ -12,6 +12,7 @@ use komodo_client::{ komodo_timestamp, permission::PermissionLevel, resource::{AddFilters, Resource, ResourceQuery}, + tag::Tag, to_komodo_name, update::Update, user::User, @@ -49,6 +50,7 @@ mod build; mod builder; mod deployment; mod procedure; +mod refresh; mod repo; mod server; mod server_template; @@ -61,6 +63,7 @@ pub use build::{ pub use procedure::{ refresh_procedure_state_cache, spawn_procedure_state_refresh_loop, }; +pub use refresh::spawn_resource_refresh_loop; pub use repo::{ refresh_repo_state_cache, spawn_repo_state_refresh_loop, }; @@ -82,7 +85,8 @@ pub trait KomodoResource { + From + PartialDiff + 'static; - type PartialConfig: Default + type PartialConfig: Clone + + Default + From + Serialize + MaybeNone; @@ -90,7 +94,8 @@ pub trait KomodoResource { + Serialize + Diff + MaybeNone; - type Info: Send + type Info: Clone + + Send + Sync + Unpin + Default @@ -274,7 
+279,7 @@ pub async fn get_resource_ids_for_user( ))), // And any ids using the permissions table find_collect( - &db_client().await.permissions, + &db_client().permissions, doc! { "$or": user_target_query(&user.id, &groups)?, "resource_target.type": resource_type.as_ref(), @@ -351,7 +356,7 @@ pub async fn get_user_permission_on_resource( // Overlay any specific permissions let permission = find_collect( - &db_client().await.permissions, + &db_client().permissions, doc! { "$or": user_target_query(&user.id, &groups)?, "resource_target.type": resource_type.as_ref(), @@ -435,6 +440,8 @@ pub type IdResourceMap = HashMap< #[instrument(level = "debug")] pub async fn get_id_to_resource_map( + id_to_tags: &HashMap, + match_tags: &[String], ) -> anyhow::Result> { let res = find_collect(T::coll().await, None, None) .await @@ -442,6 +449,34 @@ pub async fn get_id_to_resource_map( format!("failed to pull {}s from mongo", T::resource_type()) })? .into_iter() + .filter(|resource| { + if match_tags.is_empty() { + return true; + } + for tag in match_tags.iter() { + for resource_tag in &resource.tags { + match ObjectId::from_str(resource_tag) { + Ok(_) => match id_to_tags + .get(resource_tag) + .map(|tag| tag.name.as_str()) + { + Some(name) => { + if tag != name { + return false; + } + } + None => return false, + }, + Err(_) => { + if resource_tag != tag { + return false; + } + } + } + } + } + true + }) .map(|r| (r.id.clone(), r)) .collect(); Ok(res) @@ -765,7 +800,6 @@ where let target: ResourceTarget = target.into(); let (variant, id) = target.extract_variant_id(); if let Err(e) = db_client() - .await .permissions .delete_many(doc! { "resource_target.type": variant.as_ref(), @@ -799,7 +833,6 @@ where ResourceTarget::System(_) => return, }; if let Err(e) = db_client() - .await .users .update_many( doc! 
{}, diff --git a/bin/core/src/resource/procedure.rs b/bin/core/src/resource/procedure.rs index 9849bd534..388b252e0 100644 --- a/bin/core/src/resource/procedure.rs +++ b/bin/core/src/resource/procedure.rs @@ -27,7 +27,10 @@ use mungos::{ mongodb::{bson::doc, options::FindOneOptions, Collection}, }; -use crate::state::{action_states, db_client, procedure_state_cache}; +use crate::{ + config::core_config, + state::{action_states, db_client, procedure_state_cache}, +}; impl super::KomodoResource for Procedure { type Config = ProcedureConfig; @@ -43,7 +46,7 @@ impl super::KomodoResource for Procedure { async fn coll( ) -> &'static Collection> { - &db_client().await.procedures + &db_client().procedures } async fn to_list_item( @@ -77,8 +80,8 @@ impl super::KomodoResource for Procedure { Operation::CreateProcedure } - fn user_can_create(_user: &User) -> bool { - true + fn user_can_create(user: &User) -> bool { + user.admin || !core_config().disable_non_admin_create } async fn validate_create_config( @@ -574,7 +577,7 @@ pub fn spawn_procedure_state_refresh_loop() { pub async fn refresh_procedure_state_cache() { let _ = async { let procedures = - find_collect(&db_client().await.procedures, None, None) + find_collect(&db_client().procedures, None, None) .await .context("failed to get procedures from db")?; let cache = procedure_state_cache(); @@ -609,7 +612,6 @@ async fn get_procedure_state(id: &String) -> ProcedureState { async fn get_procedure_state_from_db(id: &str) -> ProcedureState { async { let state = db_client() - .await .updates .find_one(doc! 
{ "target.type": "Procedure", diff --git a/bin/core/src/resource/refresh.rs b/bin/core/src/resource/refresh.rs new file mode 100644 index 000000000..fc20fdd88 --- /dev/null +++ b/bin/core/src/resource/refresh.rs @@ -0,0 +1,140 @@ +use async_timing_util::{wait_until_timelength, Timelength}; +use komodo_client::{ + api::write::{ + RefreshBuildCache, RefreshRepoCache, RefreshResourceSyncPending, + RefreshStackCache, + }, + entities::user::{build_user, repo_user, stack_user, sync_user}, +}; +use mungos::find::find_collect; +use resolver_api::Resolve; + +use crate::{ + config::core_config, + state::{db_client, State}, +}; + +pub fn spawn_resource_refresh_loop() { + let interval: Timelength = core_config() + .resource_poll_interval + .try_into() + .expect("Invalid resource poll interval"); + tokio::spawn(async move { + refresh_all().await; + loop { + wait_until_timelength(interval, 3000).await; + refresh_all().await; + } + }); +} + +async fn refresh_all() { + refresh_stacks().await; + refresh_builds().await; + refresh_repos().await; + refresh_syncs().await; +} + +async fn refresh_stacks() { + let Ok(stacks) = find_collect(&db_client().stacks, None, None) + .await + .inspect_err(|e| { + warn!( + "Failed to get Stacks from database in refresh task | {e:#}" + ) + }) + else { + return; + }; + for stack in stacks { + State + .resolve( + RefreshStackCache { stack: stack.id }, + stack_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("Failed to refresh Stack cache in refresh task | Stack: {} | {e:#}", stack.name) + }) + .ok(); + } +} + +async fn refresh_builds() { + let Ok(builds) = find_collect(&db_client().builds, None, None) + .await + .inspect_err(|e| { + warn!( + "Failed to get Builds from database in refresh task | {e:#}" + ) + }) + else { + return; + }; + for build in builds { + State + .resolve( + RefreshBuildCache { build: build.id }, + build_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("Failed to refresh Build cache in refresh task | Build: {} | 
{e:#}", build.name) + }) + .ok(); + } +} + +async fn refresh_repos() { + let Ok(repos) = find_collect(&db_client().repos, None, None) + .await + .inspect_err(|e| { + warn!( + "Failed to get Repos from database in refresh task | {e:#}" + ) + }) + else { + return; + }; + for repo in repos { + State + .resolve( + RefreshRepoCache { repo: repo.id }, + repo_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("Failed to refresh Repo cache in refresh task | Repo: {} | {e:#}", repo.name) + }) + .ok(); + } +} + +async fn refresh_syncs() { + let Ok(syncs) = + find_collect(&db_client().resource_syncs, None, None) + .await + .inspect_err(|e| { + warn!( + "failed to get resource syncs from db in refresh task | {e:#}" + ) + }) + else { + return; + }; + for sync in syncs { + if sync.config.repo.is_empty() { + continue; + } + State + .resolve( + RefreshResourceSyncPending { sync: sync.id }, + sync_user().clone(), + ) + .await + .inspect_err(|e| { + warn!("Failed to refresh ResourceSync in refresh task | Sync: {} | {e:#}", sync.name) + }) + .ok(); + } +} diff --git a/bin/core/src/resource/repo.rs b/bin/core/src/resource/repo.rs index caf3e5e84..7ec509f69 100644 --- a/bin/core/src/resource/repo.rs +++ b/bin/core/src/resource/repo.rs @@ -22,6 +22,7 @@ use mungos::{ use periphery_client::api::git::DeleteRepo; use crate::{ + config::core_config, helpers::periphery_client, state::{ action_states, db_client, repo_state_cache, repo_status_cache, @@ -44,7 +45,7 @@ impl super::KomodoResource for Repo { async fn coll( ) -> &'static Collection> { - &db_client().await.repos + &db_client().repos } async fn to_list_item( @@ -90,8 +91,8 @@ impl super::KomodoResource for Repo { Operation::CreateRepo } - fn user_can_create(_user: &User) -> bool { - true + fn user_can_create(user: &User) -> bool { + user.admin || !core_config().disable_non_admin_create } async fn validate_create_config( @@ -183,7 +184,7 @@ pub fn spawn_repo_state_refresh_loop() { pub async fn refresh_repo_state_cache() { let _ 
= async { - let repos = find_collect(&db_client().await.repos, None, None) + let repos = find_collect(&db_client().repos, None, None) .await .context("failed to get repos from db")?; let cache = repo_state_cache(); @@ -257,7 +258,6 @@ async fn get_repo_state(id: &String) -> RepoState { async fn get_repo_state_from_db(id: &str) -> RepoState { async { let state = db_client() - .await .updates .find_one(doc! { "target.type": "Repo", diff --git a/bin/core/src/resource/server.rs b/bin/core/src/resource/server.rs index 08acfd7ad..5e05c97ef 100644 --- a/bin/core/src/resource/server.rs +++ b/bin/core/src/resource/server.rs @@ -13,6 +13,7 @@ use komodo_client::entities::{ use mungos::mongodb::{bson::doc, Collection}; use crate::{ + config::core_config, monitor::update_cache_for_server, state::{action_states, db_client, server_status_cache}, }; @@ -31,7 +32,7 @@ impl super::KomodoResource for Server { async fn coll( ) -> &'static Collection> { - &db_client().await.servers + &db_client().servers } async fn to_list_item( @@ -72,7 +73,9 @@ impl super::KomodoResource for Server { } fn user_can_create(user: &User) -> bool { - user.admin || user.create_server_permissions + user.admin + || (!core_config().disable_non_admin_create + && user.create_server_permissions) } async fn validate_create_config( @@ -122,7 +125,7 @@ impl super::KomodoResource for Server { resource: &Resource, _update: &mut Update, ) -> anyhow::Result<()> { - let db = db_client().await; + let db = db_client(); let id = &resource.id; diff --git a/bin/core/src/resource/server_template.rs b/bin/core/src/resource/server_template.rs index 86d48e286..ba5433cac 100644 --- a/bin/core/src/resource/server_template.rs +++ b/bin/core/src/resource/server_template.rs @@ -31,7 +31,7 @@ impl super::KomodoResource for ServerTemplate { async fn coll( ) -> &'static Collection> { - &db_client().await.server_templates + &db_client().server_templates } async fn to_list_item( diff --git a/bin/core/src/resource/stack.rs 
b/bin/core/src/resource/stack.rs index ecef40157..9aa6e86e6 100644 --- a/bin/core/src/resource/stack.rs +++ b/bin/core/src/resource/stack.rs @@ -21,6 +21,7 @@ use periphery_client::api::compose::ComposeExecution; use resolver_api::Resolve; use crate::{ + config::core_config, helpers::{periphery_client, query::get_stack_state}, monitor::update_cache_for_server, state::{ @@ -45,7 +46,7 @@ impl super::KomodoResource for Stack { async fn coll( ) -> &'static Collection> { - &db_client().await.stacks + &db_client().stacks } async fn to_list_item( @@ -107,8 +108,10 @@ impl super::KomodoResource for Stack { status, services, project_missing, + file_contents: !stack.config.file_contents.is_empty(), server_id: stack.config.server_id, missing_files: stack.info.missing_files, + files_on_host: stack.config.files_on_host, git_provider: stack.config.git_provider, repo: stack.config.repo, branch: stack.config.branch, @@ -134,7 +137,7 @@ impl super::KomodoResource for Stack { } fn user_can_create(user: &User) -> bool { - user.admin + user.admin || !core_config().disable_non_admin_create } async fn validate_create_config( @@ -158,7 +161,7 @@ impl super::KomodoResource for Stack { .await { update.push_error_log( - "refresh stack cache", + "Refresh stack cache", format_serror(&e.context("The stack cache has failed to refresh. 
This is likely due to a misconfiguration of the Stack").into()) ); }; @@ -320,7 +323,7 @@ async fn validate_config( // pub async fn refresh_resource_sync_state_cache() { // let _ = async { // let resource_syncs = -// find_collect(&db_client().await.resource_syncs, None, None) +// find_collect(&db_client().resource_syncs, None, None) // .await // .context("failed to get resource_syncs from db")?; // let cache = resource_sync_state_cache(); diff --git a/bin/core/src/resource/sync.rs b/bin/core/src/resource/sync.rs index 4b727b692..1e386c89b 100644 --- a/bin/core/src/resource/sync.rs +++ b/bin/core/src/resource/sync.rs @@ -1,27 +1,32 @@ use std::time::Duration; use anyhow::Context; -use komodo_client::entities::{ - komodo_timestamp, - resource::Resource, - sync::{ - PartialResourceSyncConfig, PendingSyncUpdatesData, ResourceSync, - ResourceSyncConfig, ResourceSyncConfigDiff, ResourceSyncInfo, - ResourceSyncListItem, ResourceSyncListItemInfo, - ResourceSyncQuerySpecifics, ResourceSyncState, +use formatting::format_serror; +use komodo_client::{ + api::write::RefreshResourceSyncPending, + entities::{ + komodo_timestamp, + resource::Resource, + sync::{ + PartialResourceSyncConfig, ResourceSync, ResourceSyncConfig, + ResourceSyncConfigDiff, ResourceSyncInfo, ResourceSyncListItem, + ResourceSyncListItemInfo, ResourceSyncQuerySpecifics, + ResourceSyncState, + }, + update::Update, + user::{sync_user, User}, + Operation, ResourceTargetVariant, }, - update::Update, - user::User, - Operation, ResourceTargetVariant, }; use mongo_indexed::doc; use mungos::{ find::find_collect, mongodb::{options::FindOneOptions, Collection}, }; +use resolver_api::Resolve; use crate::state::{ - action_states, db_client, resource_sync_state_cache, + action_states, db_client, resource_sync_state_cache, State, }; impl super::KomodoResource for ResourceSync { @@ -38,29 +43,31 @@ impl super::KomodoResource for ResourceSync { async fn coll( ) -> &'static Collection> { - &db_client().await.resource_syncs + 
&db_client().resource_syncs } async fn to_list_item( resource_sync: Resource, ) -> Self::ListItem { - let state = get_resource_sync_state( - &resource_sync.id, - &resource_sync.info.pending.data, - ) - .await; + let state = + get_resource_sync_state(&resource_sync.id, &resource_sync.info) + .await; ResourceSyncListItem { id: resource_sync.id, name: resource_sync.name, tags: resource_sync.tags, resource_type: ResourceTargetVariant::ResourceSync, info: ResourceSyncListItemInfo { + file_contents: !resource_sync.config.file_contents.is_empty(), + files_on_host: resource_sync.config.files_on_host, + managed: resource_sync.config.managed, git_provider: resource_sync.config.git_provider, repo: resource_sync.config.repo, branch: resource_sync.config.branch, last_sync_ts: resource_sync.info.last_sync_ts, last_sync_hash: resource_sync.info.last_sync_hash, last_sync_message: resource_sync.info.last_sync_message, + resource_path: resource_sync.config.resource_path, state, }, } @@ -93,9 +100,23 @@ impl super::KomodoResource for ResourceSync { } async fn post_create( - _created: &Resource, - _update: &mut Update, + created: &Resource, + update: &mut Update, ) -> anyhow::Result<()> { + if let Err(e) = State + .resolve( + RefreshResourceSyncPending { + sync: created.id.clone(), + }, + sync_user().to_owned(), + ) + .await + { + update.push_error_log( + "Refresh sync pending", + format_serror(&e.context("The sync pending cache has failed to refresh. 
This is likely due to a misconfiguration of the sync").into()) + ); + }; Ok(()) } @@ -114,10 +135,10 @@ impl super::KomodoResource for ResourceSync { } async fn post_update( - _updated: &Resource, - _update: &mut Update, + updated: &Resource, + update: &mut Update, ) -> anyhow::Result<()> { - Ok(()) + Self::post_create(updated, update).await } // DELETE @@ -130,7 +151,7 @@ impl super::KomodoResource for ResourceSync { resource: &Resource, _update: &mut Update, ) -> anyhow::Result<()> { - db_client().await.alerts + db_client().alerts .update_many( doc! { "target.type": "ResourceSync", "target.id": &resource.id }, doc! { "$set": { @@ -164,7 +185,7 @@ pub fn spawn_resource_sync_state_refresh_loop() { pub async fn refresh_resource_sync_state_cache() { let _ = async { let resource_syncs = - find_collect(&db_client().await.resource_syncs, None, None) + find_collect(&db_client().resource_syncs, None, None) .await .context("failed to get resource_syncs from db")?; let cache = resource_sync_state_cache(); @@ -183,7 +204,7 @@ pub async fn refresh_resource_sync_state_cache() { async fn get_resource_sync_state( id: &String, - data: &PendingSyncUpdatesData, + data: &ResourceSyncInfo, ) -> ResourceSyncState { if let Some(state) = action_states() .resource_sync @@ -204,13 +225,14 @@ async fn get_resource_sync_state( { return state; } - let data = match data { - PendingSyncUpdatesData::Err(_) => { - return ResourceSyncState::Failed - } - PendingSyncUpdatesData::Ok(data) => data, - }; - if !data.no_updates() { + if data.pending_error.is_some() { + return ResourceSyncState::Failed; + } + if !data.resource_updates.is_empty() + || !data.variable_updates.is_empty() + || !data.user_group_updates.is_empty() + || data.pending_deploy.to_deploy > 0 + { return ResourceSyncState::Pending; } resource_sync_state_cache() @@ -224,12 +246,14 @@ async fn get_resource_sync_state_from_db( ) -> ResourceSyncState { async { let state = db_client() - .await .updates .find_one(doc! 
{ "target.type": "ResourceSync", "target.id": id, - "operation": "RunSync" + "$or": [ + { "operation": "RunSync" }, + { "operation": "CommitSync" }, + ], }) .with_options( FindOneOptions::builder() diff --git a/bin/core/src/helpers/stack/execute.rs b/bin/core/src/stack/execute.rs similarity index 100% rename from bin/core/src/helpers/stack/execute.rs rename to bin/core/src/stack/execute.rs diff --git a/bin/core/src/stack/mod.rs b/bin/core/src/stack/mod.rs new file mode 100644 index 000000000..71d526f0a --- /dev/null +++ b/bin/core/src/stack/mod.rs @@ -0,0 +1,51 @@ +use anyhow::{anyhow, Context}; +use komodo_client::entities::{ + permission::PermissionLevel, + server::{Server, ServerState}, + stack::Stack, + user::User, +}; +use regex::Regex; + +use crate::{helpers::query::get_server_with_state, resource}; + +pub mod execute; +pub mod remote; +pub mod services; + +pub async fn get_stack_and_server( + stack: &str, + user: &User, + permission_level: PermissionLevel, + block_if_server_unreachable: bool, +) -> anyhow::Result<(Stack, Server)> { + let stack = resource::get_check_permissions::( + stack, + user, + permission_level, + ) + .await?; + + if stack.config.server_id.is_empty() { + return Err(anyhow!("Stack has no server configured")); + } + + let (server, status) = + get_server_with_state(&stack.config.server_id).await?; + if block_if_server_unreachable && status != ServerState::Ok { + return Err(anyhow!( + "cannot send action when server is unreachable or disabled" + )); + } + + Ok((stack, server)) +} + +pub fn compose_container_match_regex( + container_name: &str, +) -> anyhow::Result { + let regex = format!("^{container_name}-?[0-9]*$"); + Regex::new(®ex).with_context(|| { + format!("failed to construct valid regex from {regex}") + }) +} diff --git a/bin/core/src/helpers/stack/remote.rs b/bin/core/src/stack/remote.rs similarity index 51% rename from bin/core/src/helpers/stack/remote.rs rename to bin/core/src/stack/remote.rs index bab70b416..433416895 100644 --- 
a/bin/core/src/helpers/stack/remote.rs +++ b/bin/core/src/stack/remote.rs @@ -1,51 +1,39 @@ -use std::{ - fs, - path::{Path, PathBuf}, -}; +use std::{fs, path::PathBuf}; -use anyhow::{anyhow, Context}; +use anyhow::Context; use formatting::format_serror; use komodo_client::entities::{ - stack::{ComposeContents, Stack}, - update::Log, - CloneArgs, + stack::Stack, update::Log, CloneArgs, FileContents, }; -use crate::{ - config::core_config, - helpers::{git_token, random_string}, -}; +use crate::{config::core_config, helpers::git_token}; + +pub struct RemoteComposeContents { + pub successful: Vec, + pub errored: Vec, + pub hash: Option, + pub message: Option, + pub _logs: Vec, +} /// Returns Result<(read paths, error paths, logs, short hash, commit message)> pub async fn get_remote_compose_contents( stack: &Stack, // Collect any files which are missing in the repo. mut missing_files: Option<&mut Vec>, -) -> anyhow::Result<( - // Successful contents - Vec, - // error contents - Vec, - // logs - Vec, - // commit short hash - Option, - // commit message - Option, -)> { - let repo_path = - core_config().repo_directory.join(random_string(10)); - - let (logs, hash, message) = clone_remote_repo(&repo_path, stack) - .await - .context("failed to clone stack repo")?; +) -> anyhow::Result { + let clone_args: CloneArgs = stack.into(); + let (repo_path, _logs, hash, message) = + ensure_remote_repo(clone_args) + .await + .context("failed to clone stack repo")?; let run_directory = repo_path.join(&stack.config.run_directory); // This will remove any intermediate '/./' which can be a problem for some OS. 
let run_directory = run_directory.components().collect::(); - let mut oks = Vec::new(); - let mut errs = Vec::new(); + let mut successful = Vec::new(); + let mut errored = Vec::new(); for path in stack.file_paths() { let file_path = run_directory.join(path); @@ -58,60 +46,53 @@ pub async fn get_remote_compose_contents( match fs::read_to_string(&file_path).with_context(|| { format!("failed to read file contents from {file_path:?}") }) { - Ok(contents) => oks.push(ComposeContents { + Ok(contents) => successful.push(FileContents { path: path.to_string(), contents, }), - Err(e) => errs.push(ComposeContents { + Err(e) => errored.push(FileContents { path: path.to_string(), contents: format_serror(&e.into()), }), } } - if repo_path.exists() { - if let Err(e) = std::fs::remove_dir_all(&repo_path) { - warn!("failed to remove stack repo directory | {e:?}") - } - } - - Ok((oks, errs, logs, hash, message)) + Ok(RemoteComposeContents { + successful, + errored, + hash, + message, + _logs, + }) } -/// Returns (logs, hash, message) -pub async fn clone_remote_repo( - repo_path: &Path, - stack: &Stack, -) -> anyhow::Result<(Vec, Option, Option)> { - let mut clone_args: CloneArgs = stack.into(); - +/// Returns (destination, logs, hash, message) +pub async fn ensure_remote_repo( + mut clone_args: CloneArgs, +) -> anyhow::Result<(PathBuf, Vec, Option, Option)> +{ let config = core_config(); - let access_token = match (&clone_args.account, &clone_args.provider) - { - (None, _) => None, - (Some(_), None) => { - return Err(anyhow!( - "Account is configured, but provider is empty" - )) - } - (Some(username), Some(provider)) => { - git_token(provider, username, |https| { + let access_token = if let Some(username) = &clone_args.account { + git_token(&clone_args.provider, username, |https| { clone_args.https = https }) .await .with_context( - || format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"), + || format!("Failed to get git token in call to db. 
Stopping run. | {} | {username}", clone_args.provider), )? - } + } else { + None }; + let repo_path = + clone_args.unique_path(&core_config().repo_directory)?; + clone_args.destination = Some(repo_path.display().to_string()); // Don't want to run these on core. clone_args.on_clone = None; clone_args.on_pull = None; - clone_args.destination = Some(repo_path.display().to_string()); - git::clone( + git::pull_or_clone( clone_args, &config.repo_directory, access_token, @@ -122,5 +103,5 @@ pub async fn clone_remote_repo( ) .await .context("failed to clone stack repo") - .map(|(a, b, c, _)| (a, b, c)) + .map(|res| (repo_path, res.logs, res.hash, res.message)) } diff --git a/bin/core/src/helpers/stack/services.rs b/bin/core/src/stack/services.rs similarity index 75% rename from bin/core/src/helpers/stack/services.rs rename to bin/core/src/stack/services.rs index d1db0ddc9..4d64ae9fa 100644 --- a/bin/core/src/helpers/stack/services.rs +++ b/bin/core/src/stack/services.rs @@ -1,10 +1,12 @@ use anyhow::Context; -use komodo_client::entities::stack::{ - ComposeContents, ComposeFile, ComposeService, Stack, - StackServiceNames, +use komodo_client::entities::{ + stack::{ComposeFile, ComposeService, Stack, StackServiceNames}, + FileContents, }; -use crate::helpers::stack::remote::get_remote_compose_contents; +use super::remote::{ + get_remote_compose_contents, RemoteComposeContents, +}; /// Passing fresh will re-extract services from compose file, whether local or remote (repo) pub async fn extract_services_from_stack( @@ -20,29 +22,32 @@ pub async fn extract_services_from_stack( } let compose_contents = if stack.config.file_contents.is_empty() { - let (contents, errors, _, _, _) = - get_remote_compose_contents(stack, None).await.context( - "failed to get remote compose files to extract services", - )?; - if !errors.is_empty() { + let RemoteComposeContents { + successful, + errored, + .. 
+ } = get_remote_compose_contents(stack, None).await.context( + "failed to get remote compose files to extract services", + )?; + if !errored.is_empty() { let mut e = anyhow::Error::msg("Trace root"); - for err in errors { + for err in errored { e = e.context(format!("{}: {}", err.path, err.contents)); } return Err( e.context("Failed to read one or more remote compose files"), ); } - contents + successful } else { - vec![ComposeContents { + vec![FileContents { path: String::from("compose.yaml"), contents: stack.config.file_contents.clone(), }] }; let mut res = Vec::new(); - for ComposeContents { path, contents } in &compose_contents { + for FileContents { path, contents } in &compose_contents { extract_services_into_res( &stack.project_name(true), contents, diff --git a/bin/core/src/state.rs b/bin/core/src/state.rs index db3d18a86..39bdeee9c 100644 --- a/bin/core/src/state.rs +++ b/bin/core/src/state.rs @@ -16,7 +16,6 @@ use komodo_client::entities::{ use octorust::auth::{ Credentials, InstallationTokenGenerator, JWTCredentials, }; -use tokio::sync::{Mutex, OnceCell}; use crate::{ auth::jwt::JwtClient, @@ -31,22 +30,20 @@ use crate::{ pub struct State; -pub async fn db_client() -> &'static DbClient { - static DB_CLIENT: OnceCell = OnceCell::const_new(); +static DB_CLIENT: OnceLock = OnceLock::new(); + +pub fn db_client() -> &'static DbClient { DB_CLIENT - .get_or_init(|| async { - match DbClient::new(&core_config().mongo) - .await - .context("failed to initialize mongo client") - { - Ok(client) => client, - Err(e) => { - error!("{e:#}"); - panic!("Exiting"); - } - } - }) + .get() + .expect("db_client accessed before initialized") +} + +pub async fn init_db_client() { + let client = DbClient::new(&core_config().database) .await + .context("failed to initialize database client") + .unwrap(); + DB_CLIENT.set(client).expect("db_clint"); } pub fn jwt_client() -> &'static JwtClient { @@ -202,11 +199,3 @@ pub fn resource_sync_state_cache() -> &'static 
ResourceSyncStateCache OnceLock::new(); RESOURCE_SYNC_STATE_CACHE.get_or_init(Default::default) } - -pub type ResourceSyncLockCache = Cache>>; - -pub fn resource_sync_lock_cache() -> &'static ResourceSyncLockCache { - static RESOURCE_SYNC_LOCK_CACHE: OnceLock = - OnceLock::new(); - RESOURCE_SYNC_LOCK_CACHE.get_or_init(Default::default) -} diff --git a/bin/core/src/helpers/sync/deploy.rs b/bin/core/src/sync/deploy.rs similarity index 98% rename from bin/core/src/helpers/sync/deploy.rs rename to bin/core/src/sync/deploy.rs index e388a1519..c8b52e597 100644 --- a/bin/core/src/helpers/sync/deploy.rs +++ b/bin/core/src/sync/deploy.rs @@ -25,15 +25,12 @@ use resolver_api::Resolve; use crate::{ api::execute::ExecuteRequest, - config::core_config, - helpers::{ - random_string, stack::remote::clone_remote_repo, - update::init_execution_update, - }, + helpers::update::init_execution_update, + stack::remote::ensure_remote_repo, state::{deployment_status_cache, stack_status_cache, State}, }; -use super::resource::{AllResourcesById, ResourceSync}; +use super::{AllResourcesById, ResourceSyncTrait}; /// All entries in here are due to be deployed, /// after the given dependencies, @@ -179,7 +176,7 @@ pub async fn deploy_from_cache( pub async fn get_updates_for_view( params: SyncDeployParams<'_>, -) -> Option { +) -> SyncDeployUpdate { let inner = async { let mut update = SyncDeployUpdate { to_deploy: 0, @@ -209,16 +206,16 @@ pub async fn get_updates_for_view( update.log.push_str(&lines.join("\n-------------------\n")); - anyhow::Ok((update.to_deploy > 0).then_some(update)) + anyhow::Ok(update) }; match inner.await { Ok(res) => res, - Err(e) => Some(SyncDeployUpdate { + Err(e) => SyncDeployUpdate { to_deploy: 0, log: format_serror( &e.context("failed to get deploy updates for view").into(), ), - }), + }, } } @@ -597,9 +594,7 @@ fn build_cache_for_stack<'a>( // Can use 'original' for this (config hasn't changed) if stack.latest_hash { if let Some(deployed_hash) = 
&original.info.deployed_hash { - let repo_path = - core_config().repo_directory.join(random_string(10)); - let (_, hash, _) = clone_remote_repo(&repo_path, original) + let (_, _, hash, _) = ensure_remote_repo(original.into()) .await .context("failed to get latest hash for repo based stack") .with_context(|| { diff --git a/bin/core/src/helpers/sync/resource.rs b/bin/core/src/sync/execute.rs similarity index 50% rename from bin/core/src/helpers/sync/resource.rs rename to bin/core/src/sync/execute.rs index 16bf0955c..06f030aa5 100644 --- a/bin/core/src/helpers/sync/resource.rs +++ b/bin/core/src/sync/execute.rs @@ -5,53 +5,118 @@ use formatting::{bold, colored, muted, Color}; use komodo_client::{ api::write::{UpdateDescription, UpdateTagsOnResource}, entities::{ - self, alerter::Alerter, build::Build, builder::Builder, - deployment::Deployment, procedure::Procedure, repo::Repo, - server::Server, server_template::ServerTemplate, stack::Stack, - sync::SyncUpdate, tag::Tag, toml::ResourceToml, update::Log, - user::sync_user, ResourceTarget, + tag::Tag, toml::ResourceToml, update::Log, user::sync_user, }, }; use mungos::find::find_collect; -use partial_derive2::{Diff, FieldDiff, MaybeNone}; +use partial_derive2::MaybeNone; use resolver_api::Resolve; -use crate::{resource::KomodoResource, state::State}; +use crate::state::State; -pub type ToUpdate = Vec>; -pub type ToCreate = Vec>; -/// Vec of resource names -pub type ToDelete = Vec; +use super::{ + AllResourcesById, ResourceSyncTrait, ToCreate, ToDelete, ToUpdate, + ToUpdateItem, UpdatesResult, +}; -type UpdatesResult = (ToCreate, ToUpdate, ToDelete); +/// Gets all the resources to update. For use in sync execution. 
+pub async fn get_updates_for_execution< + Resource: ResourceSyncTrait, +>( + resources: Vec>, + delete: bool, + all_resources: &AllResourcesById, + id_to_tags: &HashMap, + match_tags: &[String], +) -> anyhow::Result> { + let map = find_collect(Resource::coll().await, None, None) + .await + .context("failed to get resources from db")? + .into_iter() + .filter(|r| { + Resource::include_resource( + &r.config, &r.tags, id_to_tags, match_tags, + ) + }) + .map(|r| (r.name.clone(), r)) + .collect::>(); -pub struct ToUpdateItem { - pub id: String, - pub resource: ResourceToml, - pub update_description: bool, - pub update_tags: bool, + let mut to_create = ToCreate::::new(); + let mut to_update = ToUpdate::::new(); + let mut to_delete = ToDelete::new(); + + if delete { + for resource in map.values() { + if !resources.iter().any(|r| r.name == resource.name) { + to_delete.push(resource.name.clone()); + } + } + } + + for mut resource in resources { + // only resource that might not be included is resource sync + if !Resource::include_resource_partial( + &resource.config, + &resource.tags, + id_to_tags, + match_tags, + ) { + continue; + } + match map.get(&resource.name) { + Some(original) => { + // First merge toml resource config (partial) onto default resource config. + // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
+ let config: Resource::Config = resource.config.into(); + resource.config = config.into(); + + Resource::validate_partial_config(&mut resource.config); + + let mut diff = Resource::get_diff( + original.config.clone(), + resource.config, + all_resources, + )?; + + Resource::validate_diff(&mut diff); + + let original_tags = original + .tags + .iter() + .filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone())) + .collect::>(); + + // Only proceed if there are any fields to update, + // or a change to tags / description + if diff.is_none() + && resource.description == original.description + && resource.tags == original_tags + { + continue; + } + + // Minimizes updates through diffing. + resource.config = diff.into(); + + let update = ToUpdateItem { + id: original.id.clone(), + update_description: resource.description + != original.description, + update_tags: resource.tags != original_tags, + resource, + }; + + to_update.push(update); + } + None => to_create.push(resource), + } + } + + Ok((to_create, to_update, to_delete)) } -pub trait ResourceSync: KomodoResource + Sized { - fn resource_target(id: String) -> ResourceTarget; - - /// Apply any changes to incoming toml partial config - /// before it is diffed against existing config - fn validate_partial_config(_config: &mut Self::PartialConfig) {} - - /// Diffs the declared toml (partial) against the full existing config. - /// Removes all fields from toml (partial) that haven't changed. 
- fn get_diff( - original: Self::Config, - update: Self::PartialConfig, - resources: &AllResourcesById, - ) -> anyhow::Result; - - /// Apply any changes to computed config diff - /// before logging - fn validate_diff(_diff: &mut Self::ConfigDiff) {} - - async fn run_updates( +pub trait ExecuteResourceSync: ResourceSyncTrait { + async fn execute_sync_updates( to_create: ToCreate, to_update: ToUpdate, to_delete: ToDelete, @@ -207,230 +272,7 @@ pub trait ResourceSync: KomodoResource + Sized { } } -/// Turns all the diffs into a readable string -pub async fn get_updates_for_view( - resources: Vec>, - delete: bool, - all_resources: &AllResourcesById, - id_to_tags: &HashMap, -) -> anyhow::Result> { - let map = find_collect(Resource::coll().await, None, None) - .await - .context("failed to get resources from db")? - .into_iter() - .map(|r| (r.name.clone(), r)) - .collect::>(); - - let mut update = SyncUpdate { - log: format!("{} Updates", Resource::resource_type()), - ..Default::default() - }; - - let mut to_delete = Vec::::new(); - if delete { - for resource in map.values() { - if !resources.iter().any(|r| r.name == resource.name) { - update.to_delete += 1; - to_delete.push(resource.name.clone()) - } - } - } - - for mut resource in resources { - match map.get(&resource.name) { - Some(original) => { - // First merge toml resource config (partial) onto default resource config. - // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
- let config: Resource::Config = resource.config.into(); - resource.config = config.into(); - - Resource::validate_partial_config(&mut resource.config); - - let mut diff = Resource::get_diff( - original.config.clone(), - resource.config, - all_resources, - )?; - - Resource::validate_diff(&mut diff); - - let original_tags = original - .tags - .iter() - .filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone())) - .collect::>(); - - // Only proceed if there are any fields to update, - // or a change to tags / description - if diff.is_none() - && resource.description == original.description - && resource.tags == original_tags - { - continue; - } - - update.to_update += 1; - - update.log.push_str(&format!( - "\n\n{}: {}: '{}'\n-------------------", - colored("UPDATE", Color::Blue), - Resource::resource_type(), - bold(&resource.name) - )); - - let mut lines = Vec::::new(); - if resource.description != original.description { - lines.push(format!( - "{}: 'description'\n{}: {}\n{}: {}", - muted("field"), - muted("from"), - colored(&original.description, Color::Red), - muted("to"), - colored(&resource.description, Color::Green) - )); - } - if resource.tags != original_tags { - let from = - colored(&format!("{:?}", original_tags), Color::Red); - let to = - colored(&format!("{:?}", resource.tags), Color::Green); - lines.push(format!( - "{}: 'tags'\n{}: {from}\n{}: {to}", - muted("field"), - muted("from"), - muted("to"), - )); - } - lines.extend(diff.iter_field_diffs().map( - |FieldDiff { field, from, to }| { - format!( - "{}: '{field}'\n{}: {}\n{}: {}", - muted("field"), - muted("from"), - colored(from, Color::Red), - muted("to"), - colored(to, Color::Green) - ) - }, - )); - update.log.push('\n'); - update.log.push_str(&lines.join("\n-------------------\n")); - } - None => { - update.to_create += 1; - update.log.push_str(&format!( - "\n\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}", - colored("CREATE", Color::Green), - Resource::resource_type(), - bold(&resource.name), - 
muted("description"), - resource.description, - muted("tags"), - resource.tags, - muted("config"), - serde_json::to_string_pretty(&resource.config) - .context("failed to serialize config to json")? - )) - } - } - } - - for name in to_delete { - update.log.push_str(&format!( - "\n\n{}: {}: '{}'\n-------------------", - colored("DELETE", Color::Red), - Resource::resource_type(), - bold(&name) - )); - } - - let any_change = update.to_create > 0 - || update.to_update > 0 - || update.to_delete > 0; - - Ok(any_change.then_some(update)) -} - -/// Gets all the resources to update. For use in sync execution. -pub async fn get_updates_for_execution( - resources: Vec>, - delete: bool, - all_resources: &AllResourcesById, - id_to_tags: &HashMap, -) -> anyhow::Result> { - let map = find_collect(Resource::coll().await, None, None) - .await - .context("failed to get resources from db")? - .into_iter() - .map(|r| (r.name.clone(), r)) - .collect::>(); - - let mut to_create = ToCreate::::new(); - let mut to_update = ToUpdate::::new(); - let mut to_delete = ToDelete::new(); - - if delete { - for resource in map.values() { - if !resources.iter().any(|r| r.name == resource.name) { - to_delete.push(resource.name.clone()); - } - } - } - - for mut resource in resources { - match map.get(&resource.name) { - Some(original) => { - // First merge toml resource config (partial) onto default resource config. - // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
- let config: Resource::Config = resource.config.into(); - resource.config = config.into(); - - Resource::validate_partial_config(&mut resource.config); - - let mut diff = Resource::get_diff( - original.config.clone(), - resource.config, - all_resources, - )?; - - Resource::validate_diff(&mut diff); - - let original_tags = original - .tags - .iter() - .filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone())) - .collect::>(); - - // Only proceed if there are any fields to update, - // or a change to tags / description - if diff.is_none() - && resource.description == original.description - && resource.tags == original_tags - { - continue; - } - - // Minimizes updates through diffing. - resource.config = diff.into(); - - let update = ToUpdateItem { - id: original.id.clone(), - update_description: resource.description - != original.description, - update_tags: resource.tags != original_tags, - resource, - }; - - to_update.push(update); - } - None => to_create.push(resource), - } - } - - Ok((to_create, to_update, to_delete)) -} - -pub async fn run_update_tags( +pub async fn run_update_tags( id: String, name: &str, tags: Vec, @@ -466,7 +308,7 @@ pub async fn run_update_tags( } } -pub async fn run_update_description( +pub async fn run_update_description( id: String, name: &str, description: String, @@ -500,51 +342,3 @@ pub async fn run_update_description( )); } } - -pub struct AllResourcesById { - pub servers: HashMap, - pub deployments: HashMap, - pub stacks: HashMap, - pub builds: HashMap, - pub repos: HashMap, - pub procedures: HashMap, - pub builders: HashMap, - pub alerters: HashMap, - pub templates: HashMap, - pub syncs: HashMap, -} - -impl AllResourcesById { - pub async fn load() -> anyhow::Result { - Ok(Self { - servers: crate::resource::get_id_to_resource_map::() - .await?, - deployments: crate::resource::get_id_to_resource_map::< - Deployment, - >() - .await?, - builds: crate::resource::get_id_to_resource_map::() - .await?, - repos: 
crate::resource::get_id_to_resource_map::() - .await?, - procedures: - crate::resource::get_id_to_resource_map::().await?, - builders: crate::resource::get_id_to_resource_map::() - .await?, - alerters: crate::resource::get_id_to_resource_map::() - .await?, - templates: crate::resource::get_id_to_resource_map::< - ServerTemplate, - >() - .await?, - syncs: crate::resource::get_id_to_resource_map::< - entities::sync::ResourceSync, - >() - .await?, - stacks: crate::resource::get_id_to_resource_map::< - entities::stack::Stack, - >() - .await?, - }) - } -} diff --git a/bin/core/src/sync/file.rs b/bin/core/src/sync/file.rs new file mode 100644 index 000000000..da4e4068f --- /dev/null +++ b/bin/core/src/sync/file.rs @@ -0,0 +1,166 @@ +use std::{fs, path::Path}; + +use anyhow::{anyhow, Context}; +use formatting::{colored, format_serror, muted, Color}; +use komodo_client::entities::{ + toml::{ResourceToml, ResourcesToml}, + update::Log, + FileContents, +}; + +pub fn read_resources( + path: &Path, + match_tags: &[String], + logs: &mut Vec, + files: &mut Vec, + file_errors: &mut Vec, +) -> anyhow::Result { + let mut res = ResourcesToml::default(); + let mut log = + format!("{}: reading resources from {path:?}", muted("INFO")); + if let Err(e) = read_resources_recursive( + path, + match_tags, + &mut res, + &mut log, + files, + file_errors, + ) + .with_context(|| format!("failed to read resources from {path:?}")) + { + file_errors.push(FileContents { + path: path.display().to_string(), + contents: format_serror(&e.into()), + }); + logs.push(Log::error("read remote resources", log)); + } else { + logs.push(Log::simple("read remote resources", log)); + }; + Ok(res) +} + +fn read_resources_recursive( + path: &Path, + match_tags: &[String], + resources: &mut ResourcesToml, + log: &mut String, + files: &mut Vec, + file_errors: &mut Vec, +) -> anyhow::Result<()> { + let res = + fs::metadata(path).context("failed to get path metadata")?; + if res.is_file() { + if !path + .extension() + 
.map(|ext| ext == "toml") + .unwrap_or_default() + { + return Ok(()); + } + let contents = std::fs::read_to_string(path) + .context("failed to read file contents")?; + files.push(FileContents { + path: path.display().to_string(), + contents: contents.clone(), + }); + let more = toml::from_str::(&contents) + // the error without this comes through with multiple lines (\n) and looks bad + .map_err(|e| anyhow!("{e:#}")) + .context("failed to parse resource file contents")?; + + log.push('\n'); + log.push_str(&format!( + "{}: {} from {}", + muted("INFO"), + colored("adding resources", Color::Green), + colored(path.display(), Color::Blue) + )); + + extend_resources(resources, more, match_tags); + + Ok(()) + } else if res.is_dir() { + let directory = fs::read_dir(path) + .context("failed to read directory contents")?; + for entry in directory.into_iter().flatten() { + let path = entry.path(); + if let Err(e) = read_resources_recursive( + &path, + match_tags, + resources, + log, + files, + file_errors, + ) + .with_context(|| { + format!("failed to read resources from {path:?}") + }) { + file_errors.push(FileContents { + path: path.display().to_string(), + contents: format_serror(&e.into()), + }); + log.push('\n'); + log.push_str(&format!( + "{}: {} from {}", + colored("ERROR", Color::Red), + colored("adding resources", Color::Green), + colored(path.display(), Color::Blue) + )); + } + } + Ok(()) + } else { + Err(anyhow!("resources path is neither file nor directory")) + } +} + +pub fn extend_resources( + resources: &mut ResourcesToml, + more: ResourcesToml, + match_tags: &[String], +) { + resources + .servers + .extend(filter_by_tag(more.servers, match_tags)); + resources + .stacks + .extend(filter_by_tag(more.stacks, match_tags)); + resources + .deployments + .extend(filter_by_tag(more.deployments, match_tags)); + resources + .builds + .extend(filter_by_tag(more.builds, match_tags)); + resources + .repos + .extend(filter_by_tag(more.repos, match_tags)); + resources + 
.procedures + .extend(filter_by_tag(more.procedures, match_tags)); + resources + .alerters + .extend(filter_by_tag(more.alerters, match_tags)); + resources + .builders + .extend(filter_by_tag(more.builders, match_tags)); + resources + .server_templates + .extend(filter_by_tag(more.server_templates, match_tags)); + resources + .resource_syncs + .extend(filter_by_tag(more.resource_syncs, match_tags)); + resources.user_groups.extend(more.user_groups); + resources.variables.extend(more.variables); +} + +fn filter_by_tag( + resources: Vec>, + match_tags: &[String], +) -> Vec> { + resources + .into_iter() + .filter(|resource| { + match_tags.iter().all(|tag| resource.tags.contains(tag)) + }) + .collect() +} diff --git a/bin/core/src/sync/mod.rs b/bin/core/src/sync/mod.rs new file mode 100644 index 000000000..f18d58e21 --- /dev/null +++ b/bin/core/src/sync/mod.rs @@ -0,0 +1,156 @@ +use std::{collections::HashMap, str::FromStr}; + +use komodo_client::entities::{ + alerter::Alerter, build::Build, builder::Builder, + deployment::Deployment, procedure::Procedure, repo::Repo, + server::Server, server_template::ServerTemplate, stack::Stack, + sync::ResourceSync, tag::Tag, toml::ResourceToml, ResourceTarget, +}; +use mungos::mongodb::bson::oid::ObjectId; +use toml::ToToml; + +pub mod deploy; +pub mod execute; +pub mod file; +pub mod remote; +pub mod resources; +pub mod toml; +pub mod user_groups; +pub mod variables; +pub mod view; + +pub type ToUpdate = Vec>; +pub type ToCreate = Vec>; +/// Vec of resource names +pub type ToDelete = Vec; + +type UpdatesResult = (ToCreate, ToUpdate, ToDelete); + +pub struct ToUpdateItem { + pub id: String, + pub resource: ResourceToml, + pub update_description: bool, + pub update_tags: bool, +} + +pub trait ResourceSyncTrait: ToToml + Sized { + fn resource_target(id: String) -> ResourceTarget; + + /// To exclude resource syncs with "file_contents" (they aren't compatible) + fn include_resource( + _config: &Self::Config, + resource_tags: &[String], 
+ id_to_tags: &HashMap, + match_tags: &[String], + ) -> bool { + include_resource_by_tags(resource_tags, id_to_tags, match_tags) + } + + /// To exclude resource syncs with "file_contents" (they aren't compatible) + fn include_resource_partial( + _config: &Self::PartialConfig, + resource_tags: &[String], + id_to_tags: &HashMap, + match_tags: &[String], + ) -> bool { + include_resource_by_tags(resource_tags, id_to_tags, match_tags) + } + + /// Apply any changes to incoming toml partial config + /// before it is diffed against existing config + fn validate_partial_config(_config: &mut Self::PartialConfig) {} + + /// Diffs the declared toml (partial) against the full existing config. + /// Removes all fields from toml (partial) that haven't changed. + fn get_diff( + original: Self::Config, + update: Self::PartialConfig, + resources: &AllResourcesById, + ) -> anyhow::Result; + + /// Apply any changes to computed config diff + /// before logging + fn validate_diff(_diff: &mut Self::ConfigDiff) {} +} + +pub fn include_resource_by_tags( + resource_tags: &[String], + id_to_tags: &HashMap, + match_tags: &[String], +) -> bool { + let tag_names = resource_tags + .iter() + .filter_map(|resource_tag| { + match ObjectId::from_str(resource_tag) { + Ok(_) => id_to_tags.get(resource_tag).map(|tag| &tag.name), + Err(_) => Some(resource_tag), + } + }) + .collect::>(); + match_tags.iter().all(|tag| tag_names.contains(&tag)) +} + +pub struct AllResourcesById { + pub servers: HashMap, + pub deployments: HashMap, + pub stacks: HashMap, + pub builds: HashMap, + pub repos: HashMap, + pub procedures: HashMap, + pub builders: HashMap, + pub alerters: HashMap, + pub templates: HashMap, + pub syncs: HashMap, +} + +impl AllResourcesById { + /// Use `match_tags` to filter resources by tag. 
+ pub async fn load() -> anyhow::Result { + let map = HashMap::new(); + let id_to_tags = ↦ + let match_tags = &[]; + Ok(Self { + servers: crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + deployments: crate::resource::get_id_to_resource_map::< + Deployment, + >(id_to_tags, match_tags) + .await?, + builds: crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + repos: crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + procedures: + crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + builders: crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + alerters: crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + templates: crate::resource::get_id_to_resource_map::< + ServerTemplate, + >(id_to_tags, match_tags) + .await?, + syncs: crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + stacks: crate::resource::get_id_to_resource_map::( + id_to_tags, match_tags, + ) + .await?, + }) + } +} diff --git a/bin/core/src/sync/remote.rs b/bin/core/src/sync/remote.rs new file mode 100644 index 000000000..c41df6191 --- /dev/null +++ b/bin/core/src/sync/remote.rs @@ -0,0 +1,161 @@ +use std::path::PathBuf; + +use anyhow::{anyhow, Context}; +use git::GitRes; +use komodo_client::entities::{ + sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs, + FileContents, +}; + +use crate::{config::core_config, helpers::git_token}; + +use super::file::extend_resources; + +pub struct RemoteResources { + pub resources: anyhow::Result, + pub files: Vec, + pub file_errors: Vec, + pub logs: Vec, + pub hash: Option, + pub message: Option, +} + +/// Use `match_tags` to filter resources by tag. 
+pub async fn get_remote_resources( + sync: &ResourceSync, +) -> anyhow::Result { + if sync.config.files_on_host { + // ============= + // FILES ON HOST + // ============= + let path = sync + .config + .resource_path + .parse::() + .context("Resource path is not valid path")?; + let (mut logs, mut files, mut file_errors) = + (Vec::new(), Vec::new(), Vec::new()); + let resources = super::file::read_resources( + &path, + &sync.config.match_tags, + &mut logs, + &mut files, + &mut file_errors, + ); + return Ok(RemoteResources { + resources, + files, + file_errors, + logs, + hash: None, + message: None, + }); + } else if sync.config.managed + || !sync.config.file_contents.is_empty() + { + // ========== + // UI DEFINED + // ========== + let mut resources = ResourcesToml::default(); + let resources = if !sync.config.file_contents.is_empty() { + toml::from_str::(&sync.config.file_contents) + .context("failed to parse resource file contents") + .map(|more| { + extend_resources( + &mut resources, + more, + &sync.config.match_tags, + ); + resources + }) + } else { + Ok(resources) + }; + + // filter_by_ + return Ok(RemoteResources { + resources, + files: vec![FileContents { + path: "database file".to_string(), + contents: sync.config.file_contents.clone(), + }], + file_errors: vec![], + logs: vec![Log::simple( + "Read from database", + "Resources added from database file".to_string(), + )], + hash: None, + message: None, + }); + } + + // =============== + // REPO BASED SYNC + // =============== + + if sync.config.repo.is_empty() { + return Err(anyhow!("No sync files configured")); + } + + let mut clone_args: CloneArgs = sync.into(); + + let access_token = if let Some(account) = &clone_args.account { + git_token(&clone_args.provider, account, |https| clone_args.https = https) + .await + .with_context( + || format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider), + )? 
+ } else { + None + }; + + let repo_path = + clone_args.unique_path(&core_config().repo_directory)?; + clone_args.destination = Some(repo_path.display().to_string()); + // Don't want to run these on core. + clone_args.on_clone = None; + clone_args.on_pull = None; + + let GitRes { + mut logs, + hash, + message, + .. + } = git::pull_or_clone( + clone_args, + &core_config().repo_directory, + access_token, + &[], + "", + None, + &[], + ) + .await + .with_context(|| { + format!("Failed to update resource repo at {repo_path:?}") + })?; + + let hash = hash.context("failed to get commit hash")?; + let message = + message.context("failed to get commit hash message")?; + + let resource_path = repo_path.join(&sync.config.resource_path); + + let (mut files, mut file_errors) = (Vec::new(), Vec::new()); + let resources = super::file::read_resources( + &resource_path, + &sync.config.match_tags, + &mut logs, + &mut files, + &mut file_errors, + ); + + Ok(RemoteResources { + resources, + files, + file_errors, + logs, + hash: Some(hash), + message: Some(message), + }) +} diff --git a/bin/core/src/helpers/sync/resources.rs b/bin/core/src/sync/resources.rs similarity index 88% rename from bin/core/src/helpers/sync/resources.rs rename to bin/core/src/sync/resources.rs index 34729da62..b4d702d8d 100644 --- a/bin/core/src/helpers/sync/resources.rs +++ b/bin/core/src/sync/resources.rs @@ -1,8 +1,9 @@ +use std::collections::HashMap; + use formatting::{bold, colored, muted, Color}; use komodo_client::{ api::execute::Execution, entities::{ - self, alerter::Alerter, build::Build, builder::{Builder, BuilderConfig}, @@ -12,6 +13,8 @@ use komodo_client::{ server::Server, server_template::ServerTemplate, stack::Stack, + sync::ResourceSync, + tag::Tag, update::Log, user::sync_user, ResourceTarget, @@ -20,18 +23,19 @@ use komodo_client::{ use partial_derive2::{MaybeNone, PartialDiff}; use crate::{ - helpers::sync::resource::{ - run_update_description, run_update_tags, ResourceSync, + 
resource::KomodoResource, + sync::{ + execute::{run_update_description, run_update_tags}, ToUpdateItem, }, - resource::KomodoResource, }; -use super::resource::{ - AllResourcesById, ToCreate, ToDelete, ToUpdate, +use super::{ + execute::ExecuteResourceSync, include_resource_by_tags, + AllResourcesById, ResourceSyncTrait, ToCreate, ToDelete, ToUpdate, }; -impl ResourceSync for Server { +impl ResourceSyncTrait for Server { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Server(id) } @@ -45,7 +49,9 @@ impl ResourceSync for Server { } } -impl ResourceSync for Deployment { +impl ExecuteResourceSync for Server {} + +impl ResourceSyncTrait for Deployment { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Deployment(id) } @@ -80,7 +86,9 @@ impl ResourceSync for Deployment { } } -impl ResourceSync for Stack { +impl ExecuteResourceSync for Deployment {} + +impl ResourceSyncTrait for Stack { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Stack(id) } @@ -101,7 +109,9 @@ impl ResourceSync for Stack { } } -impl ResourceSync for Build { +impl ExecuteResourceSync for Stack {} + +impl ResourceSyncTrait for Build { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Build(id) } @@ -132,7 +142,9 @@ impl ResourceSync for Build { } } -impl ResourceSync for Repo { +impl ExecuteResourceSync for Build {} + +impl ResourceSyncTrait for Repo { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Repo(id) } @@ -160,7 +172,9 @@ impl ResourceSync for Repo { } } -impl ResourceSync for Alerter { +impl ExecuteResourceSync for Repo {} + +impl ResourceSyncTrait for Alerter { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Alerter(id) } @@ -174,7 +188,9 @@ impl ResourceSync for Alerter { } } -impl ResourceSync for Builder { +impl ExecuteResourceSync for Alerter {} + +impl ResourceSyncTrait for Builder { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Builder(id) } @@ -197,7 
+213,9 @@ impl ResourceSync for Builder { } } -impl ResourceSync for ServerTemplate { +impl ExecuteResourceSync for Builder {} + +impl ResourceSyncTrait for ServerTemplate { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::ServerTemplate(id) } @@ -211,11 +229,72 @@ impl ResourceSync for ServerTemplate { } } -impl ResourceSync for entities::sync::ResourceSync { +impl ExecuteResourceSync for ServerTemplate {} + +impl ResourceSyncTrait for ResourceSync { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::ResourceSync(id) } + fn include_resource( + config: &Self::Config, + resource_tags: &[String], + id_to_tags: &HashMap, + match_tags: &[String], + ) -> bool { + if !include_resource_by_tags( + resource_tags, + id_to_tags, + match_tags, + ) { + return false; + } + // don't include fresh sync + let contents_empty = config.file_contents.is_empty(); + if contents_empty + && !config.files_on_host + && config.repo.is_empty() + { + return false; + } + // The file contents MUST be empty + contents_empty && + // The sync must be files on host mode OR NOT managed + (config.files_on_host || !config.managed) + } + + fn include_resource_partial( + config: &Self::PartialConfig, + resource_tags: &[String], + id_to_tags: &HashMap, + match_tags: &[String], + ) -> bool { + if !include_resource_by_tags( + resource_tags, + id_to_tags, + match_tags, + ) { + return false; + } + // don't include fresh sync + let contents_empty = config + .file_contents + .as_ref() + .map(String::is_empty) + .unwrap_or(true); + let files_on_host = config.files_on_host.unwrap_or_default(); + if contents_empty + && !files_on_host + && config.repo.as_ref().map(String::is_empty).unwrap_or(true) + { + return false; + } + // The file contents MUST be empty + contents_empty && + // The sync must be files on host mode OR NOT managed + (files_on_host || !config.managed.unwrap_or_default()) + } + fn get_diff( original: Self::Config, update: Self::PartialConfig, @@ -225,7 +304,9 @@ impl 
ResourceSync for entities::sync::ResourceSync { } } -impl ResourceSync for Procedure { +impl ExecuteResourceSync for ResourceSync {} + +impl ResourceSyncTrait for Procedure { fn resource_target(id: String) -> ResourceTarget { ResourceTarget::Procedure(id) } @@ -546,8 +627,10 @@ impl ResourceSync for Procedure { } Ok(original.partial_diff(update)) } +} - async fn run_updates( +impl ExecuteResourceSync for Procedure { + async fn execute_sync_updates( mut to_create: ToCreate, mut to_update: ToUpdate, to_delete: ToDelete, diff --git a/bin/core/src/sync/toml.rs b/bin/core/src/sync/toml.rs new file mode 100644 index 000000000..3a832d12c --- /dev/null +++ b/bin/core/src/sync/toml.rs @@ -0,0 +1,775 @@ +use std::collections::HashMap; + +use anyhow::Context; +use komodo_client::{ + api::execute::Execution, + entities::{ + alerter::Alerter, + build::Build, + builder::{Builder, BuilderConfig, PartialBuilderConfig}, + deployment::{Deployment, DeploymentImage}, + procedure::Procedure, + repo::Repo, + resource::Resource, + server::Server, + server_template::{PartialServerTemplateConfig, ServerTemplate}, + stack::Stack, + sync::ResourceSync, + tag::Tag, + toml::ResourceToml, + }, +}; +use ordered_hash_map::OrderedHashMap; +use partial_derive2::{MaybeNone, PartialDiff}; + +use crate::resource::KomodoResource; + +use super::AllResourcesById; + +pub const TOML_PRETTY_OPTIONS: toml_pretty::Options = + toml_pretty::Options { + tab: " ", + skip_empty_string: true, + max_inline_array_length: 30, + inline_array: false, + }; + +pub trait ToToml: KomodoResource { + /// Replace linked ids (server_id, build_id, etc) with the resource name. 
+ fn replace_ids( + _resource: &mut Resource, + _all: &AllResourcesById, + ) { + } + + fn edit_config_object( + _resource: &ResourceToml, + config: OrderedHashMap, + ) -> anyhow::Result> { + Ok(config) + } + + fn push_additional( + _resource: ResourceToml, + _toml: &mut String, + ) { + } + + fn push_to_toml_string( + mut resource: ResourceToml, + toml: &mut String, + ) -> anyhow::Result<()> { + resource.config = + Self::Config::default().minimize_partial(resource.config); + + let mut resource_map: OrderedHashMap = + serde_json::from_str(&serde_json::to_string(&resource)?)?; + resource_map.remove("config"); + + let config = serde_json::from_str(&serde_json::to_string( + &resource.config, + )?)?; + + let config = Self::edit_config_object(&resource, config)?; + + toml.push_str( + &toml_pretty::to_string(&resource_map, TOML_PRETTY_OPTIONS) + .context("failed to serialize resource to toml")?, + ); + + toml.push_str(&format!( + "\n[{}.config]\n", + Self::resource_type().toml_header() + )); + + toml.push_str( + &toml_pretty::to_string(&config, TOML_PRETTY_OPTIONS) + .context("failed to serialize resource config to toml")?, + ); + + Self::push_additional(resource, toml); + + Ok(()) + } +} + +pub fn resource_toml_to_toml_string( + resource: ResourceToml, +) -> anyhow::Result { + let mut toml = String::new(); + toml + .push_str(&format!("[[{}]]\n", R::resource_type().toml_header())); + R::push_to_toml_string(resource, &mut toml)?; + Ok(toml) +} + +pub fn resource_push_to_toml( + mut resource: Resource, + toml: &mut String, + all: &AllResourcesById, + all_tags: &HashMap, +) -> anyhow::Result<()> { + R::replace_ids(&mut resource, all); + if !toml.is_empty() { + toml.push_str("\n\n##\n\n"); + } + toml + .push_str(&format!("[[{}]]\n", R::resource_type().toml_header())); + R::push_to_toml_string( + convert_resource::(resource, all_tags), + toml, + )?; + Ok(()) +} + +pub fn resource_to_toml( + resource: Resource, + all: &AllResourcesById, + all_tags: &HashMap, +) -> anyhow::Result 
{ + let mut toml = String::new(); + resource_push_to_toml::(resource, &mut toml, all, all_tags)?; + Ok(toml) +} + +pub fn convert_resource( + resource: Resource, + all_tags: &HashMap, +) -> ResourceToml { + ResourceToml { + name: resource.name, + tags: resource + .tags + .iter() + .filter_map(|t| all_tags.get(t).map(|t| t.name.clone())) + .collect(), + description: resource.description, + deploy: false, + after: Default::default(), + latest_hash: false, + // The config still needs to be minimized. + // This happens in ToToml::push_to_toml + config: resource.config.into(), + } +} + +// These have no linked resource ids to replace +impl ToToml for Alerter {} +impl ToToml for Server {} +impl ToToml for ResourceSync {} + +impl ToToml for Stack { + fn replace_ids( + resource: &mut Resource, + all: &AllResourcesById, + ) { + resource.config.server_id.clone_from( + all + .servers + .get(&resource.config.server_id) + .map(|s| &s.name) + .unwrap_or(&String::new()), + ); + } + + fn edit_config_object( + _resource: &ResourceToml, + config: OrderedHashMap, + ) -> anyhow::Result> { + config + .into_iter() + .map(|(key, value)| { + match key.as_str() { + "server_id" => return Ok((String::from("server"), value)), + _ => {} + } + Ok((key, value)) + }) + .collect() + } +} + +impl ToToml for Deployment { + fn replace_ids( + resource: &mut Resource, + all: &AllResourcesById, + ) { + resource.config.server_id.clone_from( + all + .servers + .get(&resource.config.server_id) + .map(|s| &s.name) + .unwrap_or(&String::new()), + ); + if let DeploymentImage::Build { build_id, .. 
} = + &mut resource.config.image + { + build_id.clone_from( + all + .builds + .get(build_id) + .map(|b| &b.name) + .unwrap_or(&String::new()), + ); + } + } + + fn edit_config_object( + resource: &ResourceToml, + config: OrderedHashMap, + ) -> anyhow::Result> { + config + .into_iter() + .map(|(key, mut value)| { + match key.as_str() { + "server_id" => return Ok((String::from("server"), value)), + "image" => { + if let Some(DeploymentImage::Build { version, .. }) = + &resource.config.image + { + let image = value + .get_mut("params") + .context("deployment image has no params")? + .as_object_mut() + .context("deployment image params is not object")?; + if let Some(build) = image.remove("build_id") { + image.insert(String::from("build"), build); + } + if version.is_none() { + image.remove("version"); + } else { + image.insert( + "version".to_string(), + serde_json::Value::String(version.to_string()), + ); + } + } + } + _ => {} + } + Ok((key, value)) + }) + .collect() + } +} + +impl ToToml for Build { + fn replace_ids( + resource: &mut Resource, + all: &AllResourcesById, + ) { + resource.config.builder_id.clone_from( + all + .builders + .get(&resource.config.builder_id) + .map(|s| &s.name) + .unwrap_or(&String::new()), + ); + } + + fn edit_config_object( + resource: &ResourceToml, + config: OrderedHashMap, + ) -> anyhow::Result> { + config + .into_iter() + .map(|(key, value)| match key.as_str() { + "builder_id" => return Ok((String::from("builder"), value)), + "version" => { + match ( + &resource.config.version, + resource.config.auto_increment_version, + ) { + (None, _) => Ok((key, value)), + (_, Some(true)) | (_, None) => { + // The toml shouldn't have a version attached if auto incrementing. + // Empty string will be filtered out in final toml. 
+ Ok((key, serde_json::Value::String(String::new()))) + } + (Some(version), _) => Ok(( + key, + serde_json::Value::String(version.to_string()), + )), + } + } + _ => Ok((key, value)), + }) + .collect() + } +} + +impl ToToml for Repo { + fn replace_ids( + resource: &mut Resource, + all: &AllResourcesById, + ) { + resource.config.server_id.clone_from( + all + .servers + .get(&resource.config.server_id) + .map(|s| &s.name) + .unwrap_or(&String::new()), + ); + resource.config.builder_id.clone_from( + all + .builders + .get(&resource.config.builder_id) + .map(|s| &s.name) + .unwrap_or(&String::new()), + ); + } + + fn edit_config_object( + _resource: &ResourceToml, + config: OrderedHashMap, + ) -> anyhow::Result> { + config + .into_iter() + .map(|(key, value)| { + match key.as_str() { + "server_id" => return Ok((String::from("server"), value)), + "builder_id" => { + return Ok((String::from("builder"), value)) + } + _ => {} + } + Ok((key, value)) + }) + .collect() + } +} + +impl ToToml for ServerTemplate { + fn push_additional( + resource: ResourceToml, + toml: &mut String, + ) { + let empty_params = match resource.config { + PartialServerTemplateConfig::Aws(config) => config.is_none(), + PartialServerTemplateConfig::Hetzner(config) => { + config.is_none() + } + }; + if empty_params { + // toml_pretty will remove empty map + // but in this case its needed to deserialize the enums. 
+ toml.push_str("\nconfig.params = {}"); + } + } +} + +impl ToToml for Builder { + fn replace_ids( + resource: &mut Resource, + all: &AllResourcesById, + ) { + if let BuilderConfig::Server(config) = &mut resource.config { + config.server_id.clone_from( + all + .servers + .get(&config.server_id) + .map(|s| &s.name) + .unwrap_or(&String::new()), + ) + } + } + + fn push_additional( + resource: ResourceToml, + toml: &mut String, + ) { + let empty_params = match resource.config { + PartialBuilderConfig::Aws(config) => config.is_none(), + PartialBuilderConfig::Server(config) => config.is_none(), + }; + if empty_params { + // toml_pretty will remove empty map + // but in this case its needed to deserialize the enums. + toml.push_str("\nconfig.params = {}"); + } + } +} + +impl ToToml for Procedure { + fn replace_ids( + resource: &mut Resource, + all: &AllResourcesById, + ) { + for stage in &mut resource.config.stages { + for execution in &mut stage.executions { + match &mut execution.execution { + Execution::RunProcedure(exec) => exec.procedure.clone_from( + all + .procedures + .get(&exec.procedure) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::RunBuild(exec) => exec.build.clone_from( + all + .builds + .get(&exec.build) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::CancelBuild(exec) => exec.build.clone_from( + all + .builds + .get(&exec.build) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::Deploy(exec) => exec.deployment.clone_from( + all + .deployments + .get(&exec.deployment) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::StartDeployment(exec) => { + exec.deployment.clone_from( + all + .deployments + .get(&exec.deployment) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::RestartDeployment(exec) => { + exec.deployment.clone_from( + all + .deployments + .get(&exec.deployment) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::PauseDeployment(exec) 
=> { + exec.deployment.clone_from( + all + .deployments + .get(&exec.deployment) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::UnpauseDeployment(exec) => { + exec.deployment.clone_from( + all + .deployments + .get(&exec.deployment) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::StopDeployment(exec) => { + exec.deployment.clone_from( + all + .deployments + .get(&exec.deployment) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::DestroyDeployment(exec) => { + exec.deployment.clone_from( + all + .deployments + .get(&exec.deployment) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::CloneRepo(exec) => exec.repo.clone_from( + all + .repos + .get(&exec.repo) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::PullRepo(exec) => exec.repo.clone_from( + all + .repos + .get(&exec.repo) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::BuildRepo(exec) => exec.repo.clone_from( + all + .repos + .get(&exec.repo) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::CancelRepoBuild(exec) => exec.repo.clone_from( + all + .repos + .get(&exec.repo) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::StartContainer(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::RestartContainer(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::PauseContainer(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::UnpauseContainer(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::StopContainer(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + 
.unwrap_or(&String::new()), + ), + Execution::DestroyContainer(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::StartAllContainers(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::RestartAllContainers(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::PauseAllContainers(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::UnpauseAllContainers(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::StopAllContainers(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::PruneContainers(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::DeleteNetwork(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::PruneNetworks(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::DeleteImage(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::PruneImages(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::DeleteVolume(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::PruneVolumes(exec) => exec.server.clone_from( + 
all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::PruneDockerBuilders(exec) => { + exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ) + } + Execution::PruneBuildx(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::PruneSystem(exec) => exec.server.clone_from( + all + .servers + .get(&exec.server) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::RunSync(exec) => exec.sync.clone_from( + all + .syncs + .get(&exec.sync) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::DeployStack(exec) => exec.stack.clone_from( + all + .stacks + .get(&exec.stack) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::StartStack(exec) => exec.stack.clone_from( + all + .stacks + .get(&exec.stack) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::RestartStack(exec) => exec.stack.clone_from( + all + .stacks + .get(&exec.stack) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::PauseStack(exec) => exec.stack.clone_from( + all + .stacks + .get(&exec.stack) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::UnpauseStack(exec) => exec.stack.clone_from( + all + .stacks + .get(&exec.stack) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::StopStack(exec) => exec.stack.clone_from( + all + .stacks + .get(&exec.stack) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::DestroyStack(exec) => exec.stack.clone_from( + all + .stacks + .get(&exec.stack) + .map(|r| &r.name) + .unwrap_or(&String::new()), + ), + Execution::Sleep(_) | Execution::None(_) => {} + } + } + } + } + + fn push_to_toml_string( + mut resource: ResourceToml, + toml: &mut String, + ) -> anyhow::Result<()> { + resource.config = + Self::Config::default().minimize_partial(resource.config); + + let mut 
parsed: OrderedHashMap = + serde_json::from_str(&serde_json::to_string(&resource)?)?; + + let config = parsed + .get_mut("config") + .context("procedure has no config?")? + .as_object_mut() + .context("config is not object?")?; + + let stages = config.remove("stages"); + + toml.push_str( + &toml_pretty::to_string(&parsed, TOML_PRETTY_OPTIONS) + .context("failed to serialize procedures to toml")?, + ); + + if let Some(stages) = stages { + let stages = + stages.as_array().context("stages is not array")?; + for stage in stages { + toml.push_str("\n\n[[procedure.config.stage]]\n"); + toml.push_str( + &toml_pretty::to_string(stage, TOML_PRETTY_OPTIONS) + .context("failed to serialize procedures to toml")?, + ); + } + } + + Ok(()) + } +} diff --git a/bin/core/src/helpers/sync/user_groups.rs b/bin/core/src/sync/user_groups.rs similarity index 80% rename from bin/core/src/helpers/sync/user_groups.rs rename to bin/core/src/sync/user_groups.rs index 5f2f34b86..58c1edca4 100644 --- a/bin/core/src/helpers/sync/user_groups.rs +++ b/bin/core/src/sync/user_groups.rs @@ -12,7 +12,7 @@ use komodo_client::{ }, entities::{ permission::{PermissionLevel, UserTarget}, - sync::SyncUpdate, + sync::DiffData, toml::{PermissionToml, UserGroupToml}, update::Log, user::sync_user, @@ -25,7 +25,7 @@ use resolver_api::Resolve; use crate::state::{db_client, State}; -use super::resource::AllResourcesById; +use super::{toml::TOML_PRETTY_OPTIONS, AllResourcesById}; pub struct UpdateItem { user_group: UserGroupToml, @@ -42,31 +42,31 @@ pub async fn get_updates_for_view( user_groups: Vec, delete: bool, all_resources: &AllResourcesById, -) -> anyhow::Result> { - let map = find_collect(&db_client().await.user_groups, None, None) +) -> anyhow::Result> { + let map = find_collect(&db_client().user_groups, None, None) .await .context("failed to query db for UserGroups")? 
.into_iter() .map(|ug| (ug.name.clone(), ug)) .collect::>(); - let mut update = SyncUpdate { - log: String::from("User Group Updates"), - ..Default::default() - }; - - let mut to_delete = Vec::::new(); + let mut diffs = Vec::::new(); if delete { for user_group in map.values() { if !user_groups.iter().any(|ug| ug.name == user_group.name) { - update.to_delete += 1; - to_delete.push(user_group.name.clone()); + diffs.push(DiffData::Delete { + current: format!( + "[[user_group]]\n{}", + toml_pretty::to_string(user_group, TOML_PRETTY_OPTIONS) + .context("failed to serialize user group to toml")? + ), + }); } } } - let id_to_user = find_collect(&db_client().await.users, None, None) + let id_to_user = find_collect(&db_client().users, None, None) .await .context("failed to query db for Users")? .into_iter() @@ -93,36 +93,20 @@ pub async fn get_updates_for_view( let original = match map.get(&user_group.name).cloned() { Some(original) => original, None => { - update.to_create += 1; - if user_group.all.is_empty() { - update.log.push_str(&format!( - "\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}", - colored("CREATE", Color::Green), - colored(&user_group.name, Color::Green), - muted("users"), - user_group.users, - muted("permissions"), - user_group.permissions, - )); - } else { - update.log.push_str(&format!( - "\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}\n{}: {:#?}", - colored("CREATE", Color::Green), - colored(&user_group.name, Color::Green), - muted("users"), - user_group.users, - muted("base permissions"), - user_group.all, - muted("permissions"), - user_group.permissions, - )); - } + diffs.push(DiffData::Create { + proposed: format!( + "[[user_group]]\n{}", + toml_pretty::to_string(&user_group, TOML_PRETTY_OPTIONS) + .context("failed to serialize user group to toml")? 
+ ), + }); continue; } }; let mut original_users = original .users + .clone() .into_iter() .filter_map(|user_id| { id_to_user.get(&user_id).map(|u| u.username.clone()) @@ -132,7 +116,7 @@ pub async fn get_updates_for_view( let mut original_permissions = State .resolve( ListUserTargetPermissions { - user_target: UserTarget::UserGroup(original.id), + user_target: UserTarget::UserGroup(original.id.clone()), }, sync_user().to_owned(), ) @@ -237,146 +221,22 @@ pub async fn get_updates_for_view( // only add log after diff detected if update_users || update_all || update_permissions { - update.to_update += 1; - update.log.push_str(&format!( - "\n\n{}: user group: '{}'\n-------------------", - colored("UPDATE", Color::Blue), - bold(&user_group.name), - )); - - let mut lines = Vec::::new(); - - if update_users { - let adding = user_group - .users - .iter() - .filter(|user| !original_users.contains(user)) - .map(|user| user.as_str()) - .collect::>(); - let adding = if adding.is_empty() { - String::from("None") - } else { - colored(adding.join(", "), Color::Green) - }; - let removing = original_users - .iter() - .filter(|user| !user_group.users.contains(user)) - .map(|user| user.as_str()) - .collect::>(); - let removing = if removing.is_empty() { - String::from("None") - } else { - colored(removing.join(", "), Color::Red) - }; - lines.push(format!( - "{}: 'users'\n{}: {removing}\n{}: {adding}", - muted("field"), - muted("removing"), - muted("adding"), - )) - } - - if update_all { - let updates = all_diff - .into_iter() - .map(|(variant, (orig, incoming))| { - format!( - "{}: {} {} {}", - bold(variant), - colored(orig, Color::Red), - muted("->"), - colored(incoming, Color::Green) - ) - }) - .collect::>() - .join("\n"); - lines.push(format!( - "{}: 'base permission'\n{updates}", - muted("field"), - )) - } - - if update_permissions { - let adding = user_group - .permissions - .iter() - .filter(|permission| { - // add if original has no exising permission on the target - 
!original_permissions - .iter() - .any(|p| p.target == permission.target) - }) - .map(|permission| format!("{permission:?}")) - .collect::>(); - let adding = if adding.is_empty() { - String::from("None") - } else { - colored(adding.join(", "), Color::Green) - }; - let updating = user_group - .permissions - .iter() - .filter(|permission| { - // update if original has exising permission on the target with different level - let Some(level) = original_permissions - .iter() - .find(|p| p.target == permission.target) - .map(|p| p.level) - else { - return false; - }; - permission.level != level - }) - .map(|permission| format!("{permission:?}")) - .collect::>(); - let updating = if updating.is_empty() { - String::from("None") - } else { - colored(updating.join(", "), Color::Blue) - }; - let removing = original_permissions - .iter() - .filter(|permission| { - // remove if incoming has no permission on the target - !user_group - .permissions - .iter() - .any(|p| p.target == permission.target) - }) - .map(|permission| format!("{permission:?}")) - .collect::>(); - let removing = if removing.is_empty() { - String::from("None") - } else { - colored(removing.join(", "), Color::Red) - }; - lines.push(format!( - "{}: 'permissions'\n{}: {removing}\n{}: {updating}\n{}: {adding}", - muted("field"), - muted("removing"), - muted("updating"), - muted("adding"), - )) - } - - update.log.push('\n'); - update.log.push_str(&lines.join("\n-------------------\n")); + diffs.push(DiffData::Update { + proposed: format!( + "[[user_group]]\n{}", + toml_pretty::to_string(&user_group, TOML_PRETTY_OPTIONS) + .context("failed to serialize user group to toml")? + ), + current: format!( + "[[user_group]]\n{}", + toml_pretty::to_string(&original, TOML_PRETTY_OPTIONS) + .context("failed to serialize user group to toml")? 
+ ), + }); } } - for name in &to_delete { - update.log.push_str(&format!( - "\n\n{}: user group: '{}'\n-------------------", - colored("DELETE", Color::Red), - bold(name), - )); - } - - let any_change = update.to_create > 0 - || update.to_update > 0 - || update.to_delete > 0; - - Ok(any_change.then_some(update)) + Ok(diffs) } pub async fn get_updates_for_execution( @@ -388,7 +248,7 @@ pub async fn get_updates_for_execution( Vec, Vec, )> { - let map = find_collect(&db_client().await.user_groups, None, None) + let map = find_collect(&db_client().user_groups, None, None) .await .context("failed to query db for UserGroups")? .into_iter() @@ -414,7 +274,7 @@ pub async fn get_updates_for_execution( return Ok((to_create, to_update, to_delete)); } - let id_to_user = find_collect(&db_client().await.users, None, None) + let id_to_user = find_collect(&db_client().users, None, None) .await .context("failed to query db for Users")? .into_iter() diff --git a/bin/core/src/helpers/sync/variables.rs b/bin/core/src/sync/variables.rs similarity index 58% rename from bin/core/src/helpers/sync/variables.rs rename to bin/core/src/sync/variables.rs index e2b3d0a96..37382b68b 100644 --- a/bin/core/src/helpers/sync/variables.rs +++ b/bin/core/src/sync/variables.rs @@ -4,11 +4,11 @@ use anyhow::Context; use formatting::{bold, colored, muted, Color}; use komodo_client::{ api::write::{ - CreateVariable, DeleteVariable, UpdateVariableDescription, UpdateVariableIsSecret, UpdateVariableValue + CreateVariable, DeleteVariable, UpdateVariableDescription, + UpdateVariableIsSecret, UpdateVariableValue, }, entities::{ - sync::SyncUpdate, update::Log, user::sync_user, - variable::Variable, + sync::DiffData, update::Log, user::sync_user, variable::Variable, }, }; use mungos::find::find_collect; @@ -16,6 +16,8 @@ use resolver_api::Resolve; use crate::state::{db_client, State}; +use super::toml::TOML_PRETTY_OPTIONS; + pub struct ToUpdateItem { pub variable: Variable, pub update_value: bool, @@ -24,28 
+26,28 @@ pub struct ToUpdateItem { } pub async fn get_updates_for_view( - variables: Vec, + variables: &[Variable], delete: bool, -) -> anyhow::Result> { - let map = find_collect(&db_client().await.variables, None, None) +) -> anyhow::Result> { + let map = find_collect(&db_client().variables, None, None) .await .context("failed to query db for variables")? .into_iter() .map(|v| (v.name.clone(), v)) .collect::>(); - let mut update = SyncUpdate { - log: String::from("Variable Updates"), - ..Default::default() - }; - - let mut to_delete = Vec::::new(); + let mut diffs = Vec::::new(); if delete { for variable in map.values() { if !variables.iter().any(|v| v.name == variable.name) { - update.to_delete += 1; - to_delete.push(variable.name.clone()); + diffs.push(DiffData::Delete { + current: format!( + "[[variable]]\n{}", + toml_pretty::to_string(&variable, TOML_PRETTY_OPTIONS) + .context("failed to serialize variable to toml")? + ), + }); } } } @@ -53,141 +55,44 @@ pub async fn get_updates_for_view( for variable in variables { match map.get(&variable.name) { Some(original) => { - let item = ToUpdateItem { - update_value: original.value != variable.value, - update_description: original.description - != variable.description, - update_is_secret: original.is_secret != variable.is_secret, - variable, - }; - if !item.update_value && !item.update_description { + if original.value == variable.value + && original.description == variable.description + { continue; } - update.to_update += 1; - update.log.push_str(&format!( - "\n\n{}: variable: '{}'\n-------------------", - colored("UPDATE", Color::Blue), - bold(&item.variable.name), - )); - - let mut lines = Vec::::new(); - - if item.update_value { - let mut log = format!("{}: 'value'\n", muted("field"),); - if item.variable.is_secret { - log.push_str(&format!( - "{}: {}\n{}: {}", - muted("from"), - colored( - original.value.replace(|_| true, "#"), - Color::Red - ), - muted("to"), - colored( - item.variable.value.replace(|_| true, 
"#"), - Color::Green - ) - )); - } else { - log.push_str(&format!( - "{}: {}\n{}: {}", - muted("from"), - colored(&original.value, Color::Red), - muted("to"), - colored(&item.variable.value, Color::Green) - )); - } - lines.push(log); - } - - if item.update_description { - lines.push(format!( - "{}: 'description'\n{}: {}\n{}: {}", - muted("field"), - muted("from"), - colored(&original.description, Color::Red), - muted("to"), - colored(&item.variable.description, Color::Green) - )) - } - - if item.update_is_secret { - lines.push(format!( - "{}: 'is_secret'\n{}: {}\n{}: {}", - muted("field"), - muted("from"), - colored(original.is_secret, Color::Red), - muted("to"), - colored(item.variable.is_secret, Color::Green) - )) - } - - update.log.push('\n'); - update.log.push_str(&lines.join("\n-------------------\n")); + diffs.push(DiffData::Update { + proposed: format!( + "[[variable]]\n{}", + toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS) + .context("failed to serialize variable to toml")? + ), + current: format!( + "[[variable]]\n{}", + toml_pretty::to_string(original, TOML_PRETTY_OPTIONS) + .context("failed to serialize variable to toml")? 
+ ), + }); } None => { - update.to_create += 1; - if variable.description.is_empty() { - update.log.push_str(&format!( - "\n\n{}: variable: {}", - colored("CREATE", Color::Green), - colored(&variable.name, Color::Green), - )); - if variable.is_secret { - update - .log - .push_str(&format!("\n{}: true", muted("is secret"),)); - } else { - update.log.push_str(&format!( - "\n{}: {}", - muted("value"), - variable.value, - )); - } - } else { - update.log.push_str(&format!( - "\n\n{}: variable: {}\n{}: {}", - colored("CREATE", Color::Green), - colored(&variable.name, Color::Green), - muted("description"), - variable.description, - )); - if variable.is_secret { - update - .log - .push_str(&format!("\n{}: true", muted("is secret"),)); - } else { - update.log.push_str(&format!( - "\n{}: {}", - muted("value"), - variable.value, - )); - } - } + diffs.push(DiffData::Create { + proposed: format!( + "[[variable]]\n{}", + toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS) + .context("failed to serialize variable to toml")? + ), + }); } } } - for name in &to_delete { - update.log.push_str(&format!( - "\n\n{}: variable: '{}'\n-------------------", - colored("DELETE", Color::Red), - bold(name), - )); - } - - let any_change = update.to_create > 0 - || update.to_update > 0 - || update.to_delete > 0; - - Ok(any_change.then_some(update)) + Ok(diffs) } pub async fn get_updates_for_execution( variables: Vec, delete: bool, ) -> anyhow::Result<(Vec, Vec, Vec)> { - let map = find_collect(&db_client().await.variables, None, None) + let map = find_collect(&db_client().variables, None, None) .await .context("failed to query db for variables")? 
.into_iter() diff --git a/bin/core/src/sync/view.rs b/bin/core/src/sync/view.rs new file mode 100644 index 000000000..7d3dfc498 --- /dev/null +++ b/bin/core/src/sync/view.rs @@ -0,0 +1,133 @@ +use std::collections::HashMap; + +use anyhow::Context; +use komodo_client::entities::{ + sync::{DiffData, ResourceDiff}, + tag::Tag, + toml::ResourceToml, +}; +use mungos::find::find_collect; +use partial_derive2::MaybeNone; + +use super::{AllResourcesById, ResourceSyncTrait}; + +pub async fn push_updates_for_view( + resources: Vec>, + delete: bool, + all_resources: &AllResourcesById, + all_tags: &HashMap, + match_tags: &[String], + diffs: &mut Vec, +) -> anyhow::Result<()> { + let current_map = find_collect(Resource::coll().await, None, None) + .await + .context("failed to get resources from db")? + .into_iter() + .filter(|r| { + Resource::include_resource( + &r.config, &r.tags, all_tags, match_tags, + ) + }) + .map(|r| (r.name.clone(), r)) + .collect::>(); + + if delete { + for current_resource in current_map.values() { + if !resources.iter().any(|r| r.name == current_resource.name) { + diffs.push(ResourceDiff { + target: Resource::resource_target( + current_resource.id.clone(), + ), + data: DiffData::Delete { + current: super::toml::resource_to_toml::( + current_resource.clone(), + all_resources, + all_tags, + )?, + }, + }); + } + } + } + + for mut proposed_resource in resources { + // only resource that might not be included is resource sync + if !Resource::include_resource_partial( + &proposed_resource.config, + &proposed_resource.tags, + all_tags, + match_tags, + ) { + continue; + } + match current_map.get(&proposed_resource.name) { + Some(current_resource) => { + // First merge toml resource config (partial) onto default resource config. + // Makes sure things that aren't defined in toml (come through as None) actually get removed. 
+ let propsed_config: Resource::Config = + proposed_resource.config.into(); + proposed_resource.config = propsed_config.into(); + + Resource::validate_partial_config( + &mut proposed_resource.config, + ); + + let proposed = super::toml::resource_toml_to_toml_string::< + Resource, + >(proposed_resource.clone())?; + + let mut diff = Resource::get_diff( + current_resource.config.clone(), + proposed_resource.config, + all_resources, + )?; + + Resource::validate_diff(&mut diff); + + let current_tags = current_resource + .tags + .iter() + .filter_map(|id| all_tags.get(id).map(|t| t.name.clone())) + .collect::>(); + + // Only proceed if there are any fields to update, + // or a change to tags / description + if diff.is_none() + && proposed_resource.description + == current_resource.description + && proposed_resource.tags == current_tags + { + continue; + } + + diffs.push(ResourceDiff { + target: Resource::resource_target( + current_resource.id.clone(), + ), + data: DiffData::Update { + proposed, + current: super::toml::resource_to_toml::( + current_resource.clone(), + all_resources, + all_tags, + )?, + }, + }); + } + None => { + diffs.push(ResourceDiff { + // resources to Create don't have ids yet. + target: Resource::resource_target(String::new()), + + data: DiffData::Create { + proposed: super::toml::resource_toml_to_toml_string::< + Resource, + >(proposed_resource)?, + }, + }); + } + } + } + + Ok(()) +} diff --git a/bin/core/src/ws.rs b/bin/core/src/ws.rs index d4b673e03..667cb430a 100644 --- a/bin/core/src/ws.rs +++ b/bin/core/src/ws.rs @@ -51,7 +51,7 @@ async fn ws_handler(ws: WebSocketUpgrade) -> impl IntoResponse { let cancel_clone = cancel.clone(); tokio::spawn(async move { - let db_client = db_client().await; + let db_client = db_client(); loop { // poll for updates off the receiver / await cancel. let update = select! 
{ diff --git a/bin/migrator/Cargo.toml b/bin/migrator/Cargo.toml index 4e88f3462..6ed02387a 100644 --- a/bin/migrator/Cargo.toml +++ b/bin/migrator/Cargo.toml @@ -10,10 +10,10 @@ repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -komodo_client.workspace = true +# komodo_client.workspace = true logger.workspace = true # -mungos.workspace = true +# mungos.workspace = true # tokio.workspace = true anyhow.workspace = true diff --git a/bin/migrator/src/legacy/v1_11/build.rs b/bin/migrator/src/legacy/v1_11/build.rs index 1b8c6bf8e..236bc841f 100644 --- a/bin/migrator/src/legacy/v1_11/build.rs +++ b/bin/migrator/src/legacy/v1_11/build.rs @@ -240,7 +240,7 @@ impl From ) } ImageRegistry::AwsEcr(label) => { - komodo_client::entities::build::ImageRegistry::AwsEcr(label) + komodo_client::entities::build::ImageRegistry::None(NoData {}) } } } diff --git a/bin/migrator/src/legacy/v1_11/mod.rs b/bin/migrator/src/legacy/v1_11/mod.rs index 6f676c84e..26d2e476f 100644 --- a/bin/migrator/src/legacy/v1_11/mod.rs +++ b/bin/migrator/src/legacy/v1_11/mod.rs @@ -1,48 +1,48 @@ -use mungos::{init::MongoBuilder, mongodb::Collection}; -use serde::{Deserialize, Serialize}; +// use mungos::{init::MongoBuilder, mongodb::Collection}; +// use serde::{Deserialize, Serialize}; -pub mod build; -pub mod deployment; -pub mod resource; +// pub mod build; +// pub mod deployment; +// pub mod resource; -pub struct DbClient { - pub builds: Collection, - pub deployments: Collection, -} +// pub struct DbClient { +// pub builds: Collection, +// pub deployments: Collection, +// } -impl DbClient { - pub async fn new( - legacy_uri: &str, - legacy_db_name: &str, - ) -> DbClient { - let client = MongoBuilder::default() - .uri(legacy_uri) - .build() - .await - .expect("failed to init legacy mongo client"); - let db = client.database(legacy_db_name); - DbClient { - builds: db.collection("Build"), - deployments: 
db.collection("Deployment"), - } - } -} +// impl DbClient { +// pub async fn new( +// legacy_uri: &str, +// legacy_db_name: &str, +// ) -> DbClient { +// let client = MongoBuilder::default() +// .uri(legacy_uri) +// .build() +// .await +// .expect("failed to init legacy mongo client"); +// let db = client.database(legacy_db_name); +// DbClient { +// builds: db.collection("Build"), +// deployments: db.collection("Deployment"), +// } +// } +// } -#[derive( - Serialize, Deserialize, Debug, Clone, Default, PartialEq, -)] -pub struct Version { - pub major: i32, - pub minor: i32, - pub patch: i32, -} +// #[derive( +// Serialize, Deserialize, Debug, Clone, Default, PartialEq, +// )] +// pub struct Version { +// pub major: i32, +// pub minor: i32, +// pub patch: i32, +// } -#[derive( - Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, -)] -pub struct SystemCommand { - #[serde(default)] - pub path: String, - #[serde(default)] - pub command: String, -} +// #[derive( +// Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, +// )] +// pub struct SystemCommand { +// #[serde(default)] +// pub path: String, +// #[serde(default)] +// pub command: String, +// } diff --git a/bin/migrator/src/main.rs b/bin/migrator/src/main.rs index 2b0431c8f..16165373f 100644 --- a/bin/migrator/src/main.rs +++ b/bin/migrator/src/main.rs @@ -1,3 +1,4 @@ +#![allow(unused)] #[macro_use] extern crate tracing; @@ -30,12 +31,12 @@ async fn main() -> anyhow::Result<()> { match env.migration { Migration::V1_11 => { - let db = legacy::v1_11::DbClient::new( - &env.target_uri, - &env.target_db_name, - ) - .await; - migrate::v1_11::migrate_all_in_place(&db).await? + // let db = legacy::v1_11::DbClient::new( + // &env.target_uri, + // &env.target_db_name, + // ) + // .await; + // migrate::v1_11::migrate_all_in_place(&db).await? 
} } diff --git a/bin/migrator/src/migrate/v1_11.rs b/bin/migrator/src/migrate/v1_11.rs index 52549f05e..60cd15ae3 100644 --- a/bin/migrator/src/migrate/v1_11.rs +++ b/bin/migrator/src/migrate/v1_11.rs @@ -1,72 +1,70 @@ -use anyhow::Context; -use komodo_client::entities::{ - build::Build, deployment::Deployment, -}; -use mungos::{ - find::find_collect, - mongodb::bson::{doc, to_document}, -}; +// use anyhow::Context; +// use komodo_client::entities::{build::Build, deployment::Deployment}; +// use mungos::{ +// find::find_collect, +// mongodb::bson::{doc, to_document}, +// }; -use crate::legacy::v1_11; +// use crate::legacy::v1_11; -pub async fn migrate_all_in_place( - db: &v1_11::DbClient, -) -> anyhow::Result<()> { - migrate_builds_in_place(db).await?; - migrate_deployments_in_place(db).await?; - Ok(()) -} +// pub async fn migrate_all_in_place( +// db: &v1_11::DbClient, +// ) -> anyhow::Result<()> { +// migrate_builds_in_place(db).await?; +// migrate_deployments_in_place(db).await?; +// Ok(()) +// } -pub async fn migrate_builds_in_place( - db: &v1_11::DbClient, -) -> anyhow::Result<()> { - let builds = find_collect(&db.builds, None, None) - .await - .context("failed to get builds")? - .into_iter() - .map(Into::into) - .collect::>(); +// pub async fn migrate_builds_in_place( +// db: &v1_11::DbClient, +// ) -> anyhow::Result<()> { +// let builds = find_collect(&db.builds, None, None) +// .await +// .context("failed to get builds")? +// .into_iter() +// .map(Into::into) +// .collect::>(); - info!("migrating {} builds...", builds.len()); +// info!("migrating {} builds...", builds.len()); - for build in builds { - db.builds - .update_one( - doc! { "name": &build.name }, - doc! { "$set": to_document(&build)? }, - ) - .await - .context("failed to insert builds on target")?; - } +// for build in builds { +// db.builds +// .update_one( +// doc! { "name": &build.name }, +// doc! { "$set": to_document(&build)? 
}, +// ) +// .await +// .context("failed to insert builds on target")?; +// } - info!("builds have been migrated\n"); +// info!("builds have been migrated\n"); - Ok(()) -} +// Ok(()) +// } -pub async fn migrate_deployments_in_place( - db: &v1_11::DbClient, -) -> anyhow::Result<()> { - let deployments = find_collect(&db.deployments, None, None) - .await - .context("failed to get deployments")? - .into_iter() - .map(Into::into) - .collect::>(); +// pub async fn migrate_deployments_in_place( +// db: &v1_11::DbClient, +// ) -> anyhow::Result<()> { +// let deployments = find_collect(&db.deployments, None, None) +// .await +// .context("failed to get deployments")? +// .into_iter() +// .map(Into::into) +// .collect::>(); - info!("migrating {} deployments...", deployments.len()); +// info!("migrating {} deployments...", deployments.len()); - for deployment in deployments { - db.deployments - .update_one( - doc! { "name": &deployment.name }, - doc! { "$set": to_document(&deployment)? }, - ) - .await - .context("failed to insert deployments on target")?; - } +// for deployment in deployments { +// db.deployments +// .update_one( +// doc! { "name": &deployment.name }, +// doc! { "$set": to_document(&deployment)? 
}, +// ) +// .await +// .context("failed to insert deployments on target")?; +// } - info!("deployments have been migrated\n"); +// info!("deployments have been migrated\n"); - Ok(()) -} +// Ok(()) +// } diff --git a/bin/periphery/Cargo.toml b/bin/periphery/Cargo.toml index afab2fe87..123b79c78 100644 --- a/bin/periphery/Cargo.toml +++ b/bin/periphery/Cargo.toml @@ -15,8 +15,9 @@ path = "src/main.rs" [dependencies] # local -periphery_client.workspace = true komodo_client.workspace = true +periphery_client.workspace = true +environment_file.workspace = true formatting.workspace = true command.workspace = true logger.workspace = true @@ -29,6 +30,7 @@ resolver_api.workspace = true run_command.workspace = true svi.workspace = true # external +axum-server.workspace = true axum-extra.workspace = true serde_json.workspace = true futures.workspace = true diff --git a/bin/periphery/Dockerfile b/bin/periphery/Dockerfile deleted file mode 100644 index af8dee79b..000000000 --- a/bin/periphery/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -# Build Periphery -FROM rust:1.81.0-bookworm AS builder -WORKDIR /builder -COPY . . -RUN cargo build -p komodo_periphery --release - -# Final Image -FROM debian:bookworm-slim - -# Install Deps -RUN apt update && apt install -y git curl ca-certificates && \ - curl -fsSL https://get.docker.com | sh - -# Copy -COPY --from=builder /builder/target/release/periphery / - -# Hint at the port -EXPOSE 8120 - -# Label for Ghcr -LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo -LABEL org.opencontainers.image.description="Komodo Periphery" -LABEL org.opencontainers.image.licenses=GPL-3.0 - -# Using ENTRYPOINT allows cli args to be passed, eg using "command" in docker compose. 
-ENTRYPOINT [ "./periphery" ] \ No newline at end of file diff --git a/bin/periphery/alpine.Dockerfile b/bin/periphery/alpine.Dockerfile new file mode 100644 index 000000000..6123c92fd --- /dev/null +++ b/bin/periphery/alpine.Dockerfile @@ -0,0 +1,35 @@ +## This one produces smaller images, +## but alpine uses `musl` instead of `glibc`. +## This makes it take longer / more resources to build, +## and may negatively affect runtime performance. + +# Build Periphery +FROM rust:1.81.0-alpine AS builder +WORKDIR /builder +COPY . . +RUN apk update && apk --no-cache add musl-dev openssl-dev openssl-libs-static +RUN cargo build -p komodo_periphery --release + +# Final Image +FROM alpine:3.20 + +# Install Deps +RUN apk update && apk add --no-cache --virtual .build-deps \ + docker-cli docker-cli-compose openssl ca-certificates git git-lfs bash + +# Setup an application directory +WORKDIR /app + +# Copy +COPY --from=builder /builder/target/release/periphery /app + +# Hint at the port +EXPOSE 8120 + +# Label for Ghcr +LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo +LABEL org.opencontainers.image.description="Komodo Periphery" +LABEL org.opencontainers.image.licenses=GPL-3.0 + +# Using ENTRYPOINT allows cli args to be passed, eg using "command" in docker compose. +ENTRYPOINT [ "/app/periphery" ] \ No newline at end of file diff --git a/bin/periphery/debian-deps.sh b/bin/periphery/debian-deps.sh new file mode 100644 index 000000000..06ed908b3 --- /dev/null +++ b/bin/periphery/debian-deps.sh @@ -0,0 +1,18 @@ +#!/bin/bash +apt-get update +apt-get install -y git curl wget ca-certificates +install -m 0755 -d /etc/apt/keyrings +curl -fsSL https://download.docker.com/linux/debian/gpg -o /etc/apt/keyrings/docker.asc +chmod a+r /etc/apt/keyrings/docker.asc + +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/debian \ + $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | \ + tee /etc/apt/sources.list.d/docker.list > /dev/null + +apt-get update + +# apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +apt-get install -y docker-ce-cli docker-buildx-plugin docker-compose-plugin + +rm -rf /var/lib/apt/lists/* \ No newline at end of file diff --git a/bin/periphery/slim.Dockerfile b/bin/periphery/debian.Dockerfile similarity index 70% rename from bin/periphery/slim.Dockerfile rename to bin/periphery/debian.Dockerfile index 58aec3e59..a097de929 100644 --- a/bin/periphery/slim.Dockerfile +++ b/bin/periphery/debian.Dockerfile @@ -1,15 +1,15 @@ # Build Periphery -FROM rust:1.81.0-alpine AS builder +FROM rust:1.81.0-bullseye AS builder WORKDIR /builder COPY . . -RUN apk update && apk --no-cache add musl-dev openssl-dev openssl-libs-static RUN cargo build -p komodo_periphery --release # Final Image -FROM alpine:3.20 +FROM debian:bullseye-slim -# Install Deps -RUN apk update && apk add docker-cli docker-cli-compose openssl git git-lfs bash +# # Install Deps +COPY ./bin/periphery/debian-deps.sh . 
+RUN sh ./debian-deps.sh && rm ./debian-deps.sh # Setup an application directory WORKDIR /app @@ -18,7 +18,7 @@ WORKDIR /app COPY --from=builder /builder/target/release/periphery /app # Hint at the port -EXPOSE 8120 +EXPOSE 8120 # Label for Ghcr LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo diff --git a/bin/periphery/src/api/build.rs b/bin/periphery/src/api/build.rs index 61e25068c..77a59fa2b 100644 --- a/bin/periphery/src/api/build.rs +++ b/bin/periphery/src/api/build.rs @@ -3,11 +3,14 @@ use command::run_komodo_command; use formatting::format_serror; use komodo_client::entities::{ build::{Build, BuildConfig}, - get_image_name, optional_string, to_komodo_name, + environment_vars_from_str, get_image_name, optional_string, + to_komodo_name, update::Log, EnvironmentVar, Version, }; -use periphery_client::api::build::{self, PruneBuilders, PruneBuildx}; +use periphery_client::api::build::{ + self, PruneBuilders, PruneBuildx, +}; use resolver_api::Resolve; use crate::{ @@ -23,7 +26,6 @@ impl Resolve for State { &self, build::Build { build, - aws_ecr, registry_token, additional_tags, replacers: core_replacers, @@ -54,9 +56,9 @@ impl Resolve for State { // Maybe docker login let should_push = match docker_login( - image_registry, + &image_registry.domain, + &image_registry.account, registry_token.as_deref(), - aws_ecr.as_ref(), ) .await { @@ -83,12 +85,19 @@ impl Resolve for State { }; // Get command parts - let image_name = get_image_name(&build, |_| aws_ecr) - .context("failed to make image name")?; - let build_args = parse_build_args(build_args); + let image_name = + get_image_name(&build).context("failed to make image name")?; + let build_args = parse_build_args( + &environment_vars_from_str(build_args) + .context("Invalid build_args")?, + ); + let secret_args = environment_vars_from_str(secret_args) + .context("Invalid secret_args")?; let _secret_args = - parse_secret_args(secret_args, *skip_secret_interp)?; - let labels = 
parse_labels(labels); + parse_secret_args(&secret_args, *skip_secret_interp)?; + let labels = parse_labels( + &environment_vars_from_str(labels).context("Invalid labels")?, + ); let extra_args = parse_extra_args(extra_args); let buildx = if *use_buildx { " buildx" } else { "" }; let image_tags = @@ -133,7 +142,7 @@ impl Resolve for State { logs.push(build_log); } - cleanup_secret_env_vars(secret_args); + cleanup_secret_env_vars(&secret_args); Ok(logs) } @@ -236,4 +245,4 @@ impl Resolve for State { let command = String::from("docker buildx prune -a -f"); Ok(run_komodo_command("prune buildx", command).await) } -} \ No newline at end of file +} diff --git a/bin/periphery/src/api/compose.rs b/bin/periphery/src/api/compose.rs index de1ec61aa..3680e44f8 100644 --- a/bin/periphery/src/api/compose.rs +++ b/bin/periphery/src/api/compose.rs @@ -4,9 +4,7 @@ use anyhow::{anyhow, Context}; use command::run_komodo_command; use formatting::format_serror; use komodo_client::entities::{ - stack::{ComposeContents, ComposeProject}, - to_komodo_name, - update::Log, + stack::ComposeProject, to_komodo_name, update::Log, FileContents, }; use periphery_client::api::compose::*; use resolver_api::Resolve; @@ -78,6 +76,24 @@ pub struct DockerComposeLsItem { // +const DEFAULT_COMPOSE_CONTENTS: &str = "## 🦎 Hello Komodo 🦎 +services: + hello_world: + image: hello-world + # networks: + # - default + # ports: + # - 3000:3000 + # volumes: + # - data:/data + +# networks: +# default: {} + +# volumes: +# data: +"; + impl Resolve for State { async fn resolve( &self, @@ -90,10 +106,14 @@ impl Resolve for State { ) -> anyhow::Result { let root = periphery_config().stack_dir.join(to_komodo_name(&name)); - let run_directory = root.join(&run_directory); - let run_directory = run_directory.canonicalize().context( - "failed to validate run directory on host (canonicalize error)", - )?; + let run_directory = + root.join(&run_directory).components().collect::(); + + if !run_directory.exists() { + 
fs::create_dir_all(&run_directory) + .await + .context("Failed to initialize run directory")?; + } let file_paths = file_paths .iter() @@ -105,19 +125,24 @@ impl Resolve for State { let mut res = GetComposeContentsOnHostResponse::default(); for full_path in &file_paths { + if !full_path.exists() { + fs::write(&full_path, DEFAULT_COMPOSE_CONTENTS) + .await + .context("Failed to init missing compose file on host")?; + } match fs::read_to_string(&full_path).await.with_context(|| { format!( - "failed to read compose file contents at {full_path:?}" + "Failed to read compose file contents at {full_path:?}" ) }) { Ok(contents) => { - res.contents.push(ComposeContents { + res.contents.push(FileContents { path: full_path.display().to_string(), contents, }); } Err(e) => { - res.errors.push(ComposeContents { + res.errors.push(FileContents { path: full_path.display().to_string(), contents: format_serror(&e.into()), }); @@ -179,6 +204,46 @@ impl Resolve for State { // +impl Resolve for State { + #[instrument(name = "WriteComposeContentsToHost", skip(self))] + async fn resolve( + &self, + WriteComposeContentsToHost { + name, + run_directory, + file_path, + contents, + }: WriteComposeContentsToHost, + _: (), + ) -> anyhow::Result { + let root = + periphery_config().stack_dir.join(to_komodo_name(&name)); + let run_directory = root.join(&run_directory); + let run_directory = run_directory.canonicalize().context( + "failed to validate run directory on host (canonicalize error)", + )?; + let file_path = run_directory + .join(file_path) + .components() + .collect::(); + // Ensure parent directory exists + if let Some(parent) = file_path.parent() { + let _ = fs::create_dir_all(&parent).await; + } + fs::write(&file_path, contents).await.with_context(|| { + format!( + "Failed to write compose file contents to {file_path:?}" + ) + })?; + Ok(Log::simple( + "Write contents to host", + format!("File contents written to {file_path:?}"), + )) + } +} + +// + impl Resolve for State { 
#[instrument( name = "ComposeUp", diff --git a/bin/periphery/src/api/deploy.rs b/bin/periphery/src/api/deploy.rs index 808d2172e..7f7076a76 100644 --- a/bin/periphery/src/api/deploy.rs +++ b/bin/periphery/src/api/deploy.rs @@ -2,14 +2,13 @@ use anyhow::Context; use command::run_komodo_command; use formatting::format_serror; use komodo_client::entities::{ - build::{ImageRegistry, StandardRegistryConfig}, deployment::{ - extract_registry_domain, Conversion, Deployment, - DeploymentConfig, DeploymentImage, RestartMode, + conversions_from_str, extract_registry_domain, Conversion, + Deployment, DeploymentConfig, DeploymentImage, RestartMode, }, - to_komodo_name, + environment_vars_from_str, to_komodo_name, update::Log, - EnvironmentVar, NoData, + EnvironmentVar, }; use periphery_client::api::container::{Deploy, RemoveContainer}; use resolver_api::Resolve; @@ -24,7 +23,7 @@ use crate::{ impl Resolve for State { #[instrument( name = "Deploy", - skip(self, core_replacers, aws_ecr, registry_token) + skip(self, core_replacers, registry_token) )] async fn resolve( &self, @@ -34,7 +33,6 @@ impl Resolve for State { stop_time, registry_token, replacers: core_replacers, - aws_ecr, }: Deploy, _: (), ) -> anyhow::Result { @@ -55,22 +53,10 @@ impl Resolve for State { )); }; - let image_registry = if aws_ecr.is_some() { - ImageRegistry::AwsEcr(String::new()) - } else if deployment.config.image_registry_account.is_empty() { - ImageRegistry::None(NoData {}) - } else { - ImageRegistry::Standard(StandardRegistryConfig { - account: deployment.config.image_registry_account.clone(), - domain: extract_registry_domain(image)?, - ..Default::default() - }) - }; - if let Err(e) = docker_login( - &image_registry, + &extract_registry_domain(image)?, + &deployment.config.image_registry_account, registry_token.as_deref(), - aws_ecr.as_ref(), ) .await { @@ -96,7 +82,8 @@ impl Resolve for State { .await; debug!("container stopped and removed"); - let command = docker_run_command(&deployment, image); + 
let command = docker_run_command(&deployment, image) + .context("Unable to generate valid docker run command")?; debug!("docker run command: {command}"); if deployment.config.skip_secret_interp { @@ -148,18 +135,29 @@ fn docker_run_command( .. }: &Deployment, image: &str, -) -> String { +) -> anyhow::Result { let name = to_komodo_name(name); - let ports = parse_conversions(ports, "-p"); - let volumes = volumes.to_owned(); - let volumes = parse_conversions(&volumes, "-v"); + let ports = parse_conversions( + &conversions_from_str(ports).context("Invalid ports")?, + "-p", + ); + let volumes = parse_conversions( + &conversions_from_str(volumes).context("Invalid volumes")?, + "-v", + ); let network = parse_network(network); let restart = parse_restart(restart); - let environment = parse_environment(environment); - let labels = parse_labels(labels); + let environment = parse_environment( + &environment_vars_from_str(environment) + .context("Invalid environment")?, + ); + let labels = parse_labels( + &environment_vars_from_str(labels).context("Invalid labels")?, + ); let command = parse_command(command); let extra_args = parse_extra_args(extra_args); - format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}") + let command = format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}"); + Ok(command) } fn parse_conversions( diff --git a/bin/periphery/src/api/git.rs b/bin/periphery/src/api/git.rs index 0c10a71eb..86faf358d 100644 --- a/bin/periphery/src/api/git.rs +++ b/bin/periphery/src/api/git.rs @@ -1,9 +1,10 @@ use anyhow::{anyhow, Context}; +use git::GitRes; use komodo_client::entities::{ to_komodo_name, update::Log, CloneArgs, LatestCommit, }; use periphery_client::api::git::{ - CloneRepo, DeleteRepo, GetLatestCommit, PullRepo, + CloneRepo, DeleteRepo, GetLatestCommit, PullOrCloneRepo, PullRepo, RepoActionResponse, }; use 
resolver_api::Resolve; @@ -19,7 +20,7 @@ impl Resolve for State { let repo_path = periphery_config().repo_dir.join(name); if !repo_path.is_dir() { return Err(anyhow!( - "repo path is not directory. is it cloned?" + "Repo path is not directory. is it cloned?" )); } git::get_commit_hash_info(&repo_path).await @@ -27,7 +28,10 @@ impl Resolve for State { } impl Resolve for State { - #[instrument(name = "CloneRepo", skip(self, environment))] + #[instrument( + name = "CloneRepo", + skip(self, git_token, environment, replacers) + )] async fn resolve( &self, CloneRepo { @@ -43,18 +47,13 @@ impl Resolve for State { let CloneArgs { provider, account, .. } = &args; - let token = match (account, provider, git_token) { - (None, _, _) => None, - (Some(_), None, _) => { - return Err(anyhow!( - "got incoming git account but no git provider" - )) - } - (Some(_), Some(_), Some(token)) => Some(token), - (Some(account), Some(provider), None) => Some( + let token = match (account, git_token) { + (None, _) => None, + (Some(_), Some(token)) => Some(token), + (Some(account), None) => Some( crate::helpers::git_token(provider, account).map(ToString::to_string) .with_context( - || format!("failed to get git token from periphery config | provider: {provider} | account: {account}") + || format!("Failed to get git token from periphery config | provider: {provider} | account: {account}") )?, ), }; @@ -68,29 +67,36 @@ impl Resolve for State { &replacers, ) .await - .map(|(logs, commit_hash, commit_message, env_file_path)| { - RepoActionResponse { - logs, - commit_hash, - commit_message, - env_file_path, - } - }) + .map( + |GitRes { + logs, + hash, + message, + env_file_path, + }| { + RepoActionResponse { + logs, + commit_hash: hash, + commit_message: message, + env_file_path, + } + }, + ) } } // impl Resolve for State { - #[instrument(name = "PullRepo", skip(self, on_pull, environment))] + #[instrument( + name = "PullRepo", + skip(self, git_token, environment, replacers) + )] async fn resolve( 
&self, PullRepo { - name, - branch, - commit, - path, - on_pull, + args, + git_token, environment, env_file_path, skip_secret_interp, @@ -98,32 +104,104 @@ impl Resolve for State { }: PullRepo, _: (), ) -> anyhow::Result { - let default_path = - periphery_config().repo_dir.join(to_komodo_name(&name)); - let path = match path { - // If path is absolute, this will resolve to exactly the given absolute path. - // If path is relative, it will be relative to the default repo directory. - Some(path) => default_path.join(path), - None => default_path, + let CloneArgs { + provider, account, .. + } = &args; + let token = match (account, git_token) { + (None, _) => None, + (Some(_), Some(token)) => Some(token), + (Some(account), None) => Some( + crate::helpers::git_token(provider, account).map(ToString::to_string) + .with_context( + || format!("Failed to get git token from periphery config | provider: {provider} | account: {account}") + )?, + ), }; - let (logs, commit_hash, commit_message, env_file_path) = - git::pull( - &path, - &branch, - &commit, - &on_pull, - &environment, - &env_file_path, - (!skip_secret_interp).then_some(&periphery_config().secrets), - &replacers, - ) - .await; - Ok(RepoActionResponse { - logs, - commit_hash, - commit_message, + git::pull( + args, + &periphery_config().repo_dir, + token, + &environment, + &env_file_path, + (!skip_secret_interp).then_some(&periphery_config().secrets), + &replacers, + ) + .await + .map( + |GitRes { + logs, + hash, + message, + env_file_path, + }| { + RepoActionResponse { + logs, + commit_hash: hash, + commit_message: message, + env_file_path, + } + }, + ) + } +} + +// + +impl Resolve for State { + #[instrument( + name = "PullOrCloneRepo", + skip(self, git_token, environment, replacers) + )] + async fn resolve( + &self, + PullOrCloneRepo { + args, + git_token, + environment, env_file_path, - }) + skip_secret_interp, + replacers, + }: PullOrCloneRepo, + _: (), + ) -> anyhow::Result { + let CloneArgs { + provider, account, 
.. + } = &args; + let token = match (account, git_token) { + (None, _) => None, + (Some(_), Some(token)) => Some(token), + (Some(account), None) => Some( + crate::helpers::git_token(provider, account).map(ToString::to_string) + .with_context( + || format!("Failed to get git token from periphery config | provider: {provider} | account: {account}") + )?, + ), + }; + git::pull_or_clone( + args, + &periphery_config().repo_dir, + token, + &environment, + &env_file_path, + (!skip_secret_interp).then_some(&periphery_config().secrets), + &replacers, + ) + .await + .map( + |GitRes { + logs, + hash, + message, + env_file_path, + }| { + RepoActionResponse { + logs, + commit_hash: hash, + commit_message: message, + env_file_path, + } + }, + ) } } diff --git a/bin/periphery/src/api/mod.rs b/bin/periphery/src/api/mod.rs index 784c82970..fa8c9a168 100644 --- a/bin/periphery/src/api/mod.rs +++ b/bin/periphery/src/api/mod.rs @@ -75,6 +75,7 @@ pub enum PeripheryRequest { GetComposeServiceLogSearch(GetComposeServiceLogSearch), // Compose (Write) + WriteComposeContentsToHost(WriteComposeContentsToHost), ComposeUp(ComposeUp), ComposeExecution(ComposeExecution), diff --git a/bin/periphery/src/compose.rs b/bin/periphery/src/compose.rs index 8edb75b68..ea8eaa589 100644 --- a/bin/periphery/src/compose.rs +++ b/bin/periphery/src/compose.rs @@ -3,18 +3,14 @@ use std::path::PathBuf; use anyhow::{anyhow, Context}; use command::run_komodo_command; use formatting::format_serror; -use git::write_environment_file; +use git::environment; use komodo_client::entities::{ - all_logs_success, - build::{ImageRegistry, StandardRegistryConfig}, - stack::{ComposeContents, Stack}, - to_komodo_name, - update::Log, - CloneArgs, + all_logs_success, environment_vars_from_str, stack::Stack, + to_komodo_name, update::Log, CloneArgs, FileContents, }; use periphery_client::api::{ compose::ComposeUpResponse, - git::{CloneRepo, RepoActionResponse}, + git::{PullOrCloneRepo, RepoActionResponse}, }; use 
resolver_api::Resolve; use tokio::fs; @@ -44,16 +40,14 @@ pub async fn compose_up( // Write the stack to local disk. For repos, will first delete any existing folder to ensure fresh deploy. // Will also set additional fields on the reponse. // Use the env_file_path in the compose command. - let env_file_path = write_stack(&stack, git_token, res) - .await - .context("failed to write / clone compose file")?; + let (run_directory, env_file_path) = + write_stack(&stack, git_token, res) + .await + .context("Failed to write / clone compose file")?; - let root = periphery_config() - .stack_dir - .join(to_komodo_name(&stack.name)); - let run_directory = root.join(&stack.config.run_directory); + // Canonicalize the path to ensure it exists, and is the cleanest path to the run directory. let run_directory = run_directory.canonicalize().context( - "failed to validate run directory on host after stack write (canonicalize error)", + "Failed to validate run directory on host after stack write (canonicalize error)", )?; let file_paths = stack @@ -91,7 +85,7 @@ pub async fn compose_up( .logs .push(Log::error("read compose file", error.clone())); // This should only happen for repo stacks, ie remote error - res.remote_errors.push(ComposeContents { + res.remote_errors.push(FileContents { path: full_path.display().to_string(), contents: error, }); @@ -100,55 +94,59 @@ pub async fn compose_up( )); } }; - res.file_contents.push(ComposeContents { + res.file_contents.push(FileContents { path: full_path.display().to_string(), contents: file_contents, }); } let docker_compose = docker_compose(); - let run_dir = run_directory - .canonicalize() - .context("failed to canonicalize run directory on host")?; - let run_dir = run_dir.display(); + let run_dir = run_directory.display(); let service_arg = service .as_ref() .map(|service| format!(" {service}")) .unwrap_or_default(); + let file_args = if stack.config.file_paths.is_empty() { String::from("compose.yaml") } else { 
stack.config.file_paths.join(" -f ") }; + // This will be the last project name, which is the one that needs to be destroyed. + // Might be different from the current project name, if user renames stack / changes to custom project name. let last_project_name = stack.project_name(false); let project_name = stack.project_name(true); - // Login to the registry to pull private images, if account is set - if !stack.config.registry_account.is_empty() { - let registry = ImageRegistry::Standard(StandardRegistryConfig { - domain: stack.config.registry_provider.clone(), - account: stack.config.registry_account.clone(), - ..Default::default() - }); - docker_login(®istry, registry_token.as_deref(), None) - .await - .with_context(|| { - format!( - "domain: {} | account: {}", - stack.config.registry_provider, - stack.config.registry_account - ) - }) - .context("failed to login to image registry")?; + // Login to the registry to pull private images, if provider / account are set + if !stack.config.registry_provider.is_empty() + && !stack.config.registry_account.is_empty() + { + docker_login( + &stack.config.registry_provider, + &stack.config.registry_account, + registry_token.as_deref(), + ) + .await + .with_context(|| { + format!( + "domain: {} | account: {}", + stack.config.registry_provider, stack.config.registry_account + ) + }) + .context("failed to login to image registry")?; } + let env_file = env_file_path + .map(|path| format!(" --env-file {}", path.display())) + .unwrap_or_default(); + // Build images before destroying to minimize downtime. // If this fails, do not continue. 
if stack.config.run_build { let build_extra_args = parse_extra_args(&stack.config.build_extra_args); let command = format!( - "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args} build{build_extra_args}{service_arg}", + "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} build{build_extra_args}{service_arg}", ); if stack.config.skip_secret_interp { let log = run_komodo_command("compose build", command).await; @@ -185,7 +183,7 @@ pub async fn compose_up( let log = run_komodo_command( "compose pull", format!( - "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args} pull{service_arg}", + "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} pull{service_arg}", ), ) .await; @@ -207,16 +205,12 @@ pub async fn compose_up( // Run compose up let extra_args = parse_extra_args(&stack.config.extra_args); - let env_file = env_file_path - .map(|path| format!(" --env-file {}", path.display())) - .unwrap_or_default(); let command = format!( "cd {run_dir} && {docker_compose} -p {project_name} -f {file_args}{env_file} up -d{extra_args}{service_arg}", ); - if stack.config.skip_secret_interp { - let log = run_komodo_command("compose up", command).await; - res.deployed = log.success; - res.logs.push(log); + + let log = if stack.config.skip_secret_interp { + run_komodo_command("compose up", command).await } else { let (command, mut replacers) = svi::interpolate_variables( &command, @@ -232,35 +226,22 @@ pub async fn compose_up( log.stdout = svi::replace_in_string(&log.stdout, &replacers); log.stderr = svi::replace_in_string(&log.stderr, &replacers); - res.logs.push(log); - } + log + }; - // If using a repo based stack, clean up the file at this point. - // This will also let user know immediately if there will be a problem - // with any volume mounted inside repo directory. - // Just use absolute mount points or docker volumes with repo based stack anyways. 
- if !stack.config.files_on_host - && stack.config.file_contents.is_empty() - { - if let Err(e) = fs::remove_dir_all(&root).await.with_context(|| { - format!("failed to clean up files after deploy | path: {root:?} | Bring the stack down, and ensure all volumes are mounted outside the repo directory for the next deploy. (preferably use absolute path for mount point)") - }) { - res - .logs - .push(Log::error("clean up files", format_serror(&e.into()))) - } - } + res.deployed = log.success; + res.logs.push(log); Ok(()) } /// Either writes the stack file_contents to a file, or clones the repo. -/// Returns the env file path, to maybe include in command with --env-file. +/// Returns (run_directory, env_file_path) async fn write_stack( stack: &Stack, git_token: Option, res: &mut ComposeUpResponse, -) -> anyhow::Result> { +) -> anyhow::Result<(PathBuf, Option)> { let root = periphery_config() .stack_dir .join(to_komodo_name(&stack.name)); @@ -269,13 +250,16 @@ async fn write_stack( // Cannot use 'canonicalize' yet as directory may not exist. 
let run_directory = run_directory.components().collect::(); + let env_vars = environment_vars_from_str(&stack.config.environment) + .context("Invalid environment variables")?; + if stack.config.files_on_host { // ============= // FILES ON HOST // ============= // Only need to write environment file here (which does nothing if not using this feature) - let env_file_path = match write_environment_file( - &stack.config.environment, + let env_file_path = match environment::write_file( + &env_vars, &stack.config.env_file_path, stack .config @@ -291,15 +275,60 @@ async fn write_stack( return Err(anyhow!("failed to write environment file")); } }; - Ok(env_file_path) - } else if stack.config.file_contents.is_empty() { + Ok((run_directory, env_file_path)) + } else if stack.config.repo.is_empty() { + if stack.config.file_contents.trim().is_empty() { + return Err(anyhow!("Must either input compose file contents directly, or use file one host / git repo options.")); + } + // ============== + // UI BASED FILES + // ============== + // Ensure run directory exists + fs::create_dir_all(&run_directory).await.with_context(|| { + format!( + "failed to create stack run directory at {run_directory:?}" + ) + })?; + let env_file_path = match environment::write_file( + &env_vars, + &stack.config.env_file_path, + stack + .config + .skip_secret_interp + .then_some(&periphery_config().secrets), + &run_directory, + &mut res.logs, + ) + .await + { + Ok(path) => path, + Err(_) => { + return Err(anyhow!("failed to write environment file")); + } + }; + let file_path = run_directory + .join( + stack + .config + .file_paths + // only need the first one, or default + .first() + .map(String::as_str) + .unwrap_or("compose.yaml"), + ) + .components() + .collect::(); + fs::write(&file_path, &stack.config.file_contents) + .await + .with_context(|| { + format!("failed to write compose file to {file_path:?}") + })?; + + Ok((run_directory, env_file_path)) + } else { // ================ // REPO BASED FILES 
// ================ - if stack.config.repo.is_empty() { - // Err response will be written to return, no need to add it to log here - return Err(anyhow!("Must either input compose file contents directly or provide a repo. Got neither.")); - } let mut args: CloneArgs = stack.into(); // Set the clone destination to the one created for this run args.destination = Some(root.display().to_string()); @@ -318,7 +347,7 @@ async fn write_stack( res .logs .push(Log::error("no git token", error.clone())); - res.remote_errors.push(ComposeContents { + res.remote_errors.push(FileContents { path: Default::default(), contents: error, }); @@ -333,8 +362,6 @@ async fn write_stack( } }; - // 🚨 This has to delete the existing folder before clone. - // 🚨 If any volumes were mounted inside the repo folder, the data will be deleted. let RepoActionResponse { logs, commit_hash, @@ -342,10 +369,10 @@ async fn write_stack( env_file_path, } = match State .resolve( - CloneRepo { + PullOrCloneRepo { args, git_token, - environment: stack.config.environment.clone(), + environment: env_vars, env_file_path: stack.config.env_file_path.clone(), skip_secret_interp: stack.config.skip_secret_interp, // repo replacer only needed for on_clone / on_pull, @@ -359,15 +386,15 @@ async fn write_stack( Ok(res) => res, Err(e) => { let error = format_serror( - &e.context("failed to clone stack repo").into(), + &e.context("failed to pull stack repo").into(), ); - res.logs.push(Log::error("clone stack repo", error.clone())); - res.remote_errors.push(ComposeContents { + res.logs.push(Log::error("pull stack repo", error.clone())); + res.remote_errors.push(FileContents { path: Default::default(), contents: error, }); return Err(anyhow!( - "failed to clone stack repo, stopping run" + "failed to pull stack repo, stopping run" )); } }; @@ -377,53 +404,10 @@ async fn write_stack( res.commit_message = commit_message; if !all_logs_success(&res.logs) { - return Err(anyhow!("Stopped after clone failure")); + return 
Err(anyhow!("Stopped after repo pull failure")); } - Ok(env_file_path) - } else { - // ============== - // UI BASED FILES - // ============== - // Ensure run directory exists - fs::create_dir_all(&run_directory).await.with_context(|| { - format!( - "failed to create stack run directory at {run_directory:?}" - ) - })?; - let env_file_path = match write_environment_file( - &stack.config.environment, - &stack.config.env_file_path, - stack - .config - .skip_secret_interp - .then_some(&periphery_config().secrets), - &run_directory, - &mut res.logs, - ) - .await - { - Ok(path) => path, - Err(_) => { - return Err(anyhow!("failed to write environment file")); - } - }; - let file_path = run_directory.join( - stack - .config - .file_paths - // only need the first one, or default - .first() - .map(String::as_str) - .unwrap_or("compose.yaml"), - ); - fs::write(&file_path, &stack.config.file_contents) - .await - .with_context(|| { - format!("failed to write compose file to {file_path:?}") - })?; - - Ok(env_file_path) + Ok((run_directory, env_file_path)) } } diff --git a/bin/periphery/src/config.rs b/bin/periphery/src/config.rs index a18647788..83c77015f 100644 --- a/bin/periphery/src/config.rs +++ b/bin/periphery/src/config.rs @@ -1,6 +1,7 @@ use std::sync::OnceLock; use clap::Parser; +use environment_file::maybe_read_list_from_file; use komodo_client::entities::{ config::periphery::{CliArgs, Env, PeripheryConfig}, logger::{LogConfig, LogLevel}, @@ -53,7 +54,7 @@ pub fn periphery_config() -> &'static PeripheryConfig { .unwrap_or(config.logging.stdio), otlp_endpoint: env .periphery_logging_otlp_endpoint - .or(config.logging.otlp_endpoint), + .unwrap_or(config.logging.otlp_endpoint), opentelemetry_service_name: env .periphery_logging_opentelemetry_service_name .unwrap_or(config.logging.opentelemetry_service_name), @@ -61,13 +62,26 @@ pub fn periphery_config() -> &'static PeripheryConfig { allowed_ips: env .periphery_allowed_ips .unwrap_or(config.allowed_ips), - passkeys: 
env.periphery_passkeys.unwrap_or(config.passkeys), + passkeys: maybe_read_list_from_file( + env.periphery_passkeys_file, + env.periphery_passkeys, + ) + .unwrap_or(config.passkeys), include_disk_mounts: env .periphery_include_disk_mounts .unwrap_or(config.include_disk_mounts), exclude_disk_mounts: env .periphery_exclude_disk_mounts .unwrap_or(config.exclude_disk_mounts), + ssl_enabled: env + .periphery_ssl_enabled + .unwrap_or(config.ssl_enabled), + ssl_key_file: env + .periphery_ssl_key_file + .unwrap_or(config.ssl_key_file), + ssl_cert_file: env + .periphery_ssl_cert_file + .unwrap_or(config.ssl_cert_file), secrets: config.secrets, git_providers: config.git_providers, docker_registries: config.docker_registries, diff --git a/bin/periphery/src/docker.rs b/bin/periphery/src/docker.rs index 5707c70ba..c7663b62a 100644 --- a/bin/periphery/src/docker.rs +++ b/bin/periphery/src/docker.rs @@ -8,8 +8,6 @@ use bollard::{ }; use command::run_komodo_command; use komodo_client::entities::{ - build::{ImageRegistry, StandardRegistryConfig}, - config::core::AwsEcrConfig, docker::{ container::*, image::*, network::*, volume::*, ContainerConfig, GraphDriverData, HealthConfig, PortBinding, @@ -907,47 +905,13 @@ impl DockerClient { /// Returns whether build result should be pushed after build #[instrument(skip(registry_token))] pub async fn docker_login( - registry: &ImageRegistry, + domain: &str, + account: &str, // For local token override from core. registry_token: Option<&str>, - // For local config override from core. 
- aws_ecr: Option<&AwsEcrConfig>, ) -> anyhow::Result { - let (domain, account) = match registry { - // Early return for no login - ImageRegistry::None(_) => return Ok(false), - // Early return because Ecr is different - ImageRegistry::AwsEcr(label) => { - let AwsEcrConfig { region, account_id } = aws_ecr - .with_context(|| { - if label.is_empty() { - String::from("Could not find aws ecr config") - } else { - format!("Could not find aws ecr config for label {label}") - } - })?; - let registry_token = registry_token - .context("aws ecr build missing registry token from core")?; - let command = format!("docker login {account_id}.dkr.ecr.{region}.amazonaws.com -u AWS -p {registry_token}"); - let log = async_run_command(&command).await; - if log.success() { - return Ok(true); - } else { - return Err(anyhow!( - "aws ecr login error: stdout: {} | stderr: {}", - log.stdout, - log.stderr - )); - } - } - ImageRegistry::Standard(StandardRegistryConfig { - domain, - account, - .. - }) => (domain.as_str(), account), - }; - if account.is_empty() { - return Err(anyhow!("Must configure account for registry domain {domain}, got empty string")); + if domain.is_empty() || account.is_empty() { + return Ok(false); } let registry_token = match registry_token { Some(token) => token, diff --git a/bin/periphery/src/main.rs b/bin/periphery/src/main.rs index 596c6c3c3..670e3ff37 100644 --- a/bin/periphery/src/main.rs +++ b/bin/periphery/src/main.rs @@ -4,6 +4,7 @@ extern crate tracing; use std::{net::SocketAddr, str::FromStr}; use anyhow::Context; +use axum_server::tls_openssl::OpenSSLConfig; mod api; mod compose; @@ -11,6 +12,7 @@ mod config; mod docker; mod helpers; mod router; +mod ssl; mod stats; struct State; @@ -20,7 +22,8 @@ async fn app() -> anyhow::Result<()> { let config = config::periphery_config(); logger::init(&config.logging)?; - info!("version: v{}", env!("CARGO_PKG_VERSION")); + info!("Komodo Periphery version: v{}", env!("CARGO_PKG_VERSION")); + info!("{:?}", 
config.sanitized()); stats::spawn_system_stats_polling_threads(); @@ -28,18 +31,26 @@ async fn app() -> anyhow::Result<()> { SocketAddr::from_str(&format!("0.0.0.0:{}", config.port)) .context("failed to parse socket addr")?; - let listener = tokio::net::TcpListener::bind(&socket_addr) - .await - .context("failed to bind tcp listener")?; + let app = router::router() + .into_make_service_with_connect_info::(); - info!("Komodo Periphery started on {}", socket_addr); - - axum::serve( - listener, - router::router() - .into_make_service_with_connect_info::(), - ) - .await?; + if config.ssl_enabled { + info!("🔒 Periphery SSL Enabled"); + ssl::ensure_certs().await; + info!("Komodo Periphery starting on https://{}", socket_addr); + let ssl_config = OpenSSLConfig::from_pem_file( + &config.ssl_cert_file, + &config.ssl_key_file, + ) + .context("Invalid ssl cert / key")?; + axum_server::bind_openssl(socket_addr, ssl_config) + .serve(app) + .await? + } else { + info!("🔓 Periphery SSL Disabled"); + info!("Komodo Periphery starting on http://{}", socket_addr); + axum_server::bind(socket_addr).serve(app).await? 
+ } Ok(()) } diff --git a/bin/periphery/src/ssl.rs b/bin/periphery/src/ssl.rs new file mode 100644 index 000000000..4e959ef9f --- /dev/null +++ b/bin/periphery/src/ssl.rs @@ -0,0 +1,39 @@ +use crate::config::periphery_config; + +pub async fn ensure_certs() { + let config = periphery_config(); + if !config.ssl_cert_file.is_file() || !config.ssl_key_file.is_file() + { + generate_self_signed_ssl_certs().await + } +} + +#[instrument] +async fn generate_self_signed_ssl_certs() { + info!("Generating certs..."); + + let config = periphery_config(); + + // ensure cert folders exist + if let Some(parent) = config.ssl_key_file.parent() { + let _ = std::fs::create_dir_all(parent); + } + if let Some(parent) = config.ssl_cert_file.parent() { + let _ = std::fs::create_dir_all(parent); + } + + let key_path = &config.ssl_key_file.display(); + let cert_path = &config.ssl_cert_file.display(); + + let command = format!("openssl req -x509 -newkey rsa:4096 -keyout {key_path} -out {cert_path} -sha256 -days 3650 -nodes -subj \"/C=XX/CN=periphery\""); + let log = run_command::async_run_command(&command).await; + + if log.success() { + info!("✅ SSL Certs generated"); + } else { + panic!( + "🚨 Failed to generate SSL Certs | stdout: {} | stderr: {}", + log.stdout, log.stderr + ); + } +} diff --git a/client/core/rs/src/api/auth.rs b/client/core/rs/src/api/auth.rs index e59808114..b15685c7c 100644 --- a/client/core/rs/src/api/auth.rs +++ b/client/core/rs/src/api/auth.rs @@ -38,6 +38,8 @@ pub struct GetLoginOptionsResponse { pub github: bool, /// Whether google login is enabled. pub google: bool, + /// Whether OIDC login is enabled. 
+ pub oidc: bool, /// Whether user registration (Sign Up) has been disabled pub registration_disabled: bool, } diff --git a/client/core/rs/src/api/read/deployment.rs b/client/core/rs/src/api/read/deployment.rs index d511a704d..727c5ddd1 100644 --- a/client/core/rs/src/api/read/deployment.rs +++ b/client/core/rs/src/api/read/deployment.rs @@ -217,10 +217,17 @@ pub struct GetDeploymentsSummary {} #[typeshare] #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct GetDeploymentsSummaryResponse { + /// The total number of Deployments pub total: I64, + /// The number of Deployments with Running state pub running: I64, + /// The number of Deployments with Stopped or Paused state pub stopped: I64, + /// The number of Deployments with NotDeployed state pub not_deployed: I64, + /// The number of Deployments with Restarting or Dead or Created (other) state + pub unhealthy: I64, + /// The number of Deployments with Unknown state pub unknown: I64, } diff --git a/client/core/rs/src/api/read/mod.rs b/client/core/rs/src/api/read/mod.rs index f403a165b..a77bd2cab 100644 --- a/client/core/rs/src/api/read/mod.rs +++ b/client/core/rs/src/api/read/mod.rs @@ -98,6 +98,10 @@ pub struct GetCoreInfoResponse { pub transparent_mode: bool, /// Whether UI write access should be disabled pub ui_write_disabled: bool, + /// Whether non admins can create resources + pub disable_non_admin_create: bool, + /// Whether confirm dialog should be disabled + pub disable_confirm_dialog: bool, /// The repo owners for which github webhook management api is available pub github_webhook_owners: Vec, } @@ -152,21 +156,6 @@ pub type ListDockerRegistriesFromConfigResponse = Vec; // -/// List the available aws ecr config labels from the core config. -/// Response: [ListAwsEcrLabelsResponse]. 
-#[typeshare] -#[derive( - Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, -)] -#[empty_traits(KomodoReadRequest)] -#[response(ListAwsEcrLabelsResponse)] -pub struct ListAwsEcrLabels {} - -#[typeshare] -pub type ListAwsEcrLabelsResponse = Vec; - -// - /// List the available secrets from the core config. /// Response: [ListSecretsResponse]. #[typeshare] diff --git a/client/core/rs/src/api/read/stack.rs b/client/core/rs/src/api/read/stack.rs index a193ef833..9e071c750 100644 --- a/client/core/rs/src/api/read/stack.rs +++ b/client/core/rs/src/api/read/stack.rs @@ -224,22 +224,12 @@ pub struct GetStacksSummaryResponse { pub total: u32, /// The number of stacks with Running state. pub running: u32, - /// The number of stacks with Paused state. - pub paused: u32, - /// The number of stacks with Stopped state. + /// The number of stacks with Stopped or Paused state. pub stopped: u32, - /// The number of stacks with Restarting state. - pub restarting: u32, - /// The number of stacks with Dead state. - pub dead: u32, - /// The number of stacks with Created state. - pub created: u32, - /// The number of stacks with Removing state. - pub removing: u32, - /// The number of stacks with Unhealthy state. - pub unhealthy: u32, /// The number of stacks with Down state. pub down: u32, + /// The number of stacks with Unhealthy or Restarting or Dead or Created or Removing state. + pub unhealthy: u32, /// The number of stacks with Unknown state. 
pub unknown: u32, } diff --git a/client/core/rs/src/api/write/build.rs b/client/core/rs/src/api/write/build.rs index 91804d261..3778d80a7 100644 --- a/client/core/rs/src/api/write/build.rs +++ b/client/core/rs/src/api/write/build.rs @@ -92,6 +92,7 @@ pub struct UpdateBuild { #[response(NoData)] pub struct RefreshBuildCache { /// Id or name + #[serde(alias = "id", alias = "name")] pub build: String, } diff --git a/client/core/rs/src/api/write/permissions.rs b/client/core/rs/src/api/write/permissions.rs index 7453b8468..19dc28e00 100644 --- a/client/core/rs/src/api/write/permissions.rs +++ b/client/core/rs/src/api/write/permissions.rs @@ -75,3 +75,21 @@ pub struct UpdateUserBasePermissions { #[typeshare] pub type UpdateUserBasePermissionsResponse = NoData; + +/// **Super Admin only.** Update's whether a user is admin. +/// Response: [NoData]. +#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoWriteRequest)] +#[response(UpdateUserAdminResponse)] +pub struct UpdateUserAdmin { + /// The target user. + pub user_id: String, + /// Whether user should be admin. + pub admin: bool, +} + +#[typeshare] +pub type UpdateUserAdminResponse = NoData; diff --git a/client/core/rs/src/api/write/repo.rs b/client/core/rs/src/api/write/repo.rs index 9d723c65f..7e71d9369 100644 --- a/client/core/rs/src/api/write/repo.rs +++ b/client/core/rs/src/api/write/repo.rs @@ -95,6 +95,7 @@ pub struct UpdateRepo { #[response(NoData)] pub struct RefreshRepoCache { /// Id or name + #[serde(alias = "id", alias = "name")] pub repo: String, } diff --git a/client/core/rs/src/api/write/stack.rs b/client/core/rs/src/api/write/stack.rs index 235974adc..13d6fdef6 100644 --- a/client/core/rs/src/api/write/stack.rs +++ b/client/core/rs/src/api/write/stack.rs @@ -103,6 +103,26 @@ pub struct RenameStack { // +/// Rename the stack at id to the given name. Response: [Update]. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoWriteRequest)] +#[response(Update)] +pub struct WriteStackFileContents { + /// The name or id of the Stack to write the contents to. + #[serde(alias = "id", alias = "name")] + pub stack: String, + /// The file path relative to the stack run directory, + /// or absolute path. + pub file_path: String, + /// The contents to write. + pub contents: String, +} + +// + /// Trigger a refresh of the cached compose file contents. /// Refreshes: /// - Whether the remote file is missing @@ -115,6 +135,7 @@ pub struct RenameStack { #[response(NoData)] pub struct RefreshStackCache { /// Id or name + #[serde(alias = "id", alias = "name")] pub stack: String, } diff --git a/client/core/rs/src/api/write/sync.rs b/client/core/rs/src/api/write/sync.rs index 294629be4..aa92028b0 100644 --- a/client/core/rs/src/api/write/sync.rs +++ b/client/core/rs/src/api/write/sync.rs @@ -83,7 +83,7 @@ pub struct UpdateResourceSync { // -/// Trigger a refresh of the computed diff logs for view. +/// Trigger a refresh of the computed diff logs for view. Response: [ResourceSync] #[typeshare] #[derive( Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, @@ -92,6 +92,24 @@ pub struct UpdateResourceSync { #[response(ResourceSync)] pub struct RefreshResourceSyncPending { /// Id or name + #[serde(alias = "id", alias = "name")] + pub sync: String, +} + +// + +/// Commits matching resources updated configuration to the target resource sync. Response: [Update] +/// +/// Note. Will fail if the Sync is not `managed`. 
+#[typeshare] +#[derive( + Serialize, Deserialize, Debug, Clone, Request, EmptyTraits, +)] +#[empty_traits(KomodoWriteRequest)] +#[response(ResourceSync)] +pub struct CommitSync { + /// Id or name + #[serde(alias = "id", alias = "name")] pub sync: String, } diff --git a/client/core/rs/src/entities/alerter.rs b/client/core/rs/src/entities/alerter.rs index 087ca0559..effd91307 100644 --- a/client/core/rs/src/entities/alerter.rs +++ b/client/core/rs/src/entities/alerter.rs @@ -110,8 +110,11 @@ pub enum AlerterEndpoint { /// Send alert serialized to JSON to an http endpoint. Custom(CustomAlerterEndpoint), - /// Send alert to a slack app + /// Send alert to a Slack app Slack(SlackAlerterEndpoint), + + /// Send alert to a Discord app + Discord(DiscordAlerterEndpoint), } impl Default for AlerterEndpoint { @@ -120,7 +123,7 @@ impl Default for AlerterEndpoint { } } -/// Configuration for a custom alerter endpoint. +/// Configuration for a Custom alerter endpoint. #[typeshare] #[derive( Debug, Clone, PartialEq, Serialize, Deserialize, Builder, @@ -144,13 +147,13 @@ fn default_custom_url() -> String { String::from("http://localhost:7000") } -/// Configuration for a slack alerter. +/// Configuration for a Slack alerter. #[typeshare] #[derive( Debug, Clone, PartialEq, Serialize, Deserialize, Builder, )] pub struct SlackAlerterEndpoint { - /// The slack app url + /// The Slack app webhook url #[serde(default = "default_slack_url")] #[builder(default = "default_slack_url()")] pub url: String, @@ -170,6 +173,32 @@ fn default_slack_url() -> String { ) } +/// Configuration for a Discord alerter. 
+#[typeshare] +#[derive( + Debug, Clone, PartialEq, Serialize, Deserialize, Builder, +)] +pub struct DiscordAlerterEndpoint { + /// The Discord webhook url + #[serde(default = "default_discord_url")] + #[builder(default = "default_discord_url()")] + pub url: String, +} + +impl Default for DiscordAlerterEndpoint { + fn default() -> Self { + Self { + url: default_discord_url(), + } + } +} + +fn default_discord_url() -> String { + String::from( + "https://discord.com/api/webhooks/XXXXXXXXXXXX/XXXX-XXXXXXXXXX", + ) +} + // QUERY #[typeshare] diff --git a/client/core/rs/src/entities/build.rs b/client/core/rs/src/entities/build.rs index 994a12ba3..6fdfa92d2 100644 --- a/client/core/rs/src/entities/build.rs +++ b/client/core/rs/src/entities/build.rs @@ -2,7 +2,10 @@ use bson::{doc, Document}; use derive_builder::Builder; use derive_default_builder::DefaultBuilder; use partial_derive2::Partial; -use serde::{Deserialize, Serialize}; +use serde::{ + de::{value::MapAccessDeserializer, Visitor}, + Deserialize, Deserializer, Serialize, +}; use strum::Display; use typeshare::typeshare; @@ -10,7 +13,7 @@ use crate::entities::I64; use super::{ resource::{Resource, ResourceListItem, ResourceQuery}, - EnvironmentVar, NoData, SystemCommand, Version, + NoData, SystemCommand, Version, }; #[typeshare] @@ -181,11 +184,6 @@ pub struct BuildConfig { #[builder(default)] pub pre_build: SystemCommand, - /// Configuration for the registry to push the built image to. - #[serde(default)] - #[builder(default)] - pub image_registry: ImageRegistry, - /// The path of the docker build context relative to the root of the repo. /// Default: "." (the root of the repo). #[serde(default = "default_build_path")] @@ -199,6 +197,15 @@ pub struct BuildConfig { #[partial_default(default_dockerfile_path())] pub dockerfile_path: String, + /// Configuration for the registry to push the built image to. 
+ #[serde(default, deserialize_with = "image_registry_deserializer")] + #[partial_attr(serde( + default, + deserialize_with = "option_image_registry_deserializer" + ))] + #[builder(default)] + pub image_registry: ImageRegistryConfig, + /// Whether to skip secret interpolation in the build_args. #[serde(default)] #[builder(default)] @@ -226,12 +233,12 @@ pub struct BuildConfig { deserialize_with = "super::option_env_vars_deserializer" ))] #[builder(default)] - pub build_args: Vec, + pub build_args: String, /// Secret arguments. /// /// These values remain hidden in the final image by using - /// docker secret mounts. See ``. + /// docker secret mounts. See [https://docs.docker.com/build/building/secrets]. /// /// The values can be used in RUN commands: /// ``` @@ -247,19 +254,16 @@ pub struct BuildConfig { deserialize_with = "super::option_env_vars_deserializer" ))] #[builder(default)] - pub secret_args: Vec, + pub secret_args: String, /// Docker labels - #[serde( - default, - deserialize_with = "super::env_vars_deserializer" - )] + #[serde(default, deserialize_with = "super::labels_deserializer")] #[partial_attr(serde( default, - deserialize_with = "super::option_env_vars_deserializer" + deserialize_with = "super::option_labels_deserializer" ))] #[builder(default)] - pub labels: Vec, + pub labels: String, } impl BuildConfig { @@ -327,35 +331,15 @@ impl Default for BuildConfig { } } -/// Configuration for the registry to push the built image to. -#[typeshare] -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(tag = "type", content = "params")] -pub enum ImageRegistry { - /// Don't push the image to any registry - None(NoData), - /// Push the image to a standard image registry (any domain) - Standard(StandardRegistryConfig), - /// Push the image to Aws Elastic Container Registry - /// - /// The string held in 'params' should match a label of an `aws_ecr_registry` in the core config. 
- AwsEcr(String), -} - -impl Default for ImageRegistry { - fn default() -> Self { - Self::None(NoData {}) - } -} - -/// Configuration for a standard image registry +/// Configuration for an image registry #[typeshare] #[derive( Debug, Clone, Default, PartialEq, Serialize, Deserialize, )] -pub struct StandardRegistryConfig { - /// Specify the registry provider domain. Default: `docker.io` - #[serde(default = "default_registry_domain")] +pub struct ImageRegistryConfig { + /// Specify the registry provider domain, eg `docker.io`. + /// If not provided, will not push to any registry. + #[serde(default)] pub domain: String, /// Specify an account to use with the registry. @@ -368,8 +352,120 @@ pub struct StandardRegistryConfig { pub organization: String, } -fn default_registry_domain() -> String { - String::from("docker.io") +pub fn image_registry_deserializer<'de, D>( + deserializer: D, +) -> Result +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(ImageRegistryVisitor) +} + +pub fn option_image_registry_deserializer<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(OptionImageRegistryVisitor) +} + +struct ImageRegistryVisitor; + +impl<'de> Visitor<'de> for ImageRegistryVisitor { + type Value = ImageRegistryConfig; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + write!(formatter, "{{ \"domain\": string, \"account\": string, \"organization\": string }}") + } + + fn visit_map(self, map: A) -> Result + where + A: serde::de::MapAccess<'de>, + { + // Need to use Value intermediate to get something cloneable. 
+ let value = serde_json::Value::deserialize( + MapAccessDeserializer::new(map), + )?; + // 1.14 and before: try to use ImageRegistry1_14 syntax + if let Ok(config) = + serde_json::from_value::(value.clone()) + { + return Ok(config.into()); + } + serde_json::from_value(value).map_err(serde::de::Error::custom) + } +} + +struct OptionImageRegistryVisitor; + +impl<'de> Visitor<'de> for OptionImageRegistryVisitor { + type Value = Option; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + write!(formatter, "null or {{ \"domain\": string, \"account\": string, \"organization\": string }}") + } + + fn visit_map(self, map: A) -> Result + where + A: serde::de::MapAccess<'de>, + { + ImageRegistryVisitor.visit_map(map).map(Some) + } + + fn visit_none(self) -> Result + where + E: serde::de::Error, + { + Ok(None) + } + + fn visit_unit(self) -> Result + where + E: serde::de::Error, + { + Ok(None) + } +} + +/// Configuration for the registry to push the built image to. 
+#[typeshare] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(tag = "type", content = "params")] +pub enum ImageRegistryLegacy1_14 { + /// Don't push the image to any registry + None(NoData), + /// Push the image to a standard image registry (any domain) + Standard(ImageRegistryConfig), +} + +impl Default for ImageRegistryLegacy1_14 { + fn default() -> Self { + Self::None(NoData {}) + } +} + +impl From for ImageRegistryConfig { + fn from(value: ImageRegistryLegacy1_14) -> Self { + match value { + ImageRegistryLegacy1_14::None(_) => { + ImageRegistryConfig::default() + } + ImageRegistryLegacy1_14::Standard(mut config) => { + // This version had domain defaulted to docker.io + if config.domain.is_empty() { + config.domain = String::from("docker.io"); + } + config + } + } + } } #[typeshare] diff --git a/client/core/rs/src/entities/builder.rs b/client/core/rs/src/entities/builder.rs index 5e05e4e5c..21d38e95f 100644 --- a/client/core/rs/src/entities/builder.rs +++ b/client/core/rs/src/entities/builder.rs @@ -72,8 +72,8 @@ impl Default for BuilderConfig { #[serde(tag = "type", content = "params")] #[allow(clippy::large_enum_variant)] pub enum PartialBuilderConfig { - Server(_PartialServerBuilderConfig), - Aws(_PartialAwsBuilderConfig), + Server(#[serde(default)] _PartialServerBuilderConfig), + Aws(#[serde(default)] _PartialAwsBuilderConfig), } impl Default for PartialBuilderConfig { @@ -230,6 +230,8 @@ impl MergePartial for BuilderConfig { .use_public_ip .unwrap_or(config.use_public_ip), port: partial.port.unwrap_or(config.port), + use_https: partial.use_https.unwrap_or(config.use_https), + user_data: partial.user_data.unwrap_or(config.user_data), git_providers: partial .git_providers .unwrap_or(config.git_providers), @@ -297,32 +299,56 @@ pub struct AwsBuilderConfig { #[partial_default(default_port())] pub port: i32, + #[serde(default = "default_use_https")] + #[builder(default = "default_use_https()")] + #[partial_default(default_use_https())] + 
pub use_https: bool, + /// The EC2 ami id to create. /// The ami should have the periphery client configured to start on startup, /// and should have the necessary github / dockerhub accounts configured. + #[serde(default)] + #[builder(default)] pub ami_id: String, /// The subnet id to create the instance in. + #[serde(default)] + #[builder(default)] pub subnet_id: String, /// The key pair name to attach to the instance + #[serde(default)] + #[builder(default)] pub key_pair_name: String, /// Whether to assign the instance a public IP address. /// Likely needed for the instance to be able to reach the open internet. + #[serde(default)] + #[builder(default)] pub assign_public_ip: bool, /// Whether core should use the public IP address to communicate with periphery on the builder. /// If false, core will communicate with the instance using the private IP. + #[serde(default)] + #[builder(default)] pub use_public_ip: bool, /// The security group ids to attach to the instance. /// This should include a security group to allow core inbound access to the periphery port. + #[serde(default)] + #[builder(default)] pub security_group_ids: Vec, + /// The user data to deploy the instance with. + #[serde(default)] + #[builder(default)] + pub user_data: String, /// Which git providers are available on the AMI #[serde(default)] + #[builder(default)] pub git_providers: Vec, /// Which docker registries are available on the AMI. #[serde(default)] + #[builder(default)] pub docker_registries: Vec, /// Which secrets are available on the AMI. 
#[serde(default)] + #[builder(default)] pub secrets: Vec, } @@ -333,12 +359,14 @@ impl Default for AwsBuilderConfig { instance_type: aws_default_instance_type(), volume_gb: aws_default_volume_gb(), port: default_port(), + use_https: default_use_https(), ami_id: Default::default(), subnet_id: Default::default(), security_group_ids: Default::default(), key_pair_name: Default::default(), - assign_public_ip: false, - use_public_ip: false, + assign_public_ip: Default::default(), + use_public_ip: Default::default(), + user_data: Default::default(), git_providers: Default::default(), docker_registries: Default::default(), secrets: Default::default(), @@ -368,6 +396,10 @@ fn default_port() -> i32 { 8120 } +fn default_use_https() -> bool { + true +} + #[typeshare] pub type BuilderQuery = ResourceQuery; diff --git a/client/core/rs/src/entities/config/core.rs b/client/core/rs/src/entities/config/core.rs index 74788f002..75bcdde5f 100644 --- a/client/core/rs/src/entities/config/core.rs +++ b/client/core/rs/src/entities/config/core.rs @@ -18,7 +18,7 @@ use crate::entities::{ Timelength, }; -use super::{DockerRegistry, GitProvider}; +use super::{empty_or_redacted, DockerRegistry, GitProvider}; /// # Komodo Core Environment Variables /// @@ -46,22 +46,22 @@ pub struct Env { pub komodo_port: Option, /// Override `passkey` pub komodo_passkey: Option, - /// Override `ensure_server` - pub komodo_ensure_server: Option, + /// Override `passkey` with file + pub komodo_passkey_file: Option, + /// Override `first_server` + pub komodo_first_server: Option, + /// Override `frontend_path` + pub komodo_frontend_path: Option, /// Override `jwt_secret` pub komodo_jwt_secret: Option, + /// Override `jwt_secret` from file + pub komodo_jwt_secret_file: Option, /// Override `jwt_ttl` pub komodo_jwt_ttl: Option, /// Override `repo_directory` - pub komodo_repo_directory: Option, - /// Override `sync_poll_interval` - pub komodo_sync_poll_interval: Option, - /// Override `stack_poll_interval` - pub 
komodo_stack_poll_interval: Option, - /// Override `build_poll_interval` - pub komodo_build_poll_interval: Option, - /// Override `repo_poll_interval` - pub komodo_repo_poll_interval: Option, + pub komodo_repo_directory: Option, + /// Override `resource_poll_interval` + pub komodo_resource_poll_interval: Option, /// Override `monitoring_interval` pub komodo_monitoring_interval: Option, /// Override `keep_stats_for_days` @@ -70,6 +70,8 @@ pub struct Env { pub komodo_keep_alerts_for_days: Option, /// Override `webhook_secret` pub komodo_webhook_secret: Option, + /// Override `webhook_secret` with file + pub komodo_webhook_secret_file: Option, /// Override `webhook_base_url` pub komodo_webhook_base_url: Option, @@ -90,30 +92,64 @@ pub struct Env { pub komodo_enable_new_users: Option, /// Override `disable_user_registration` pub komodo_disable_user_registration: Option, + /// Override `disable_confirm_dialog` + pub komodo_disable_confirm_dialog: Option, + /// Override `disable_non_admin_create` + pub komodo_disable_non_admin_create: Option, /// Override `local_auth` pub komodo_local_auth: Option, + /// Override `oidc_enabled` + pub komodo_oidc_enabled: Option, + /// Override `oidc_provider` + pub komodo_oidc_provider: Option, + /// Override `oidc_redirect` + pub komodo_oidc_redirect: Option, + /// Override `oidc_client_id` + pub komodo_oidc_client_id: Option, + /// Override `oidc_client_id` from file + pub komodo_oidc_client_id_file: Option, + /// Override `oidc_client_secret` + pub komodo_oidc_client_secret: Option, + /// Override `oidc_client_secret` from file + pub komodo_oidc_client_secret_file: Option, + /// Override `oidc_use_full_email` + pub komodo_oidc_use_full_email: Option, + /// Override `google_oauth.enabled` pub komodo_google_oauth_enabled: Option, /// Override `google_oauth.id` pub komodo_google_oauth_id: Option, + /// Override `google_oauth.id` from file + pub komodo_google_oauth_id_file: Option, /// Override `google_oauth.secret` pub 
komodo_google_oauth_secret: Option, + /// Override `google_oauth.secret` from file + pub komodo_google_oauth_secret_file: Option, /// Override `github_oauth.enabled` pub komodo_github_oauth_enabled: Option, /// Override `github_oauth.id` pub komodo_github_oauth_id: Option, + /// Override `github_oauth.id` from file + pub komodo_github_oauth_id_file: Option, /// Override `github_oauth.secret` pub komodo_github_oauth_secret: Option, + /// Override `github_oauth.secret` from file + pub komodo_github_oauth_secret_file: Option, /// Override `github_webhook_app.app_id` pub komodo_github_webhook_app_app_id: Option, + /// Override `github_webhook_app.app_id` from file + pub komodo_github_webhook_app_app_id_file: Option, /// Override `github_webhook_app.installations[i].id`. Accepts comma seperated list. /// /// Note. Paired by index with values in `komodo_github_webhook_app_installations_namespaces` pub komodo_github_webhook_app_installations_ids: Option>, + /// Override `github_webhook_app.installations[i].id` from file + pub komodo_github_webhook_app_installations_ids_file: + Option, /// Override `github_webhook_app.installations[i].namespace`. Accepts comma seperated list. /// /// Note. 
Paired by index with values in `komodo_github_webhook_app_installations_ids` @@ -122,26 +158,54 @@ pub struct Env { /// Override `github_webhook_app.pk_path` pub komodo_github_webhook_app_pk_path: Option, - /// Override `mongo.uri` - pub komodo_mongo_uri: Option, - /// Override `mongo.address` - pub komodo_mongo_address: Option, - /// Override `mongo.username` - pub komodo_mongo_username: Option, - /// Override `mongo.password` - pub komodo_mongo_password: Option, - /// Override `mongo.app_name` - pub komodo_mongo_app_name: Option, - /// Override `mongo.db_name` - pub komodo_mongo_db_name: Option, + /// Override `database.uri` + #[serde(alias = "KOMODO_MONGO_URI")] + pub komodo_database_uri: Option, + /// Override `database.uri` from file + #[serde(alias = "KOMODO_MONGO_URI_FILE")] + pub komodo_database_uri_file: Option, + /// Override `database.address` + #[serde(alias = "KOMODO_MONGO_ADDRESS")] + pub komodo_database_address: Option, + /// Override `database.username` + #[serde(alias = "KOMODO_MONGO_USERNAME")] + pub komodo_database_username: Option, + /// Override `database.username` with file + #[serde(alias = "KOMODO_MONGO_USERNAME_FILE")] + pub komodo_database_username_file: Option, + /// Override `database.password` + #[serde(alias = "KOMODO_MONGO_PASSWORD")] + pub komodo_database_password: Option, + /// Override `database.password` with file + #[serde(alias = "KOMODO_MONGO_PASSWORD_FILE")] + pub komodo_database_password_file: Option, + /// Override `database.app_name` + #[serde(alias = "KOMODO_MONGO_APP_NAME")] + pub komodo_database_app_name: Option, + /// Override `database.db_name` + #[serde(alias = "KOMODO_MONGO_DB_NAME")] + pub komodo_database_db_name: Option, /// Override `aws.access_key_id` pub komodo_aws_access_key_id: Option, + /// Override `aws.access_key_id` with file + pub komodo_aws_access_key_id_file: Option, /// Override `aws.secret_access_key` pub komodo_aws_secret_access_key: Option, + /// Override `aws.secret_access_key` with file + pub 
komodo_aws_secret_access_key_file: Option, /// Override `hetzner.token` pub komodo_hetzner_token: Option, + /// Override `hetzner.token` with file + pub komodo_hetzner_token_file: Option, + + /// Override `ssl_enabled`. + pub komodo_ssl_enabled: Option, + /// Override `ssl_key_file` + pub komodo_ssl_key_file: Option, + /// Override `ssl_cert_file` + pub komodo_ssl_cert_file: Option, } fn default_config_path() -> String { @@ -190,20 +254,22 @@ pub struct CoreConfig { #[serde(default)] pub ui_write_disabled: bool, - /// If defined, ensure an enabled server exists at this address. - /// Use with All In One compose. - /// Example: `http://komodo-periphery:8120` + /// Disable the popup confirm dialogs. All buttons will just be double click. #[serde(default)] - pub ensure_server: String, + pub disable_confirm_dialog: bool, - // ============ - // = Database = - // ============ - /// Configure core mongo connection. - /// - /// An easy deployment method is to use Mongo Atlas to provide - /// a reliable database. - pub mongo: MongoConfig, + /// If defined, ensure an enabled first server exists at this address. + /// Example: `http://periphery:8120` + #[serde(default)] + pub first_server: String, + + /// The path to the built frontend folder. + #[serde(default = "default_frontend_path")] + pub frontend_path: String, + + /// Configure database connection + #[serde(alias = "mongo")] + pub database: DatabaseConfig, // ================ // = Auth / Login = @@ -226,6 +292,11 @@ pub struct CoreConfig { #[serde(default)] pub disable_user_registration: bool, + /// Normally all users can create resources. + /// If `disable_non_admin_create = true`, only admins will be able to create resources. + #[serde(default)] + pub disable_non_admin_create: bool, + /// Optionally provide a specific jwt secret. /// Passing nothing or an empty string will cause one to be generated. 
/// Default: "" (empty string) @@ -237,6 +308,42 @@ pub struct CoreConfig { #[serde(default = "default_jwt_ttl")] pub jwt_ttl: Timelength, + // ======== + // = OIDC = + // ======== + /// Enable login with configured OIDC provider. + #[serde(default)] + pub oidc_enabled: bool, + + /// Configure OIDC provider address for + /// communcation directly with Komodo Core. + /// Note. Needs to be reachable from Komodo Core. + /// Eg. `https://accounts.example.internal/application/o/komodo` + #[serde(default)] + pub oidc_provider: String, + + /// Configure OIDC user redirect address. + /// This is the address users are redirected to in their browser, + /// and may be different from `oidc_provider`. + /// If not provided, the `oidc_provider` will be used. + /// Eg. `https://accounts.example.external/application/o/komodo` + #[serde(default)] + pub oidc_redirect: String, + + /// Set OIDC client id + #[serde(default)] + pub oidc_client_id: String, + + /// Set OIDC client secret + #[serde(default)] + pub oidc_client_secret: String, + + /// Use the full email for usernames. + /// Otherwise, the @address will be stripped, + /// making usernames more concise. + #[serde(default)] + pub oidc_use_full_email: bool, + // ========= // = Oauth = // ========= @@ -263,7 +370,8 @@ pub struct CoreConfig { /// This can be used if Komodo Core sits on an internal network which is /// unreachable directly from the open internet. /// A reverse proxy in a public network can forward webhooks to Komodo. - pub webhook_base_url: Option, + #[serde(default)] + pub webhook_base_url: String, /// Configure a Github Webhook app. /// Allows users to manage repo webhooks from within the Komodo UI. @@ -295,29 +403,11 @@ pub struct CoreConfig { // ================== // = Poll Intervals = // ================== - /// Interval at which to poll stacks for any updates / automated actions. + /// Interval at which to poll resources for any updates / automated actions. 
/// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` /// Default: `5-min`. #[serde(default = "default_poll_interval")] - pub stack_poll_interval: Timelength, - - /// Interval at which to poll syncs for any updates / automated actions. - /// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` - /// Default: `5-min`. - #[serde(default = "default_poll_interval")] - pub sync_poll_interval: Timelength, - - /// Interval at which to poll build commit hash for any updates / automated actions. - /// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` - /// Default: `5-min`. - #[serde(default = "default_poll_interval")] - pub build_poll_interval: Timelength, - - /// Interval at which to poll repo commit hash for any updates / automated actions. - /// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr` - /// Default: `5-min`. - #[serde(default = "default_poll_interval")] - pub repo_poll_interval: Timelength, + pub resource_poll_interval: Timelength, /// Interval at which to collect server stats and send any alerts. /// Default: `15-sec` @@ -351,10 +441,6 @@ pub struct CoreConfig { #[serde(default, alias = "docker_registry")] pub docker_registries: Vec, - /// Configure aws ecr registries, which are handled differently than other registries - #[serde(default, alias = "aws_ecr_registry")] - pub aws_ecr_registries: Vec, - // =========== // = Secrets = // =========== @@ -369,10 +455,23 @@ pub struct CoreConfig { // ========= /// Specify the directory used to clone stack / repo / build repos, for latest hash / contents. /// The default is fine when using a container. - /// This directory has no need for persistence, so no need to mount it. - /// Default: `/repos` + /// Default: `/repo-cache` #[serde(default = "default_repo_directory")] pub repo_directory: PathBuf, + + /// Whether to enable ssl. + #[serde(default)] + pub ssl_enabled: bool, + + /// Path to the ssl key. + /// Default: `/config/ssl/key.pem`. 
+ #[serde(default = "default_ssl_key_file")] + pub ssl_key_file: PathBuf, + + /// Path to the ssl cert. + /// Default: `/config/ssl/cert.pem`. + #[serde(default = "default_ssl_cert_file")] + pub ssl_cert_file: PathBuf, } fn default_title() -> String { @@ -383,13 +482,17 @@ fn default_core_port() -> u16 { 9120 } +fn default_frontend_path() -> String { + "/app/frontend".to_string() +} + fn default_jwt_ttl() -> Timelength { Timelength::OneDay } fn default_repo_directory() -> PathBuf { - // unwrap ok: `/repos` will always be valid path - PathBuf::from_str("/repos").unwrap() + // unwrap ok: `/repo-cache` will always be valid path + PathBuf::from_str("/repo-cache").unwrap() } fn default_prune_days() -> u64 { @@ -404,6 +507,14 @@ fn default_monitoring_interval() -> Timelength { Timelength::FifteenSeconds } +fn default_ssl_key_file() -> PathBuf { + "/config/ssl/key.pem".parse().unwrap() +} + +fn default_ssl_cert_file() -> PathBuf { + "/config/ssl/cert.pem".parse().unwrap() +} + impl CoreConfig { pub fn sanitized(&self) -> CoreConfig { let config = self.clone(); @@ -412,23 +523,31 @@ impl CoreConfig { host: config.host, port: config.port, passkey: empty_or_redacted(&config.passkey), - ensure_server: config.ensure_server, + first_server: config.first_server, + frontend_path: config.frontend_path, jwt_secret: empty_or_redacted(&config.jwt_secret), jwt_ttl: config.jwt_ttl, repo_directory: config.repo_directory, - sync_poll_interval: config.sync_poll_interval, - stack_poll_interval: config.stack_poll_interval, - build_poll_interval: config.build_poll_interval, - repo_poll_interval: config.repo_poll_interval, + resource_poll_interval: config.resource_poll_interval, monitoring_interval: config.monitoring_interval, keep_stats_for_days: config.keep_stats_for_days, keep_alerts_for_days: config.keep_alerts_for_days, logging: config.logging, transparent_mode: config.transparent_mode, ui_write_disabled: config.ui_write_disabled, + disable_confirm_dialog: config.disable_confirm_dialog, 
enable_new_users: config.enable_new_users, disable_user_registration: config.disable_user_registration, + disable_non_admin_create: config.disable_non_admin_create, local_auth: config.local_auth, + oidc_enabled: config.oidc_enabled, + oidc_provider: config.oidc_provider, + oidc_redirect: config.oidc_redirect, + oidc_client_id: empty_or_redacted(&config.oidc_client_id), + oidc_client_secret: empty_or_redacted( + &config.oidc_client_secret, + ), + oidc_use_full_email: config.oidc_use_full_email, google_oauth: OauthCredentials { enabled: config.google_oauth.enabled, id: empty_or_redacted(&config.google_oauth.id), @@ -442,19 +561,13 @@ impl CoreConfig { webhook_secret: empty_or_redacted(&config.webhook_secret), webhook_base_url: config.webhook_base_url, github_webhook_app: config.github_webhook_app, - mongo: MongoConfig { - uri: config.mongo.uri.map(|cur| empty_or_redacted(&cur)), - address: config.mongo.address, - username: config - .mongo - .username - .map(|cur| empty_or_redacted(&cur)), - password: config - .mongo - .password - .map(|cur| empty_or_redacted(&cur)), - app_name: config.mongo.app_name, - db_name: config.mongo.db_name, + database: DatabaseConfig { + uri: empty_or_redacted(&config.database.uri), + address: config.database.address, + username: empty_or_redacted(&config.database.username), + password: empty_or_redacted(&config.database.password), + app_name: config.database.app_name, + db_name: config.database.db_name, }, aws: AwsCredentials { access_key_id: empty_or_redacted(&config.aws.access_key_id), @@ -490,25 +603,11 @@ impl CoreConfig { provider }) .collect(), - aws_ecr_registries: config - .aws_ecr_registries - .into_iter() - .map(|mut ecr| { - ecr.access_key_id = empty_or_redacted(&ecr.access_key_id); - ecr.secret_access_key = - empty_or_redacted(&ecr.secret_access_key); - ecr - }) - .collect(), - } - } -} -fn empty_or_redacted(src: &str) -> String { - if src.is_empty() { - String::new() - } else { - String::from("##############") + ssl_enabled: 
config.ssl_enabled, + ssl_key_file: config.ssl_key_file, + ssl_cert_file: config.ssl_cert_file, + } } } @@ -526,46 +625,57 @@ pub struct OauthCredentials { pub secret: String, } -/// Provide mongo connection information. +/// Provide database connection information. +/// Komodo uses the MongoDB api driver for database communication, +/// and FerretDB to support Postgres and Sqlite storage options. +/// /// Must provide ONE of: /// 1. `uri` /// 2. `address` + `username` + `password` #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MongoConfig { +pub struct DatabaseConfig { /// Full mongo uri string, eg. `mongodb://username:password@your.mongo.int:27017` - pub uri: Option, - /// Just the address part of the uri, eg `your.mongo.int:27017` - pub address: Option, + #[serde(default)] + pub uri: String, + /// Just the address part of the mongo uri, eg `your.mongo.int:27017` + #[serde(default = "default_database_address")] + pub address: String, /// Mongo user username - pub username: Option, + #[serde(default)] + pub username: String, /// Mongo user password - pub password: Option, + #[serde(default)] + pub password: String, /// Mongo app name. default: `komodo_core` - #[serde(default = "default_core_mongo_app_name")] + #[serde(default = "default_database_app_name")] pub app_name: String, /// Mongo db name. Which mongo database to create the collections in. /// Default: `komodo`. 
- #[serde(default = "default_core_mongo_db_name")] + #[serde(default = "default_database_db_name")] pub db_name: String, } -fn default_core_mongo_app_name() -> String { +fn default_database_address() -> String { + String::from("localhost:27017") +} + +fn default_database_app_name() -> String { "komodo_core".to_string() } -fn default_core_mongo_db_name() -> String { +fn default_database_db_name() -> String { "komodo".to_string() } -impl Default for MongoConfig { +impl Default for DatabaseConfig { fn default() -> Self { Self { - uri: None, - address: Some("localhost:27017".to_string()), - username: None, - password: None, - app_name: default_core_mongo_app_name(), - db_name: default_core_mongo_db_name(), + uri: Default::default(), + address: default_database_address(), + username: Default::default(), + password: Default::default(), + app_name: default_database_app_name(), + db_name: default_database_db_name(), } } } @@ -585,39 +695,6 @@ pub struct HetznerCredentials { pub token: String, } -/// Provide configuration for an Aws Ecr registry. -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct AwsEcrConfigWithCredentials { - /// A label for the registry - pub label: String, - /// The Aws region - pub region: String, - /// The Aws account id - pub account_id: String, - /// The Aws ACCESS_KEY_ID - pub access_key_id: String, - /// The Aws SECRET_ACCESS_KEY - pub secret_access_key: String, -} - -/// Provide configuration for an Aws Ecr registry. -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct AwsEcrConfig { - /// The Aws region - pub region: String, - /// The Aws account id - pub account_id: String, -} - -impl AwsEcrConfig { - pub fn from(config: &AwsEcrConfigWithCredentials) -> AwsEcrConfig { - AwsEcrConfig { - region: config.region.to_string(), - account_id: config.account_id.to_string(), - } - } -} - /// Provide configuration for a Github Webhook app. 
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct GithubWebhookAppConfig { diff --git a/client/core/rs/src/entities/config/mod.rs b/client/core/rs/src/entities/config/mod.rs index d78ede1e5..735871ae1 100644 --- a/client/core/rs/src/entities/config/mod.rs +++ b/client/core/rs/src/entities/config/mod.rs @@ -85,3 +85,11 @@ pub struct ProviderAccount { #[serde(default, skip_serializing)] pub token: String, } + +fn empty_or_redacted(src: &str) -> String { + if src.is_empty() { + String::new() + } else { + String::from("##############") + } +} diff --git a/client/core/rs/src/entities/config/periphery.rs b/client/core/rs/src/entities/config/periphery.rs index 3cc556f80..2e6864cb2 100644 --- a/client/core/rs/src/entities/config/periphery.rs +++ b/client/core/rs/src/entities/config/periphery.rs @@ -22,7 +22,9 @@ use crate::entities::{ Timelength, }; -use super::{DockerRegistry, GitProvider}; +use super::{ + empty_or_redacted, DockerRegistry, GitProvider, ProviderAccount, +}; /// # Periphery Command Line Arguments. /// @@ -135,10 +137,19 @@ pub struct Env { pub periphery_allowed_ips: Option>, /// Override `passkeys` pub periphery_passkeys: Option>, + /// Override `passkeys` from file + pub periphery_passkeys_file: Option, /// Override `include_disk_mounts` pub periphery_include_disk_mounts: Option>, /// Override `exclude_disk_mounts` pub periphery_exclude_disk_mounts: Option>, + + /// Override `ssl_enabled` + pub periphery_ssl_enabled: Option, + /// Override `ssl_key_file` + pub periphery_ssl_key_file: Option, + /// Override `ssl_cert_file` + pub periphery_ssl_cert_file: Option, } /// # Periphery Configuration File @@ -212,6 +223,21 @@ pub struct PeripheryConfig { /// Supports any docker image repository. #[serde(default, alias = "docker_registry")] pub docker_registries: Vec, + + /// Whether to enable ssl. + /// Default: false (will change in later release) + #[serde(default = "default_ssl_enabled")] + pub ssl_enabled: bool, + + /// Path to the ssl key. 
+ /// Default: `/etc/komodo/ssl/periphery/key.pem`. + #[serde(default = "default_ssl_key_file")] + pub ssl_key_file: PathBuf, + + /// Path to the ssl cert. + /// Default: `/etc/komodo/ssl/periphery/cert.pem`. + #[serde(default = "default_ssl_cert_file")] + pub ssl_cert_file: PathBuf, } fn default_periphery_port() -> u16 { @@ -230,6 +256,18 @@ fn default_stats_polling_rate() -> Timelength { Timelength::FiveSeconds } +fn default_ssl_enabled() -> bool { + false +} + +fn default_ssl_key_file() -> PathBuf { + "/etc/komodo/ssl/key.pem".parse().unwrap() +} + +fn default_ssl_cert_file() -> PathBuf { + "/etc/komodo/ssl/cert.pem".parse().unwrap() +} + impl Default for PeripheryConfig { fn default() -> Self { Self { @@ -246,6 +284,72 @@ impl Default for PeripheryConfig { secrets: Default::default(), git_providers: Default::default(), docker_registries: Default::default(), + ssl_enabled: default_ssl_enabled(), + ssl_key_file: default_ssl_key_file(), + ssl_cert_file: default_ssl_cert_file(), + } + } +} + +impl PeripheryConfig { + pub fn sanitized(&self) -> PeripheryConfig { + PeripheryConfig { + port: self.port, + repo_dir: self.repo_dir.clone(), + stack_dir: self.stack_dir.clone(), + stats_polling_rate: self.stats_polling_rate, + legacy_compose_cli: self.legacy_compose_cli, + logging: self.logging.clone(), + allowed_ips: self.allowed_ips.clone(), + passkeys: self + .passkeys + .iter() + .map(|passkey| empty_or_redacted(passkey)) + .collect(), + include_disk_mounts: self.include_disk_mounts.clone(), + exclude_disk_mounts: self.exclude_disk_mounts.clone(), + secrets: self + .secrets + .iter() + .map(|(var, secret)| { + (var.to_string(), empty_or_redacted(secret)) + }) + .collect(), + git_providers: self + .git_providers + .iter() + .map(|provider| GitProvider { + domain: provider.domain.clone(), + https: provider.https, + accounts: provider + .accounts + .iter() + .map(|account| ProviderAccount { + username: account.username.clone(), + token: empty_or_redacted(&account.token), + 
}) + .collect(), + }) + .collect(), + docker_registries: self + .docker_registries + .iter() + .map(|provider| DockerRegistry { + domain: provider.domain.clone(), + organizations: provider.organizations.clone(), + accounts: provider + .accounts + .iter() + .map(|account| ProviderAccount { + username: account.username.clone(), + token: empty_or_redacted(&account.token), + }) + .collect(), + }) + .collect(), + ssl_enabled: self.ssl_enabled, + ssl_key_file: self.ssl_key_file.clone(), + ssl_cert_file: self.ssl_cert_file.clone(), } } } diff --git a/client/core/rs/src/entities/deployment.rs b/client/core/rs/src/entities/deployment.rs index 469f44a92..164f0b60a 100644 --- a/client/core/rs/src/entities/deployment.rs +++ b/client/core/rs/src/entities/deployment.rs @@ -11,10 +11,12 @@ use serde::{ use strum::{Display, EnumString}; use typeshare::typeshare; +use crate::parser::parse_key_value_list; + use super::{ docker::container::ContainerStateStatusEnum, resource::{Resource, ResourceListItem, ResourceQuery}, - EnvironmentVar, TerminationSignal, Version, + TerminationSignal, Version, }; #[typeshare] @@ -129,17 +131,13 @@ pub struct DeploymentConfig { /// Labels attached to various termination signal options. /// Used to specify different shutdown functionality depending on the termination signal. - #[serde( - default = "default_term_signal_labels", - deserialize_with = "term_labels_deserializer" - )] + #[serde(default, deserialize_with = "term_labels_deserializer")] #[partial_attr(serde( default, deserialize_with = "option_term_labels_deserializer" ))] - #[builder(default = "default_term_signal_labels()")] - #[partial_default(default_term_signal_labels())] - pub term_signal_labels: Vec, + #[builder(default)] + pub term_signal_labels: String, /// The container port mapping. /// Irrelevant if container network is `host`. 
@@ -150,7 +148,7 @@ pub struct DeploymentConfig { deserialize_with = "option_conversions_deserializer" ))] #[builder(default)] - pub ports: Vec, + pub ports: String, /// The container volume mapping. /// Maps files / folders on host to files / folders in container. @@ -160,7 +158,7 @@ pub struct DeploymentConfig { deserialize_with = "option_conversions_deserializer" ))] #[builder(default)] - pub volumes: Vec, + pub volumes: String, /// The environment variables passed to the container. #[serde( @@ -172,19 +170,19 @@ pub struct DeploymentConfig { deserialize_with = "super::option_env_vars_deserializer" ))] #[builder(default)] - pub environment: Vec, + pub environment: String, /// The docker labels given to the container. #[serde( default, - deserialize_with = "super::env_vars_deserializer" + deserialize_with = "super::labels_deserializer" )] #[partial_attr(serde( default, - deserialize_with = "super::option_env_vars_deserializer" + deserialize_with = "super::option_labels_deserializer" ))] #[builder(default)] - pub labels: Vec, + pub labels: String, } impl DeploymentConfig { @@ -197,10 +195,6 @@ fn default_send_alerts() -> bool { true } -fn default_term_signal_labels() -> Vec { - vec![TerminationSignalLabel::default()] -} - fn default_termination_timeout() -> i32 { 10 } @@ -219,7 +213,7 @@ impl Default for DeploymentConfig { image_registry_account: Default::default(), skip_secret_interp: Default::default(), redeploy_on_build: Default::default(), - term_signal_labels: default_term_signal_labels(), + term_signal_labels: Default::default(), termination_signal: Default::default(), termination_timeout: default_termination_timeout(), ports: Default::default(), @@ -289,54 +283,20 @@ pub struct Conversion { pub container: String, } -pub fn conversions_to_string(conversions: &[Conversion]) -> String { - conversions - .iter() - .map(|Conversion { local, container }| { - format!("{local}={container}") - }) - .collect::>() - .join("\n") -} - pub fn conversions_from_str( - 
value: &str, + input: &str, ) -> anyhow::Result> { - let trimmed = value.trim(); - if trimmed.is_empty() { - return Ok(Vec::new()); - } - let res = trimmed - .split('\n') - .map(|line| line.trim()) - .enumerate() - .filter(|(_, line)| { - !line.is_empty() - && !line.starts_with('#') - && !line.starts_with("//") - }) - .map(|(i, line)| { - let (local, container) = line - .split_once('=') - .with_context(|| format!("line {i} missing assignment (=)")) - .map(|(local, container)| { - let container = container - .split(" #") - .next() - .unwrap_or_default() - .trim() - .to_string(); - (local.trim().to_string(), container) - })?; - anyhow::Ok(Conversion { local, container }) - }) - .collect::>>()?; - Ok(res) + parse_key_value_list(input).map(|conversions| { + conversions + .into_iter() + .map(|(local, container)| Conversion { local, container }) + .collect() + }) } pub fn conversions_deserializer<'de, D>( deserializer: D, -) -> Result, D::Error> +) -> Result where D: Deserializer<'de>, { @@ -345,7 +305,7 @@ where pub fn option_conversions_deserializer<'de, D>( deserializer: D, -) -> Result>, D::Error> +) -> Result, D::Error> where D: Deserializer<'de>, { @@ -355,7 +315,7 @@ where struct ConversionVisitor; impl<'de> Visitor<'de> for ConversionVisitor { - type Value = Vec; + type Value = String; fn expecting( &self, @@ -368,43 +328,37 @@ impl<'de> Visitor<'de> for ConversionVisitor { where E: serde::de::Error, { - conversions_from_str(v) - .map_err(|e| serde::de::Error::custom(format!("{e:#}"))) + let out = v.to_string(); + if out.is_empty() || out.ends_with('\n') { + Ok(out) + } else { + Ok(out + "\n") + } } fn visit_seq(self, seq: A) -> Result where A: serde::de::SeqAccess<'de>, { - #[derive(Deserialize)] - struct ConversionInner { - local: String, - container: String, - } - - impl From for Conversion { - fn from(value: ConversionInner) -> Self { - Self { - local: value.local, - container: value.container, - } - } - } - - let res = Vec::::deserialize( + let res = 
Vec::::deserialize( SeqAccessDeserializer::new(seq), - )? - .into_iter() - .map(Into::into) - .collect(); - Ok(res) + )?; + let res = res + .iter() + .map(|Conversion { local, container }| { + format!(" {local}: {container}") + }) + .collect::>() + .join("\n"); + let extra = if res.is_empty() { "" } else { "\n" }; + Ok(res + extra) } } struct OptionConversionVisitor; impl<'de> Visitor<'de> for OptionConversionVisitor { - type Value = Option>; + type Value = Option; fn expecting( &self, @@ -542,64 +496,25 @@ pub struct TerminationSignalLabel { pub label: String, } -pub fn term_signal_labels_to_string( - labels: &[TerminationSignalLabel], -) -> String { - labels - .iter() - .map(|TerminationSignalLabel { signal, label }| { - format!("{signal}={label}") - }) - .collect::>() - .join("\n") -} - pub fn term_signal_labels_from_str( - value: &str, + input: &str, ) -> anyhow::Result> { - let trimmed = value.trim(); - if trimmed.is_empty() { - return Ok(Vec::new()); - } - let res = trimmed - .split('\n') - .map(|line| line.trim()) - .enumerate() - .filter(|(_, line)| { - !line.is_empty() - && !line.starts_with('#') - && !line.starts_with("//") - }) - .map(|(i, line)| { - let (signal, label) = line - .split_once('=') - .with_context(|| format!("line {i} missing assignment (=)")) - .map(|(signal, label)| { - let label = label - .split(" #") - .next() - .unwrap_or_default() - .trim() - .to_string(); - ( - signal.trim().parse::().with_context( - || format!("line {i} does not have valid signal"), - ), - label, - ) - })?; - anyhow::Ok(TerminationSignalLabel { - signal: signal?, - label, + parse_key_value_list(input).and_then(|list| { + list + .into_iter() + .map(|(signal, label)| { + anyhow::Ok(TerminationSignalLabel { + signal: signal.parse()?, + label, + }) }) - }) - .collect::>>()?; - Ok(res) + .collect() + }) } pub fn term_labels_deserializer<'de, D>( deserializer: D, -) -> Result, D::Error> +) -> Result where D: Deserializer<'de>, { @@ -608,7 +523,7 @@ where pub fn 
option_term_labels_deserializer<'de, D>( deserializer: D, -) -> Result>, D::Error> +) -> Result, D::Error> where D: Deserializer<'de>, { @@ -618,7 +533,7 @@ where struct TermSignalLabelVisitor; impl<'de> Visitor<'de> for TermSignalLabelVisitor { - type Value = Vec; + type Value = String; fn expecting( &self, @@ -631,43 +546,36 @@ impl<'de> Visitor<'de> for TermSignalLabelVisitor { where E: serde::de::Error, { - term_signal_labels_from_str(v) - .map_err(|e| serde::de::Error::custom(format!("{e:#}"))) + let out = v.to_string(); + if out.is_empty() || out.ends_with('\n') { + Ok(out) + } else { + Ok(out + "\n") + } } fn visit_seq(self, seq: A) -> Result where A: serde::de::SeqAccess<'de>, { - #[derive(Deserialize)] - struct TermSignalLabelInner { - signal: TerminationSignal, - label: String, - } - - impl From for TerminationSignalLabel { - fn from(value: TermSignalLabelInner) -> Self { - Self { - signal: value.signal, - label: value.label, - } - } - } - - let res = Vec::::deserialize( + let res = Vec::::deserialize( SeqAccessDeserializer::new(seq), )? .into_iter() - .map(Into::into) - .collect(); - Ok(res) + .map(|TerminationSignalLabel { signal, label }| { + format!(" {signal}: {label}") + }) + .collect::>() + .join("\n"); + let extra = if res.is_empty() { "" } else { "\n" }; + Ok(res + extra) } } struct OptionTermSignalLabelVisitor; impl<'de> Visitor<'de> for OptionTermSignalLabelVisitor { - type Value = Option>; + type Value = Option; fn expecting( &self, diff --git a/client/core/rs/src/entities/docker/mod.rs b/client/core/rs/src/entities/docker/mod.rs index a868af677..3c5cfb4df 100644 --- a/client/core/rs/src/entities/docker/mod.rs +++ b/client/core/rs/src/entities/docker/mod.rs @@ -174,4 +174,4 @@ pub struct HealthConfig { /// The time to wait between checks in nanoseconds during the start period. It should be 0 or at least 1000000 (1 ms). 0 means inherit. 
#[serde(rename = "StartInterval")] pub start_interval: Option, -} \ No newline at end of file +} diff --git a/client/core/rs/src/entities/logger.rs b/client/core/rs/src/entities/logger.rs index 4df19cda6..d05e17096 100644 --- a/client/core/rs/src/entities/logger.rs +++ b/client/core/rs/src/entities/logger.rs @@ -11,7 +11,8 @@ pub struct LogConfig { pub stdio: StdioLogMode, /// Enable opentelemetry exporting - pub otlp_endpoint: Option, + #[serde(default)] + pub otlp_endpoint: String, #[serde(default = "default_opentelemetry_service_name")] pub opentelemetry_service_name: String, @@ -26,7 +27,7 @@ impl Default for LogConfig { Self { level: Default::default(), stdio: Default::default(), - otlp_endpoint: None, + otlp_endpoint: Default::default(), opentelemetry_service_name: default_opentelemetry_service_name( ), } diff --git a/client/core/rs/src/entities/mod.rs b/client/core/rs/src/entities/mod.rs index 1703b2e0e..e974944e8 100644 --- a/client/core/rs/src/entities/mod.rs +++ b/client/core/rs/src/entities/mod.rs @@ -1,10 +1,12 @@ -use std::str::FromStr; +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; use anyhow::Context; use async_timing_util::unix_timestamp_ms; -use build::StandardRegistryConfig; +use build::ImageRegistryConfig; use clap::Parser; -use config::core::AwsEcrConfig; use derive_empty_traits::EmptyTraits; use derive_variants::{EnumVariants, ExtractVariant}; use serde::{ @@ -18,6 +20,8 @@ use serror::Serror; use strum::{AsRefStr, Display, EnumString}; use typeshare::typeshare; +use crate::parser::parse_key_value_list; + /// Subtypes of [Alert][alert::Alert]. pub mod alert; /// Subtypes of [Alerter][alerter::Alerter]. @@ -126,42 +130,35 @@ pub fn get_image_name( config: build::BuildConfig { image_name, - image_registry, + image_registry: + ImageRegistryConfig { + domain, + account, + organization, + }, .. }, .. 
}: &build::Build, - aws_ecr: impl FnOnce(&String) -> Option, ) -> anyhow::Result { let name = if image_name.is_empty() { to_komodo_name(name) } else { to_komodo_name(image_name) }; - let name = match image_registry { - build::ImageRegistry::None(_) => name, - build::ImageRegistry::AwsEcr(label) => { - let AwsEcrConfig { - region, account_id, .. - } = aws_ecr(label).with_context(|| { - format!("didn't find aws ecr config for registry {label}") - })?; - format!("{account_id}.dkr.ecr.{region}.amazonaws.com/{name}") - } - build::ImageRegistry::Standard(StandardRegistryConfig { - domain, - account, - organization, - }) => { - if !organization.is_empty() { - let org = organization.to_lowercase(); - format!("{domain}/{org}/{name}") - } else if !account.is_empty() { - format!("{domain}/{account}/{name}") - } else { - name - } + let name = match ( + !domain.is_empty(), + !organization.is_empty(), + !account.is_empty(), + ) { + // If organization and account provided, name under organization. + (true, true, true) => { + format!("{domain}/{}/{name}", organization.to_lowercase()) } + // Just domain / account provided + (true, false, true) => format!("{domain}/{account}/{name}"), + // Otherwise, just use name + _ => name, }; Ok(name) } @@ -170,6 +167,7 @@ pub fn to_komodo_name(name: &str) -> String { name.to_lowercase().replace([' ', '.'], "_") } +/// Unix timestamp in milliseconds as i64 pub fn komodo_timestamp() -> i64 { unix_timestamp_ms() as i64 } @@ -352,54 +350,20 @@ pub struct EnvironmentVar { pub value: String, } -pub fn environment_vars_to_string(vars: &[EnvironmentVar]) -> String { - vars - .iter() - .map(|EnvironmentVar { variable, value }| { - format!("{variable}={value}") - }) - .collect::>() - .join("\n") -} - pub fn environment_vars_from_str( - value: &str, + input: &str, ) -> anyhow::Result> { - let trimmed = value.trim(); - if trimmed.is_empty() { - return Ok(Vec::new()); - } - let res = trimmed - .split('\n') - .map(|line| line.trim()) - .enumerate() - 
.filter(|(_, line)| { - !line.is_empty() - && !line.starts_with('#') - && !line.starts_with("//") - }) - .map(|(i, line)| { - let (variable, value) = line - .split_once('=') - .with_context(|| format!("line {i} missing assignment (=)")) - .map(|(variable, value)| { - let value = value - .split(" #") - .next() - .unwrap_or_default() - .trim() - .to_string(); - (variable.trim().to_string(), value) - })?; - anyhow::Ok(EnvironmentVar { variable, value }) - }) - .collect::>>()?; - Ok(res) + parse_key_value_list(input).map(|list| { + list + .into_iter() + .map(|(variable, value)| EnvironmentVar { variable, value }) + .collect() + }) } pub fn env_vars_deserializer<'de, D>( deserializer: D, -) -> Result, D::Error> +) -> Result where D: Deserializer<'de>, { @@ -408,7 +372,7 @@ where pub fn option_env_vars_deserializer<'de, D>( deserializer: D, -) -> Result>, D::Error> +) -> Result, D::Error> where D: Deserializer<'de>, { @@ -418,7 +382,7 @@ where struct EnvironmentVarVisitor; impl<'de> Visitor<'de> for EnvironmentVarVisitor { - type Value = Vec; + type Value = String; fn expecting( &self, @@ -431,43 +395,37 @@ impl<'de> Visitor<'de> for EnvironmentVarVisitor { where E: serde::de::Error, { - environment_vars_from_str(v) - .map_err(|e| serde::de::Error::custom(format!("{e:#}"))) + let out = v.to_string(); + if out.is_empty() || out.ends_with('\n') { + Ok(out) + } else { + Ok(out + "\n") + } } fn visit_seq(self, seq: A) -> Result where A: serde::de::SeqAccess<'de>, { - #[derive(Deserialize)] - struct EnvironmentVarInner { - variable: String, - value: String, - } - - impl From for EnvironmentVar { - fn from(value: EnvironmentVarInner) -> Self { - Self { - variable: value.variable, - value: value.value, - } - } - } - - let res = Vec::::deserialize( + let vars = Vec::::deserialize( SeqAccessDeserializer::new(seq), - )? 
- .into_iter() - .map(Into::into) - .collect(); - Ok(res) + )?; + let vars = vars + .iter() + .map(|EnvironmentVar { variable, value }| { + format!(" {variable} = {value}") + }) + .collect::>() + .join("\n"); + let extra = if vars.is_empty() { "" } else { "\n" }; + Ok(vars + extra) } } struct OptionEnvVarVisitor; impl<'de> Visitor<'de> for OptionEnvVarVisitor { - type Value = Option>; + type Value = Option; fn expecting( &self, @@ -505,6 +463,108 @@ impl<'de> Visitor<'de> for OptionEnvVarVisitor { } } +pub fn labels_deserializer<'de, D>( + deserializer: D, +) -> Result +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(LabelVisitor) +} + +pub fn option_labels_deserializer<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(OptionLabelVisitor) +} + +struct LabelVisitor; + +impl<'de> Visitor<'de> for LabelVisitor { + type Value = String; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + write!(formatter, "string or Vec") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + let out = v.to_string(); + if out.is_empty() || out.ends_with('\n') { + Ok(out) + } else { + Ok(out + "\n") + } + } + + fn visit_seq(self, seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + let vars = Vec::::deserialize( + SeqAccessDeserializer::new(seq), + )?; + let vars = vars + .iter() + .map(|EnvironmentVar { variable, value }| { + format!(" {variable}: {value}") + }) + .collect::>() + .join("\n"); + let extra = if vars.is_empty() { "" } else { "\n" }; + Ok(vars + extra) + } +} + +struct OptionLabelVisitor; + +impl<'de> Visitor<'de> for OptionLabelVisitor { + type Value = Option; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + write!(formatter, "null or string or Vec") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + LabelVisitor.visit_str(v).map(Some) 
+ } + + fn visit_seq(self, seq: A) -> Result + where + A: serde::de::SeqAccess<'de>, + { + LabelVisitor.visit_seq(seq).map(Some) + } + + fn visit_none(self) -> Result + where + E: serde::de::Error, + { + Ok(None) + } + + fn visit_unit(self) -> Result + where + E: serde::de::Error, + { + Ok(None) + } +} + #[typeshare] #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LatestCommit { @@ -512,19 +572,28 @@ pub struct LatestCommit { pub message: String, } +#[typeshare] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct FileContents { + /// The path of the file on the host + pub path: String, + /// The contents of the file + pub contents: String, +} + #[typeshare] #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)] pub struct CloneArgs { /// Resource name (eg Build name, Repo name) pub name: String, /// Git provider domain. Default: `github.com` - pub provider: Option, + pub provider: String, /// Use https (vs http). pub https: bool, /// Full repo identifier. / pub repo: Option, /// Git Branch. Default: `main` - pub branch: Option, + pub branch: String, /// Specific commit hash. 
Optional pub commit: Option, /// The clone destination path @@ -537,17 +606,66 @@ pub struct CloneArgs { pub account: Option, } +impl CloneArgs { + pub fn path(&self, repo_dir: &Path) -> PathBuf { + let path = match &self.destination { + Some(destination) => PathBuf::from(&destination), + None => repo_dir.join(&to_komodo_name(&self.name)), + }; + path.components().collect::() + } + + pub fn remote_url( + &self, + access_token: Option<&str>, + ) -> anyhow::Result { + let access_token_at = match &access_token { + Some(token) => format!("{token}@"), + None => String::new(), + }; + let protocol = if self.https { "https" } else { "http" }; + let repo = self + .repo + .as_ref() + .context("resource has no repo attached")?; + Ok(format!( + "{protocol}://{access_token_at}{}/{repo}.git", + self.provider + )) + } + + pub fn unique_path( + &self, + repo_dir: &Path, + ) -> anyhow::Result { + let repo = self + .repo + .as_ref() + .context("resource has no repo attached")?; + let res = repo_dir + .join(self.provider.replace('/', "-")) + .join(repo.replace('/', "-")) + .join(self.branch.replace('/', "-")) + .join( + self.commit.as_ref().map(String::as_str).unwrap_or("latest"), + ); + Ok(res) + } +} + impl From<&self::build::Build> for CloneArgs { fn from(build: &self::build::Build) -> CloneArgs { CloneArgs { name: build.name.clone(), + provider: optional_string(&build.config.git_provider) + .unwrap_or_else(|| String::from("github.com")), repo: optional_string(&build.config.repo), - branch: optional_string(&build.config.branch), + branch: optional_string(&build.config.branch) + .unwrap_or_else(|| String::from("main")), commit: optional_string(&build.config.commit), destination: None, on_clone: build.config.pre_build.clone().into_option(), on_pull: None, - provider: optional_string(&build.config.git_provider), https: build.config.git_https, account: optional_string(&build.config.git_account), } @@ -558,13 +676,15 @@ impl From<&self::repo::Repo> for CloneArgs { fn from(repo: 
&self::repo::Repo) -> CloneArgs { CloneArgs { name: repo.name.clone(), + provider: optional_string(&repo.config.git_provider) + .unwrap_or_else(|| String::from("github.com")), repo: optional_string(&repo.config.repo), - branch: optional_string(&repo.config.branch), + branch: optional_string(&repo.config.branch) + .unwrap_or_else(|| String::from("main")), commit: optional_string(&repo.config.commit), destination: optional_string(&repo.config.path), on_clone: repo.config.on_clone.clone().into_option(), on_pull: repo.config.on_pull.clone().into_option(), - provider: optional_string(&repo.config.git_provider), https: repo.config.git_https, account: optional_string(&repo.config.git_account), } @@ -575,13 +695,15 @@ impl From<&self::sync::ResourceSync> for CloneArgs { fn from(sync: &self::sync::ResourceSync) -> Self { CloneArgs { name: sync.name.clone(), + provider: optional_string(&sync.config.git_provider) + .unwrap_or_else(|| String::from("github.com")), repo: optional_string(&sync.config.repo), - branch: optional_string(&sync.config.branch), + branch: optional_string(&sync.config.branch) + .unwrap_or_else(|| String::from("main")), commit: optional_string(&sync.config.commit), destination: None, on_clone: None, on_pull: None, - provider: optional_string(&sync.config.git_provider), https: sync.config.git_https, account: optional_string(&sync.config.git_account), } @@ -592,13 +714,15 @@ impl From<&self::stack::Stack> for CloneArgs { fn from(stack: &self::stack::Stack) -> Self { CloneArgs { name: stack.name.clone(), + provider: optional_string(&stack.config.git_provider) + .unwrap_or_else(|| String::from("github.com")), repo: optional_string(&stack.config.repo), - branch: optional_string(&stack.config.branch), + branch: optional_string(&stack.config.branch) + .unwrap_or_else(|| String::from("main")), commit: optional_string(&stack.config.commit), destination: None, on_clone: None, on_pull: None, - provider: optional_string(&stack.config.git_provider), https: 
stack.config.git_https, account: optional_string(&stack.config.git_account), } @@ -746,68 +870,12 @@ pub enum Operation { PruneBuildx, PruneSystem, - // build - CreateBuild, - UpdateBuild, - DeleteBuild, - RunBuild, - CancelBuild, - - // builder - CreateBuilder, - UpdateBuilder, - DeleteBuilder, - - // deployment - CreateDeployment, - UpdateDeployment, - DeleteDeployment, - Deploy, - StartDeployment, - RestartDeployment, - PauseDeployment, - UnpauseDeployment, - StopDeployment, - DestroyDeployment, - RenameDeployment, - - // repo - CreateRepo, - UpdateRepo, - DeleteRepo, - CloneRepo, - PullRepo, - BuildRepo, - CancelRepoBuild, - - // alerter - CreateAlerter, - UpdateAlerter, - DeleteAlerter, - - // procedure - CreateProcedure, - UpdateProcedure, - DeleteProcedure, - RunProcedure, - - // server template - CreateServerTemplate, - UpdateServerTemplate, - DeleteServerTemplate, - LaunchServer, - - // sync - CreateResourceSync, - UpdateResourceSync, - DeleteResourceSync, - RunSync, - // stack CreateStack, UpdateStack, RenameStack, DeleteStack, + WriteStackContents, RefreshStackCache, DeployStack, StartStack, @@ -824,6 +892,64 @@ pub enum Operation { UnpauseStackService, StopStackService, + // deployment + CreateDeployment, + UpdateDeployment, + DeleteDeployment, + Deploy, + StartDeployment, + RestartDeployment, + PauseDeployment, + UnpauseDeployment, + StopDeployment, + DestroyDeployment, + RenameDeployment, + + // build + CreateBuild, + UpdateBuild, + DeleteBuild, + RunBuild, + CancelBuild, + + // repo + CreateRepo, + UpdateRepo, + DeleteRepo, + CloneRepo, + PullRepo, + BuildRepo, + CancelRepoBuild, + + // procedure + CreateProcedure, + UpdateProcedure, + DeleteProcedure, + RunProcedure, + + // builder + CreateBuilder, + UpdateBuilder, + DeleteBuilder, + + // alerter + CreateAlerter, + UpdateAlerter, + DeleteAlerter, + + // server template + CreateServerTemplate, + UpdateServerTemplate, + DeleteServerTemplate, + LaunchServer, + + // sync + CreateResourceSync, + 
UpdateResourceSync, + DeleteResourceSync, + CommitSync, + RunSync, + // variable CreateVariable, UpdateVariableValue, @@ -918,16 +1044,16 @@ pub enum TerminationSignal { #[serde(tag = "type", content = "id")] pub enum ResourceTarget { System(String), - Build(String), - Builder(String), - Deployment(String), Server(String), + Stack(String), + Deployment(String), + Build(String), Repo(String), - Alerter(String), Procedure(String), + Builder(String), + Alerter(String), ServerTemplate(String), ResourceSync(String), - Stack(String), } impl ResourceTarget { @@ -936,16 +1062,16 @@ impl ResourceTarget { ) -> (ResourceTargetVariant, &String) { let id = match &self { ResourceTarget::System(id) => id, + ResourceTarget::Server(id) => id, + ResourceTarget::Stack(id) => id, ResourceTarget::Build(id) => id, ResourceTarget::Builder(id) => id, ResourceTarget::Deployment(id) => id, - ResourceTarget::Server(id) => id, ResourceTarget::Repo(id) => id, ResourceTarget::Alerter(id) => id, ResourceTarget::Procedure(id) => id, ResourceTarget::ServerTemplate(id) => id, ResourceTarget::ResourceSync(id) => id, - ResourceTarget::Stack(id) => id, }; (self.extract_variant(), id) } @@ -1020,3 +1146,101 @@ impl From<&stack::Stack> for ResourceTarget { Self::Stack(resource_sync.id.clone()) } } + +impl ResourceTargetVariant { + /// These need to use snake case + pub fn toml_header(&self) -> &'static str { + match self { + ResourceTargetVariant::System => "system", + ResourceTargetVariant::Build => "build", + ResourceTargetVariant::Builder => "builder", + ResourceTargetVariant::Deployment => "deployment", + ResourceTargetVariant::Server => "server", + ResourceTargetVariant::Repo => "repo", + ResourceTargetVariant::Alerter => "alerter", + ResourceTargetVariant::Procedure => "procedure", + ResourceTargetVariant::ServerTemplate => "server_template", + ResourceTargetVariant::ResourceSync => "resource_sync", + ResourceTargetVariant::Stack => "stack", + } + } +} + +/// Using this ensures the file contents 
end with trailing '\n' +pub fn file_contents_deserializer<'de, D>( + deserializer: D, +) -> Result +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(FileContentsVisitor) +} + +/// Using this ensures the file contents end with trailing '\n' +pub fn option_file_contents_deserializer<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + deserializer.deserialize_any(OptionFileContentsVisitor) +} + +struct FileContentsVisitor; + +impl<'de> Visitor<'de> for FileContentsVisitor { + type Value = String; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + write!(formatter, "string") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + let out = v.to_string(); + if out.is_empty() || out.ends_with('\n') { + Ok(out) + } else { + Ok(out + "\n") + } + } +} + +struct OptionFileContentsVisitor; + +impl<'de> Visitor<'de> for OptionFileContentsVisitor { + type Value = Option; + + fn expecting( + &self, + formatter: &mut std::fmt::Formatter, + ) -> std::fmt::Result { + write!(formatter, "null or string") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + FileContentsVisitor.visit_str(v).map(Some) + } + + fn visit_none(self) -> Result + where + E: serde::de::Error, + { + Ok(None) + } + + fn visit_unit(self) -> Result + where + E: serde::de::Error, + { + Ok(None) + } +} diff --git a/client/core/rs/src/entities/permission.rs b/client/core/rs/src/entities/permission.rs index 17918fb9a..4a44879a1 100644 --- a/client/core/rs/src/entities/permission.rs +++ b/client/core/rs/src/entities/permission.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use strum::{AsRefStr, Display, EnumString}; use typeshare::typeshare; -use super::{ResourceTarget, MongoId}; +use super::{MongoId, ResourceTarget}; /// Representation of a User or UserGroups permission on a resource. 
#[typeshare] diff --git a/client/core/rs/src/entities/repo.rs b/client/core/rs/src/entities/repo.rs index 05d66af18..865ca4b11 100644 --- a/client/core/rs/src/entities/repo.rs +++ b/client/core/rs/src/entities/repo.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use bson::{doc, Document}; use derive_builder::Builder; use derive_default_builder::DefaultBuilder; @@ -9,6 +10,7 @@ use typeshare::typeshare; use crate::entities::I64; use super::{ + environment_vars_from_str, resource::{Resource, ResourceListItem, ResourceQuery}, EnvironmentVar, SystemCommand, }; @@ -96,12 +98,14 @@ pub type _PartialRepoConfig = PartialRepoConfig; #[partial(skip_serializing_none, from, diff)] pub struct RepoConfig { /// The server to clone the repo on. - #[serde(default)] + #[serde(default, alias = "server")] + #[partial_attr(serde(alias = "server"))] #[builder(default)] pub server_id: String, /// Attach a builder to 'build' the repo. - #[serde(default)] + #[serde(default, alias = "builder")] + #[partial_attr(serde(alias = "builder"))] #[builder(default)] pub builder_id: String, @@ -192,7 +196,7 @@ pub struct RepoConfig { deserialize_with = "super::option_env_vars_deserializer" ))] #[builder(default)] - pub environment: Vec, + pub environment: String, /// The name of the written environment file before `docker compose up`. /// Relative to the repo root. 
@@ -212,6 +216,11 @@ impl RepoConfig { pub fn builder() -> RepoConfigBuilder { RepoConfigBuilder::default() } + + pub fn env_vars(&self) -> anyhow::Result> { + environment_vars_from_str(&self.environment) + .context("Invalid environment") + } } fn default_git_provider() -> String { diff --git a/client/core/rs/src/entities/server.rs b/client/core/rs/src/entities/server.rs index 3489208e7..d2616ad44 100644 --- a/client/core/rs/src/entities/server.rs +++ b/client/core/rs/src/entities/server.rs @@ -156,7 +156,7 @@ impl ServerConfig { } fn default_address() -> String { - String::from("http://localhost:8120") + String::from("https://periphery:8120") } fn default_enabled() -> bool { diff --git a/client/core/rs/src/entities/server_template/aws.rs b/client/core/rs/src/entities/server_template/aws.rs index 3db335f0d..e74452aba 100644 --- a/client/core/rs/src/entities/server_template/aws.rs +++ b/client/core/rs/src/entities/server_template/aws.rs @@ -50,11 +50,11 @@ pub struct AwsServerTemplateConfig { #[builder(default = "default_port()")] #[partial_default(default_port())] pub port: i32, - /// The user data to deploy the instance with. - #[serde(default = "default_user_data")] - #[builder(default = "default_user_data()")] - #[partial_default(default_user_data())] - pub user_data: String, + /// Whether Periphery will be running on https + #[serde(default = "default_use_https")] + #[builder(default = "default_use_https()")] + #[partial_default(default_use_https())] + pub use_https: bool, /// The security groups to give to the instance. #[serde(default)] #[builder(default)] @@ -64,6 +64,11 @@ pub struct AwsServerTemplateConfig { #[builder(default = "default_volumes()")] #[partial_default(default_volumes())] pub volumes: Vec, + /// The user data to deploy the instance with. 
+ #[serde(default = "default_user_data")] + #[builder(default = "default_user_data()")] + #[partial_default(default_user_data())] + pub user_data: String, } impl AwsServerTemplateConfig { @@ -102,6 +107,10 @@ fn default_port() -> i32 { 8120 } +fn default_use_https() -> bool { + true +} + fn default_user_data() -> String { String::from("#!/bin/bash apt update @@ -121,6 +130,7 @@ impl Default for AwsServerTemplateConfig { assign_public_ip: default_assign_public_ip(), use_public_ip: default_use_public_ip(), port: default_port(), + use_https: default_use_https(), volumes: default_volumes(), ami_id: Default::default(), subnet_id: Default::default(), @@ -190,7 +200,8 @@ impl AwsServerTemplateConfig { assign_public_ip: value.assign_public_ip, use_public_ip: value.use_public_ip, port: value.port, - user_data: Default::default(), + use_https: value.use_https, + user_data: value.user_data.clone(), } } } diff --git a/client/core/rs/src/entities/server_template/hetzner.rs b/client/core/rs/src/entities/server_template/hetzner.rs index e6a46a9d4..92ce22dee 100644 --- a/client/core/rs/src/entities/server_template/hetzner.rs +++ b/client/core/rs/src/entities/server_template/hetzner.rs @@ -19,34 +19,19 @@ pub type _PartialHetznerServerTemplateConfig = #[partial(skip_serializing_none, from, diff)] pub struct HetznerServerTemplateConfig { /// ID or name of the Image the Server is created from - #[serde(default)] - #[builder(default)] + #[serde(default = "default_image")] + #[builder(default = "default_image()")] + #[partial_default(default_image())] pub image: String, /// ID or name of Datacenter to create Server in #[serde(default)] #[builder(default)] pub datacenter: HetznerDatacenter, - /// Network IDs which should be attached to the Server private network interface at the creation time - #[serde(default)] - #[builder(default)] - pub private_network_ids: Vec, /// ID of the Placement Group the server should be in, /// Or 0 to not use placement group. 
#[serde(default)] #[builder(default)] pub placement_group: I64, - /// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached. - #[serde(default)] - #[builder(default)] - pub enable_public_ipv4: bool, - /// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached. - #[serde(default)] - #[builder(default)] - pub enable_public_ipv6: bool, - /// The firewalls to attach to the instance - #[serde(default)] - #[builder(default)] - pub firewall_ids: Vec, /// ID or name of the Server type this Server should be created with #[serde(default)] #[builder(default)] @@ -55,15 +40,37 @@ pub struct HetznerServerTemplateConfig { #[serde(default)] #[builder(default)] pub ssh_keys: Vec, - /// Cloud-Init user data to use during Server creation. This field is limited to 32KiB. - #[serde(default = "default_user_data")] - #[builder(default = "default_user_data()")] - #[partial_default(default_user_data())] - pub user_data: String, + /// Network IDs which should be attached to the Server private network interface at the creation time + #[serde(default)] + #[builder(default)] + pub private_network_ids: Vec, + /// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached. + #[serde(default)] + #[builder(default)] + pub enable_public_ipv4: bool, + /// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached. + #[serde(default)] + #[builder(default)] + pub enable_public_ipv6: bool, /// Connect to the instance using it's public ip. #[serde(default)] #[builder(default)] pub use_public_ip: bool, + /// The port periphery will be running on in AMI. 
+ /// Default: `8120` + #[serde(default = "default_port")] + #[builder(default = "default_port()")] + #[partial_default(default_port())] + pub port: i32, + /// Whether Periphery will be running on https + #[serde(default = "default_use_https")] + #[builder(default = "default_use_https()")] + #[partial_default(default_use_https())] + pub use_https: bool, + /// The firewalls to attach to the instance + #[serde(default)] + #[builder(default)] + pub firewall_ids: Vec, /// Labels for the server #[serde(default)] #[builder(default)] @@ -72,12 +79,11 @@ pub struct HetznerServerTemplateConfig { #[serde(default)] #[builder(default)] pub volumes: Vec, - /// The port periphery will be running on in AMI. - /// Default: `8120` - #[serde(default = "default_port")] - #[builder(default = "default_port()")] - #[partial_default(default_port())] - pub port: i32, + /// Cloud-Init user data to use during Server creation. This field is limited to 32KiB. + #[serde(default = "default_user_data")] + #[builder(default = "default_user_data()")] + #[partial_default(default_user_data())] + pub user_data: String, } impl HetznerServerTemplateConfig { @@ -86,10 +92,18 @@ impl HetznerServerTemplateConfig { } } +fn default_image() -> String { + String::from("ubuntu-24.04") +} + fn default_port() -> i32 { 8120 } +fn default_use_https() -> bool { + true +} + fn default_user_data() -> String { String::from("#cloud-config runcmd: @@ -106,7 +120,8 @@ impl Default for HetznerServerTemplateConfig { fn default() -> Self { Self { port: default_port(), - image: Default::default(), + use_https: default_use_https(), + image: default_image(), datacenter: Default::default(), private_network_ids: Default::default(), placement_group: Default::default(), diff --git a/client/core/rs/src/entities/server_template/mod.rs b/client/core/rs/src/entities/server_template/mod.rs index 89905c83b..a26c11d8d 100644 --- a/client/core/rs/src/entities/server_template/mod.rs +++ b/client/core/rs/src/entities/server_template/mod.rs 
@@ -74,8 +74,10 @@ impl Default for ServerTemplateConfig { )] #[serde(tag = "type", content = "params")] pub enum PartialServerTemplateConfig { - Aws(aws::_PartialAwsServerTemplateConfig), - Hetzner(hetzner::_PartialHetznerServerTemplateConfig), + Aws(#[serde(default)] aws::_PartialAwsServerTemplateConfig), + Hetzner( + #[serde(default)] hetzner::_PartialHetznerServerTemplateConfig, + ), } impl Default for PartialServerTemplateConfig { @@ -233,6 +235,7 @@ impl MergePartial for ServerTemplateConfig { .use_public_ip .unwrap_or(config.use_public_ip), port: partial.port.unwrap_or(config.port), + use_https: partial.use_https.unwrap_or(config.use_https), user_data: partial.user_data.unwrap_or(config.user_data), }; ServerTemplateConfig::Aws(config) @@ -274,6 +277,7 @@ impl MergePartial for ServerTemplateConfig { labels: partial.labels.unwrap_or(config.labels), volumes: partial.volumes.unwrap_or(config.volumes), port: partial.port.unwrap_or(config.port), + use_https: partial.use_https.unwrap_or(config.use_https), }; ServerTemplateConfig::Hetzner(config) } diff --git a/client/core/rs/src/entities/stack.rs b/client/core/rs/src/entities/stack.rs index c1a9b975c..71bf12793 100644 --- a/client/core/rs/src/entities/stack.rs +++ b/client/core/rs/src/entities/stack.rs @@ -11,7 +11,7 @@ use typeshare::typeshare; use super::{ docker::container::ContainerListItem, resource::{Resource, ResourceListItem, ResourceQuery}, - to_komodo_name, EnvironmentVar, + to_komodo_name, FileContents, }; #[typeshare] @@ -57,6 +57,10 @@ pub type StackListItem = ResourceListItem; pub struct StackListItemInfo { /// The server that stack is deployed on. pub server_id: String, + /// Whether stack is using files on host mode + pub files_on_host: bool, + /// Whether stack has file contents defined. + pub file_contents: bool, /// The git provider domain pub git_provider: String, /// The configured repo @@ -143,7 +147,7 @@ pub struct StackInfo { /// Deployed commit message, or null. 
Only for repo based stacks pub deployed_message: Option, /// The deployed compose file contents. This is updated whenever Komodo successfully deploys the stack. - pub deployed_contents: Option>, + pub deployed_contents: Option>, /// The deployed service names. /// This is updated whenever it is empty, or deployed contents is updated. pub deployed_services: Option>, @@ -156,9 +160,9 @@ pub struct StackInfo { /// The remote compose file contents, whether on host or in repo. /// This is updated whenever Komodo refreshes the stack cache. /// It will be empty if the file is defined directly in the stack config. - pub remote_contents: Option>, + pub remote_contents: Option>, /// If there was an error in getting the remote contents, it will be here. - pub remote_errors: Option>, + pub remote_errors: Option>, /// Latest commit hash, or null pub latest_hash: Option, @@ -176,10 +180,16 @@ pub type _PartialStackConfig = PartialStackConfig; #[partial(skip_serializing_none, from, diff)] pub struct StackConfig { /// The server to deploy the stack on. - #[serde(default)] + #[serde(default, alias = "server")] + #[partial_attr(serde(alias = "server"))] #[builder(default)] pub server_id: String, + /// Configure quick links that are displayed in the resource header + #[serde(default)] + #[builder(default)] + pub links: Vec, + /// Optionally specify a custom project name for the stack. /// If this is empty string, it will default to the stack name. /// Used with `docker compose -p {project_name}`. @@ -189,48 +199,6 @@ pub struct StackConfig { #[builder(default)] pub project_name: String, - /// Directory to change to (`cd`) before running `docker compose up -d`. - /// Default: `./` (the repo root) - #[serde(default = "default_run_directory")] - #[builder(default = "default_run_directory()")] - #[partial_default(default_run_directory())] - pub run_directory: String, - - /// Add paths to compose files, relative to the run path. - /// If this is empty, will use file `compose.yaml`. 
- #[serde(default)] - #[builder(default)] - pub file_paths: Vec, - - /// If this is checked, the stack will source the files on the host. - /// Use `run_directory` and `file_paths` to specify the path on the host. - /// This is useful for those who wish to setup their files on the host using SSH or similar, - /// rather than defining the contents in UI or in a git repo. - #[serde(default)] - #[builder(default)] - pub files_on_host: bool, - - /// Used with `registry_account` to login to a registry before docker compose up. - #[serde(default)] - #[builder(default)] - pub registry_provider: String, - - /// Used with `registry_provider` to login to a registry before docker compose up. - #[serde(default)] - #[builder(default)] - pub registry_account: String, - - /// The extra arguments to pass after `docker compose up -d`. - /// If empty, no extra arguments will be passed. - #[serde(default)] - #[builder(default)] - pub extra_args: Vec, - - /// Whether to skip secret interpolation into the stack environment variables. - #[serde(default)] - #[builder(default)] - pub skip_secret_interp: bool, - /// Whether to automatically `compose pull` before redeploying stack. /// Ensured latest images are deployed. /// Will fail if the compose file specifies a locally build image. @@ -245,19 +213,37 @@ pub struct StackConfig { #[builder(default)] pub run_build: bool, - /// The extra arguments to pass after `docker compose build`. - /// If empty, no extra build arguments will be passed. - /// Only used if `run_build: true` + /// Whether to skip secret interpolation into the stack environment variables. #[serde(default)] #[builder(default)] - pub build_extra_args: Vec, + pub skip_secret_interp: bool, - /// Ignore certain services declared in the compose file when checking - /// the stack status. For example, an init service might be exited, but the - /// stack should be healthy. 
This init service should be in `ignore_services` + /// If this is checked, the stack will source the files on the host. + /// Use `run_directory` and `file_paths` to specify the path on the host. + /// This is useful for those who wish to setup their files on the host using SSH or similar, + /// rather than defining the contents in UI or in a git repo. #[serde(default)] #[builder(default)] - pub ignore_services: Vec, + pub files_on_host: bool, + + /// Directory to change to (`cd`) before running `docker compose up -d`. + #[serde(default)] + #[builder(default)] + pub run_directory: String, + + /// Add paths to compose files, relative to the run path. + /// If this is empty, will use file `compose.yaml`. + #[serde(default)] + #[builder(default)] + pub file_paths: Vec, + + /// The name of the written environment file before `docker compose up`. + /// Relative to the repo root. + /// Default: .env + #[serde(default = "default_env_file_path")] + #[builder(default = "default_env_file_path()")] + #[partial_default(default_env_file_path())] + pub env_file_path: String, /// The git provider domain. Default: github.com #[serde(default = "default_git_provider")] @@ -316,15 +302,47 @@ pub struct StackConfig { #[partial_default(default_send_alerts())] pub send_alerts: bool, - /// Configure quick links that are displayed in the resource header + /// Used with `registry_account` to login to a registry before docker compose up. #[serde(default)] #[builder(default)] - pub links: Vec, + pub registry_provider: String, + + /// Used with `registry_provider` to login to a registry before docker compose up. + #[serde(default)] + #[builder(default)] + pub registry_account: String, + + /// The extra arguments to pass after `docker compose up -d`. + /// If empty, no extra arguments will be passed. + #[serde(default)] + #[builder(default)] + pub extra_args: Vec, + + /// The extra arguments to pass after `docker compose build`. + /// If empty, no extra build arguments will be passed. 
+ /// Only used if `run_build: true` + #[serde(default)] + #[builder(default)] + pub build_extra_args: Vec, + + /// Ignore certain services declared in the compose file when checking + /// the stack status. For example, an init service might be exited, but the + /// stack should be healthy. This init service should be in `ignore_services` + #[serde(default)] + #[builder(default)] + pub ignore_services: Vec, /// The contents of the file directly, for management in the UI. /// If this is empty, it will fall back to checking git config for /// repo based compose file. - #[serde(default)] + #[serde( + default, + deserialize_with = "super::file_contents_deserializer" + )] + #[partial_attr(serde( + default, + deserialize_with = "super::option_file_contents_deserializer" + ))] #[builder(default)] pub file_contents: String, @@ -342,15 +360,7 @@ pub struct StackConfig { deserialize_with = "super::option_env_vars_deserializer" ))] #[builder(default)] - pub environment: Vec, - - /// The name of the written environment file before `docker compose up`. - /// Relative to the repo root. 
- /// Default: .env - #[serde(default = "default_env_file_path")] - #[builder(default = "default_env_file_path()")] - #[partial_default(default_env_file_path())] - pub env_file_path: String, + pub environment: String, } impl StackConfig { @@ -379,10 +389,6 @@ fn default_branch() -> String { String::from("main") } -fn default_run_directory() -> String { - String::from("./") -} - fn default_webhook_enabled() -> bool { true } @@ -396,7 +402,7 @@ impl Default for StackConfig { Self { server_id: Default::default(), project_name: Default::default(), - run_directory: default_run_directory(), + run_directory: Default::default(), file_paths: Default::default(), files_on_host: Default::default(), registry_provider: Default::default(), @@ -435,15 +441,6 @@ pub struct ComposeProject { pub compose_files: Vec, } -#[typeshare] -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct ComposeContents { - /// The path of the file on the host - pub path: String, - /// The contents of the file - pub contents: String, -} - #[typeshare] #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct StackServiceNames { diff --git a/client/core/rs/src/entities/sync.rs b/client/core/rs/src/entities/sync.rs index 0b7ed15ab..26db03d01 100644 --- a/client/core/rs/src/entities/sync.rs +++ b/client/core/rs/src/entities/sync.rs @@ -8,7 +8,7 @@ use typeshare::typeshare; use super::{ resource::{Resource, ResourceListItem, ResourceQuery}, - I64, + FileContents, ResourceTarget, I64, }; #[typeshare] @@ -20,16 +20,24 @@ pub type ResourceSyncListItem = pub struct ResourceSyncListItemInfo { /// Unix timestamp of last sync, or 0 pub last_sync_ts: I64, - /// Short commit hash of last sync, or empty string - pub last_sync_hash: String, - /// Commit message of last sync, or empty string - pub last_sync_message: String, - /// The git provider domain + /// Whether sync is `files_on_host` mode. + pub files_on_host: bool, + /// Whether sync has file contents defined. 
+ pub file_contents: bool, + /// Whether sync has `managed` mode enabled. + pub managed: bool, + /// Resource path to the files. + pub resource_path: String, + /// The git provider domain. pub git_provider: String, /// The Github repo used as the source of the sync resources pub repo: String, /// The branch of the repo pub branch: String, + /// Short commit hash of last sync, or empty string + pub last_sync_hash: Option, + /// Commit message of last sync, or empty string + pub last_sync_message: Option, /// State of the sync. Reflects whether most recent sync successful. pub state: ResourceSyncState, } @@ -60,102 +68,69 @@ pub type ResourceSync = #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct ResourceSyncInfo { /// Unix timestamp of last applied sync + #[serde(default)] pub last_sync_ts: I64, /// Short commit hash of last applied sync - pub last_sync_hash: String, + pub last_sync_hash: Option, /// Commit message of last applied sync - pub last_sync_message: String, - /// Readable logs of pending updates - pub pending: PendingSyncUpdates, + pub last_sync_message: Option, + + /// The list of pending updates to resources + #[serde(default)] + pub resource_updates: Vec, + /// The list of pending updates to variables + #[serde(default)] + pub variable_updates: Vec, + /// The list of pending updates to user groups + #[serde(default)] + pub user_group_updates: Vec, + /// The list of pending deploys to resources. + #[serde(default)] + pub pending_deploy: SyncDeployUpdate, + /// If there is an error, it will be stored here + pub pending_error: Option, + /// The commit hash which produced these pending updates. + pub pending_hash: Option, + /// The commit message which produced these pending updates. 
+ pub pending_message: Option, + + /// The current sync files + #[serde(default)] + pub remote_contents: Vec, + /// Any read errors in files by path + #[serde(default)] + pub remote_errors: Vec, } #[typeshare] -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct PendingSyncUpdates { - /// The commit hash which produced these pending updates - pub hash: Option, - /// The commit message which produced these pending updates - pub message: Option, - /// The data associated with the sync. Either Ok containing diffs, - /// or Err containing an error message - pub data: PendingSyncUpdatesData, +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ResourceDiff { + /// The resource target. + /// The target id will be empty if "Create" ResourceDiffType. + pub target: ResourceTarget, + /// The data associated with the diff. + pub data: DiffData, } #[typeshare] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type", content = "data")] -#[allow(clippy::large_enum_variant)] -pub enum PendingSyncUpdatesData { - Ok(PendingSyncUpdatesDataOk), - Err(PendingSyncUpdatesDataErr), -} - -impl Default for PendingSyncUpdatesData { - fn default() -> Self { - Self::Ok(Default::default()) - } -} - -#[typeshare] -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct PendingSyncUpdatesDataOk { - /// Readable log of any deploy actions that will be performed - pub deploy_updates: Option, - /// Readable log of any pending deployment updates - pub deployment_updates: Option, - /// Readable log of any pending deployment updates - pub stack_updates: Option, - /// Readable log of any pending server updates - pub server_updates: Option, - /// Readable log of any pending build updates - pub build_updates: Option, - /// Readable log of any pending repo updates - pub repo_updates: Option, - /// Readable log of any pending procedure updates - pub procedure_updates: Option, - /// Readable log of any pending alerter updates - pub alerter_updates: Option, - 
/// Readable log of any pending builder updates - pub builder_updates: Option, - /// Readable log of any pending server template updates - pub server_template_updates: Option, - /// Readable log of any pending resource sync updates - pub resource_sync_updates: Option, - /// Readable log of any pending variable updates - pub variable_updates: Option, - /// Readable log of any pending user group updates - pub user_group_updates: Option, -} - -impl PendingSyncUpdatesDataOk { - pub fn no_updates(&self) -> bool { - self.deploy_updates.is_none() - && self.deployment_updates.is_none() - && self.stack_updates.is_none() - && self.server_updates.is_none() - && self.build_updates.is_none() - && self.repo_updates.is_none() - && self.procedure_updates.is_none() - && self.alerter_updates.is_none() - && self.builder_updates.is_none() - && self.server_template_updates.is_none() - && self.resource_sync_updates.is_none() - && self.variable_updates.is_none() - && self.user_group_updates.is_none() - } -} - -#[typeshare] -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct SyncUpdate { - /// Resources to create - pub to_create: i32, - /// Resources to update - pub to_update: i32, - /// Resources to delete - pub to_delete: i32, - /// A readable log of all the changes to be applied - pub log: String, +pub enum DiffData { + /// Resource will be created + Create { + /// The proposed resource to create in TOML + proposed: String, + }, + Update { + /// The proposed TOML + proposed: String, + /// The current TOML + current: String, + }, + Delete { + /// The current TOML of the resource to delete + current: String, + }, } #[typeshare] @@ -167,12 +142,6 @@ pub struct SyncDeployUpdate { pub log: String, } -#[typeshare] -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PendingSyncUpdatesDataErr { - pub message: String, -} - #[typeshare(serialized_as = "Partial")] pub type _PartialResourceSyncConfig = PartialResourceSyncConfig; @@ -221,20 +190,6 @@ pub struct 
ResourceSyncConfig { #[builder(default)] pub git_account: String, - /// The path of the resource file(s) to sync, relative to the repo root. - /// Can be a specific file, or a directory containing multiple files / folders. - /// See [https://komo.do/docs/sync-resources](https://komo.do/docs/sync-resources) for more information. - #[serde(default = "default_resource_path")] - #[builder(default = "default_resource_path()")] - #[partial_default(default_resource_path())] - pub resource_path: String, - - /// Whether sync should delete resources - /// not declared in the resource files - #[serde(default)] - #[builder(default)] - pub delete: bool, - /// Whether incoming webhooks actually trigger action. #[serde(default = "default_webhook_enabled")] #[builder(default = "default_webhook_enabled()")] @@ -246,6 +201,55 @@ pub struct ResourceSyncConfig { #[serde(default)] #[builder(default)] pub webhook_secret: String, + + /// Files are available on the Komodo Core host. + /// Specify the file / folder with [ResourceSyncConfig::resource_path]. + #[serde(default)] + #[builder(default)] + pub files_on_host: bool, + + /// The path of the resource file(s) to sync. + /// - If Files on Host, this is relative to the configured `sync_directory` in core config. + /// - If Git Repo based, this is relative to the root of the repo. + /// Can be a specific file, or a directory containing multiple files / folders. + /// See [https://komo.do/docs/sync-resources](https://komo.do/docs/sync-resources) for more information. + #[serde(default = "default_resource_path")] + #[builder(default = "default_resource_path()")] + #[partial_default(default_resource_path())] + pub resource_path: String, + + /// Enable "pushes" to the file, + /// which exports resources matching tags to single file. + /// - If using `files_on_host`, it is stored in the file_contents, which must point to a .toml file path (it will be created if it doesn't exist). + /// - If using `file_contents`, it is stored in the database. 
+ /// When using this, "delete" mode is always enabled. + #[serde(default)] + #[builder(default)] + pub managed: bool, + + /// Whether sync should delete resources + /// not declared in the resource files + #[serde(default)] + #[builder(default)] + pub delete: bool, + + /// When using `managed` resource sync, will only export resources + /// matching all of the given tags. If none, will match all resources. + #[serde(default)] + #[builder(default)] + pub match_tags: Vec, + + /// Manage the file contents in the UI. + #[serde( + default, + deserialize_with = "super::file_contents_deserializer" + )] + #[partial_attr(serde( + default, + deserialize_with = "super::option_file_contents_deserializer" + ))] + #[builder(default)] + pub file_contents: String, } impl ResourceSyncConfig { @@ -267,7 +271,7 @@ fn default_branch() -> String { } fn default_resource_path() -> String { - String::from("resources") + String::from("./resources.toml") } fn default_webhook_enabled() -> bool { @@ -284,6 +288,10 @@ impl Default for ResourceSyncConfig { commit: Default::default(), git_account: Default::default(), resource_path: default_resource_path(), + files_on_host: Default::default(), + file_contents: Default::default(), + managed: Default::default(), + match_tags: Default::default(), delete: Default::default(), webhook_enabled: default_webhook_enabled(), webhook_secret: Default::default(), diff --git a/client/core/rs/src/entities/user.rs b/client/core/rs/src/entities/user.rs index fab9d9801..08d8e936d 100644 --- a/client/core/rs/src/entities/user.rs +++ b/client/core/rs/src/entities/user.rs @@ -5,9 +5,7 @@ use typeshare::typeshare; use crate::entities::{MongoId, I64}; -use super::{ - permission::PermissionLevel, ResourceTargetVariant, -}; +use super::{permission::PermissionLevel, ResourceTargetVariant}; #[typeshare] #[derive(Serialize, Deserialize, Debug, Clone, Default)] @@ -39,6 +37,10 @@ pub struct User { #[serde(default)] pub enabled: bool, + /// Can give / take other users admin 
privileges. + #[serde(default)] + pub super_admin: bool, + /// Whether the user has global admin permissions. #[serde(default)] pub admin: bool, @@ -230,6 +232,9 @@ pub enum UserConfig { /// User that logs in via Github Oauth Github { github_id: String, avatar: String }, + /// User that logs in via Oidc provider + Oidc { provider: String, user_id: String }, + /// Non-human managed user, can have its own permissions / api keys Service { description: String }, } diff --git a/client/core/rs/src/entities/variable.rs b/client/core/rs/src/entities/variable.rs index 2a377db30..ad09db710 100644 --- a/client/core/rs/src/entities/variable.rs +++ b/client/core/rs/src/entities/variable.rs @@ -22,7 +22,7 @@ pub struct Variable { pub value: String, /// If marked as secret, the variable value will be hidden in updates / logs. /// Additionally the value will not be served in read requests by non admin users. - /// + /// /// Note that the value is NOT encrypted in the database, and will likely show up in database logs. /// The security of these variables comes down to the security /// of the database (system level encryption, network isolation, etc.)
diff --git a/client/core/rs/src/lib.rs b/client/core/rs/src/lib.rs index da8adbe10..ee1fec823 100644 --- a/client/core/rs/src/lib.rs +++ b/client/core/rs/src/lib.rs @@ -37,6 +37,7 @@ pub mod busy; pub mod entities; pub mod ws; +mod parser; mod request; #[derive(Deserialize)] diff --git a/client/core/rs/src/parser.rs b/client/core/rs/src/parser.rs new file mode 100644 index 000000000..67eb82a98 --- /dev/null +++ b/client/core/rs/src/parser.rs @@ -0,0 +1,56 @@ +use anyhow::Context; + +pub fn parse_key_value_list( + input: &str, +) -> anyhow::Result> { + let trimmed = input.trim(); + if trimmed.is_empty() { + return Ok(Vec::new()); + } + trimmed + .split('\n') + .map(|line| line.trim()) + .enumerate() + .filter(|(_, line)| { + !line.is_empty() + && !line.starts_with('#') + && !line.starts_with("//") + }) + .map(|(i, line)| { + let line = line + // Remove end of line comments + .split_once(" #") + .unwrap_or((line, "")) + .0 + .trim() + // Remove preceding '-' (yaml list) + .trim_start_matches('-') + .trim(); + // Remove wrapping quotes (from yaml list) + let line = if let Some(line) = line.strip_prefix('"') { + line.strip_suffix('"').unwrap_or(line) + } else { + line + }; + // Remove any preceding '"' (from yaml list) (wrapping quotes open) + let (key, value) = line + .split_once(['=', ':']) + .with_context(|| { + format!( + "line {i} missing assignment character ('=' or ':')" + ) + }) + .map(|(key, value)| { + let value = value.trim(); + // Remove wrapping quotes around value + if let Some(value) = value.strip_prefix('"') { + value.strip_suffix('"').unwrap_or(value) + } else { + value + }; + (key.trim().to_string(), value.trim().to_string()) + })?; + anyhow::Ok((key, value)) + }) + .collect::>>() +} diff --git a/client/core/ts/src/responses.ts b/client/core/ts/src/responses.ts index 148d0b7ff..6d98217ba 100644 --- a/client/core/ts/src/responses.ts +++ b/client/core/ts/src/responses.ts @@ -18,7 +18,6 @@ export type UserResponses = { export type ReadResponses = { 
GetVersion: Types.GetVersionResponse; GetCoreInfo: Types.GetCoreInfoResponse; - ListAwsEcrLabels: Types.ListAwsEcrLabelsResponse; ListSecrets: Types.ListSecretsResponse; ListGitProvidersFromConfig: Types.ListGitProvidersFromConfigResponse; ListDockerRegistriesFromConfig: Types.ListDockerRegistriesFromConfigResponse; @@ -188,6 +187,7 @@ export type WriteResponses = { SetUsersInUserGroup: Types.UserGroup; // ==== PERMISSIONS ==== + UpdateUserAdmin: Types.UpdateUserAdminResponse; UpdateUserBasePermissions: Types.UpdateUserBasePermissionsResponse; UpdatePermissionOnResourceType: Types.UpdatePermissionOnResourceTypeResponse; UpdatePermissionOnTarget: Types.UpdatePermissionOnTargetResponse; @@ -258,6 +258,7 @@ export type WriteResponses = { DeleteResourceSync: Types.ResourceSync; UpdateResourceSync: Types.ResourceSync; RefreshResourceSyncPending: Types.ResourceSync; + CommitSync: Types.ResourceSync; CreateSyncWebhook: Types.CreateSyncWebhookResponse; DeleteSyncWebhook: Types.DeleteSyncWebhookResponse; @@ -267,6 +268,7 @@ export type WriteResponses = { DeleteStack: Types.Stack; UpdateStack: Types.Stack; RenameStack: Types.Update; + WriteStackFileContents: Types.Update; RefreshStackCache: Types.NoData; CreateStackWebhook: Types.CreateStackWebhookResponse; DeleteStackWebhook: Types.DeleteStackWebhookResponse; diff --git a/client/core/ts/src/types.ts b/client/core/ts/src/types.ts index 99c831834..59fa19743 100644 --- a/client/core/ts/src/types.ts +++ b/client/core/ts/src/types.ts @@ -37,6 +37,11 @@ export type UserConfig = | { type: "Github", data: { github_id: string; avatar: string; +}} + /** User that logs in via Oidc provider */ + | { type: "Oidc", data: { + provider: string; + user_id: string; }} /** Non-human managed user, can have it's own permissions / api keys */ | { type: "Service", data: { @@ -68,6 +73,8 @@ export interface User { username: string; /** Whether user is enabled / able to access the api. 
*/ enabled?: boolean; + /** Can give / take other users admin priviledges. */ + super_admin?: boolean; /** Whether the user has global admin permissions. */ admin?: boolean; /** Whether the user has permission to create servers. */ @@ -100,16 +107,16 @@ export enum SeverityLevel { /** Used to reference a specific resource across all resource types */ export type ResourceTarget = | { type: "System", id: string } - | { type: "Build", id: string } - | { type: "Builder", id: string } - | { type: "Deployment", id: string } | { type: "Server", id: string } + | { type: "Stack", id: string } + | { type: "Deployment", id: string } + | { type: "Build", id: string } | { type: "Repo", id: string } - | { type: "Alerter", id: string } | { type: "Procedure", id: string } + | { type: "Builder", id: string } + | { type: "Alerter", id: string } | { type: "ServerTemplate", id: string } - | { type: "ResourceSync", id: string } - | { type: "Stack", id: string }; + | { type: "ResourceSync", id: string }; /** The variants of data related to the alert. */ export type AlertData = @@ -283,8 +290,10 @@ export interface Resource { export type AlerterEndpoint = /** Send alert serialized to JSON to an http endpoint. */ | { type: "Custom", params: CustomAlerterEndpoint } - /** Send alert to a slack app */ - | { type: "Slack", params: SlackAlerterEndpoint }; + /** Send alert to a Slack app */ + | { type: "Slack", params: SlackAlerterEndpoint } + /** Send alert to a Discord app */ + | { type: "Discord", params: DiscordAlerterEndpoint }; export interface AlerterConfig { /** Whether the alerter is enabled */ @@ -350,22 +359,20 @@ export interface SystemCommand { command?: string; } -/** Configuration for the registry to push the built image to. 
*/ -export type ImageRegistry = - /** Don't push the image to any registry */ - | { type: "None", params: NoData } - /** Push the image to a standard image registry (any domain) */ - | { type: "Standard", params: StandardRegistryConfig } +/** Configuration for an image registry */ +export interface ImageRegistryConfig { /** - * Push the image to Aws Elastic Container Registry - * - * The string held in 'params' should match a label of an `aws_ecr_registry` in the core config. + * Specify the registry provider domain, eg `docker.io`. + * If not provided, will not push to any registry. */ - | { type: "AwsEcr", params: string }; - -export interface EnvironmentVar { - variable: string; - value: string; + domain?: string; + /** Specify an account to use with the registry. */ + account?: string; + /** + * Optional. Specify an organization to push the image under. + * Empty string means no organization. + */ + organization?: string; } /** The build configuration. */ @@ -432,7 +439,7 @@ export interface BuildConfig { /** The optional command run after repo clone and before docker build. */ pre_build?: SystemCommand; /** Configuration for the registry to push the built image to. */ - image_registry?: ImageRegistry; + image_registry?: ImageRegistryConfig; /** * The path of the docker build context relative to the root of the repo. * Default: "." (the root of the repo). @@ -451,12 +458,12 @@ export interface BuildConfig { * * These values are visible in the final image by running `docker inspect`. */ - build_args?: EnvironmentVar[] | string; + build_args?: string; /** * Secret arguments. * * These values remain hidden in the final image by using - * docker secret mounts. See ``. + * docker secret mounts. See [https://docs.docker.com/build/building/secrets]. * * The values can be used in RUN commands: * ``` @@ -464,9 +471,9 @@ export interface BuildConfig { * SECRET_KEY=$(cat /run/secrets/SECRET_KEY) ... 
* ``` */ - secret_args?: EnvironmentVar[] | string; + secret_args?: string; /** Docker labels */ - labels?: EnvironmentVar[] | string; + labels?: string; } export interface BuildInfo { @@ -595,18 +602,6 @@ export enum TerminationSignal { SigTerm = "SIGTERM", } -export interface TerminationSignalLabel { - signal: TerminationSignal; - label: string; -} - -export interface Conversion { - /** reference on the server. */ - local: string; - /** reference in the container. */ - container: string; -} - export interface DeploymentConfig { /** The id of server the deployment is deployed on. */ server_id?: string; @@ -659,22 +654,22 @@ export interface DeploymentConfig { * Labels attached to various termination signal options. * Used to specify different shutdown functionality depending on the termination signal. */ - term_signal_labels: TerminationSignalLabel[]; + term_signal_labels?: string; /** * The container port mapping. * Irrelevant if container network is `host`. * Maps ports on host to ports on container. */ - ports?: Conversion[]; + ports?: string; /** * The container volume mapping. * Maps files / folders on host to files / folders in container. */ - volumes?: Conversion[]; + volumes?: string; /** The environment variables passed to the container. */ - environment?: EnvironmentVar[] | string; + environment?: string; /** The docker labels given to the container. */ - labels?: EnvironmentVar[] | string; + labels?: string; } export type Deployment = Resource; @@ -801,8 +796,6 @@ export interface DockerRegistry { export type ListDockerRegistriesFromConfigResponse = DockerRegistry[]; -export type ListAwsEcrLabelsResponse = string[]; - export type ListSecretsResponse = string[]; export type UserTarget = @@ -1064,7 +1057,7 @@ export interface RepoConfig { * * If it is empty, no file will be written. */ - environment?: EnvironmentVar[] | string; + environment?: string; /** * The name of the written environment file before `docker compose up`. * Relative to the repo root. 
@@ -2307,6 +2300,8 @@ export type ListFullServerTemplatesResponse = ServerTemplate[]; export interface StackConfig { /** The server to deploy the stack on. */ server_id?: string; + /** Configure quick links that are displayed in the resource header */ + links?: string[]; /** * Optionally specify a custom project name for the stack. * If this is empty string, it will default to the stack name. @@ -2315,34 +2310,6 @@ export interface StackConfig { * Note. Can be used to import pre-existing stacks. */ project_name?: string; - /** - * Directory to change to (`cd`) before running `docker compose up -d`. - * Default: `./` (the repo root) - */ - run_directory: string; - /** - * Add paths to compose files, relative to the run path. - * If this is empty, will use file `compose.yaml`. - */ - file_paths?: string[]; - /** - * If this is checked, the stack will source the files on the host. - * Use `run_directory` and `file_paths` to specify the path on the host. - * This is useful for those who wish to setup their files on the host using SSH or similar, - * rather than defining the contents in UI or in a git repo. - */ - files_on_host?: boolean; - /** Used with `registry_account` to login to a registry before docker compose up. */ - registry_provider?: string; - /** Used with `registry_provider` to login to a registry before docker compose up. */ - registry_account?: string; - /** - * The extra arguments to pass after `docker compose up -d`. - * If empty, no extra arguments will be passed. - */ - extra_args?: string[]; - /** Whether to skip secret interpolation into the stack environment variables. */ - skip_secret_interp?: boolean; /** * Whether to automatically `compose pull` before redeploying stack. * Ensured latest images are deployed. @@ -2354,18 +2321,28 @@ export interface StackConfig { * Combine with build_extra_args for custom behaviors. */ run_build?: boolean; + /** Whether to skip secret interpolation into the stack environment variables. 
*/ + skip_secret_interp?: boolean; /** - * The extra arguments to pass after `docker compose build`. - * If empty, no extra build arguments will be passed. - * Only used if `run_build: true` + * If this is checked, the stack will source the files on the host. + * Use `run_directory` and `file_paths` to specify the path on the host. + * This is useful for those who wish to setup their files on the host using SSH or similar, + * rather than defining the contents in UI or in a git repo. */ - build_extra_args?: string[]; + files_on_host?: boolean; + /** Directory to change to (`cd`) before running `docker compose up -d`. */ + run_directory?: string; /** - * Ignore certain services declared in the compose file when checking - * the stack status. For example, an init service might be exited, but the - * stack should be healthy. This init service should be in `ignore_services` + * Add paths to compose files, relative to the run path. + * If this is empty, will use file `compose.yaml`. */ - ignore_services?: string[]; + file_paths?: string[]; + /** + * The name of the written environment file before `docker compose up`. + * Relative to the repo root. + * Default: .env + */ + env_file_path: string; /** The git provider domain. Default: github.com */ git_provider: string; /** @@ -2397,8 +2374,27 @@ export interface StackConfig { webhook_secret?: string; /** Whether to send StackStateChange alerts for this stack. */ send_alerts: boolean; - /** Configure quick links that are displayed in the resource header */ - links?: string[]; + /** Used with `registry_account` to login to a registry before docker compose up. */ + registry_provider?: string; + /** Used with `registry_provider` to login to a registry before docker compose up. */ + registry_account?: string; + /** + * The extra arguments to pass after `docker compose up -d`. + * If empty, no extra arguments will be passed. + */ + extra_args?: string[]; + /** + * The extra arguments to pass after `docker compose build`. 
+ * If empty, no extra build arguments will be passed. + * Only used if `run_build: true` + */ + build_extra_args?: string[]; + /** + * Ignore certain services declared in the compose file when checking + * the stack status. For example, an init service might be exited, but the + * stack should be healthy. This init service should be in `ignore_services` + */ + ignore_services?: string[]; /** * The contents of the file directly, for management in the UI. * If this is empty, it will fall back to checking git config for @@ -2412,16 +2408,10 @@ export interface StackConfig { * * If it is empty, no file will be written. */ - environment?: EnvironmentVar[] | string; - /** - * The name of the written environment file before `docker compose up`. - * Relative to the repo root. - * Default: .env - */ - env_file_path: string; + environment?: string; } -export interface ComposeContents { +export interface FileContents { /** The path of the file on the host */ path: string; /** The contents of the file */ @@ -2470,7 +2460,7 @@ export interface StackInfo { /** Deployed commit message, or null. Only for repo based stacks */ deployed_message?: string; /** The deployed compose file contents. This is updated whenever Komodo successfully deploys the stack. */ - deployed_contents?: ComposeContents[]; + deployed_contents?: FileContents[]; /** * The deployed service names. * This is updated whenever it is empty, or deployed contents is updated. @@ -2486,9 +2476,9 @@ export interface StackInfo { * This is updated whenever Komodo refreshes the stack cache. * It will be empty if the file is defined directly in the stack config. */ - remote_contents?: ComposeContents[]; + remote_contents?: FileContents[]; /** If there was an error in getting the remote contents, it will be here. 
*/ - remote_errors?: ComposeContents[]; + remote_errors?: FileContents[]; /** Latest commit hash, or null */ latest_hash?: string; /** Latest commit message, or null */ @@ -2542,6 +2532,10 @@ export enum StackState { export interface StackListItemInfo { /** The server that stack is deployed on. */ server_id: string; + /** Whether stack is using files on host mode */ + files_on_host: boolean; + /** Whether stack has file contents defined. */ + file_contents: boolean; /** The git provider domain */ git_provider: string; /** The configured repo */ @@ -2617,17 +2611,6 @@ export interface ResourceSyncConfig { * for the configured git provider. */ git_account?: string; - /** - * The path of the resource file(s) to sync, relative to the repo root. - * Can be a specific file, or a directory containing multiple files / folders. - * See [https://komo.do/docs/sync-resources](https://komo.do/docs/sync-resources) for more information. - */ - resource_path: string; - /** - * Whether sync should delete resources - * not declared in the resource files - */ - delete?: boolean; /** Whether incoming webhooks actually trigger action. */ webhook_enabled: boolean; /** @@ -2635,33 +2618,100 @@ export interface ResourceSyncConfig { * If its an empty string, use the default secret from the config. */ webhook_secret?: string; + /** + * Files are available on the Komodo Core host. + * Specify the file / folder with [ResourceSyncConfig::resource_path]. + */ + files_on_host?: boolean; + /** + * The path of the resource file(s) to sync. + * - If Files on Host, this is relative to the configured `sync_directory` in core config. + * - If Git Repo based, this is relative to the root of the repo. + * Can be a specific file, or a directory containing multiple files / folders. + * See [https://komo.do/docs/sync-resources](https://komo.do/docs/sync-resources) for more information. 
+ */ + resource_path: string; + /** + * Enable "pushes" to the file, + * which exports resources matching tags to single file. + * - If using `files_on_host`, it is stored in the file_contents, which must point to a .toml file path (it will be created if it doesn't exist). + * - If using `file_contents`, it is stored in the database. + * When using this, "delete" mode is always enabled. + */ + managed?: boolean; + /** + * Whether sync should delete resources + * not declared in the resource files + */ + delete?: boolean; + /** + * When using `managed` resource sync, will only export resources + * matching all of the given tags. If none, will match all resources. + */ + match_tags?: string[]; + /** Manage the file contents in the UI. */ + file_contents?: string; } -export type PendingSyncUpdatesData = - | { type: "Ok", data: PendingSyncUpdatesDataOk } - | { type: "Err", data: PendingSyncUpdatesDataErr }; +export type DiffData = + /** Resource will be created */ + | { type: "Create", data: { + /** The proposed resource to create in TOML */ + proposed: string; +}} + | { type: "Update", data: { + /** The proposed TOML */ + proposed: string; + /** The current TOML */ + current: string; +}} + | { type: "Delete", data: { + /** The current TOML of the resource to delete */ + current: string; +}}; -export interface PendingSyncUpdates { - /** The commit hash which produced these pending updates */ - hash?: string; - /** The commit message which produced these pending updates */ - message?: string; +export interface ResourceDiff { /** - * The data associated with the sync. Either Ok containing diffs, - * or Err containing an error message + * The resource target. + * The target id will be empty if "Create" ResourceDiffType. */ - data: PendingSyncUpdatesData; + target: ResourceTarget; + /** The data associated with the diff. 
*/ + data: DiffData; +} + +export interface SyncDeployUpdate { + /** Resources to deploy */ + to_deploy: number; + /** A readable log of all the changes to be applied */ + log: string; } export interface ResourceSyncInfo { /** Unix timestamp of last applied sync */ - last_sync_ts: I64; + last_sync_ts?: I64; /** Short commit hash of last applied sync */ - last_sync_hash: string; + last_sync_hash?: string; /** Commit message of last applied sync */ - last_sync_message: string; - /** Readable logs of pending updates */ - pending: PendingSyncUpdates; + last_sync_message?: string; + /** The list of pending updates to resources */ + resource_updates?: ResourceDiff[]; + /** The list of pending updates to variables */ + variable_updates?: DiffData[]; + /** The list of pending updates to user groups */ + user_group_updates?: DiffData[]; + /** The list of pending deploys to resources. */ + pending_deploy?: SyncDeployUpdate; + /** If there is an error, it will be stored here */ + pending_error?: string; + /** The commit hash which produced these pending updates. */ + pending_hash?: string; + /** The commit message which produced these pending updates. */ + pending_message?: string; + /** The current sync files */ + remote_contents?: FileContents[]; + /** Any read errors in files by path */ + remote_errors?: FileContents[]; } export type ResourceSync = Resource; @@ -2684,16 +2734,24 @@ export enum ResourceSyncState { export interface ResourceSyncListItemInfo { /** Unix timestamp of last sync, or 0 */ last_sync_ts: I64; - /** Short commit hash of last sync, or empty string */ - last_sync_hash: string; - /** Commit message of last sync, or empty string */ - last_sync_message: string; - /** The git provider domain */ + /** Whether sync is `files_on_host` mode. */ + files_on_host: boolean; + /** Whether sync has file contents defined. */ + file_contents: boolean; + /** Whether sync has `managed` mode enabled. */ + managed: boolean; + /** Resource path to the files. 
*/ + resource_path: string; + /** The git provider domain. */ git_provider: string; /** The Github repo used as the source of the sync resources */ repo: string; /** The branch of the repo */ branch: string; + /** Short commit hash of last sync, or empty string */ + last_sync_hash?: string; + /** Commit message of last sync, or empty string */ + last_sync_message?: string; /** State of the sync. Reflects whether most recent sync successful. */ state: ResourceSyncState; } @@ -2763,51 +2821,11 @@ export enum Operation { PruneDockerBuilders = "PruneDockerBuilders", PruneBuildx = "PruneBuildx", PruneSystem = "PruneSystem", - CreateBuild = "CreateBuild", - UpdateBuild = "UpdateBuild", - DeleteBuild = "DeleteBuild", - RunBuild = "RunBuild", - CancelBuild = "CancelBuild", - CreateBuilder = "CreateBuilder", - UpdateBuilder = "UpdateBuilder", - DeleteBuilder = "DeleteBuilder", - CreateDeployment = "CreateDeployment", - UpdateDeployment = "UpdateDeployment", - DeleteDeployment = "DeleteDeployment", - Deploy = "Deploy", - StartDeployment = "StartDeployment", - RestartDeployment = "RestartDeployment", - PauseDeployment = "PauseDeployment", - UnpauseDeployment = "UnpauseDeployment", - StopDeployment = "StopDeployment", - DestroyDeployment = "DestroyDeployment", - RenameDeployment = "RenameDeployment", - CreateRepo = "CreateRepo", - UpdateRepo = "UpdateRepo", - DeleteRepo = "DeleteRepo", - CloneRepo = "CloneRepo", - PullRepo = "PullRepo", - BuildRepo = "BuildRepo", - CancelRepoBuild = "CancelRepoBuild", - CreateAlerter = "CreateAlerter", - UpdateAlerter = "UpdateAlerter", - DeleteAlerter = "DeleteAlerter", - CreateProcedure = "CreateProcedure", - UpdateProcedure = "UpdateProcedure", - DeleteProcedure = "DeleteProcedure", - RunProcedure = "RunProcedure", - CreateServerTemplate = "CreateServerTemplate", - UpdateServerTemplate = "UpdateServerTemplate", - DeleteServerTemplate = "DeleteServerTemplate", - LaunchServer = "LaunchServer", - CreateResourceSync = "CreateResourceSync", - 
UpdateResourceSync = "UpdateResourceSync", - DeleteResourceSync = "DeleteResourceSync", - RunSync = "RunSync", CreateStack = "CreateStack", UpdateStack = "UpdateStack", RenameStack = "RenameStack", DeleteStack = "DeleteStack", + WriteStackContents = "WriteStackContents", RefreshStackCache = "RefreshStackCache", DeployStack = "DeployStack", StartStack = "StartStack", @@ -2821,6 +2839,48 @@ export enum Operation { PauseStackService = "PauseStackService", UnpauseStackService = "UnpauseStackService", StopStackService = "StopStackService", + CreateDeployment = "CreateDeployment", + UpdateDeployment = "UpdateDeployment", + DeleteDeployment = "DeleteDeployment", + Deploy = "Deploy", + StartDeployment = "StartDeployment", + RestartDeployment = "RestartDeployment", + PauseDeployment = "PauseDeployment", + UnpauseDeployment = "UnpauseDeployment", + StopDeployment = "StopDeployment", + DestroyDeployment = "DestroyDeployment", + RenameDeployment = "RenameDeployment", + CreateBuild = "CreateBuild", + UpdateBuild = "UpdateBuild", + DeleteBuild = "DeleteBuild", + RunBuild = "RunBuild", + CancelBuild = "CancelBuild", + CreateRepo = "CreateRepo", + UpdateRepo = "UpdateRepo", + DeleteRepo = "DeleteRepo", + CloneRepo = "CloneRepo", + PullRepo = "PullRepo", + BuildRepo = "BuildRepo", + CancelRepoBuild = "CancelRepoBuild", + CreateProcedure = "CreateProcedure", + UpdateProcedure = "UpdateProcedure", + DeleteProcedure = "DeleteProcedure", + RunProcedure = "RunProcedure", + CreateBuilder = "CreateBuilder", + UpdateBuilder = "UpdateBuilder", + DeleteBuilder = "DeleteBuilder", + CreateAlerter = "CreateAlerter", + UpdateAlerter = "UpdateAlerter", + DeleteAlerter = "DeleteAlerter", + CreateServerTemplate = "CreateServerTemplate", + UpdateServerTemplate = "UpdateServerTemplate", + DeleteServerTemplate = "DeleteServerTemplate", + LaunchServer = "LaunchServer", + CreateResourceSync = "CreateResourceSync", + UpdateResourceSync = "UpdateResourceSync", + DeleteResourceSync = "DeleteResourceSync", 
+ CommitSync = "CommitSync", + RunSync = "RunSync", CreateVariable = "CreateVariable", UpdateVariableValue = "UpdateVariableValue", DeleteVariable = "DeleteVariable", @@ -3009,6 +3069,8 @@ export type UpdatePermissionOnResourceTypeResponse = NoData; export type UpdateUserBasePermissionsResponse = NoData; +export type UpdateUserAdminResponse = NoData; + export type CreateProcedureResponse = Procedure; export type CopyProcedureResponse = Procedure; @@ -3211,6 +3273,8 @@ export interface GetLoginOptionsResponse { github: boolean; /** Whether google login is enabled. */ google: boolean; + /** Whether OIDC login is enabled. */ + oidc: boolean; /** Whether user registration (Sign Up) has been disabled */ registration_disabled: boolean; } @@ -4097,10 +4161,17 @@ export interface GetDeploymentsSummary { /** Response for [GetDeploymentsSummary]. */ export interface GetDeploymentsSummaryResponse { + /** The total number of Deployments */ total: I64; + /** The number of Deployments with Running state */ running: I64; + /** The number of Deployments with Stopped or Paused state */ stopped: I64; + /** The number of Deployments with NotDeployed state */ not_deployed: I64; + /** The number of Deployments with Restarting or Dead or Created (other) state */ + unhealthy: I64; + /** The number of Deployments with Unknown state */ unknown: I64; } @@ -4145,6 +4216,10 @@ export interface GetCoreInfoResponse { transparent_mode: boolean; /** Whether UI write access should be disabled */ ui_write_disabled: boolean; + /** Whether non admins can create resources */ + disable_non_admin_create: boolean; + /** Whether confirm dialog should be disabled */ + disable_confirm_dialog: boolean; /** The repo owners for which github webhook management api is available */ github_webhook_owners: string[]; } @@ -4183,13 +4258,6 @@ export interface ListDockerRegistriesFromConfig { target?: ResourceTarget; } -/** - * List the available aws ecr config labels from the core config. 
- * Response: [ListAwsEcrLabelsResponse]. - */ -export interface ListAwsEcrLabels { -} - /** * List the available secrets from the core config. * Response: [ListSecretsResponse]. @@ -4817,22 +4885,12 @@ export interface GetStacksSummaryResponse { total: number; /** The number of stacks with Running state. */ running: number; - /** The number of stacks with Paused state. */ - paused: number; - /** The number of stacks with Stopped state. */ + /** The number of stacks with Stopped or Paused state. */ stopped: number; - /** The number of stacks with Restarting state. */ - restarting: number; - /** The number of stacks with Dead state. */ - dead: number; - /** The number of stacks with Created state. */ - created: number; - /** The number of stacks with Removing state. */ - removing: number; - /** The number of stacks with Unhealthy state. */ - unhealthy: number; /** The number of stacks with Down state. */ down: number; + /** The number of stacks with Unhealthy or Restarting or Dead or Created or Removing state. */ + unhealthy: number; /** The number of stacks with Unknown state. */ unknown: number; } @@ -5464,6 +5522,17 @@ export interface UpdateUserBasePermissions { create_builds?: boolean; } +/** + * **Super Admin only.** Update's whether a user is admin. + * Response: [NoData]. + */ +export interface UpdateUserAdmin { + /** The target user. */ + user_id: string; + /** Whether user should be admin. */ + admin: boolean; +} + /** Create a procedure. Response: [Procedure]. */ export interface CreateProcedure { /** The name given to newly created build. */ @@ -5809,6 +5878,19 @@ export interface RenameStack { name: string; } +/** Rename the stack at id to the given name. Response: [Update]. */ +export interface WriteStackFileContents { + /** The name or id of the Stack to write the contents to. */ + stack: string; + /** + * The file path relative to the stack run directory, + * or absolute path. + */ + file_path: string; + /** The contents to write. 
*/ + contents: string; +} + /** * Trigger a refresh of the cached compose file contents. * Refreshes: @@ -5892,12 +5974,22 @@ export interface UpdateResourceSync { config: _PartialResourceSyncConfig; } -/** Trigger a refresh of the computed diff logs for view. */ +/** Trigger a refresh of the computed diff logs for view. Response: [ResourceSync] */ export interface RefreshResourceSyncPending { /** Id or name */ sync: string; } +/** + * Commits matching resources updated configuration to the target resource sync. Response: [Update] + * + * Note. Will fail if the Sync is not `managed`. + */ +export interface CommitSync { + /** Id or name */ + sync: string; +} + export enum SyncWebhookAction { Refresh = "Refresh", Sync = "Sync", @@ -6069,29 +6161,22 @@ export interface DeleteVariable { name: string; } -/** Configuration for a custom alerter endpoint. */ +/** Configuration for a Custom alerter endpoint. */ export interface CustomAlerterEndpoint { /** The http/s endpoint to send the POST to */ url: string; } -/** Configuration for a slack alerter. */ +/** Configuration for a Slack alerter. */ export interface SlackAlerterEndpoint { - /** The slack app url */ + /** The Slack app webhook url */ url: string; } -/** Configuration for a standard image registry */ -export interface StandardRegistryConfig { - /** Specify the registry provider domain. Default: `docker.io` */ - domain: string; - /** Specify an account to use with the registry. */ - account?: string; - /** - * Optional. Specify an organization to push the image under. - * Empty string means no organization. - */ - organization?: string; +/** Configuration for a Discord alerter. */ +export interface DiscordAlerterEndpoint { + /** The Discord webhook url */ + url: string; } /** Configuration for a Komodo Server Builder. */ @@ -6113,31 +6198,34 @@ export interface AwsBuilderConfig { * Default: `8120` */ port: number; + use_https: boolean; /** * The EC2 ami id to create. 
* The ami should have the periphery client configured to start on startup, * and should have the necessary github / dockerhub accounts configured. */ - ami_id: string; + ami_id?: string; /** The subnet id to create the instance in. */ - subnet_id: string; + subnet_id?: string; /** The key pair name to attach to the instance */ - key_pair_name: string; + key_pair_name?: string; /** * Whether to assign the instance a public IP address. * Likely needed for the instance to be able to reach the open internet. */ - assign_public_ip: boolean; + assign_public_ip?: boolean; /** * Whether core should use the public IP address to communicate with periphery on the builder. * If false, core will communicate with the instance using the private IP. */ - use_public_ip: boolean; + use_public_ip?: boolean; /** * The security group ids to attach to the instance. * This should include a security group to allow core inbound access to the periphery port. */ - security_group_ids: string[]; + security_group_ids?: string[]; + /** The user data to deploy the instance with. */ + user_data?: string; /** Which git providers are available on the AMI */ git_providers?: GitProvider[]; /** Which docker registries are available on the AMI. */ @@ -6146,6 +6234,18 @@ export interface AwsBuilderConfig { secrets?: string[]; } +export interface Conversion { + /** reference on the server. */ + local: string; + /** reference in the container. */ + container: string; +} + +export interface TerminationSignalLabel { + signal: TerminationSignal; + label: string; +} + export interface NameAndId { name: string; id: string; @@ -6169,6 +6269,11 @@ export interface Port { Type?: PortTypeEnum; } +export interface EnvironmentVar { + variable: string; + value: string; +} + export interface LatestCommit { hash: string; message: string; @@ -6178,13 +6283,13 @@ export interface CloneArgs { /** Resource name (eg Build name, Repo name) */ name: string; /** Git provider domain. 
Default: `github.com` */ - provider?: string; + provider: string; /** Use https (vs http). */ https: boolean; /** Full repo identifier. / */ repo?: string; /** Git Branch. Default: `main` */ - branch?: string; + branch: string; /** Specific commit hash. Optional */ commit?: string; /** The clone destination path */ @@ -6255,12 +6360,14 @@ export interface AwsServerTemplateConfig { * Default: `8120` */ port: number; - /** The user data to deploy the instance with. */ - user_data: string; + /** Whether Periphery will be running on https */ + use_https: boolean; /** The security groups to give to the instance. */ security_group_ids?: string[]; /** Specify the EBS volumes to attach. */ volumes: AwsVolume[]; + /** The user data to deploy the instance with. */ + user_data: string; } export enum HetznerDatacenter { @@ -6332,39 +6439,41 @@ export interface HetznerVolumeSpecs { /** Hetzner server config. */ export interface HetznerServerTemplateConfig { /** ID or name of the Image the Server is created from */ - image?: string; + image: string; /** ID or name of Datacenter to create Server in */ datacenter?: HetznerDatacenter; - /** Network IDs which should be attached to the Server private network interface at the creation time */ - private_network_ids?: I64[]; /** * ID of the Placement Group the server should be in, * Or 0 to not use placement group. */ placement_group?: I64; - /** Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached. */ - enable_public_ipv4?: boolean; - /** Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached. */ - enable_public_ipv6?: boolean; - /** The firewalls to attach to the instance */ - firewall_ids?: I64[]; /** ID or name of the Server type this Server should be created with */ server_type?: HetznerServerType; /** SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time */ ssh_keys?: string[]; - /** Cloud-Init user data to use during Server creation. 
This field is limited to 32KiB. */ - user_data: string; + /** Network IDs which should be attached to the Server private network interface at the creation time */ + private_network_ids?: I64[]; + /** Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached. */ + enable_public_ipv4?: boolean; + /** Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached. */ + enable_public_ipv6?: boolean; /** Connect to the instance using it's public ip. */ use_public_ip?: boolean; - /** Labels for the server */ - labels?: Record; - /** Specs for volumes to attach */ - volumes?: HetznerVolumeSpecs[]; /** * The port periphery will be running on in AMI. * Default: `8120` */ port: number; + /** Whether Periphery will be running on https */ + use_https: boolean; + /** The firewalls to attach to the instance */ + firewall_ids?: I64[]; + /** Labels for the server */ + labels?: Record; + /** Specs for volumes to attach */ + volumes?: HetznerVolumeSpecs[]; + /** Cloud-Init user data to use during Server creation. This field is limited to 32KiB. 
*/ + user_data: string; } export interface ComposeService { @@ -6387,57 +6496,6 @@ export interface TotalDiskUsage { total_gb: number; } -export interface SyncDeployUpdate { - /** Resources to deploy */ - to_deploy: number; - /** A readable log of all the changes to be applied */ - log: string; -} - -export interface SyncUpdate { - /** Resources to create */ - to_create: number; - /** Resources to update */ - to_update: number; - /** Resources to delete */ - to_delete: number; - /** A readable log of all the changes to be applied */ - log: string; -} - -export interface PendingSyncUpdatesDataOk { - /** Readable log of any deploy actions that will be performed */ - deploy_updates?: SyncDeployUpdate; - /** Readable log of any pending deployment updates */ - deployment_updates?: SyncUpdate; - /** Readable log of any pending deployment updates */ - stack_updates?: SyncUpdate; - /** Readable log of any pending server updates */ - server_updates?: SyncUpdate; - /** Readable log of any pending build updates */ - build_updates?: SyncUpdate; - /** Readable log of any pending repo updates */ - repo_updates?: SyncUpdate; - /** Readable log of any pending procedure updates */ - procedure_updates?: SyncUpdate; - /** Readable log of any pending alerter updates */ - alerter_updates?: SyncUpdate; - /** Readable log of any pending builder updates */ - builder_updates?: SyncUpdate; - /** Readable log of any pending server template updates */ - server_template_updates?: SyncUpdate; - /** Readable log of any pending resource sync updates */ - resource_sync_updates?: SyncUpdate; - /** Readable log of any pending variable updates */ - variable_updates?: SyncUpdate; - /** Readable log of any pending user group updates */ - user_group_updates?: SyncUpdate; -} - -export interface PendingSyncUpdatesDataErr { - message: string; -} - export type AuthRequest = | { type: "GetLoginOptions", params: GetLoginOptions } | { type: "CreateLocalUser", params: CreateLocalUser } @@ -6494,7 +6552,6 @@ 
export type ExecuteRequest = export type ReadRequest = | { type: "GetVersion", params: GetVersion } | { type: "GetCoreInfo", params: GetCoreInfo } - | { type: "ListAwsEcrLabels", params: ListAwsEcrLabels } | { type: "ListSecrets", params: ListSecrets } | { type: "ListGitProvidersFromConfig", params: ListGitProvidersFromConfig } | { type: "ListDockerRegistriesFromConfig", params: ListDockerRegistriesFromConfig } @@ -6624,6 +6681,7 @@ export type WriteRequest = | { type: "AddUserToUserGroup", params: AddUserToUserGroup } | { type: "RemoveUserFromUserGroup", params: RemoveUserFromUserGroup } | { type: "SetUsersInUserGroup", params: SetUsersInUserGroup } + | { type: "UpdateUserAdmin", params: UpdateUserAdmin } | { type: "UpdateUserBasePermissions", params: UpdateUserBasePermissions } | { type: "UpdatePermissionOnResourceType", params: UpdatePermissionOnResourceType } | { type: "UpdatePermissionOnTarget", params: UpdatePermissionOnTarget } @@ -6673,6 +6731,7 @@ export type WriteRequest = | { type: "DeleteResourceSync", params: DeleteResourceSync } | { type: "UpdateResourceSync", params: UpdateResourceSync } | { type: "RefreshResourceSyncPending", params: RefreshResourceSyncPending } + | { type: "CommitSync", params: CommitSync } | { type: "CreateSyncWebhook", params: CreateSyncWebhook } | { type: "DeleteSyncWebhook", params: DeleteSyncWebhook } | { type: "CreateStack", params: CreateStack } @@ -6680,6 +6739,7 @@ export type WriteRequest = | { type: "DeleteStack", params: DeleteStack } | { type: "UpdateStack", params: UpdateStack } | { type: "RenameStack", params: RenameStack } + | { type: "WriteStackFileContents", params: WriteStackFileContents } | { type: "RefreshStackCache", params: RefreshStackCache } | { type: "CreateStackWebhook", params: CreateStackWebhook } | { type: "DeleteStackWebhook", params: DeleteStackWebhook } @@ -6699,6 +6759,13 @@ export type WriteRequest = | { type: "UpdateDockerRegistryAccount", params: UpdateDockerRegistryAccount } | { type: 
"DeleteDockerRegistryAccount", params: DeleteDockerRegistryAccount }; +/** Configuration for the registry to push the built image to. */ +export type ImageRegistryLegacy1_14 = + /** Don't push the image to any registry */ + | { type: "None", params: NoData } + /** Push the image to a standard image registry (any domain) */ + | { type: "Standard", params: ImageRegistryConfig }; + export type WsLoginMessage = | { type: "Jwt", params: { jwt: string; diff --git a/client/periphery/rs/src/api/build.rs b/client/periphery/rs/src/api/build.rs index 240483760..219b4370d 100644 --- a/client/periphery/rs/src/api/build.rs +++ b/client/periphery/rs/src/api/build.rs @@ -1,6 +1,4 @@ -use komodo_client::entities::{ - config::core::AwsEcrConfig, update::Log, -}; +use komodo_client::entities::update::Log; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; @@ -10,8 +8,6 @@ pub struct Build { pub build: komodo_client::entities::build::Build, /// Override registry token with one sent from core. pub registry_token: Option, - /// Propogate AwsEcrConfig from core - pub aws_ecr: Option, /// Propogate any secret replacers from core interpolation. 
#[serde(default)] pub replacers: Vec<(String, String)>, diff --git a/client/periphery/rs/src/api/compose.rs b/client/periphery/rs/src/api/compose.rs index 1e23c6866..0e5c1c869 100644 --- a/client/periphery/rs/src/api/compose.rs +++ b/client/periphery/rs/src/api/compose.rs @@ -1,7 +1,7 @@ use komodo_client::entities::{ - stack::{ComposeContents, ComposeProject, Stack}, + stack::{ComposeProject, Stack}, update::Log, - SearchCombinator, + FileContents, SearchCombinator, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; @@ -30,8 +30,8 @@ pub struct GetComposeContentsOnHost { #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct GetComposeContentsOnHostResponse { - pub contents: Vec, - pub errors: Vec, + pub contents: Vec, + pub errors: Vec, } // @@ -76,6 +76,24 @@ pub struct GetComposeServiceLogSearch { // +/// Write the compose contents to the file on the host, for stacks using +/// `files_on_host`. +#[derive(Debug, Clone, Serialize, Deserialize, Request)] +#[response(Log)] +pub struct WriteComposeContentsToHost { + /// The name of the stack + pub name: String, + /// The run directory of the stack + pub run_directory: String, + /// Relative to the stack folder + run directory, + /// or absolute path. + pub file_path: String, + /// The contents to write. + pub contents: String, +} + +// + /// Rewrites the compose directory, pulls any images, takes down existing containers, /// and runs docker compose up. #[derive(Debug, Clone, Serialize, Deserialize, Request)] @@ -103,9 +121,9 @@ pub struct ComposeUpResponse { /// whether stack was successfully deployed pub deployed: bool, /// The deploy compose file contents if they could be acquired, or empty vec. 
- pub file_contents: Vec, + pub file_contents: Vec, /// The error in getting remote file contents at the path, or null - pub remote_errors: Vec, + pub remote_errors: Vec, /// If its a repo based stack, will include the latest commit hash pub commit_hash: Option, /// If its a repo based stack, will include the latest commit message diff --git a/client/periphery/rs/src/api/container.rs b/client/periphery/rs/src/api/container.rs index dfff43742..461378b0c 100644 --- a/client/periphery/rs/src/api/container.rs +++ b/client/periphery/rs/src/api/container.rs @@ -1,5 +1,4 @@ use komodo_client::entities::{ - config::core::AwsEcrConfig, deployment::Deployment, docker::container::{Container, ContainerStats}, update::Log, @@ -71,8 +70,6 @@ pub struct Deploy { pub stop_time: Option, /// Override registry token with one sent from core. pub registry_token: Option, - /// Propogate AwsEcrConfig from core - pub aws_ecr: Option, /// Propogate any secret replacers from core interpolation. #[serde(default)] pub replacers: Vec<(String, String)>, diff --git a/client/periphery/rs/src/api/git.rs b/client/periphery/rs/src/api/git.rs index a1c466bec..d18233481 100644 --- a/client/periphery/rs/src/api/git.rs +++ b/client/periphery/rs/src/api/git.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use komodo_client::entities::{ - update::Log, CloneArgs, EnvironmentVar, LatestCommit, SystemCommand, + update::Log, CloneArgs, EnvironmentVar, LatestCommit, }; use resolver_api::derive::Request; use serde::{Deserialize, Serialize}; @@ -38,17 +38,35 @@ fn default_env_file_path() -> String { #[derive(Serialize, Deserialize, Debug, Clone, Request)] #[response(RepoActionResponse)] pub struct PullRepo { - pub name: String, - pub branch: Option, - pub commit: Option, - pub path: Option, - pub on_pull: Option, + pub args: CloneArgs, #[serde(default)] pub environment: Vec, #[serde(default = "default_env_file_path")] pub env_file_path: String, #[serde(default)] pub skip_secret_interp: bool, + /// Override git token 
with one sent from core. + pub git_token: Option, + /// Propogate any secret replacers from core interpolation. + #[serde(default)] + pub replacers: Vec<(String, String)>, +} + +// + +/// Either pull or clone depending on whether it exists. +#[derive(Serialize, Deserialize, Debug, Clone, Request)] +#[response(RepoActionResponse)] +pub struct PullOrCloneRepo { + pub args: CloneArgs, + #[serde(default)] + pub environment: Vec, + #[serde(default = "default_env_file_path")] + pub env_file_path: String, + #[serde(default)] + pub skip_secret_interp: bool, + /// Override git token with one sent from core. + pub git_token: Option, /// Propogate any secret replacers from core interpolation. #[serde(default)] pub replacers: Vec<(String, String)>, diff --git a/client/periphery/rs/src/lib.rs b/client/periphery/rs/src/lib.rs index 2658f6132..f75e78f4a 100644 --- a/client/periphery/rs/src/lib.rs +++ b/client/periphery/rs/src/lib.rs @@ -8,10 +8,16 @@ use serror::deserialize_error; pub mod api; -fn http_client() -> &'static reqwest::Client { +fn periphery_http_client() -> &'static reqwest::Client { static PERIPHERY_HTTP_CLIENT: OnceLock = OnceLock::new(); - PERIPHERY_HTTP_CLIENT.get_or_init(Default::default) + PERIPHERY_HTTP_CLIENT.get_or_init(|| { + reqwest::Client::builder() + // Use to allow communication with Periphery self-signed certs. 
+ .danger_accept_invalid_certs(true) + .build() + .expect("Failed to build Periphery http client") + }) } pub struct PeripheryClient { @@ -64,7 +70,7 @@ impl PeripheryClient { tracing::trace!( "sending request | type: {req_type} | body: {request:?}" ); - let mut req = http_client() + let mut req = periphery_http_client() .post(&self.address) .json(&json!({ "type": req_type, diff --git a/compose/compose.env b/compose/compose.env index b68eb12f9..7c38166cb 100644 --- a/compose/compose.env +++ b/compose/compose.env @@ -1,40 +1,120 @@ -############################## -# KOMODO COMPOSE - VARIABLES # -############################## +################################### +# 🦎 KOMODO COMPOSE - VARIABLES 🦎 # +################################### -## These env variables can be used with all Komodo deployment options. -## Pass these variables using to the compose up command using `--env-file`. +## These compose variables can be used with all Komodo deployment options. +## Pass these variables to the compose up command using `--env-file komodo/compose.env`. +## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`, +## so you can pass any additional environment variables to Core / Periphery directly in this file as well. -## DB credentials +## 🚨 Uncomment below for arm64 support 🚨 +# COMPOSE_KOMODO_IMAGE_TAG=latest-aarch64 + +## Note: 🚨 Podman does NOT support local logging driver 🚨. See Podman options here: +## `https://docs.podman.io/en/v4.6.1/markdown/podman-run.1.html#log-driver-driver` +COMPOSE_LOGGING_DRIVER=local # Enable log rotation with the local driver. + +## DB credentials - Ignored for Sqlite DB_USERNAME=admin DB_PASSWORD=admin +#=-------------------------=# +#= Komodo Core Environment =# +#=-------------------------=# + +## Full variable list + descriptions are available here: +## 🦎 https://github.com/mbecker20/komodo/blob/main/config/core.config.toml 🦎 + +## Note. 
Secret variables also support `${VARIABLE}_FILE` syntax to pass docker compose secrets. +## Docs: https://docs.docker.com/compose/how-tos/use-secrets/#examples + ## Used for Oauth / Webhook url suggestion / Caddy reverse proxy. KOMODO_HOST=https://demo.komo.do - ## Displayed in the browser tab. KOMODO_TITLE=Komodo +## Create a server matching this address as the "first server". +## Use `https://host.docker.internal:8120` when using systemd-managed Periphery. +KOMODO_FIRST_SERVER=https://periphery:8120 +## Make all buttons just double-click, rather than the full confirmation dialog. +KOMODO_DISABLE_CONFIRM_DIALOG=false -## Secrets -KOMODO_PASSKEY=a_random_passkey # used to auth against periphery. -KOMODO_WEBHOOK_SECRET=a_random_secret # used to auth incoming webhooks. -KOMODO_JWT_SECRET=a_random_jwt_secret # used to generate jwt. +## Rate Komodo polls your servers for +## status / container status / system stats / alerting. +## Options: 1-sec, 5-sec, 15-sec, 1-min, 5-min. +## Default: 15-sec +KOMODO_MONITORING_INTERVAL="15-sec" +## Rate Komodo polls Resources for updates, +## like outdated commit hash. +## Options: 1-min, 5-min, 15-min, 30-min, 1-hr. +## Default: 5-min +KOMODO_RESOURCE_POLL_INTERVAL="5-min" -## Auth +## Used to auth against periphery. Alt: KOMODO_PASSKEY_FILE +KOMODO_PASSKEY=a_random_passkey +## Used to auth incoming webhooks. Alt: KOMODO_WEBHOOK_SECRET_FILE +KOMODO_WEBHOOK_SECRET=a_random_secret +## Used to generate jwt. Alt: KOMODO_JWT_SECRET_FILE +KOMODO_JWT_SECRET=a_random_jwt_secret + +## Enable login with username + password. KOMODO_LOCAL_AUTH=true +## Disable new user signups. KOMODO_DISABLE_USER_REGISTRATION=false +## All new logins are auto enabled +KOMODO_ENABLE_NEW_USERS=false +## Disable non-admins from creating new resources. +KOMODO_DISABLE_NON_ADMIN_CREATE=false +## Allows all users to have Read level access to all resources. +KOMODO_TRANSPARENT_MODE=false + +## Time to live for jwt tokens. 
+## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk +KOMODO_JWT_TTL="1-day" + +## OIDC Login +KOMODO_OIDC_ENABLED=false +## Must reachable from Komodo Core container +# KOMODO_OIDC_PROVIDER=https://oidc.provider.internal/application/o/komodo +## Must be reachable by users (optional if it is the same as above). +# KOMODO_OIDC_REDIRECT=https://oidc.provider.external/application/o/komodo +## Your client credentials +# KOMODO_OIDC_CLIENT_ID= # Alt: KOMODO_OIDC_CLIENT_ID_FILE +# KOMODO_OIDC_CLIENT_SECRET= # Alt: KOMODO_OIDC_CLIENT_SECRET_FILE +## Make usernames the full email. +# KOMODO_OIDC_USE_FULL_EMAIL=true + ## Github Oauth KOMODO_GITHUB_OAUTH_ENABLED=false -KOMODO_GITHUB_OAUTH_ID= -KOMODO_GITHUB_OAUTH_SECRET= +# KOMODO_GITHUB_OAUTH_ID= # Alt: KOMODO_GITHUB_OAUTH_ID_FILE +# KOMODO_GITHUB_OAUTH_SECRET= # Alt: KOMODO_GITHUB_OAUTH_SECRET_FILE + ## Google Oauth KOMODO_GOOGLE_OAUTH_ENABLED=false -KOMODO_GOOGLE_OAUTH_ID= -KOMODO_GOOGLE_OAUTH_SECRET= +# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE +# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE -## Aws -KOMODO_AWS_ACCESS_KEY_ID= -KOMODO_AWS_SECRET_ACCESS_KEY= +## Aws - Used to launch Builder instances and ServerTemplate instances. +KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE +KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE -## Hetzner -KOMODO_HETZNER_TOKEN= \ No newline at end of file +## Hetzner - Used to launch ServerTemplate instances +## Hetzner Builder not supported due to Hetzner pay-by-the-hour pricing model +KOMODO_HETZNER_TOKEN= # Alt: KOMODO_HETZNER_TOKEN_FILE + +#=------------------------------=# +#= Komodo Periphery Environment =# +#=------------------------------=# + +## Full variable list + descriptions are available here: +## 🦎 https://github.com/mbecker20/komodo/blob/main/config/periphery.config.toml 🦎 + +## Enable SSL using self signed certificates. +## Connect to Periphery at https://address:8120. 
+PERIPHERY_SSL_ENABLED=true + +## If the disk size is overreporting, can use one of these to +## whitelist / blacklist the disks to filter them, whichever is easier. +## Accepts comma separated list of paths. +## Usually whitelisting just /etc/hostname gives correct size. +PERIPHERY_INCLUDE_DISK_MOUNTS=/etc/hostname +# PERIPHERY_EXCLUDE_DISK_MOUNTS=/snap,/etc/repos \ No newline at end of file diff --git a/compose/mongo.compose.yaml b/compose/mongo.compose.yaml index c6144ba60..15fd6fe8d 100644 --- a/compose/mongo.compose.yaml +++ b/compose/mongo.compose.yaml @@ -1,6 +1,6 @@ -########################## -# KOMODO COMPOSE - MONGO # -########################## +############################### +# 🦎 KOMODO COMPOSE - MONGO 🦎 # +############################### ## This compose file will deploy: ## 1. MongoDB @@ -13,11 +13,11 @@ services: command: --quiet # suppress mongo logs a bit restart: unless-stopped logging: - driver: local # enable log rotation by default. see `https://docs.docker.com/config/containers/logging/local/` + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default - ports: - - 27017:27017 + # ports: + # - 27017:27017 volumes: - mongo-data:/data/db - mongo-config:/data/configdb @@ -26,74 +26,61 @@ services: MONGO_INITDB_ROOT_PASSWORD: ${DB_PASSWORD} core: - image: ghcr.io/mbecker20/komodo:latest - # image: ghcr.io/mbecker20/komodo:latest-aarch64 ## Use for arm support + image: ghcr.io/mbecker20/komodo:${COMPOSE_KOMODO_IMAGE_TAG:-latest} restart: unless-stopped depends_on: - mongo logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default ports: - 9120:9120 - # volumes: # Optionally mount a custom core.config.toml - # - /path/to/core.config.toml:/config/config.toml - # extra_hosts: # allows for systemd Periphery connection at "http://host.docker.internal:8120" + env_file: ./compose.env + environment: + KOMODO_DATABASE_ADDRESS: mongo:27017 + KOMODO_DATABASE_USERNAME: ${DB_USERNAME} + KOMODO_DATABASE_PASSWORD: ${DB_PASSWORD} + 
## allows for systemd Periphery connection at + ## "http://host.docker.internal:8120" + # extra_hosts: # - host.docker.internal:host-gateway - environment: # https://github.com/mbecker20/komodo/blob/main/config/core.config.toml - KOMODO_HOST: ${KOMODO_HOST} - KOMODO_TITLE: ${KOMODO_TITLE} - KOMODO_ENSURE_SERVER: http://periphery:8120 - ## Mongo - KOMODO_MONGO_ADDRESS: mongo:27017 - KOMODO_MONGO_USERNAME: ${DB_USERNAME} - KOMODO_MONGO_PASSWORD: ${DB_PASSWORD} - ## Secrets - KOMODO_PASSKEY: ${KOMODO_PASSKEY} - KOMODO_WEBHOOK_SECRET: ${KOMODO_WEBHOOK_SECRET} - KOMODO_JWT_SECRET: ${KOMODO_JWT_SECRET} - ## Auth - KOMODO_LOCAL_AUTH: ${KOMODO_LOCAL_AUTH} - KOMODO_DISABLE_USER_REGISTRATION: ${KOMODO_DISABLE_USER_REGISTRATION} - ## Github Oauth - KOMODO_GITHUB_OAUTH_ENABLED: ${KOMODO_GITHUB_OAUTH_ENABLED} - KOMODO_GITHUB_OAUTH_ID: ${KOMODO_GITHUB_OAUTH_ID} - KOMODO_GITHUB_OAUTH_SECRET: ${KOMODO_GITHUB_OAUTH_SECRET} - ## Google Oauth - KOMODO_GOOGLE_OAUTH_ENABLED: ${KOMODO_GOOGLE_OAUTH_ENABLED} - KOMODO_GOOGLE_OAUTH_ID: ${KOMODO_GOOGLE_OAUTH_ID} - KOMODO_GOOGLE_OAUTH_SECRET: ${KOMODO_GOOGLE_OAUTH_SECRET} - ## Aws - KOMODO_AWS_ACCESS_KEY_ID: ${KOMODO_AWS_ACCESS_KEY_ID} - KOMODO_AWS_SECRET_ACCESS_KEY: ${KOMODO_AWS_SECRET_ACCESS_KEY} - ## Hetzner - KOMODO_HETZNER_TOKEN: ${KOMODO_HETZNER_TOKEN} + ## Optionally mount a custom core.config.toml + # volumes: + # - /path/to/core.config.toml:/config/config.toml ## Deploy Periphery container using this block, - ## or deploy the Periphery binary with systemd using https://github.com/mbecker20/komodo/tree/main/scripts + ## or deploy the Periphery binary with systemd using + ## https://github.com/mbecker20/komodo/tree/main/scripts periphery: - image: ghcr.io/mbecker20/periphery:latest - # image: ghcr.io/mbecker20/periphery:latest-aarch64 # Use for arm support + image: ghcr.io/mbecker20/periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest} + restart: unless-stopped logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - 
default + env_file: ./compose.env volumes: + ## Mount external docker socket - /var/run/docker.sock:/var/run/docker.sock - - repos:/etc/komodo/repos # manage repos in a docker volume, or change it to an accessible host directory. - - stacks:/etc/komodo/stacks # manage stack files in a docker volume, or change it to an accessible host directory. - environment: - # If the disk size is overreporting, can use one of these to - # whitelist / blacklist the disks to filter them, whichever is easier. - # Accepts comma separated list of paths. - # Usually whitelisting just /etc/hostname gives correct size. - PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/hostname - # PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap,/etc/repos + ## Allow Periphery to see processes outside of container + - /proc:/proc + ## use self signed certs in docker volume, + ## or mount your own signed certs. + - ssl-certs:/etc/komodo/ssl + ## manage repos in a docker volume, + ## or change it to an accessible host directory. + - repos:/etc/komodo/repos + ## manage stack files in a docker volume, + ## or change it to an accessible host directory. + - stacks:/etc/komodo/stacks + ## Optionally mount a path to store compose files + # - /path/to/compose:/host/compose volumes: mongo-data: mongo-config: + ssl-certs: repos: stacks: diff --git a/compose/postgres.compose.yaml b/compose/postgres.compose.yaml index bbe5ad0b0..fce75f600 100644 --- a/compose/postgres.compose.yaml +++ b/compose/postgres.compose.yaml @@ -1,6 +1,6 @@ -############################# -# KOMODO COMPOSE - POSTGRES # -############################# +################################## +# 🦎 KOMODO COMPOSE - POSTGRES 🦎 # +################################## ## This compose file will deploy: ## 1. 
Postgres + FerretDB Mongo adapter @@ -12,17 +12,17 @@ services: image: postgres restart: unless-stopped logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - - database - ports: - - 5432:5432 + - default + # ports: + # - 5432:5432 volumes: - pg-data:/var/lib/postgresql/data environment: - POSTGRES_USER=${DB_USERNAME} - POSTGRES_PASSWORD=${DB_PASSWORD} - - POSTGRES_DB=komodo + - POSTGRES_DB=${KOMODO_DATABASE_DB_NAME:-komodo} ferretdb: image: ghcr.io/ferretdb/ferretdb @@ -30,84 +30,70 @@ services: depends_on: - postgres logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default - - database - ports: - - 27017:27017 + # ports: + # - 27017:27017 environment: - - FERRETDB_POSTGRESQL_URL=postgres://postgres:5432/komodo + - FERRETDB_POSTGRESQL_URL=postgres://postgres:5432/${KOMODO_DATABASE_DB_NAME:-komodo} core: - image: ghcr.io/mbecker20/komodo:latest - # image: ghcr.io/mbecker20/komodo:latest-aarch64 ## Use for arm support + image: ghcr.io/mbecker20/komodo:${COMPOSE_KOMODO_IMAGE_TAG:-latest} restart: unless-stopped depends_on: - ferretdb logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default ports: - 9120:9120 - # volumes: # Optionally mount a custom core.config.toml - # - /path/to/core.config.toml:/config/config.toml - # extra_hosts: # allows for systemd Periphery connection at "http://host.docker.internal:8120" + env_file: ./compose.env + environment: + KOMODO_DATABASE_URI: mongodb://${DB_USERNAME}:${DB_PASSWORD}@ferretdb:27017/${KOMODO_DATABASE_DB_NAME:-komodo}?authMechanism=PLAIN + # KOMODO_DATABASE_URI_FILE: + ## allows for systemd Periphery connection at + ## "http://host.docker.internal:8120" + # extra_hosts: # - host.docker.internal:host-gateway - environment: # https://github.com/mbecker20/komodo/blob/main/config/core.config.toml - KOMODO_HOST: ${KOMODO_HOST} - KOMODO_TITLE: ${KOMODO_TITLE} - KOMODO_ENSURE_SERVER: http://periphery:8120 - ## Mongo - KOMODO_MONGO_URI: 
mongodb://${DB_USERNAME}:${DB_PASSWORD}@ferretdb:27017/komodo?authMechanism=PLAIN - ## Secrets - KOMODO_PASSKEY: ${KOMODO_PASSKEY} - KOMODO_WEBHOOK_SECRET: ${KOMODO_WEBHOOK_SECRET} - KOMODO_JWT_SECRET: ${KOMODO_JWT_SECRET} - ## Auth - KOMODO_LOCAL_AUTH: ${KOMODO_LOCAL_AUTH} - KOMODO_DISABLE_USER_REGISTRATION: ${KOMODO_DISABLE_USER_REGISTRATION} - ## Github Oauth - KOMODO_GITHUB_OAUTH_ENABLED: ${KOMODO_GITHUB_OAUTH_ENABLED} - KOMODO_GITHUB_OAUTH_ID: ${KOMODO_GITHUB_OAUTH_ID} - KOMODO_GITHUB_OAUTH_SECRET: ${KOMODO_GITHUB_OAUTH_SECRET} - ## Google Oauth - KOMODO_GOOGLE_OAUTH_ENABLED: ${KOMODO_GOOGLE_OAUTH_ENABLED} - KOMODO_GOOGLE_OAUTH_ID: ${KOMODO_GOOGLE_OAUTH_ID} - KOMODO_GOOGLE_OAUTH_SECRET: ${KOMODO_GOOGLE_OAUTH_SECRET} - ## Aws - KOMODO_AWS_ACCESS_KEY_ID: ${KOMODO_AWS_ACCESS_KEY_ID} - KOMODO_AWS_SECRET_ACCESS_KEY: ${KOMODO_AWS_SECRET_ACCESS_KEY} - ## Hetzner - KOMODO_HETZNER_TOKEN: ${KOMODO_HETZNER_TOKEN} + ## Optionally mount a custom core.config.toml + # volumes: + # - /path/to/core.config.toml:/config/config.toml ## Deploy Periphery container using this block, - ## or deploy the Periphery binary with systemd using https://github.com/mbecker20/komodo/tree/main/scripts + ## or deploy the Periphery binary with systemd using + ## https://github.com/mbecker20/komodo/tree/main/scripts periphery: - image: ghcr.io/mbecker20/periphery:latest - # image: ghcr.io/mbecker20/periphery:latest-aarch64 # Use for arm support + image: ghcr.io/mbecker20/periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest} + restart: unless-stopped logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default + env_file: ./compose.env volumes: + ## Mount external docker socket - /var/run/docker.sock:/var/run/docker.sock - - repos:/etc/komodo/repos # manage repos in a docker volume, or change it to an accessible host directory. - - stacks:/etc/komodo/stacks # manage stack files in a docker volume, or change it to an accessible host directory. 
- environment: - # If the disk size is overreporting, can use one of these to - # whitelist / blacklist the disks to filter them, whichever is easier. - # Accepts comma separated list of paths. - # Usually whitelisting just /etc/hostname gives correct size. - PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/hostname - # PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap,/etc/repos + ## Allow Periphery to see processes outside of container + - /proc:/proc + ## use self signed certs in docker volume, + ## or mount your own signed certs. + - ssl-certs:/etc/komodo/ssl + ## manage repos in a docker volume, + ## or change it to an accessible host directory. + - repos:/etc/komodo/repos + ## manage stack files in a docker volume, + ## or change it to an accessible host directory. + - stacks:/etc/komodo/stacks + ## Optionally mount a path to store compose files + # - /path/to/compose:/host/compose volumes: pg-data: + ssl-certs: repos: stacks: networks: - default: {} - database: {} \ No newline at end of file + default: {} \ No newline at end of file diff --git a/compose/sqlite.compose.yaml b/compose/sqlite.compose.yaml index 880a0b5c8..6adbaa157 100644 --- a/compose/sqlite.compose.yaml +++ b/compose/sqlite.compose.yaml @@ -1,6 +1,6 @@ -########################### -# KOMODO COMPOSE - SQLITE # -########################### +################################ +# 🦎 KOMODO COMPOSE - SQLITE 🦎 # +################################ ## This compose file will deploy: ## 1. Sqlite + FerretDB Mongo adapter @@ -12,10 +12,10 @@ services: image: ghcr.io/ferretdb/ferretdb restart: unless-stopped logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default - # ports: # Port closed due to lack of auth. 
+ # ports: # - 27017:27017 volumes: - sqlite-data:/state @@ -23,71 +23,58 @@ services: - FERRETDB_HANDLER=sqlite core: - image: ghcr.io/mbecker20/komodo:latest - # image: ghcr.io/mbecker20/komodo:latest-aarch64 ## Use for arm support + image: ghcr.io/mbecker20/komodo:${COMPOSE_KOMODO_IMAGE_TAG:-latest} restart: unless-stopped depends_on: - ferretdb logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default ports: - 9120:9120 - # volumes: # Optionally mount a custom core.config.toml - # - /path/to/core.config.toml:/config/config.toml - # extra_hosts: # allows for systemd Periphery connection at "http://host.docker.internal:8120" + env_file: ./compose.env + environment: + KOMODO_DATABASE_ADDRESS: ferretdb + ## allows for systemd Periphery connection at + ## "http://host.docker.internal:8120" + # extra_hosts: # - host.docker.internal:host-gateway - environment: # https://github.com/mbecker20/komodo/blob/main/config/core.config.toml - KOMODO_HOST: ${KOMODO_HOST} - KOMODO_TITLE: ${KOMODO_TITLE} - KOMODO_ENSURE_SERVER: http://periphery:8120 - ## Mongo - KOMODO_MONGO_ADDRESS: ferretdb:27017 - ## Secrets - KOMODO_PASSKEY: ${KOMODO_PASSKEY} - KOMODO_WEBHOOK_SECRET: ${KOMODO_WEBHOOK_SECRET} - KOMODO_JWT_SECRET: ${KOMODO_JWT_SECRET} - ## Auth - KOMODO_LOCAL_AUTH: ${KOMODO_LOCAL_AUTH} - KOMODO_DISABLE_USER_REGISTRATION: ${KOMODO_DISABLE_USER_REGISTRATION} - ## Github Oauth - KOMODO_GITHUB_OAUTH_ENABLED: ${KOMODO_GITHUB_OAUTH_ENABLED} - KOMODO_GITHUB_OAUTH_ID: ${KOMODO_GITHUB_OAUTH_ID} - KOMODO_GITHUB_OAUTH_SECRET: ${KOMODO_GITHUB_OAUTH_SECRET} - ## Google Oauth - KOMODO_GOOGLE_OAUTH_ENABLED: ${KOMODO_GOOGLE_OAUTH_ENABLED} - KOMODO_GOOGLE_OAUTH_ID: ${KOMODO_GOOGLE_OAUTH_ID} - KOMODO_GOOGLE_OAUTH_SECRET: ${KOMODO_GOOGLE_OAUTH_SECRET} - ## Aws - KOMODO_AWS_ACCESS_KEY_ID: ${KOMODO_AWS_ACCESS_KEY_ID} - KOMODO_AWS_SECRET_ACCESS_KEY: ${KOMODO_AWS_SECRET_ACCESS_KEY} - ## Hetzner - KOMODO_HETZNER_TOKEN: ${KOMODO_HETZNER_TOKEN} + ## Optionally mount a custom 
core.config.toml + # volumes: + # - /path/to/core.config.toml:/config/config.toml ## Deploy Periphery container using this block, - ## or deploy the Periphery binary with systemd using https://github.com/mbecker20/komodo/tree/main/scripts + ## or deploy the Periphery binary with systemd using + ## https://github.com/mbecker20/komodo/tree/main/scripts periphery: - image: ghcr.io/mbecker20/periphery:latest - # image: ghcr.io/mbecker20/periphery:latest-aarch64 # Use for arm support + image: ghcr.io/mbecker20/periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest} + restart: unless-stopped logging: - driver: local + driver: ${COMPOSE_LOGGING_DRIVER:-local} networks: - default + env_file: ./compose.env volumes: + ## Mount external docker socket - /var/run/docker.sock:/var/run/docker.sock - - repos:/etc/komodo/repos # manage repos in a docker volume, or change it to an accessible host directory. - - stacks:/etc/komodo/stacks # manage stack files in a docker volume, or change it to an accessible host directory. - environment: - # If the disk size is overreporting, can use one of these to - # whitelist / blacklist the disks to filter them, whichever is easier. - # Accepts comma separated list of paths. - # Usually whitelisting just /etc/hostname gives correct size. - PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/hostname - # PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap,/etc/repos + ## Allow Periphery to see processes outside of container + - /proc:/proc + ## use self signed certs in docker volume, + ## or mount your own signed certs. + - ssl-certs:/etc/komodo/ssl + ## manage repos in a docker volume, + ## or change it to an accessible host directory. + - repos:/etc/komodo/repos + ## manage stack files in a docker volume, + ## or change it to an accessible host directory. 
+ - stacks:/etc/komodo/stacks + ## Optionally mount a path to store compose files + # - /path/to/compose:/host/compose volumes: sqlite-data: + ssl-certs: repos: stacks: diff --git a/config/core.config.toml b/config/core.config.toml index db740ce4b..7bd8d2a7f 100644 --- a/config/core.config.toml +++ b/config/core.config.toml @@ -1,87 +1,69 @@ -###################### -# KOMODO CORE CONFIG # -##################### +########################### +# 🦎 KOMODO CORE CONFIG 🦎 # +########################### ## This is the offical "Default" config file for Komodo Core. ## It serves as documentation for the meaning of the fields. ## It is located at `https://github.com/mbecker20/komodo/blob/main/config/core.config.toml`. +## All fields with a "Default" provided are optional. If they are +## left out of the file, the "Default" value will be used. + ## This file is bundled into the official image, `ghcr.io/mbecker20/komodo`, ## as the default config at `/config/config.toml`. ## Komodo can start with no external config file mounted. ## There is usually no need to create this file on your host. ## Most fields can instead be configured using environment variables. +## Environment variables will override values set in this file. -## This will be the document title on the web page (shows up as text in the browser tab). +## This will be the document title on the web page. ## Env: KOMODO_TITLE ## Default: 'Komodo' -# title = "Komodo-02" +title = "Komodo" ## This should be the url used to access Komodo in browser, potentially behind DNS. ## Eg https://komodo.example.com or http://12.34.56.78:9120. This should match the address configured in your Oauth app. ## Env: KOMODO_HOST -## Required to start Komodo, no default. -host = "https://komodo.example.com" +## Required, no default. +host = "https://demo.komo.do" ## The port the core system will run on. ## Env: KOMODO_PORT ## Default: 9120 -# port = 9121 +port = 9120 ## This is the token used to authenticate core requests to periphery. 
## Ensure this matches a passkey in the connected periphery configs. ## If the periphery servers don't have passkeys configured, this doesn't need to be changed. -## Env: KOMODO_PASSKEY -## Required to start Komodo, no default +## Env: KOMODO_PASSKEY or KOMODO_PASSKEY_FILE +## Required, no default passkey = "a_random_passkey" ## Ensure a server with this address exists on Core -## upon first startup. Used with AIO compose. +## upon first startup. Example: `https://periphery:8120` +## Env: KOMODO_FIRST_SERVER ## Optional, no default. -## Env: KOMODO_ENSURE_SERVER -# ensure_server = "http://komodo-periphery:8120" +first_server = "" ## Disables write support on resources in the UI. ## This protects users that that would normally have write priviledges during their UI usage, ## when they intend to fully rely on ResourceSyncs to manage config. ## Env: KOMODO_UI_WRITE_DISABLED ## Default: false -# ui_write_disabled = true +ui_write_disabled = false -############ -# DATABASE # -############ +## Disables the confirm dialogs on all actions. All buttons will now be double-click. +## Useful when only having http connection to core, as UI quick-copy button won't work. +## Env: KOMODO_DISABLE_CONFIRM_DIALOG +## Default: false +disable_confirm_dialog = false -## Configure the database connection in one of the following ways: - -## Pass a full Mongo URI. Suitable for Mongo Atlas. -## Env: KOMODO_MONGO_URI -# mongo.uri = "mongodb://username:password@localhost:27017" - -## ==== * OR * ==== ## - -# Construct the address as mongodb://{username}:{password}@{address} -## Env: KOMODO_MONGO_ADDRESS -mongo.address = "localhost:27017" -## Env: KOMODO_MONGO_USERNAME -# mongo.username = "admin" -## Env: KOMODO_MONGO_PASSWORD -# mongo.password = "admin" - -## ==== other ==== - -## Komodo will create its collections under this database name. -## The only reason to change this is if multiple Komodo Cores share the same db. -## Env: KOMODO_MONGO_DB_NAME -## Default: komodo. 
-# mongo.db_name = "komodo" - -## This is the assigned app_name of the mongo client. -## The only reason to change this is if multiple Komodo Cores share the same db. -## Env: KOMODO_MONGO_APP_NAME -## Default: komodo_core. -# mongo.app_name = "komodo_core_01" +## Configure the repo directory (inside the container). +## There shouldn't be a need to change this, just mount a volume. +## Env: KOMODO_REPO_DIRECTORY +## Default: /repo-cache +repo_directory = "/repo-cache" ################ # AUTH / LOGIN # @@ -91,45 +73,97 @@ mongo.address = "localhost:27017" ## The password will be hashed and stored in the db for login comparison. ## ## NOTE: -## Komodo has no API to recover account logins, but if this happens you can doctor the db using Mongo Compass. +## Komodo has no API to recover account logins, but if this happens you can doctor the database using Mongo Compass. ## Create a new user, login to the database with Compass, note down your old users username and _id. ## Then delete the old user, and update the new user to have the same username and _id. ## Make sure to set `enabled: true` and maybe `admin: true` on the new user as well, while using Compass. ## ## Env: KOMODO_LOCAL_AUTH ## Default: false -# local_auth = true +local_auth = false -## Allows all users to have Read level access to all resources. -## Env: KOMODO_TRANSPARENT_MODE +## Normally new users will be registered, but not enabled until an Admin enables them. +## With `disable_user_registration = true`, only the first user to log in will registered as a user. +## Env: KOMODO_DISABLE_USER_REGISTRATION ## Default: false -# transparent_mode = true +disable_user_registration = false ## New users will be automatically enabled when they sign up. ## Otherwise, new users will be disabled on first login. ## The first user to login will always be enabled on creation. 
## Env: KOMODO_ENABLE_NEW_USERS ## Default: false -# enable_new_users = true +enable_new_users = false -## Normally new users will be registered, but not enabled until an Admin enables them. -## With `disable_user_registration = true`, only the first user to log in will registered as a user. -## Env: KOMODO_DISABLE_USER_REGISTRATION +## Allows all users to have Read level access to all resources. +## Env: KOMODO_TRANSPARENT_MODE ## Default: false -# disable_user_registration = true +transparent_mode = false + +## Normally all enabled users can create resources. +## If `disable_non_admin_create = true`, only admin users can create resources. +## Env: KOMODO_DISABLE_NON_ADMIN_CREATE +## Default: false +disable_non_admin_create = false ## Optionally provide a specific jwt secret. ## Passing nothing or an empty string will cause one to be generated on every startup. ## This means users will have to log in again if Komodo restarts. -## Env: KOMODO_JWT_SECRET -# jwt_secret = "your_random_secret" +## Env: KOMODO_JWT_SECRET or KOMODO_JWT_SECRET_FILE +## Default: empty string, meaning a random secret will be generated at startup. +jwt_secret = "" ## Specify how long a user can stay logged in before they have to log in again. ## All jwts are invalidated on application restart unless `jwt_secret` is set. ## Env: KOMODO_JWT_TTL -## Default: 1-day. ## Options: 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day -# jwt_ttl = "3-day" +## Default: 1-day. +jwt_ttl = "1-day" + +############# +# OIDC Auth # +############# + +## Enable logins with configured OIDC provider. +## Env: KOMODO_OIDC_ENABLED +## Default: false +oidc_enabled = false + +## Give the provider address. +## +## The path, ie /application/o/komodo for Authentik, +## is provider and configuration specific. +## +## Note. this address must be reachable from Komodo Core container. +## +## Env: KOMODO_OIDC_PROVIDER +## Optional, no default. 
+oidc_provider = "https://oidc.provider.internal/application/o/komodo" + +## Configure OIDC user redirect address. +## +## This is the address users are redirected to in their browser, +## and may be different from `oidc_provider` depending on your networking. +## If not provided (or empty string ""), the `oidc_provider` will be used. +## +## Env: KOMODO_OIDC_REDIRECT +## Optional, no default. +oidc_redirect = "" + +## Give the OIDC Client ID. +## Env: KOMODO_OIDC_CLIENT_ID or KOMODO_OIDC_CLIENT_ID_FILE +oidc_client_id = "" + +## Give the OIDC Client Secret. +## Env: KOMODO_OIDC_CLIENT_SECRET or KOMODO_OIDC_CLIENT_SECRET_FILE +oidc_client_secret = "" + +## If true, use the full email for usernames. +## Otherwise, the @address will be stripped, +## making usernames more concise. +## Default: false. +## Env: KOMODO_OIDC_USE_FULL_EMAIL +oidc_use_full_email = false ######### # OAUTH # @@ -139,29 +173,84 @@ mongo.address = "localhost:27017" ## Env: KOMODO_GOOGLE_OAUTH_ENABLED ## Default: false -# google_oauth.enabled = true +google_oauth.enabled = false -## Env: KOMODO_GOOGLE_OAUTH_ID +## Env: KOMODO_GOOGLE_OAUTH_ID or KOMODO_GOOGLE_OAUTH_ID_FILE ## Required if google_oauth is enabled. -# google_oauth.id = "your_google_client_id" +google_oauth.id = "" -## Env: KOMODO_GOOGLE_OAUTH_SECRET +## Env: KOMODO_GOOGLE_OAUTH_SECRET or KOMODO_GOOGLE_OAUTH_SECRET_FILE ## Required if google_oauth is enabled. -# google_oauth.secret = "your_google_client_secret" +google_oauth.secret = "" ## Github ## Env: KOMODO_GITHUB_OAUTH_ENABLED ## Default: false -# github_oauth.enabled = true +github_oauth.enabled = false -## Env: KOMODO_GITHUB_OAUTH_ID +## Env: KOMODO_GITHUB_OAUTH_ID or KOMODO_GITHUB_OAUTH_ID_FILE ## Required if github_oauth is enabled. -# github_oauth.id = "your_github_client_id" +github_oauth.id = "" -## Env: KOMODO_GITHUB_OAUTH_SECRET +## Env: KOMODO_GITHUB_OAUTH_SECRET or KOMODO_GITHUB_OAUTH_SECRET_FILE ## Required if github_oauth is enabled. 
-# github_oauth.secret = "your_github_client_secret" +github_oauth.secret = "" + +############ +# Security # +############ + +## Enable HTTPS server using the given key and cert. +## Env: KOMODO_SSL_ENABLED +## Default: false +ssl_enabled = false + +## Path to the ssl key. +## Env: KOMODO_SSL_KEY_FILE +## Default: /config/ssl/key.pem +ssl_key_file = "/config/ssl/key.pem" + +## Path to the ssl cert. +## Env: KOMODO_SSL_CERT_FILE +## Default: /config/ssl/cert.pem +ssl_cert_file = "/config/ssl/cert.pem" + +############ +# DATABASE # +############ + +## Configure the database connection in one of the following ways: + +## Pass a full Mongo URI to the database. +## Example: mongodb://username:password@localhost:27017 +## Env: KOMODO_DATABASE_URI or KOMODO_DATABASE_URI_FILE +## Optional, can usually use `address`, `username`, `password` instead. +database.uri = "" + +## ==== * OR * ==== ## + +# Construct the address as mongodb://{username}:{password}@{address} +## Env: KOMODO_DATABASE_ADDRESS +database.address = "localhost:27017" +## Env: KOMODO_DATABASE_USERNAME or KOMODO_DATABASE_USERNAME_FILE +database.username = "" +## Env: KOMODO_DATABASE_PASSWORD or KOMODO_DATABASE_PASSWORD_FILE +database.password = "" + +## ==== other ==== + +## Komodo will create its collections under this database name. +## The only reason to change this is if multiple Komodo Cores share the same db. +## Env: KOMODO_DATABASE_DB_NAME +## Default: komodo. +database.db_name = "komodo" + +## This is the assigned app_name of the mongo client. +## The only reason to change this is if multiple Komodo Cores share the same db. +## Env: KOMODO_DATABASE_APP_NAME +## Default: komodo_core. +database.app_name = "komodo_core" ############ # WEBHOOKS # @@ -169,24 +258,25 @@ mongo.address = "localhost:27017" ## This token must be given to git provider during repo webhook config. ## The secret configured on the git provider side must match the secret configured here. 
-## Env: KOMODO_WEBHOOK_SECRET -## Default: empty (none) +## If not provided, +## Env: KOMODO_WEBHOOK_SECRET or KOMODO_WEBHOOK_SECRET_FILE +## Optional, no default. webhook_secret = "a_random_webhook_secret" ## An alternate base url that is used to recieve git webhook requests. ## If empty or not specified, will use 'host' address as base. ## This is useful if Komodo is on an internal network, but can have a -## proxy just allowing through the webhook api using NGINX. +## proxy just allowing through the webhook listener api using NGINX. ## Env: KOMODO_WEBHOOK_BASE_URL ## Default: empty (none) -# webhook_base_url = "https://git-webhook.komo.do" +webhook_base_url = "" ## Configure Github webhook app. Enables webhook management apis. ## -## Env: KOMODO_GITHUB_WEBHOOK_APP_APP_ID +## Env: KOMODO_GITHUB_WEBHOOK_APP_APP_ID or KOMODO_GITHUB_WEBHOOK_APP_APP_ID_FILE # github_webhook_app.app_id = 1234455 # Find on the app page. ## Env: -## - KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS +## - KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS_FILE ## - KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES # github_webhook_app.installations = [ # ## Find the id after installing the app to user / organization. "namespace" is the username / organization name. @@ -208,23 +298,24 @@ webhook_secret = "a_random_webhook_secret" ## Env: KOMODO_LOGGING_LEVEL ## Options: off, error, warn, info, debug, trace ## Default: info -# logging.level = "info" +logging.level = "info" ## Specify the logging format for stdout / stderr. ## Env: KOMODO_LOGGING_STDIO ## Options: standard, json, none ## Default: standard -# logging.stdio = "standard" +logging.stdio = "standard" ## Optionally specify a opentelemetry otlp endpoint to send traces to. +## Example: http://localhost:4317 ## Env: KOMODO_LOGGING_OTLP_ENDPOINT -# logging.otlp_endpoint = "http://localhost:4317" +logging.otlp_endpoint = "" ## Set the opentelemetry service name. 
## This will be attached to the telemetry Komodo will send. ## Env: KOMODO_LOGGING_OPENTELEMETRY_SERVICE_NAME ## Default: "Komodo" -# logging.opentelemetry_service_name = "Komodo-01" +logging.opentelemetry_service_name = "Komodo" ########### # PRUNING # @@ -234,69 +325,52 @@ webhook_secret = "a_random_webhook_secret" ## Stats older that are than this number of days are deleted on a daily cycle. ## Env: KOMODO_KEEP_STATS_FOR_DAYS ## Default: 14 -# keep_stats_for_days = 14 +keep_stats_for_days = 14 ## The number of days to keep alerts around, or 0 to disable pruning. ## Alerts older that are than this number of days are deleted on a daily cycle. ## Env: KOMODO_KEEP_ALERTS_FOR_DAYS ## Default: 14 -# keep_alerts_for_days = 14 +keep_alerts_for_days = 14 ################## # POLL INTERVALS # ################## -## Interval at which to poll Stacks for any updates / automated actions. -## Env: KOMODO_STACK_POLL_INTERVAL -## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. -## Default: `5-min`. -# stack_poll_interval = "1-min" - -## Interval at which to poll Syncs for any updates / automated actions. -## Env: KOMODO_SYNC_POLL_INTERVAL -## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. -## Default: `5-min`. -# sync_poll_interval = "1-min" - -## Interval at which to poll Builds (latest commit hash) for any updates / automated actions. -## Env: KOMODO_STACK_POLL_INTERVAL -## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. -## Default: `5-min`. -# build_poll_interval = "1-min" - -## Interval at which to poll Repos (latest commit hash) for any updates / automated actions. -## Env: KOMODO_REPO_POLL_INTERVAL -## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. -## Default: `5-min`. -# repo_poll_interval = "1-min" - ## Controls the rate at which servers are polled for health, system stats, and container status. ## This affects network usage, and the size of the stats stored in mongo. 
+## Env: KOMODO_MONITORING_INTERVAL +## Options: 1-sec, 5-sec, 15-sec, 30-sec, 1-min, 2-min, 5-min, 15-min ## Default: 15-sec -## Options: 5-sec, 15-sec, 30-sec, 1-min, 2-min, 5-min, 15-min -# monitoring_interval = "5-sec" +monitoring_interval = "15-sec" + +## Interval at which to poll Resources for any updates / automated actions. +## Env: KOMODO_RESOURCE_POLL_INTERVAL +## Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`. +## Default: 5-min +resource_poll_interval = "5-min" ################### # CLOUD PROVIDERS # ################### -## Komodo can build images on purpose deployed AWS EC2 instances, -## and afterwards destroying the instance. +## Komodo can build images by deploying AWS EC2 instances, +## running the build, and afterwards destroying the instance. ## Additionally, Komodo can deploy cloud VPS on AWS EC2 and Hetzner. ## Use the Template resource to configure launch preferences. ## Hetzner is not supported for builds as their pricing model is by the hour, ## while AWS is by the minute. This is very important for builds. -## Provide aws api keys for ephemeral builders / server launch -## Env: KOMODO_AWS_ACCESS_KEY_ID -# aws.access_key_id = "your_aws_key_id" -## Env: KOMODO_AWS_SECRET_ACCESS_KEY -# aws.secret_access_key = "your_aws_secret_key" +## Provide AWS api keys for ephemeral builders / server launch +## Env: KOMODO_AWS_ACCESS_KEY_ID or KOMODO_AWS_ACCESS_KEY_ID_FILE +aws.access_key_id = "" +## Env: KOMODO_AWS_SECRET_ACCESS_KEY or KOMODO_AWS_SECRET_ACCESS_KEY_FILE +aws.secret_access_key = "" -## Provide hetzner api token for server launch -## Env: KOMODO_HETZNER_TOKEN -# hetzner.token = "your_hetzner_token" +## Provide Hetzner api token for server launch +## Env: KOMODO_HETZNER_TOKEN or KOMODO_HETZNER_TOKEN_FILE +hetzner.token = "" ################# # GIT PROVIDERS # @@ -350,32 +424,15 @@ webhook_secret = "a_random_webhook_secret" # ] # organizations = ["Mogh"] # These become available in the UI -## Configure AWS ECR registries. 
-## Ecr is a special case of registry, as using it is pretty different than others. -## You can configure multiple of these with different "labels", and select -## then by label in the UI. - -# [aws_ecr_registry.label_1] -# region = "us-east-1" -# account_id = "1234455" -# access_key_id = "your_aws_key_id_1" -# secret_access_key = "your_aws_secret_key_1" - -# [aws_ecr_registry.label_2] -# region = "us-west-1" -# account_id = "1234455" -# access_key_id = "your_aws_key_id_2" -# secret_access_key = "your_aws_secret_key_2" - ########### # SECRETS # ########### -## Provide core-based secrets. +## Provide Core based secrets. ## These will be available to interpolate into your Deployment / Stack environments, ## and will be hidden in the UI and logs. -## These are available to use on any periphery (Server), -## but you can also limit access more by placing them in a single peripheries config instead. +## These are available to use on any Periphery (Server), +## but you can also limit access more by placing them in a single Periphery's config file instead. ## These cannot be configured on the environment. # [secrets] diff --git a/config/periphery.config.toml b/config/periphery.config.toml index c22a54e03..e03a341eb 100644 --- a/config/periphery.config.toml +++ b/config/periphery.config.toml @@ -1,71 +1,125 @@ -########################### -# KOMODO PERIPHERY CONFIG # -########################### +################################ +# 🦎 KOMODO PERIPHERY CONFIG 🦎 # +################################ -## Optional. The port the server runs on. 8120 is default +## This is the offical "Default" config file for Komodo Periphery. +## It serves as documentation for the meaning of the fields. +## It is located at `https://github.com/mbecker20/komodo/blob/main/config/periphery.config.toml`. + +## All fields with a "Default" provided are optional. If they are +## left out of the file, the "Default" value will be used. 
+ +## If Periphery was installed on the host (systemd install script), this +## file will be located either in `/etc/komodo/periphery.config.toml`, +## or for user installs, `$HOME/.config/komodo/periphery.config.toml`. + +## Optional. The port the server runs on. ## Env: PERIPHERY_PORT -# port = 8120 +## Default: 8120 +port = 8120 -## Optional. /etc/komodo/repos is default. ## The directory periphery will use to manage repos. ## The periphery user must have write access to this directory. ## Env: PERIPHERY_REPO_DIR -# repo_dir = "/home/ubuntu/komodo/repos" +## Default: /etc/komodo/repos +repo_dir = "/etc/komodo/repos" -## Optional. /etc/komodo/stacks is default. ## The directory periphery will use to manage stacks. ## The periphery user must have write access to this directory. ## Env: PERIPHERY_STACK_DIR -# stack_dir = "/home/ubuntu/komodo/stacks" +## Default: /etc/komodo/stacks +stack_dir = "/etc/komodo/stacks" -## Optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded +## How often Periphery polls the host for system stats, +## like CPU / memory usage. ## Env: PERIPHERY_STATS_POLLING_RATE -# stats_polling_rate = "1-sec" +## Options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min +## Default: 5-sec +stats_polling_rate = "5-sec" ## Whether stack actions should use `docker-compose ...` ## instead of `docker compose ...`. -## default: false ## Env: PERIPHERY_LEGACY_COMPOSE_CLI -# legacy_compose_cli = true +## Default: false +legacy_compose_cli = false -## Optional. Only include mounts at specific paths in the disc report. +## Optional. Only include mounts at specific paths in the disk report. ## Env: PERIPHERY_INCLUDE_DISK_MOUNTS -# include_disk_mounts = ["/etc/komodo/repos"] +## Default: empty, which won't filter down the disks. +include_disk_mounts = [] ## Optional. Don't include these mounts in the disk report. 
## Env: PERIPHERY_EXCLUDE_DISK_MOUNTS -# exclude_disk_mounts = ["/etc/komodo/repos"] +## Default: empty, which won't exclude any disks. +exclude_disk_mounts = [] ######## # AUTH # ######## ## Optional. Limit the ip addresses which can call the periphery api. -## Default is empty, which will not block any request by ip. ## Env: PERIPHERY_ALLOWED_IPS -# allowed_ips = ["127.0.0.1"] +## Default: empty, which will not block any request by ip. +allowed_ips = [] ## Optional. Require callers to provide on of the provided passkeys to access the periphery api. -## Default is empty, which will not require any passkey to be passed by core. -## Env: PERIPHERY_PASSKEYS -# passkeys = ["abcdefghijk"] +## Env: PERIPHERY_PASSKEYS or PERIPHERY_PASSKEYS_FILE +## Default: empty, which will not require any passkey to be passed by core. +passkeys = [] +############ +# Security # +############ +## Enable HTTPS server using the given key and cert. +## If true and a key / cert at the given paths are not found, +## self signed keys will be generated using openssl. +## Env: PERIPHERY_SSL_ENABLED +## Default: false (will change to `true` in later release) +ssl_enabled = true + +## Path to the ssl key. +## Env: PERIPHERY_SSL_KEY_FILE +## Default: /etc/komodo/ssl/key.pem +ssl_key_file = "/etc/komodo/ssl/key.pem" + +## Path to the ssl cert. +## Env: PERIPHERY_SSL_CERT_FILE +## Default: /etc/komodo/ssl/cert.pem +ssl_cert_file = "/etc/komodo/ssl/cert.pem" ########### -# SECRETS # +# LOGGING # ########### -## provide periphery-based secrets -# [secrets] -# SECRET_1 = "value_1" -# SECRET_2 = "value_2" +## Specify the logging verbosity +## Options: off, error, warn, info, debug, trace +## Default: info +## Env: PERIPHERY_LOGGING_LEVEL +logging.level = "info" + +## Specify the logging format for stdout / stderr. +## Env: PERIPHERY_LOGGING_STDIO +## Options: standard, json, none +## Default: standard +logging.stdio = "standard" + +## Specify a opentelemetry otlp endpoint to send traces to. 
+## Example: http://localhost:4317. +## Env: PERIPHERY_LOGGING_OTLP_ENDPOINT +## Optional, no default +logging.otlp_endpoint = "" + +## Set the opentelemetry service name attached to the telemetry Periphery will send. +## Env: PERIPHERY_LOGGING_OPENTELEMETRY_SERVICE_NAME +## Default: "Komodo" +logging.opentelemetry_service_name = "Periphery" ################# # GIT PROVIDERS # ################# -## configure periphery-based git providers +## configure Periphery based git providers # [[git_provider]] # domain = "github.com" # accounts = [ @@ -90,7 +144,7 @@ # REGISTRY PROVIDERS # ###################### -## configure periphery-based docker registries +## Configure Periphery based docker registries # [[docker_registry]] # domain = "docker.io" # accounts = [ @@ -106,27 +160,10 @@ # organizations = ["Mogh"] # These become available in the UI ########### -# LOGGING # +# SECRETS # ########### -## Specify the logging verbosity -## Default: info -## Options: off, error, warn, info, debug, trace -## Env: PERIPHERY_LOGGING_LEVEL -# logging.level = "debug" - -## Specify the logging format for stdout / stderr. -## Default: standard -## Options: standard, json, none -## Env: PERIPHERY_LOGGING_STDIO -# logging.stdio = "json" - -## Specify a opentelemetry otlp endpoint to send traces to -## Optional, default unassigned -## Env: PERIPHERY_LOGGING_OTLP_ENDPOINT -# logging.otlp_endpoint = "http://localhost:4317" - -## Set the opentelemetry service name attached to the telemetry this periphery will send. 
-## Default: "Komodo" -## Env: PERIPHERY_LOGGING_OPENTELEMETRY_SERVICE_NAME -# logging.opentelemetry_service_name = "Periphery-02" \ No newline at end of file +## Provide periphery-based secrets +# [secrets] +# SECRET_1 = "value_1" +# SECRET_2 = "value_2" \ No newline at end of file diff --git a/docsite/docs/docker-compose.md b/docsite/docs/docker-compose.md index fa8d0511e..b94b7e1e4 100644 --- a/docsite/docs/docker-compose.md +++ b/docsite/docs/docker-compose.md @@ -6,16 +6,14 @@ Komodo can deploy docker compose projects through the `Stack` resource. Komodo supports 3 ways of defining the compose files: 1. **Write them in the UI**, and Komodo will write them to your host at deploy-time. - 2. **Store them in a git repo**, and have Komodo clone it on the host to deploy. - 3. **Store the files anywhere on the host**, and Komodo will just run the compose commands on the existing files. - -The recommended way to deploy Stacks is using compose files located in a git repo. + 2. **Store the files anywhere on the host**, and Komodo will just run the compose commands on the existing files. + 3. **Store them in a git repo**, and have Komodo clone it on the host to deploy. If you manage your compose files in git repos: - All your files, across all servers, are available locally to edit in your favorite text editor. - All of your changes are tracked, and can be reverted. -- You can use the git webhooks to do other automations when you change the compose file contents. Redeploying will be as easy as just `git push`. +- You can use the git webhooks to do other automations when you change the compose file contents. Redeploying will be as easy as `git push`. :::info Many Komodo resources need access to git repos. There is an in-built token management system (managed in UI or in config file) to give resources access to credentials. 
diff --git a/docsite/docs/setup/connect-servers.mdx b/docsite/docs/setup/connect-servers.mdx index f39c1e8ee..a49a2d882 100644 --- a/docsite/docs/setup/connect-servers.mdx +++ b/docsite/docs/setup/connect-servers.mdx @@ -14,8 +14,8 @@ You can install Periphery as a systemd managed process, run it as a [docker cont :::warning Allowing unintended access to the Periphery agent API is a security risk. Ensure to take appropriate measures to block access to the Periphery API, such as firewall rules on port `8120`. -Additionally, you can whitelist your Komodo Core IP address in the [Periphery config](https://github.com/mbecker20/komodo/blob/2463ed3879ee56821f99d1f09581d659ee5d0575/config_example/periphery.config.example.toml#L46), -and configure it to [only accept requests matching including your Core passkey](https://github.com/mbecker20/komodo/blob/2463ed3879ee56821f99d1f09581d659ee5d0575/config_example/periphery.config.example.toml#L51). +Additionally, you can whitelist your Komodo Core IP address in the [Periphery config](https://github.com/mbecker20/komodo/blob/main/config/periphery.config.toml#L46), +and configure it to [only accept requests matching including your Core passkey](https://github.com/mbecker20/komodo/blob/main/config/periphery.config.toml#L51). ::: ### Install the Periphery agent - systemd @@ -50,18 +50,34 @@ services: ports: - 8120:8120 volumes: + ## Mount external docker socket - /var/run/docker.sock:/var/run/docker.sock - - repos:/etc/komodo/repos # manage repos in a docker volume, or change it to an accessible host directory. - - stacks:/etc/komodo/stacks # manage stacks in a docker volume, or change it to an accessible host directory. + ## Allow Periphery to see processes outside of container + - /proc:/proc + ## use self signed certs in docker volume, + ## or mount your own signed certs. + - ssl-certs:/etc/komodo/ssl + ## manage repos in a docker volume, + ## or change it to an accessible host directory. 
+ - repos:/etc/komodo/repos + ## manage stack files in a docker volume, + ## or change it to an accessible host directory. + - stacks:/etc/komodo/stacks + ## Optionally mount a path to store compose files + # - /path/to/compose:/host/compose environment: - # If the disk size is overreporting, can use one of these to - # whitelist / blacklist the disks to filter them, whichever is easier. - # Accepts comma separated list of paths. - # Usually whitelisting just /etc/hostname gives correct size. + ## Full list: `https://github.com/mbecker20/komodo/blob/main/config/periphery.config.toml` + ## Enable HTTPS server + PERIPHERY_SSL_ENABLED: true + ## If the disk size is overreporting, can use one of these to + ## whitelist / blacklist the disks to filter them, whichever is easier. + ## Accepts comma separated list of paths. + ## Usually whitelisting just /etc/hostname gives correct size. PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/hostname # PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap,/etc/repos volumes: + ssl-certs: repos: stacks: ``` @@ -70,7 +86,7 @@ volumes: 1. Download the periphery binary from the latest [release](https://github.com/mbecker20/komodo/releases). -2. Create and edit your config files, following the [config example](https://github.com/mbecker20/komodo/blob/main/config_example/periphery.config.example.toml). +2. Create and edit your config files, following the [config example](https://github.com/mbecker20/komodo/blob/main/config/periphery.config.toml). :::note See the [periphery config docs](https://docs.rs/komodo_client/latest/komodo_client/entities/config/periphery/index.html) @@ -113,3 +129,20 @@ There are two ways to merge config files. The default behavior is to completely For example, with `--merge-nested-config true` you can specify an allowed ip in the base config, and another in the overide config, they will both be present in the final config. 
Similarly, you can specify a base docker / github account pair, and extend them with additional accounts in the overide config. + +## Configuration + +Quick download to `./komodo/periphery.config.toml`: +```bash +wget -P komodo https://raw.githubusercontent.com/mbecker20/komodo/main/config/periphery.config.toml +``` + +```mdx-code-block +import RemoteCodeFile from "@site/src/components/RemoteCodeFile"; + + +``` diff --git a/frontend/package.json b/frontend/package.json index e077d6b70..ef5aab53c 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -11,6 +11,7 @@ "build-client": "cd ../client/core/ts && yarn && yarn build && yarn link" }, "dependencies": { + "@monaco-editor/react": "^4.6.0", "@radix-ui/react-checkbox": "^1.1.1", "@radix-ui/react-dialog": "^1.1.1", "@radix-ui/react-dropdown-menu": "^2.1.1", @@ -36,6 +37,7 @@ "jotai": "2.9.2", "lightweight-charts": "4.2.0", "lucide-react": "0.437.0", + "monaco-editor": "^0.52.0", "react": "18.3.1", "react-dom": "18.3.1", "react-minimal-pie-chart": "8.4.0", diff --git a/frontend/src/components/alert/details.tsx b/frontend/src/components/alert/details.tsx index 3a886f9f2..95ead745b 100644 --- a/frontend/src/components/alert/details.tsx +++ b/frontend/src/components/alert/details.tsx @@ -2,7 +2,12 @@ import { ResourceLink } from "@components/resources/common"; import { useRead } from "@lib/hooks"; import { UsableResource } from "@types"; import { Button } from "@ui/button"; -import { Dialog, DialogContent, DialogHeader, DialogTrigger } from "@ui/dialog"; +import { + Dialog, + DialogContent, + DialogHeader, + DialogTrigger, +} from "@ui/dialog"; import { useState } from "react"; import { AlertLevel } from "."; import { fmt_date_with_minutes } from "@lib/formatting"; @@ -11,6 +16,8 @@ import { alert_level_intention, text_color_class_by_intention, } from "@lib/color"; +import { MonacoEditor } from "@components/monaco"; +import { Types } from "@komodo/client"; export const AlertDetailsDialog = ({ id }: { id: 
string }) => { const [open, set] = useState(false); @@ -22,25 +29,39 @@ export const AlertDetailsDialog = ({ id }: { id: string }) => { Details - + set(false)} /> + + ); +}; + +export const AlertDetailsDialogContent = ({ + alert, + onClose, +}: { + alert: Types.Alert | undefined; + onClose: () => void; +}) => ( + <> + {alert && ( + {alert && ( <> - + {alert && ( - <> +
set(false)} + onClick={onClose} />
{fmt_date_with_minutes(new Date(alert.ts))}
- +
)}
-
+
{/** Alert type */}
@@ -69,12 +90,16 @@ export const AlertDetailsDialog = ({ id }: { id: string }) => {
{/** Alert data */} -
{JSON.stringify(alert.data.data, undefined, 2)}
+
)} - - ); -}; + )} + +); diff --git a/frontend/src/components/alert/table.tsx b/frontend/src/components/alert/table.tsx index bc486c674..f3e26d236 100644 --- a/frontend/src/components/alert/table.tsx +++ b/frontend/src/components/alert/table.tsx @@ -4,9 +4,10 @@ import { AlertLevel } from "."; import { AlertDetailsDialog } from "./details"; import { UsableResource } from "@types"; import { ResourceLink } from "@components/resources/common"; -import { bg_color_class_by_intention } from "@lib/color"; -import { Card, CardHeader } from "@ui/card"; -import { cn } from "@lib/utils"; +import { + alert_level_intention, + text_color_class_by_intention, +} from "@lib/color"; export const AlertsTable = ({ alerts, @@ -37,15 +38,16 @@ export const AlertsTable = ({ showResolved && { header: "Status", cell: ({ row }) => { - const color = bg_color_class_by_intention( - row.original.resolved ? "Good" : "Critical" - ); return ( - - - {row.original.resolved ? "Resolved" : "Open"} - - +
+ {row.original.resolved ? "RESOLVED" : "OPEN"} +
); }, }, diff --git a/frontend/src/components/alert/topbar.tsx b/frontend/src/components/alert/topbar.tsx index 171cf77f3..c97cd093f 100644 --- a/frontend/src/components/alert/topbar.tsx +++ b/frontend/src/components/alert/topbar.tsx @@ -6,19 +6,14 @@ import { DropdownMenuItem, DropdownMenuTrigger, } from "@ui/dropdown-menu"; -import { AlertTriangle, Clock } from "lucide-react"; +import { AlertTriangle } from "lucide-react"; import { AlertLevel } from "."; import { ResourceLink } from "@components/resources/common"; import { UsableResource } from "@types"; -import { - Dialog, - DialogContent, - DialogDescription, - DialogHeader, - DialogTitle, -} from "@ui/dialog"; +import { Dialog } from "@ui/dialog"; import { Types } from "@komodo/client"; import { useState } from "react"; +import { AlertDetailsDialogContent } from "./details"; export const TopbarAlerts = () => { const { data } = useRead( @@ -38,7 +33,7 @@ export const TopbarAlerts = () => { return ( <> - + +
+
+ - - {components[show] && ( -
- {components[show].map( - ({ - label, - labelHidden, - icon, - actions, - description, - hidden, - contentHidden, - components, - }) => - !hidden && ( - - {!labelHidden && ( - -
- - {icon} - {label} - - {description && ( - - - - - - - - {description} - - - )} -
- {actions} -
- )} - {!contentHidden && ( - - set((p) => ({ ...p, ...u }))} - components={components} - disabled={disabled} - /> - - )} -
- ) - )}
- )} +
+
+ {sections.map( + (section) => + components[section] && ( +
+
+ {section &&

{section}

} + +
+ {section && ( +

+ {section} +

+ )} +
+ {components[section].map( + ({ + label, + boldLabel = true, + labelHidden, + icon, + labelExtra, + actions, + description, + hidden, + contentHidden, + components, + }) => + !hidden && ( +
+ {!labelHidden && ( +
+
+
+ {icon} +
+ {label} +
+ {labelExtra} +
+ {description && ( +
+ {description} +
+ )} +
+ {actions} +
+ )} + {!contentHidden && ( + set((p) => ({ ...p, ...u }))} + components={components} + disabled={disabled} + /> + )} +
+ ) + )} +
+
+ ) + )} +
); diff --git a/frontend/src/components/config/util.tsx b/frontend/src/components/config/util.tsx index 61cb37592..42bf88574 100644 --- a/frontend/src/components/config/util.tsx +++ b/frontend/src/components/config/util.tsx @@ -1,5 +1,5 @@ /* eslint-disable @typescript-eslint/no-explicit-any */ -import { useRead } from "@lib/hooks"; +import { useCtrlKeyListener, useRead } from "@lib/hooks"; import { Types } from "@komodo/client"; import { Select, @@ -13,14 +13,14 @@ import { Input } from "@ui/input"; import { Switch } from "@ui/switch"; import { CheckCircle, - Info, MinusCircle, PlusCircle, Save, + Search, SearchX, } from "lucide-react"; -import { ReactNode, RefObject, useState } from "react"; -import { cn, filterBySplit } from "@lib/utils"; +import { ReactNode, useState } from "react"; +import { cn, env_to_text, filterBySplit } from "@lib/utils"; import { Dialog, DialogContent, @@ -30,7 +30,7 @@ import { DialogTrigger, } from "@ui/dialog"; import { snake_case_to_upper_space_case } from "@lib/formatting"; -import { ConfirmButton, TextUpdateMenu } from "@components/util"; +import { ConfirmButton, ShowHideButton } from "@components/util"; import { Popover, PopoverContent, PopoverTrigger } from "@ui/popover"; import { Command, @@ -40,8 +40,13 @@ import { CommandItem, CommandList, } from "@ui/command"; -import { HoverCard, HoverCardContent, HoverCardTrigger } from "@ui/hover-card"; -import { Card } from "@ui/card"; + +import { Card, CardContent, CardHeader } from "@ui/card"; +import { + soft_text_color_class_by_intention, + text_color_class_by_intention, +} from "@lib/color"; +import { MonacoDiffEditor, MonacoEditor } from "@components/monaco"; export const ConfigItem = ({ label, @@ -50,42 +55,33 @@ export const ConfigItem = ({ children, className, }: { - label?: string; + label?: ReactNode; boldLabel?: boolean; description?: ReactNode; children: ReactNode; className?: string; }) => ( - <> -
-
- {label && ( -
- {snake_case_to_upper_space_case(label)} +
+ {(label || description) && ( +
+ {label && typeof label === "string" && ( +
+ {label.split("_").join(" ")}
)} + {label && typeof label !== "string" && label} {description && ( - - - - - - - - {description} - - +
{description}
)}
- {children} -
-
- + )} + {children} +
); export const ConfigInput = ({ @@ -145,7 +141,7 @@ export const ConfigInput = ({ export const ConfigSwitch = ({ label, boldLabel, - value, + value: checked, description, disabled, onChange, @@ -157,8 +153,36 @@ export const ConfigSwitch = ({ disabled: boolean; onChange: (value: boolean) => void; }) => ( - - + +
onChange(!checked)} + > + {/*
+ DISABLED +
*/} + +
+ {checked ? "ENABLED" : "DISABLED"} +
+
); @@ -335,18 +359,23 @@ export const ProviderSelectorConfig = (params: { onSelect: (id: string) => void; https?: boolean; onHttpsSwitch?: () => void; + description?: string; + boldLabel?: boolean; }) => { const select = params.account_type === "git" ? "git provider" : "docker registry"; + const label = + params.account_type === "git" ? "Git Provider" : "Image Registry"; return ( {params.account_type === "git" ? ( -
+
+ )} + {props.values.length > 0 && } + ); }; +export type InputListProps = { + field: keyof T; + values: string[]; + disabled: boolean; + set: (update: Partial) => void; + placeholder?: string; + className?: string; +}; + export const InputList = ({ field, values, @@ -499,15 +531,8 @@ export const InputList = ({ set, placeholder, className, -}: { - field: keyof T; - values: string[]; - disabled: boolean; - set: (update: Partial) => void; - placeholder?: string; - className?: string; -}) => ( -
+}: InputListProps) => ( +
{values.map((arg, i) => (
@@ -539,18 +564,27 @@ export const InputList = ({
); -interface ConfirmUpdateProps { - content: string; +interface ConfirmUpdateProps { + previous: T; + content: Partial; onConfirm: () => void; disabled: boolean; } -export const ConfirmUpdate = ({ +export function ConfirmUpdate({ + previous, content, onConfirm, disabled, -}: ConfirmUpdateProps) => { +}: ConfirmUpdateProps) { const [open, set] = useState(false); + useCtrlKeyListener("s", () => { + if (open) { + onConfirm(); + } else { + set(true); + } + }); return ( @@ -563,13 +597,19 @@ export const ConfirmUpdate = ({ Save - + Confirm Update -
- New configuration to be applied: -
{content}
+
+ {Object.entries(content).map(([key, val], i) => ( + + ))}
); -}; +} +function ConfirmUpdateItem({ + _key, + val: _val, + previous, +}: { + _key: keyof T; + val: T[keyof T]; + previous: T; +}) { + const [show, setShow] = useState(true); + const val = + typeof _val === "string" + ? _key === "environment" || + _key === "build_args" || + _key === "secret_args" + ? _val + .split("\n") + .filter((line) => !line.startsWith("#")) + .map((line) => line.split(" #")[0]) + .join("\n") + : _val + : JSON.stringify(_val, null, 2); + const prev_val = + typeof previous[_key] === "string" + ? previous[_key] + : _key === "environment" || + _key === "build_args" || + _key === "secret_args" + ? env_to_text(previous[_key] as any) ?? "" + : JSON.stringify(previous[_key], null, 2); + const showDiff = + val?.includes("\n") || + prev_val?.includes("\n") || + Math.max(val?.length ?? 0, prev_val?.length ?? 0) > 30; + return ( +
+ + +

+ {snake_case_to_upper_space_case(_key as string)} +

+ +
+ {show && ( + + {showDiff ? ( + + ) : ( +
+                
+                  {prev_val || "None"}
+                {" "}
+                {"->"}{" "}
+                
+                  {val || "None"}
+                
+              
+ )} +
+ )} +
+
+ ); +} export const SystemCommand = ({ value, @@ -596,30 +706,23 @@ export const SystemCommand = ({ set: (value: Types.SystemCommand) => void; }) => { return ( -
-
-
-
Path:
- set({ ...(value || {}), path: e.target.value })} - disabled={disabled} - /> -
-
-
Command:
- set({ ...(value || {}), command })} - triggerClassName="w-[200px] lg:w-[300px] xl:w-[400px]" - disabled={disabled} - /> -
+
+
+
Path:
+ set({ ...(value || {}), path: e.target.value })} + disabled={disabled} + />
+ set({ ...(value || {}), command })} + readOnly={disabled} + />
); }; @@ -639,6 +742,19 @@ export const AddExtraArgMenu = ({ const filtered = filterBySplit(suggestions, search, (item) => item); + if (suggestions.length === 0) { + return ( + + ); + } + return ( @@ -650,7 +766,7 @@ export const AddExtraArgMenu = ({ Add Extra Arg - + void; + registry: Types.ImageRegistryConfig | undefined; + setRegistry: (registry: Types.ImageRegistryConfig) => void; disabled: boolean; // For builds, its builder id. For servers, its server id. resource_id?: string; - registry_types?: Types.ImageRegistry["type"][]; }) => { - const registry = _registry ?? default_registry_config("None"); - // This is the only way to get organizations for now const config_provider = useRead("ListDockerRegistriesFromConfig", { target: resource_id ? { type: "Builder", id: resource_id } : undefined, }).data?.find((provider) => { - if (registry.type === "Standard") { - return provider.domain === registry.params.domain; - } else { - return false; - } + return provider.domain === registry?.domain; }); - if (registry.type === "None") { - return ( - - - - ); - } - if (registry.type === "AwsEcr") { - return ( - -
- - setRegistry({ - type: "AwsEcr", - params: label, - }) - } - disabled={disabled} - /> - -
-
- ); - } - const organizations = config_provider?.organizations ?? []; return ( <>
setRegistry({ ...registry, - params: { ...registry.params, domain }, + domain, }) } showCustom={false} /> -
{organizations.length > 0 && ( @@ -803,84 +863,42 @@ export const ImageRegistryConfig = ({ > setRegistry({ ...registry, - params: { ...registry.params, organization }, + organization, + }) + } + disabled={disabled} + /> + + )} + {registry && ( + + + setRegistry({ + ...registry, + account, }) } disabled={disabled} /> )} - - - setRegistry({ - ...registry, - params: { ...registry.params, account }, - }) - } - disabled={disabled} - /> - ); }; -const REGISTRY_TYPES: Types.ImageRegistry["type"][] = [ - "None", - "Standard", - "AwsEcr", -]; - -const RegistryTypeSelector = ({ - registry, - setRegistry, - registry_types = REGISTRY_TYPES, - disabled, -}: { - registry: Types.ImageRegistry; - setRegistry: (registry: Types.ImageRegistry) => void; - registry_types?: Types.ImageRegistry["type"][]; - disabled: boolean; -}) => { - return ( - - ); -}; - const OrganizationSelector = ({ organizations, selected, @@ -953,32 +971,20 @@ const OrganizationSelector = ({ ); }; -const default_registry_config = ( - type: Types.ImageRegistry["type"] -): Types.ImageRegistry => { - switch (type) { - case "None": - return { type, params: {} }; - case "AwsEcr": - return { type, params: "" }; - case "Standard": - return { - type, - params: { domain: "docker.io", account: "", organization: "" }, - }; - } -}; - export const SecretSelector = ({ keys, onSelect, type, disabled, + align = "start", + side = "right", }: { keys: string[]; onSelect: (key: string) => void; type: "Variable" | "Secret"; disabled: boolean; + align?: "start" | "center" | "end"; + side?: "bottom" | "right"; }) => { const [open, setOpen] = useState(false); const [search, setSearch] = useState(""); @@ -995,11 +1001,15 @@ export const SecretSelector = ({ - + ); }; - -/// Takes in env -export const SecretsForEnvironment = ({ - env, - setEnv, - envRef, -}: { - /// Environment file - env?: string; - setEnv: (env: string) => void; - envRef: RefObject; -}) => { - const variables = useRead("ListVariables", {}).data ?? 
[]; - const secrets = useRead("ListSecrets", {}).data ?? []; - - const _env = env || ""; - - if (variables.length === 0 && secrets.length === 0) return; - - return ( -
- {variables.length > 0 && ( - v.name)} - onSelect={(variable) => - setEnv( - _env.slice(0, envRef.current?.selectionStart) + - `[[${variable}]]` + - _env.slice(envRef.current?.selectionStart, undefined) - ) - } - disabled={false} - /> - )} - {secrets.length > 0 && ( - - setEnv( - _env.slice(0, envRef.current?.selectionStart) + - `[[${secret}]]` + - _env.slice(envRef.current?.selectionStart, undefined) - ) - } - disabled={false} - /> - )} -
- ); -}; diff --git a/frontend/src/components/export.tsx b/frontend/src/components/export.tsx index 09ed8deda..ab009839a 100644 --- a/frontend/src/components/export.tsx +++ b/frontend/src/components/export.tsx @@ -11,6 +11,7 @@ import { import { FileDown, Loader2 } from "lucide-react"; import { useState } from "react"; import { CopyButton } from "./util"; +import { MonacoEditor } from "./monaco"; export const ExportButton = ({ targets, @@ -27,7 +28,7 @@ export const ExportButton = ({ return ( - @@ -89,11 +90,9 @@ const ExportPre = ({ content: string | undefined; }) => { return ( -
+
{loading && } -
-
{content}
-
+
); diff --git a/frontend/src/components/layouts.tsx b/frontend/src/components/layouts.tsx index 813084b16..504bf36fe 100644 --- a/frontend/src/components/layouts.tsx +++ b/frontend/src/components/layouts.tsx @@ -24,6 +24,7 @@ export const Layout = () => { const nav = useNavigate(); useShiftKeyListener("H", () => nav("/")); useShiftKeyListener("G", () => nav("/servers")); + useShiftKeyListener("Z", () => nav("/stacks")); useShiftKeyListener("D", () => nav("/deployments")); useShiftKeyListener("B", () => nav("/builds")); useShiftKeyListener("R", () => nav("/repos")); @@ -32,7 +33,7 @@ export const Layout = () => { return ( <> -
+
@@ -90,7 +91,7 @@ export const Page = ({
-
+
{icon}

{title}

@@ -177,7 +178,7 @@ export const Section = ({ children, itemsCenterTitleRow, }: SectionProps) => ( -
+
- diff --git a/frontend/src/components/monaco.tsx b/frontend/src/components/monaco.tsx new file mode 100644 index 000000000..fe191e3cb --- /dev/null +++ b/frontend/src/components/monaco.tsx @@ -0,0 +1,166 @@ +import { useEffect, useState } from "react"; +import monaco from "monaco-editor"; +import { DiffEditor, Editor } from "@monaco-editor/react"; +import { useTheme } from "@ui/theme"; +import { cn } from "@lib/utils"; + +const MIN_EDITOR_HEIGHT = 56; +// const MAX_EDITOR_HEIGHT = 500; + +export type MonacoLanguage = + | "yaml" + | "toml" + | "json" + | "key_value" + | "shell" + | "dockerfile" + | "rust" + | undefined; + +export const MonacoEditor = ({ + value, + onValueChange, + language, + readOnly, +}: { + value: string | undefined; + onValueChange?: (value: string) => void; + language: MonacoLanguage; + readOnly?: boolean; +}) => { + const [editor, setEditor] = + useState(null); + + const line_count = value?.split(/\r\n|\r|\n/).length ?? 0; + + useEffect(() => { + if (!editor) return; + const contentHeight = line_count * 18 + 30; + const node = editor.getContainerDomNode(); + node.style.height = `${Math.max( + Math.ceil(contentHeight), + MIN_EDITOR_HEIGHT + )}px`; + // node.style.height = `${Math.max( + // Math.min(Math.ceil(contentHeight), MAX_EDITOR_HEIGHT), + // MIN_EDITOR_HEIGHT + // )}px`; + }, [editor, line_count]); + + const { theme: _theme } = useTheme(); + const theme = + _theme === "system" + ? window.matchMedia("(prefers-color-scheme: dark)").matches + ? "dark" + : "light" + : _theme; + + const options: monaco.editor.IStandaloneEditorConstructionOptions = { + minimap: { enabled: false }, + scrollbar: { alwaysConsumeMouseWheel: false }, + scrollBeyondLastLine: false, + folding: false, + automaticLayout: true, + renderValidationDecorations: "on", + renderLineHighlightOnlyWhenFocus: true, + readOnly, + tabSize: 2, + detectIndentation: true, + padding: { + top: 15, + }, + }; + + return ( +
+ onValueChange?.(v ?? "")} + onMount={(editor) => setEditor(editor)} + /> +
+ ); +}; + +const MIN_DIFF_HEIGHT = 100; +const MAX_DIFF_HEIGHT = 400; + +export const MonacoDiffEditor = ({ + original, + modified, + onModifiedValueChange, + language, + readOnly, + containerClassName, + hideUnchangedRegions = true, +}: { + original: string | undefined; + modified: string | undefined; + onModifiedValueChange?: (value: string) => void; + language: MonacoLanguage; + readOnly?: boolean; + containerClassName?: string; + hideUnchangedRegions?: boolean; +}) => { + const [editor, setEditor] = + useState(null); + + const original_line_count = original?.split(/\r\n|\r|\n/).length ?? 0; + const modified_line_count = modified?.split(/\r\n|\r|\n/).length ?? 0; + const line_count = Math.max(original_line_count, modified_line_count); + + useEffect(() => { + if (!editor) return; + const contentHeight = line_count * 18 + 30; + const node = editor.getContainerDomNode(); + node.style.height = `${Math.max( + Math.min(Math.ceil(contentHeight), MAX_DIFF_HEIGHT), + MIN_DIFF_HEIGHT + )}px`; + }, [editor, line_count]); + + const { theme: _theme } = useTheme(); + const theme = + _theme === "system" + ? window.matchMedia("(prefers-color-scheme: dark)").matches + ? "dark" + : "light" + : _theme; + + const options: monaco.editor.IStandaloneDiffEditorConstructionOptions = { + minimap: { enabled: true }, + scrollbar: { alwaysConsumeMouseWheel: false }, + scrollBeyondLastLine: false, + hideUnchangedRegions: { enabled: hideUnchangedRegions }, + folding: false, + automaticLayout: true, + renderValidationDecorations: "on", + renderLineHighlightOnlyWhenFocus: true, + readOnly, + padding: { + top: 15, + }, + }; + + return ( +
+ { + const modifiedEditor = editor.getModifiedEditor(); + modifiedEditor.onDidChangeModelContent((_) => { + onModifiedValueChange?.(modifiedEditor.getValue()); + }); + setEditor(editor); + }} + /> +
+ ); +}; diff --git a/frontend/src/components/omnibar.tsx b/frontend/src/components/omnibar.tsx index 707aa0ba1..2d72c3103 100644 --- a/frontend/src/components/omnibar.tsx +++ b/frontend/src/components/omnibar.tsx @@ -37,7 +37,7 @@ export const OmniSearch = ({ variant="outline" onClick={() => setOpen(true)} className={cn( - "flex items-center gap-4 w-fit md:w-[200px] lg:w-[300px] xl:w-[400px] justify-between", + "flex items-center gap-4 w-fit md:w-[200px] lg:w-[300px] xl:w-[400px] justify-between hover:bg-card/50", className )} > diff --git a/frontend/src/components/resources/alerter/config/alert_types.tsx b/frontend/src/components/resources/alerter/config/alert_types.tsx index 7b2830cc2..ee867407b 100644 --- a/frontend/src/components/resources/alerter/config/alert_types.tsx +++ b/frontend/src/components/resources/alerter/config/alert_types.tsx @@ -33,9 +33,29 @@ export const AlertTypeConfig = ({ (alert_type) => !alert_types.includes(alert_type) ); return ( - -
-
+ +
+ {at.length ? ( + + ) : undefined} +
{alert_types.map((type) => ( ))}
- {at.length ? ( - - ) : undefined}
); diff --git a/frontend/src/components/resources/alerter/config/endpoint.tsx b/frontend/src/components/resources/alerter/config/endpoint.tsx index 70bfd58fd..f349cc31a 100644 --- a/frontend/src/components/resources/alerter/config/endpoint.tsx +++ b/frontend/src/components/resources/alerter/config/endpoint.tsx @@ -1,9 +1,19 @@ import { ConfigItem } from "@components/config/util"; -import { TextUpdateMenu } from "@components/util"; +import { MonacoEditor } from "@components/monaco"; import { Types } from "@komodo/client"; -import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@ui/select"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@ui/select"; -const ENDPOINT_TYPES: Types.AlerterEndpoint["type"][] = ["Custom", "Slack"]; +const ENDPOINT_TYPES: Types.AlerterEndpoint["type"][] = [ + "Custom", + "Discord", + "Slack", +]; export const EndpointConfig = ({ endpoint, @@ -15,36 +25,37 @@ export const EndpointConfig = ({ disabled: boolean; }) => { return ( - -
- - - set({ ...endpoint, params: { ...endpoint.params, url } }) - } - placeholder="Enter endpoint url" - triggerClassName="w-[250px]" - /> -
+ + + + set({ ...endpoint, params: { ...endpoint.params, url } }) + } + readOnly={disabled} + /> ); }; @@ -54,5 +65,7 @@ const default_url = (type: Types.AlerterEndpoint["type"]) => { ? "http://localhost:7000" : type === "Slack" ? "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX" + : type === "Discord" + ? "https://discord.com/api/webhooks/XXXXXXXXXXXX/XXXX-XXXXXXXXXX" : ""; }; diff --git a/frontend/src/components/resources/alerter/config/index.tsx b/frontend/src/components/resources/alerter/config/index.tsx index 427cb0ccc..a9bd8520e 100644 --- a/frontend/src/components/resources/alerter/config/index.tsx +++ b/frontend/src/components/resources/alerter/config/index.tsx @@ -21,6 +21,8 @@ export const AlerterConfig = ({ id }: { id: string }) => { return ( { await mutateAsync({ id, config: update }); }} components={{ - general: [ + "": [ { - label: "General", + label: "Enabled", + labelHidden: true, + components: { + enabled: { + boldLabel: true, + description: "Whether to send alerts to the endpoint.", + }, + }, + }, + { + label: "Endpoint", + labelHidden: true, components: { - enabled: true, endpoint: (endpoint, set) => ( { disabled={disabled} /> ), + }, + }, + { + label: "Filter", + labelHidden: true, + components: { alert_types: (alert_types, set) => (
- {resources.length ? ( -
- Alerts {blacklist ? "blacklisted" : "whitelisted"} by{" "} - {resources.length} resources -
- ) : undefined} @@ -206,6 +200,12 @@ export const ResourcesConfig = ({ + {resources.length ? ( +
+ Alerts {blacklist ? "blacklisted" : "whitelisted"} by{" "} + {resources.length} resources +
+ ) : undefined}
); diff --git a/frontend/src/components/resources/alerter/index.tsx b/frontend/src/components/resources/alerter/index.tsx index 54471a404..397165d06 100644 --- a/frontend/src/components/resources/alerter/index.tsx +++ b/frontend/src/components/resources/alerter/index.tsx @@ -1,4 +1,4 @@ -import { useRead } from "@lib/hooks"; +import { useRead, useUser } from "@lib/hooks"; import { RequiredResourceComponents } from "@types"; import { AlarmClock } from "lucide-react"; import { Link } from "react-router-dom"; @@ -7,13 +7,14 @@ import { AlerterConfig } from "./config"; import { DeleteResource, NewResource } from "../common"; import { AlerterTable } from "./table"; import { Types } from "@komodo/client"; +import { ResourcePageHeader } from "@components/util"; const useAlerter = (id?: string) => useRead("ListAlerters", {}).data?.find((d) => d.id === id); export const AlerterComponents: RequiredResourceComponents = { list_item: (id) => useAlerter(id), - use_links: () => undefined, + resource_links: () => undefined, Description: () => <>Route alerts to various endpoints., @@ -36,7 +37,10 @@ export const AlerterComponents: RequiredResourceComponents = { ); }, - New: () => , + New: () => { + const is_admin = useUser().data?.admin; + return is_admin && ; + }, Table: ({ resources }) => ( @@ -45,6 +49,7 @@ export const AlerterComponents: RequiredResourceComponents = { Icon: () => , BigIcon: () => , + State: () => null, Status: {}, Info: { @@ -63,4 +68,17 @@ export const AlerterComponents: RequiredResourceComponents = { Config: AlerterConfig, DangerZone: ({ id }) => , + + ResourcePageHeader: ({ id }) => { + const alerter = useAlerter(id); + return ( + } + name={alerter?.name} + state={alerter?.info.enabled ? 
"Enabled" : "Disabled"} + status={alerter?.info.endpoint_type} + /> + ); + }, }; diff --git a/frontend/src/components/resources/build/config.tsx b/frontend/src/components/resources/build/config.tsx index b46296558..9ce1dbba5 100644 --- a/frontend/src/components/resources/build/config.tsx +++ b/frontend/src/components/resources/build/config.tsx @@ -7,21 +7,20 @@ import { ImageRegistryConfig, InputList, ProviderSelectorConfig, - SecretsForEnvironment, SystemCommand, } from "@components/config/util"; import { useInvalidate, useRead, useWrite } from "@lib/hooks"; -import { env_to_text } from "@lib/utils"; import { Types } from "@komodo/client"; import { Button } from "@ui/button"; -import { Textarea } from "@ui/textarea"; import { Ban, CirclePlus, PlusCircle } from "lucide-react"; -import { ReactNode, createRef, useState } from "react"; -import { BuilderSelector, CopyGithubWebhook, LabelsConfig } from "../common"; +import { ReactNode, useState } from "react"; +import { CopyGithubWebhook, ResourceLink, ResourceSelector } from "../common"; import { useToast } from "@ui/use-toast"; import { text_color_class_by_intention } from "@lib/color"; import { ConfirmButton } from "@components/util"; import { Link } from "react-router-dom"; +import { SecretsSearch } from "@components/config/env_vars"; +import { MonacoEditor } from "@components/monaco"; export const BuildConfig = ({ id, @@ -46,6 +45,8 @@ export const BuildConfig = ({ return ( ( - - ), + builder_id: (builder_id, set) => { + return ( + + Builder: + +
+ ) : ( + "Select Builder" + ) + } + description="Select the Builder to build with." + > + set({ builder_id })} + disabled={disabled} + align="start" + /> + + ); + }, }, }, { label: "Version", - labelHidden: true, components: { version: (_version, set) => { const version = @@ -78,6 +100,7 @@ export const BuildConfig = ({ set({ version: version as any })} @@ -86,11 +109,14 @@ export const BuildConfig = ({ /> ); }, - auto_increment_version: true, + auto_increment_version: { + description: + "Automatically increment the patch number on every build.", + }, }, }, { - label: "Git", + label: "Source", components: { git_provider: (provider, set) => { const https = update.git_https ?? config.git_https; @@ -127,23 +153,16 @@ export const BuildConfig = ({ description: "Select a custom branch, or default to 'main'.", }, commit: { - placeholder: "Enter a specific commit hash. Optional.", + placeholder: "Enter commit hash", description: "Switch to a specific hash after cloning the branch.", }, }, }, { - label: "Image", + label: "Build", + labelHidden: true, components: { - image_registry: (registry, set) => ( - set({ image_registry })} - resource_id={update.builder_id ?? config.builder_id} - disabled={disabled} - /> - ), build_path: { placeholder: ".", description: @@ -157,113 +176,21 @@ export const BuildConfig = ({ }, }, { - label: "Custom Name / Tag", + label: "Registry", + labelHidden: true, components: { - image_name: { - description: "Optional. Push the image under a different name", - placeholder: "Custom image name", - }, - image_tag: { - description: - "Optional. Postfix the image version with a custom tag.", - placeholder: "Custom image tag", - }, - }, - }, - { - label: "Extra Args", - description: ( -
-
Pass extra arguments to 'docker build'.
- - - -
- ), - contentHidden: - (update.extra_args ?? config.extra_args)?.length === 0, - actions: !disabled && ( - - set((update) => ({ - ...update, - extra_args: [ - ...(update.extra_args ?? config.extra_args ?? []), - suggestion, - ], - })) - } - disabled={disabled} - /> - ), - components: { - extra_args: (value, set) => ( - - ), - }, - }, - { - label: "Labels", - description: "Attach --labels to image.", - contentHidden: (update.labels ?? config.labels)?.length === 0, - actions: !disabled && ( - - ), - components: { - labels: (l, set) => ( - ( + set({ image_registry })} + resource_id={update.builder_id ?? config.builder_id} disabled={disabled} /> ), }, }, { - label: "Pre Build", - description: - "Execute a shell command before running docker build. The given Cwd is relative to repo root.", - components: { - pre_build: (value, set) => ( - set({ pre_build: value })} - disabled={disabled} - /> - ), - }, - }, - { - label: "Git Webhook", + label: "Webhook", description: "Configure your repo provider to send webhooks to Komodo", components: { @@ -386,47 +313,137 @@ export const BuildConfig = ({ }, }, ], - "Build Args": [ + advanced: [ + { + label: "Tagging", + components: { + image_name: { + description: "Push the image under a different name", + placeholder: "Custom image name", + }, + image_tag: { + description: "Postfix the image version with a custom tag.", + placeholder: "Custom image tag", + }, + }, + }, + { + label: "Pre Build", + description: + "Execute a shell command before running docker build. The 'path' is relative to the root of the repo.", + components: { + pre_build: (value, set) => ( + set({ pre_build: value })} + disabled={disabled} + /> + ), + }, + }, { label: "Build Args", description: "Pass build args to 'docker build'. These can be used in the Dockerfile via ARG, and are visible in the final image.", + labelExtra: !disabled && , components: { - build_args: (vars, set) => { - const args = - typeof vars === "object" ? 
env_to_text(vars) : vars; - return ( - - ); - }, - skip_secret_interp: true, + build_args: (env, set) => ( + set({ build_args })} + language="key_value" + readOnly={disabled} + /> + ), }, }, - ], - "Secret Args": [ { label: "Secret Args", - description: - "Pass secret args to 'docker build'. These can be used in the Dockerfile via ARG, and are visible in the final image.", + description: ( +
+
+ Pass secrets to 'docker build'. These values remain hidden in + the final image by using docker secret mounts. +
+ + See docker docs. + +
+ ), + labelExtra: !disabled && , components: { - secret_args: (vars, set) => { - const args = - typeof vars === "object" ? env_to_text(vars) : vars; - return ( - ( + set({ secret_args })} + language="key_value" + readOnly={disabled} + /> + ), + }, + }, + { + label: "Extra Args", + labelHidden: true, + components: { + extra_args: (value, set) => ( + +
Pass extra arguments to 'docker build'.
+ + See docker docs. + +
+ } + > + {!disabled && ( + + set({ + extra_args: [ + ...(update.extra_args ?? config.extra_args ?? []), + suggestion, + ], + }) + } + disabled={disabled} + /> + )} + - ); - }, - skip_secret_interp: true, +
+ ), + }, + }, + { + label: "Labels", + description: "Attach --labels to image.", + components: { + labels: (labels, set) => ( + set({ labels })} + readOnly={disabled} + /> + ), }, }, ], @@ -434,35 +451,3 @@ export const BuildConfig = ({ /> ); }; - -const Args = ({ - type, - args, - set, - disabled, -}: { - type: "build" | "secret"; - args: string; - set: (input: Partial) => void; - disabled: boolean; -}) => { - const ref = createRef(); - const setArgs = (args: string) => set({ [`${type}_args`]: args }); - - return ( - - {!disabled && ( - - )} -