forked from github-starred/komodo
Compare commits
49 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2463ed3879 | ||
|
|
a2758ce6f4 | ||
|
|
3f1788dbbb | ||
|
|
33a0560af6 | ||
|
|
610a10c488 | ||
|
|
39b217687d | ||
|
|
2f73461979 | ||
|
|
aae9bb9e51 | ||
|
|
7d011d93fa | ||
|
|
bffdea4357 | ||
|
|
790566bf79 | ||
|
|
b17db93f13 | ||
|
|
daa2ea9361 | ||
|
|
176fb04707 | ||
|
|
5ba1254cdb | ||
|
|
43593162b0 | ||
|
|
418f359492 | ||
|
|
3cded60166 | ||
|
|
6f70f9acb0 | ||
|
|
6e1064e58e | ||
|
|
d96e5b4c46 | ||
|
|
5a8822c7d2 | ||
|
|
1f2d236228 | ||
|
|
a89bd4a36d | ||
|
|
0b40dff72b | ||
|
|
59874f0a92 | ||
|
|
14e459b32e | ||
|
|
f6c55b7be1 | ||
|
|
460819a145 | ||
|
|
91f4df8ac2 | ||
|
|
6a19e18539 | ||
|
|
30c5fa3569 | ||
|
|
4b6aa1d73d | ||
|
|
5dfd007580 | ||
|
|
955670d979 | ||
|
|
f70e359f14 | ||
|
|
a2b0981f76 | ||
|
|
49a8e581bf | ||
|
|
2d0c1724db | ||
|
|
20ae1c22d7 | ||
|
|
e8d75b2a3d | ||
|
|
e23d68f86a | ||
|
|
2111976450 | ||
|
|
8a0109522b | ||
|
|
8d75fa3f2f | ||
|
|
197e938346 | ||
|
|
6ba0184551 | ||
|
|
c456b67018 | ||
|
|
02e152af4d |
2
.cargo/config.toml
Normal file
2
.cargo/config.toml
Normal file
@@ -0,0 +1,2 @@
|
||||
[build]
|
||||
rustflags = ["-Wunused-crate-dependencies"]
|
||||
285
Cargo.lock
generated
285
Cargo.lock
generated
@@ -41,10 +41,10 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "alerter"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum 0.7.5",
|
||||
"axum",
|
||||
"dotenvy",
|
||||
"envy",
|
||||
"logger",
|
||||
@@ -86,9 +86,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "anstyle"
|
||||
version = "1.0.7"
|
||||
version = "1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b"
|
||||
checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-parse"
|
||||
@@ -195,9 +195,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
|
||||
|
||||
[[package]]
|
||||
name = "aws-config"
|
||||
version = "1.5.4"
|
||||
version = "1.5.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "caf6cfe2881cb1fcbba9ae946fb9a6480d3b7a714ca84c74925014a89ef3387a"
|
||||
checksum = "4e95816a168520d72c0e7680c405a5a8c1fb6a035b4bc4b9d7b0de8e1a941697"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -215,7 +215,6 @@ dependencies = [
|
||||
"fastrand",
|
||||
"hex",
|
||||
"http 0.2.12",
|
||||
"hyper 0.14.28",
|
||||
"ring 0.17.8",
|
||||
"time",
|
||||
"tokio",
|
||||
@@ -238,9 +237,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-runtime"
|
||||
version = "1.3.1"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "87c5f920ffd1e0526ec9e70e50bf444db50b204395a0fa7016bbf9e31ea1698f"
|
||||
checksum = "f42c2d4218de4dcd890a109461e2f799a1a2ba3bcd2cde9af88360f5df9266c6"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-sigv4",
|
||||
@@ -253,6 +252,7 @@ dependencies = [
|
||||
"fastrand",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"tracing",
|
||||
@@ -261,9 +261,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-ec2"
|
||||
version = "1.62.0"
|
||||
version = "1.66.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a1de56a6030ebc05c84d23b6ed37971244d63ac8eb23e13781dd6d5c550c8d2f"
|
||||
checksum = "5bf8f784ab315034c3abbd4bd4aeb23c43093d5beb534f34fbb54df9064e4e96"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -285,9 +285,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-ecr"
|
||||
version = "1.37.0"
|
||||
version = "1.40.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b744392dee153a7e3181cb61b894c6450bcb6a392fc85949dfc520c93d3d0fc"
|
||||
checksum = "8e6c64dafa2d1f523b2f24ab410d1c89f41d912c901e7599f9350ea1188fd42c"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -307,9 +307,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-sso"
|
||||
version = "1.36.0"
|
||||
version = "1.38.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6acca681c53374bf1d9af0e317a41d12a44902ca0f2d1e10e5cb5bb98ed74f35"
|
||||
checksum = "fca5e0b9fb285638f1007e9d961d963b9e504ab968fe5a3807cce94070bd0ce3"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -329,9 +329,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-ssooidc"
|
||||
version = "1.37.0"
|
||||
version = "1.39.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b79c6bdfe612503a526059c05c9ccccbf6bd9530b003673cb863e547fd7c0c9a"
|
||||
checksum = "bc3e48ec239bb734db029ceef83599f4c9b3ce5d25c961b5bcd3f031c15bed54"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -351,9 +351,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-sts"
|
||||
version = "1.36.0"
|
||||
version = "1.38.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32e6ecdb2bd756f3b2383e6f0588dc10a4e65f5d551e70a56e0bfe0c884673ce"
|
||||
checksum = "ede095dfcc5c92b224813c24a82b65005a475c98d737e2726a898cf583e2e8bd"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -447,9 +447,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-smithy-runtime"
|
||||
version = "1.6.2"
|
||||
version = "1.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ce87155eba55e11768b8c1afa607f3e864ae82f03caf63258b37455b0ad02537"
|
||||
checksum = "0abbf454960d0db2ad12684a1640120e7557294b0ff8e2f11236290a1b293225"
|
||||
dependencies = [
|
||||
"aws-smithy-async",
|
||||
"aws-smithy-http",
|
||||
@@ -474,9 +474,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-smithy-runtime-api"
|
||||
version = "1.7.1"
|
||||
version = "1.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "30819352ed0a04ecf6a2f3477e344d2d1ba33d43e0f09ad9047c12e0d923616f"
|
||||
checksum = "e086682a53d3aa241192aa110fa8dfce98f2f5ac2ead0de84d41582c7e8fdb96"
|
||||
dependencies = [
|
||||
"aws-smithy-async",
|
||||
"aws-smithy-types",
|
||||
@@ -491,9 +491,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-smithy-types"
|
||||
version = "1.2.0"
|
||||
version = "1.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cfe321a6b21f5d8eabd0ade9c55d3d0335f3c3157fc2b3e87f05f34b539e4df5"
|
||||
checksum = "6cee7cadb433c781d3299b916fbf620fea813bf38f49db282fb6858141a05cc8"
|
||||
dependencies = [
|
||||
"base64-simd",
|
||||
"bytes",
|
||||
@@ -538,34 +538,6 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum"
|
||||
version = "0.6.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum-core 0.3.4",
|
||||
"bitflags 1.3.2",
|
||||
"bytes",
|
||||
"futures-util",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"hyper 0.14.28",
|
||||
"itoa",
|
||||
"matchit",
|
||||
"memchr",
|
||||
"mime",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"rustversion",
|
||||
"serde",
|
||||
"sync_wrapper 0.1.2",
|
||||
"tower",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum"
|
||||
version = "0.7.5"
|
||||
@@ -573,7 +545,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum-core 0.4.3",
|
||||
"axum-core",
|
||||
"axum-macros",
|
||||
"base64 0.21.7",
|
||||
"bytes",
|
||||
@@ -604,23 +576,6 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-core"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"futures-util",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"mime",
|
||||
"rustversion",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-core"
|
||||
version = "0.4.3"
|
||||
@@ -648,8 +603,8 @@ version = "0.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0be6ea09c9b96cb5076af0de2e383bd2bc0c18f827cf1967bdd353e0b910d733"
|
||||
dependencies = [
|
||||
"axum 0.7.5",
|
||||
"axum-core 0.4.3",
|
||||
"axum",
|
||||
"axum-core",
|
||||
"bytes",
|
||||
"futures-util",
|
||||
"headers",
|
||||
@@ -911,9 +866,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.5.13"
|
||||
version = "4.5.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fbb260a053428790f3de475e304ff84cdbc4face759ea7a3e64c1edd938a7fc"
|
||||
checksum = "ed6719fffa43d0d87e5fd8caeab59be1554fb028cd30edc88fc4369b17971019"
|
||||
dependencies = [
|
||||
"clap_builder",
|
||||
"clap_derive",
|
||||
@@ -921,9 +876,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap_builder"
|
||||
version = "4.5.13"
|
||||
version = "4.5.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "64b17d7ea74e9f833c7dbf2cbe4fb12ff26783eda4782a8975b72f895c9b4d99"
|
||||
checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6"
|
||||
dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
@@ -967,7 +922,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "command"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"monitor_client",
|
||||
"run_command",
|
||||
@@ -1351,7 +1306,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "formatting"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"serror",
|
||||
]
|
||||
@@ -1482,7 +1437,7 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
|
||||
|
||||
[[package]]
|
||||
name = "git"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"command",
|
||||
@@ -1796,14 +1751,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "hyper-timeout"
|
||||
version = "0.4.1"
|
||||
version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1"
|
||||
checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793"
|
||||
dependencies = [
|
||||
"hyper 0.14.28",
|
||||
"hyper 1.4.1",
|
||||
"hyper-util",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
"tokio-io-timeout",
|
||||
"tower-service",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2085,12 +2041,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "logger"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"monitor_client",
|
||||
"opentelemetry",
|
||||
"opentelemetry-otlp",
|
||||
"opentelemetry-semantic-conventions",
|
||||
"opentelemetry_sdk",
|
||||
"tracing",
|
||||
"tracing-opentelemetry",
|
||||
@@ -2154,7 +2111,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "migrator"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"chrono",
|
||||
@@ -2207,21 +2164,21 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mongo_indexed"
|
||||
version = "1.0.0"
|
||||
version = "2.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "202ab1775c25b55f035ae5a5e10c6505ddcb8319159bca055ebe3790542478e4"
|
||||
checksum = "556e2883109599e3cc28c7ad0d700c0b5c297e4d17ae810c669d18f79a02df26"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"mongo_indexed_derive",
|
||||
"mongodb",
|
||||
"serde",
|
||||
"thiserror",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mongo_indexed_derive"
|
||||
version = "1.0.0"
|
||||
version = "2.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "177b97b43773866e7ed1aa023acdbe438d973ce181d64d9769c303c002b3eaf5"
|
||||
checksum = "d4f3a4215a0cb95aea5fe33a77d38f0e0f7e7b9bf315bede04e0ea0c46fb704a"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -2289,7 +2246,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "monitor_cli"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"clap",
|
||||
@@ -2297,19 +2254,15 @@ dependencies = [
|
||||
"futures",
|
||||
"merge_config_files",
|
||||
"monitor_client",
|
||||
"partial_derive2",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"strum 0.26.3",
|
||||
"tokio",
|
||||
"toml",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "monitor_client"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async_timing_util",
|
||||
@@ -2341,14 +2294,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "monitor_core"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async_timing_util",
|
||||
"aws-config",
|
||||
"aws-sdk-ec2",
|
||||
"aws-sdk-ecr",
|
||||
"axum 0.7.5",
|
||||
"axum",
|
||||
"axum-extra",
|
||||
"base64 0.22.1",
|
||||
"bcrypt",
|
||||
@@ -2369,7 +2322,6 @@ dependencies = [
|
||||
"nom_pem",
|
||||
"octorust",
|
||||
"ordered_hash_map",
|
||||
"parse_csl",
|
||||
"partial_derive2",
|
||||
"periphery_client",
|
||||
"rand",
|
||||
@@ -2383,13 +2335,11 @@ dependencies = [
|
||||
"serror",
|
||||
"sha2",
|
||||
"slack_client_rs",
|
||||
"strum 0.26.3",
|
||||
"svi",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"toml",
|
||||
"toml_pretty",
|
||||
"tower",
|
||||
"tower-http",
|
||||
"tracing",
|
||||
"typeshare",
|
||||
@@ -2399,11 +2349,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "monitor_periphery"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async_timing_util",
|
||||
"axum 0.7.5",
|
||||
"axum",
|
||||
"axum-extra",
|
||||
"bollard",
|
||||
"clap",
|
||||
@@ -2416,13 +2366,11 @@ dependencies = [
|
||||
"logger",
|
||||
"merge_config_files",
|
||||
"monitor_client",
|
||||
"parse_csl",
|
||||
"periphery_client",
|
||||
"resolver_api",
|
||||
"run_command",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_yaml",
|
||||
"serror",
|
||||
"svi",
|
||||
"sysinfo",
|
||||
@@ -2433,9 +2381,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mungos"
|
||||
version = "1.0.0"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c89188a577f4e4464d83203336cc57ee5c835bf0e52ce3748f0abc090466ed8"
|
||||
checksum = "59e4fbd27eaabaeb49c1d6260dee7dad26b26371ca3faec73c83406a506c1dc0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"envy",
|
||||
@@ -2629,9 +2577,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry"
|
||||
version = "0.23.0"
|
||||
version = "0.24.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1b69a91d4893e713e06f724597ad630f1fa76057a5e1026c0ca67054a9032a76"
|
||||
checksum = "4c365a63eec4f55b7efeceb724f1336f26a9cf3427b70e59e2cd2a5b947fba96"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
@@ -2643,13 +2591,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-otlp"
|
||||
version = "0.16.0"
|
||||
version = "0.17.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a94c69209c05319cdf7460c6d4c055ed102be242a0a6245835d7bc42c6ec7f54"
|
||||
checksum = "6b925a602ffb916fb7421276b86756027b37ee708f9dce2dbdcc51739f07e727"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"futures-core",
|
||||
"http 0.2.12",
|
||||
"http 1.1.0",
|
||||
"opentelemetry",
|
||||
"opentelemetry-proto",
|
||||
"opentelemetry_sdk",
|
||||
@@ -2661,9 +2609,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry-proto"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "984806e6cf27f2b49282e2a05e288f30594f3dbc74eb7a6e99422bc48ed78162"
|
||||
checksum = "30ee9f20bff9c984511a02f082dc8ede839e4a9bf15cc2487c8d6fea5ad850d9"
|
||||
dependencies = [
|
||||
"opentelemetry",
|
||||
"opentelemetry_sdk",
|
||||
@@ -2672,36 +2620,32 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry_sdk"
|
||||
version = "0.23.0"
|
||||
name = "opentelemetry-semantic-conventions"
|
||||
version = "0.16.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ae312d58eaa90a82d2e627fd86e075cf5230b3f11794e2ed74199ebbe572d4fd"
|
||||
checksum = "1cefe0543875379e47eb5f1e68ff83f45cc41366a92dfd0d073d513bf68e9a05"
|
||||
|
||||
[[package]]
|
||||
name = "opentelemetry_sdk"
|
||||
version = "0.24.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "692eac490ec80f24a17828d49b40b60f5aeaccdfe6a503f939713afd22bc28df"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"futures-channel",
|
||||
"futures-executor",
|
||||
"futures-util",
|
||||
"glob",
|
||||
"lazy_static",
|
||||
"once_cell",
|
||||
"opentelemetry",
|
||||
"ordered-float",
|
||||
"percent-encoding",
|
||||
"rand",
|
||||
"serde_json",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ordered-float"
|
||||
version = "4.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6"
|
||||
dependencies = [
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ordered_hash_map"
|
||||
version = "0.4.0"
|
||||
@@ -2772,12 +2716,6 @@ dependencies = [
|
||||
"windows-targets 0.52.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "parse_csl"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ffa94c2e5674923c67d7f3dfce1279507b191e10eb064881b46ed3e1256e5ca6"
|
||||
|
||||
[[package]]
|
||||
name = "parse_link_header"
|
||||
version = "0.3.3"
|
||||
@@ -2835,7 +2773,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
|
||||
|
||||
[[package]]
|
||||
name = "periphery_client"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"monitor_client",
|
||||
@@ -2908,9 +2846,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "prost"
|
||||
version = "0.12.6"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29"
|
||||
checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"prost-derive",
|
||||
@@ -2918,9 +2856,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "prost-derive"
|
||||
version = "0.12.6"
|
||||
version = "0.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1"
|
||||
checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"itertools",
|
||||
@@ -3532,9 +3470,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.204"
|
||||
version = "1.0.208"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12"
|
||||
checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2"
|
||||
dependencies = [
|
||||
"serde_derive",
|
||||
]
|
||||
@@ -3550,9 +3488,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_derive"
|
||||
version = "1.0.204"
|
||||
version = "1.0.208"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222"
|
||||
checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -3572,9 +3510,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_json"
|
||||
version = "1.0.122"
|
||||
version = "1.0.125"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da"
|
||||
checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed"
|
||||
dependencies = [
|
||||
"indexmap 2.2.6",
|
||||
"itoa",
|
||||
@@ -3684,7 +3622,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d8f432d878d404110352cfbaa031d8a6878a166cb7f50e00ab87d0508f8f68a0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum 0.7.5",
|
||||
"axum",
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
@@ -3985,24 +3923,6 @@ dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tests"
|
||||
version = "1.13.1"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"dotenvy",
|
||||
"envy",
|
||||
"logger",
|
||||
"monitor_client",
|
||||
"mungos",
|
||||
"partial_derive2",
|
||||
"rand",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "1.0.63"
|
||||
@@ -4097,16 +4017,6 @@ dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-io-timeout"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf"
|
||||
dependencies = [
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-macros"
|
||||
version = "2.4.0"
|
||||
@@ -4246,23 +4156,26 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.11.0"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13"
|
||||
checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"axum 0.6.20",
|
||||
"base64 0.21.7",
|
||||
"axum",
|
||||
"base64 0.22.1",
|
||||
"bytes",
|
||||
"h2 0.3.26",
|
||||
"http 0.2.12",
|
||||
"http-body 0.4.6",
|
||||
"hyper 0.14.28",
|
||||
"h2 0.4.5",
|
||||
"http 1.1.0",
|
||||
"http-body 1.0.0",
|
||||
"http-body-util",
|
||||
"hyper 1.4.1",
|
||||
"hyper-timeout",
|
||||
"hyper-util",
|
||||
"percent-encoding",
|
||||
"pin-project",
|
||||
"prost",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tower",
|
||||
@@ -4374,9 +4287,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tracing-opentelemetry"
|
||||
version = "0.24.0"
|
||||
version = "0.25.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f68803492bf28ab40aeccaecc7021096bd256baf7ca77c3d425d89b35a7be4e4"
|
||||
checksum = "a9784ed4da7d921bc8df6963f8c80a0e4ce34ba6ba76668acadd3edbd985ff3b"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"once_cell",
|
||||
@@ -4594,7 +4507,7 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
|
||||
|
||||
[[package]]
|
||||
name = "update_logger"
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"logger",
|
||||
|
||||
31
Cargo.toml
31
Cargo.toml
@@ -3,7 +3,7 @@ resolver = "2"
|
||||
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.13.1"
|
||||
version = "1.13.4"
|
||||
edition = "2021"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
@@ -15,7 +15,7 @@ monitor_client = { path = "client/core/rs" }
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
monitor_client = "1.13.1"
|
||||
monitor_client = "1.13.3"
|
||||
periphery_client = { path = "client/periphery/rs" }
|
||||
formatting = { path = "lib/formatting" }
|
||||
command = { path = "lib/command" }
|
||||
@@ -32,11 +32,10 @@ merge_config_files = "0.1.5"
|
||||
async_timing_util = "1.0.0"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "1.0.0"
|
||||
mongo_indexed = "2.0.1"
|
||||
resolver_api = "1.1.1"
|
||||
toml_pretty = "1.1.2"
|
||||
parse_csl = "0.1.0"
|
||||
mungos = "1.0.0"
|
||||
mungos = "1.0.1"
|
||||
svi = "1.0.1"
|
||||
|
||||
# ASYNC
|
||||
@@ -49,15 +48,14 @@ futures-util = "0.3.30"
|
||||
# SERVER
|
||||
axum = { version = "0.7.5", features = ["ws", "json"] }
|
||||
axum-extra = { version = "0.9.3", features = ["typed-header"] }
|
||||
tower = { version = "0.4.13", features = ["timeout"] }
|
||||
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
|
||||
tokio-tungstenite = "0.23.1"
|
||||
|
||||
# SER/DE
|
||||
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
|
||||
serde = { version = "1.0.204", features = ["derive"] }
|
||||
serde = { version = "1.0.208", features = ["derive"] }
|
||||
strum = { version = "0.26.3", features = ["derive"] }
|
||||
serde_json = "1.0.122"
|
||||
serde_json = "1.0.125"
|
||||
serde_yaml = "0.9.34"
|
||||
toml = "0.8.19"
|
||||
|
||||
@@ -66,15 +64,16 @@ anyhow = "1.0.86"
|
||||
thiserror = "1.0.63"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] }
|
||||
opentelemetry_sdk = { version = "0.24.1", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.18", features = ["json"] }
|
||||
tracing-opentelemetry = "0.24.0"
|
||||
opentelemetry-otlp = "0.16.0"
|
||||
opentelemetry = "0.23.0"
|
||||
opentelemetry-semantic-conventions = "0.16.0"
|
||||
tracing-opentelemetry = "0.25.0"
|
||||
opentelemetry-otlp = "0.17.0"
|
||||
opentelemetry = "0.24.0"
|
||||
tracing = "0.1.40"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.13", features = ["derive"] }
|
||||
clap = { version = "4.5.16", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
@@ -95,9 +94,9 @@ bollard = "0.17.0"
|
||||
sysinfo = "0.31.2"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.5.4"
|
||||
aws-sdk-ec2 = "1.62.0"
|
||||
aws-sdk-ecr = "1.37.0"
|
||||
aws-config = "1.5.5"
|
||||
aws-sdk-ec2 = "1.66.0"
|
||||
aws-sdk-ecr = "1.40.0"
|
||||
|
||||
# MISC
|
||||
derive_builder = "0.20.0"
|
||||
|
||||
@@ -17,18 +17,13 @@ path = "src/main.rs"
|
||||
[dependencies]
|
||||
# local
|
||||
monitor_client.workspace = true
|
||||
# mogh
|
||||
partial_derive2.workspace = true
|
||||
# external
|
||||
tracing-subscriber.workspace = true
|
||||
merge_config_files.workspace = true
|
||||
serde_json.workspace = true
|
||||
futures.workspace = true
|
||||
tracing.workspace = true
|
||||
colored.workspace = true
|
||||
anyhow.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
toml.workspace = true
|
||||
clap.workspace = true
|
||||
|
||||
@@ -30,7 +30,6 @@ mongo_indexed.workspace = true
|
||||
resolver_api.workspace = true
|
||||
toml_pretty.workspace = true
|
||||
run_command.workspace = true
|
||||
parse_csl.workspace = true
|
||||
mungos.workspace = true
|
||||
slack.workspace = true
|
||||
svi.workspace = true
|
||||
@@ -56,9 +55,7 @@ dotenvy.workspace = true
|
||||
bcrypt.workspace = true
|
||||
base64.workspace = true
|
||||
tokio.workspace = true
|
||||
tower.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
regex.workspace = true
|
||||
axum.workspace = true
|
||||
toml.workspace = true
|
||||
|
||||
@@ -33,7 +33,7 @@ EXPOSE 9000
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
|
||||
LABEL org.opencontainers.image.description="A tool to build and deploy software across many servers"
|
||||
LABEL org.opencontainers.image.description="Monitor Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD ["./core"]
|
||||
@@ -79,7 +79,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.building = true)?;
|
||||
|
||||
build.config.version.increment();
|
||||
if build.config.auto_increment_version {
|
||||
build.config.version.increment();
|
||||
}
|
||||
update.version = build.config.version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
|
||||
@@ -77,7 +77,11 @@ impl Resolve<UpdateBuild, User> for State {
|
||||
}
|
||||
|
||||
impl Resolve<RefreshBuildCache, User> for State {
|
||||
#[instrument(name = "RefreshBuildCache", skip(self, user))]
|
||||
#[instrument(
|
||||
name = "RefreshBuildCache",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshBuildCache { build }: RefreshBuildCache,
|
||||
@@ -216,6 +220,12 @@ impl Resolve<CreateBuildWebhook, User> for State {
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if build.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&build.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = webhook_base_url.as_ref().unwrap_or(host);
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ use std::time::Instant;
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use monitor_client::{api::write::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -31,7 +32,10 @@ mod user_group;
|
||||
mod variable;
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolver, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug)]
|
||||
#[resolver_target(State)]
|
||||
#[resolver_args(User)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
@@ -178,7 +182,11 @@ async fn handler(
|
||||
Ok((TypedHeader(ContentType::json()), res??))
|
||||
}
|
||||
|
||||
#[instrument(name = "WriteRequest", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
name = "WriteRequest",
|
||||
skip(user, request),
|
||||
fields(user_id = user.id, request = format!("{:?}", request.extract_variant()))
|
||||
)]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
request: WriteRequest,
|
||||
|
||||
@@ -75,7 +75,11 @@ impl Resolve<UpdateRepo, User> for State {
|
||||
}
|
||||
|
||||
impl Resolve<RefreshRepoCache, User> for State {
|
||||
#[instrument(name = "RefreshRepoCache", skip(self, user))]
|
||||
#[instrument(
|
||||
name = "RefreshRepoCache",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshRepoCache { repo }: RefreshRepoCache,
|
||||
@@ -90,6 +94,11 @@ impl Resolve<RefreshRepoCache, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
// Nothing to do
|
||||
return Ok(NoData {});
|
||||
}
|
||||
|
||||
let config = core_config();
|
||||
|
||||
let repo_dir = config.repo_directory.join(random_string(10));
|
||||
@@ -217,6 +226,12 @@ impl Resolve<CreateRepoWebhook, User> for State {
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if repo.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&repo.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = webhook_base_url.as_ref().unwrap_or(host);
|
||||
let url = match action {
|
||||
RepoWebhookAction::Clone => {
|
||||
|
||||
@@ -6,7 +6,8 @@ use monitor_client::{
|
||||
config::core::CoreConfig,
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
stack::{PartialStackConfig, Stack, StackInfo},
|
||||
server::ServerState,
|
||||
stack::{ComposeContents, PartialStackConfig, Stack, StackInfo},
|
||||
update::Update,
|
||||
user::User,
|
||||
NoData, Operation,
|
||||
@@ -19,18 +20,22 @@ use mungos::{
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::get_server_with_status,
|
||||
stack::{
|
||||
remote::get_remote_compose_contents,
|
||||
services::extract_services_into_res,
|
||||
},
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
monitor::update_cache_for_stack,
|
||||
resource,
|
||||
state::{db_client, github_client, State},
|
||||
};
|
||||
@@ -42,23 +47,7 @@ impl Resolve<CreateStack, User> for State {
|
||||
CreateStack { name, config }: CreateStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Stack> {
|
||||
let res = resource::create::<Stack>(&name, config, &user).await;
|
||||
if let Ok(stack) = &res {
|
||||
if let Err(e) = self
|
||||
.resolve(RefreshStackCache { stack: name }, user.clone())
|
||||
.await
|
||||
{
|
||||
let mut update =
|
||||
make_update(stack, Operation::RefreshStackCache, &user);
|
||||
update.push_error_log(
|
||||
"refresh stack cache",
|
||||
format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
|
||||
);
|
||||
add_update(update).await.ok();
|
||||
};
|
||||
update_cache_for_stack(stack).await;
|
||||
}
|
||||
res
|
||||
resource::create::<Stack>(&name, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -76,24 +65,7 @@ impl Resolve<CopyStack, User> for State {
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
let res =
|
||||
resource::create::<Stack>(&name, config.into(), &user).await;
|
||||
if let Ok(stack) = &res {
|
||||
if let Err(e) = self
|
||||
.resolve(RefreshStackCache { stack: name }, user.clone())
|
||||
.await
|
||||
{
|
||||
let mut update =
|
||||
make_update(stack, Operation::RefreshStackCache, &user);
|
||||
update.push_error_log(
|
||||
"refresh stack cache",
|
||||
format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
|
||||
);
|
||||
add_update(update).await.ok();
|
||||
};
|
||||
update_cache_for_stack(stack).await;
|
||||
}
|
||||
res
|
||||
resource::create::<Stack>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,23 +87,7 @@ impl Resolve<UpdateStack, User> for State {
|
||||
UpdateStack { id, config }: UpdateStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Stack> {
|
||||
let res = resource::update::<Stack>(&id, config, &user).await;
|
||||
if let Ok(stack) = &res {
|
||||
if let Err(e) = self
|
||||
.resolve(RefreshStackCache { stack: id }, user.clone())
|
||||
.await
|
||||
{
|
||||
let mut update =
|
||||
make_update(stack, Operation::RefreshStackCache, &user);
|
||||
update.push_error_log(
|
||||
"refresh stack cache",
|
||||
format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
|
||||
);
|
||||
add_update(update).await.ok();
|
||||
};
|
||||
update_cache_for_stack(stack).await;
|
||||
}
|
||||
res
|
||||
resource::update::<Stack>(&id, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -197,7 +153,10 @@ impl Resolve<RefreshStackCache, User> for State {
|
||||
|
||||
let file_contents_empty = stack.config.file_contents.is_empty();
|
||||
|
||||
if file_contents_empty && stack.config.repo.is_empty() {
|
||||
if !stack.config.files_on_host
|
||||
&& file_contents_empty
|
||||
&& stack.config.repo.is_empty()
|
||||
{
|
||||
// Nothing to do without one of these
|
||||
return Ok(NoData {});
|
||||
}
|
||||
@@ -210,8 +169,63 @@ impl Resolve<RefreshStackCache, User> for State {
|
||||
remote_errors,
|
||||
latest_hash,
|
||||
latest_message,
|
||||
) = if file_contents_empty {
|
||||
) = if stack.config.files_on_host {
|
||||
// =============
|
||||
// FILES ON HOST
|
||||
// =============
|
||||
if stack.config.server_id.is_empty() {
|
||||
(vec![], None, None, None, None)
|
||||
} else {
|
||||
let (server, status) =
|
||||
get_server_with_status(&stack.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
(vec![], None, None, None, None)
|
||||
} else {
|
||||
let GetComposeContentsOnHostResponse { contents, errors } =
|
||||
match periphery_client(&server)?
|
||||
.request(GetComposeContentsOnHost {
|
||||
file_paths: stack.file_paths().to_vec(),
|
||||
name: stack.name.clone(),
|
||||
run_directory: stack.config.run_directory.clone(),
|
||||
})
|
||||
.await
|
||||
.context(
|
||||
"failed to get compose file contents from host",
|
||||
) {
|
||||
Ok(res) => res,
|
||||
Err(e) => GetComposeContentsOnHostResponse {
|
||||
contents: Default::default(),
|
||||
errors: vec![ComposeContents {
|
||||
path: stack.config.run_directory.clone(),
|
||||
contents: format_serror(&e.into()),
|
||||
}],
|
||||
},
|
||||
};
|
||||
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &contents {
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
&mut services,
|
||||
) {
|
||||
warn!(
|
||||
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
|
||||
stack.name
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
(services, Some(contents), Some(errors), None, None)
|
||||
}
|
||||
}
|
||||
} else if file_contents_empty {
|
||||
// ================
|
||||
// REPO BASED STACK
|
||||
// ================
|
||||
let (
|
||||
remote_contents,
|
||||
remote_errors,
|
||||
@@ -247,6 +261,9 @@ impl Resolve<RefreshStackCache, User> for State {
|
||||
latest_message,
|
||||
)
|
||||
} else {
|
||||
// =============
|
||||
// UI BASED FILE
|
||||
// =============
|
||||
let mut services = Vec::new();
|
||||
if let Err(e) = extract_services_into_res(
|
||||
// this should latest (not deployed), so make the project name fresh.
|
||||
@@ -348,6 +365,12 @@ impl Resolve<CreateStackWebhook, User> for State {
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if stack.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&stack.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = webhook_base_url.as_ref().unwrap_or(host);
|
||||
let url = match action {
|
||||
StackWebhookAction::Refresh => {
|
||||
|
||||
@@ -105,7 +105,11 @@ impl Resolve<UpdateResourceSync, User> for State {
|
||||
}
|
||||
|
||||
impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
#[instrument(name = "RefreshResourceSyncPending", level = "debug", skip(self, user))]
|
||||
#[instrument(
|
||||
name = "RefreshResourceSyncPending",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshResourceSyncPending { sync }: RefreshResourceSyncPending,
|
||||
@@ -420,6 +424,12 @@ impl Resolve<CreateSyncWebhook, User> for State {
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if sync.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&sync.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = webhook_base_url.as_ref().unwrap_or(host);
|
||||
let url = match action {
|
||||
SyncWebhookAction::Refresh => {
|
||||
|
||||
@@ -32,8 +32,7 @@ pub struct CreateServerBody {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub placement_group: Option<i64>,
|
||||
/// Public Network options
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub public_net: Option<PublicNet>,
|
||||
pub public_net: PublicNet,
|
||||
/// ID or name of the Server type this Server should be created with
|
||||
pub server_type: HetznerServerType,
|
||||
/// SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time
|
||||
|
||||
@@ -129,14 +129,12 @@ pub async fn launch_hetzner_server(
|
||||
labels,
|
||||
networks: private_network_ids,
|
||||
placement_group: (placement_group > 0).then_some(placement_group),
|
||||
public_net: (enable_public_ipv4 || enable_public_ipv6).then_some(
|
||||
create_server::PublicNet {
|
||||
enable_ipv4: enable_public_ipv4,
|
||||
enable_ipv6: enable_public_ipv6,
|
||||
ipv4: None,
|
||||
ipv6: None,
|
||||
},
|
||||
),
|
||||
public_net: create_server::PublicNet {
|
||||
enable_ipv4: enable_public_ipv4,
|
||||
enable_ipv6: enable_public_ipv6,
|
||||
ipv4: None,
|
||||
ipv6: None,
|
||||
},
|
||||
server_type: hetzner_server_type(server_type),
|
||||
ssh_keys,
|
||||
start_after_create: true,
|
||||
|
||||
@@ -77,6 +77,7 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
host: env.monitor_host.unwrap_or(config.host),
|
||||
port: env.monitor_port.unwrap_or(config.port),
|
||||
passkey: env.monitor_passkey.unwrap_or(config.passkey),
|
||||
ensure_server: env.monitor_ensure_server.unwrap_or(config.ensure_server),
|
||||
jwt_secret: env.monitor_jwt_secret.unwrap_or(config.jwt_secret),
|
||||
jwt_ttl: env
|
||||
.monitor_jwt_ttl
|
||||
|
||||
@@ -1,20 +1,34 @@
|
||||
use std::{collections::HashSet, time::Duration};
|
||||
use std::{collections::HashSet, str::FromStr, time::Duration};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use futures::future::join_all;
|
||||
use mongo_indexed::Document;
|
||||
use monitor_client::entities::{
|
||||
permission::{Permission, PermissionLevel, UserTarget},
|
||||
server::Server,
|
||||
update::{Log, ResourceTarget, Update},
|
||||
user::User,
|
||||
EnvironmentVar,
|
||||
use monitor_client::{
|
||||
api::write::CreateServer,
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
permission::{Permission, PermissionLevel, UserTarget},
|
||||
server::{PartialServerConfig, Server},
|
||||
sync::ResourceSync,
|
||||
update::{Log, ResourceTarget, Update},
|
||||
user::{system_user, User},
|
||||
EnvironmentVar,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId, to_document, Bson},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, to_document, Bson};
|
||||
use periphery_client::PeripheryClient;
|
||||
use query::get_global_variables;
|
||||
use rand::{distributions::Alphanumeric, thread_rng, Rng};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
use crate::{
|
||||
config::core_config,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
pub mod action_state;
|
||||
pub mod alert;
|
||||
@@ -280,8 +294,15 @@ pub async fn interpolate_variables_secrets_into_environment(
|
||||
Ok(secret_replacers)
|
||||
}
|
||||
|
||||
pub async fn startup_cleanup() {
|
||||
tokio::join!(
|
||||
startup_in_progress_update_cleanup(),
|
||||
startup_open_alert_cleanup(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Run on startup, as no updates should be in progress on startup
|
||||
pub async fn startup_in_progress_update_cleanup() {
|
||||
async fn startup_in_progress_update_cleanup() {
|
||||
let log = Log::error(
|
||||
"monitor shutdown",
|
||||
String::from("Monitor shutdown during execution. If this is a build, the builder may not have been terminated.")
|
||||
@@ -308,3 +329,93 @@ pub async fn startup_in_progress_update_cleanup() {
|
||||
error!("failed to cleanup in progress updates on startup | {e:#}")
|
||||
}
|
||||
}
|
||||
|
||||
/// Run on startup, ensure open alerts pointing to invalid resources are closed.
|
||||
async fn startup_open_alert_cleanup() {
|
||||
let db = db_client().await;
|
||||
let Ok(alerts) =
|
||||
find_collect(&db.alerts, doc! { "resolved": false }, None)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"failed to list all alerts for startup open alert cleanup | {e:?}"
|
||||
)
|
||||
})
|
||||
else {
|
||||
return;
|
||||
};
|
||||
let futures = alerts.into_iter().map(|alert| async move {
|
||||
match alert.target {
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::get::<Server>(&id)
|
||||
.await
|
||||
.is_err()
|
||||
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get::<ResourceSync>(&id)
|
||||
.await
|
||||
.is_err()
|
||||
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
|
||||
}
|
||||
// No other resources should have open alerts.
|
||||
_ => ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok(),
|
||||
}
|
||||
});
|
||||
let to_update_ids = join_all(futures)
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
if let Err(e) = db
|
||||
.alerts
|
||||
.update_many(
|
||||
doc! { "_id": { "$in": to_update_ids } },
|
||||
doc! { "$set": {
|
||||
"resolved": true,
|
||||
"resolved_ts": monitor_timestamp()
|
||||
} },
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
"failed to clean up invalid open alerts on startup | {e:#}"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensures a default server exists with the defined address
|
||||
pub async fn ensure_server() {
|
||||
let ensure_server = &core_config().ensure_server;
|
||||
if ensure_server.is_empty() {
|
||||
return;
|
||||
}
|
||||
let db = db_client().await;
|
||||
let Ok(server) = db
|
||||
.servers
|
||||
.find_one(doc! { "config.address": ensure_server })
|
||||
.await
|
||||
.inspect_err(|e| error!("Failed to initialize 'ensure_server'. Failed to query db. {e:?}"))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
if server.is_some() {
|
||||
return;
|
||||
}
|
||||
if let Err(e) = State
|
||||
.resolve(
|
||||
CreateServer {
|
||||
name: String::from("default"),
|
||||
config: PartialServerConfig {
|
||||
address: Some(ensure_server.to_string()),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
system_user().to_owned(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!("Failed to initialize 'ensure_server'. Failed to CreateServer. {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,13 +5,13 @@ use monitor_client::entities::{
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::{Deployment, DeploymentState},
|
||||
deployment::{ContainerSummary, Deployment, DeploymentState},
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::{Server, ServerState},
|
||||
server_template::ServerTemplate,
|
||||
stack::{ComposeProject, Stack, StackState},
|
||||
stack::{Stack, StackServiceNames, StackState},
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
update::{ResourceTarget, ResourceTargetVariant, Update},
|
||||
@@ -33,6 +33,11 @@ use crate::{
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::stack::{
|
||||
compose_container_match_regex,
|
||||
services::extract_services_from_stack,
|
||||
};
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
// user: Id or username
|
||||
pub async fn get_user(user: &str) -> anyhow::Result<User> {
|
||||
@@ -93,51 +98,76 @@ pub async fn get_deployment_state(
|
||||
}
|
||||
|
||||
/// Can pass all the containers from the same server
|
||||
pub fn get_stack_state_from_projects(
|
||||
stack: &Stack,
|
||||
projects: &[ComposeProject],
|
||||
pub fn get_stack_state_from_containers(
|
||||
ignore_services: &[String],
|
||||
services: &[StackServiceNames],
|
||||
containers: &[ContainerSummary],
|
||||
) -> StackState {
|
||||
let project_name = stack.project_name(false);
|
||||
let Some(status) = projects
|
||||
// first filter the containers to only ones which match the service
|
||||
let services = services
|
||||
.iter()
|
||||
.find(|project| project.name == project_name)
|
||||
.and_then(|project| project.status.as_deref())
|
||||
else {
|
||||
return StackState::Down;
|
||||
};
|
||||
let Ok(states) = status
|
||||
.split(", ")
|
||||
.filter_map(|state| state.split('(').next())
|
||||
.map(|state| {
|
||||
state.parse::<DeploymentState>().with_context(|| {
|
||||
format!("failed to parse stack state entry: {state}")
|
||||
})
|
||||
.filter(|service| {
|
||||
!ignore_services.contains(&service.service_name)
|
||||
})
|
||||
.collect::<anyhow::Result<Vec<_>>>()
|
||||
.inspect_err(|e| warn!("{e:#}"))
|
||||
else {
|
||||
return StackState::Unknown;
|
||||
};
|
||||
if states.is_empty() {
|
||||
.collect::<Vec<_>>();
|
||||
let containers = containers.iter().filter(|container| {
|
||||
services.iter().any(|StackServiceNames { service_name, container_name }| {
|
||||
match compose_container_match_regex(container_name)
|
||||
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
|
||||
{
|
||||
Ok(regex) => regex,
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
return false
|
||||
}
|
||||
}.is_match(&container.name)
|
||||
})
|
||||
}).collect::<Vec<_>>();
|
||||
if containers.is_empty() {
|
||||
return StackState::Down;
|
||||
}
|
||||
if states.len() > 1 {
|
||||
if services.len() != containers.len() {
|
||||
return StackState::Unhealthy;
|
||||
}
|
||||
match states[0] {
|
||||
DeploymentState::Unknown => StackState::Unknown,
|
||||
DeploymentState::NotDeployed => StackState::Down,
|
||||
DeploymentState::Created => StackState::Created,
|
||||
DeploymentState::Restarting => StackState::Restarting,
|
||||
DeploymentState::Running => StackState::Running,
|
||||
DeploymentState::Removing => StackState::Removing,
|
||||
DeploymentState::Paused => StackState::Paused,
|
||||
DeploymentState::Exited => StackState::Stopped,
|
||||
DeploymentState::Dead => StackState::Dead,
|
||||
let running = containers
|
||||
.iter()
|
||||
.all(|container| container.state == DeploymentState::Running);
|
||||
if running {
|
||||
return StackState::Running;
|
||||
}
|
||||
let paused = containers
|
||||
.iter()
|
||||
.all(|container| container.state == DeploymentState::Paused);
|
||||
if paused {
|
||||
return StackState::Paused;
|
||||
}
|
||||
let stopped = containers
|
||||
.iter()
|
||||
.all(|container| container.state == DeploymentState::Exited);
|
||||
if stopped {
|
||||
return StackState::Stopped;
|
||||
}
|
||||
let restarting = containers
|
||||
.iter()
|
||||
.all(|container| container.state == DeploymentState::Restarting);
|
||||
if restarting {
|
||||
return StackState::Restarting;
|
||||
}
|
||||
let dead = containers
|
||||
.iter()
|
||||
.all(|container| container.state == DeploymentState::Dead);
|
||||
if dead {
|
||||
return StackState::Dead;
|
||||
}
|
||||
let removing = containers
|
||||
.iter()
|
||||
.all(|container| container.state == DeploymentState::Removing);
|
||||
if removing {
|
||||
return StackState::Removing;
|
||||
}
|
||||
StackState::Unhealthy
|
||||
}
|
||||
|
||||
/// Gets stack state fresh from periphery
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn get_stack_state(
|
||||
stack: &Stack,
|
||||
@@ -150,11 +180,17 @@ pub async fn get_stack_state(
|
||||
if status != ServerState::Ok {
|
||||
return Ok(StackState::Unknown);
|
||||
}
|
||||
let projects = super::periphery_client(&server)?
|
||||
.request(periphery_client::api::compose::ListComposeProjects {})
|
||||
let containers = super::periphery_client(&server)?
|
||||
.request(periphery_client::api::container::GetContainerList {})
|
||||
.await?;
|
||||
|
||||
Ok(get_stack_state_from_projects(stack, &projects))
|
||||
let services = extract_services_from_stack(stack, false).await?;
|
||||
|
||||
Ok(get_stack_state_from_containers(
|
||||
&stack.config.ignore_services,
|
||||
&services,
|
||||
&containers,
|
||||
))
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
|
||||
@@ -91,7 +91,11 @@ pub async fn get_stack_and_server(
|
||||
Ok((stack, server))
|
||||
}
|
||||
|
||||
pub fn compose_container_match_regex(container_name: &str) -> anyhow::Result<Regex> {
|
||||
pub fn compose_container_match_regex(
|
||||
container_name: &str,
|
||||
) -> anyhow::Result<Regex> {
|
||||
let regex = format!("^{container_name}-?[0-9]*$");
|
||||
Regex::new(®ex).with_context(|| format!("failed to construct valid regex from {regex}"))
|
||||
}
|
||||
Regex::new(®ex).with_context(|| {
|
||||
format!("failed to construct valid regex from {regex}")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
use std::{fs, path::{Path, PathBuf}};
|
||||
use std::{
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
|
||||
@@ -11,11 +11,11 @@ use crate::{
|
||||
};
|
||||
|
||||
// pub mod deployment;
|
||||
pub mod deploy;
|
||||
pub mod remote;
|
||||
pub mod resource;
|
||||
pub mod user_groups;
|
||||
pub mod variables;
|
||||
pub mod deploy;
|
||||
|
||||
mod file;
|
||||
mod resources;
|
||||
@@ -47,6 +47,9 @@ async fn refresh_syncs() {
|
||||
return;
|
||||
};
|
||||
for sync in syncs {
|
||||
if sync.config.repo.is_empty() {
|
||||
continue;
|
||||
}
|
||||
State
|
||||
.resolve(
|
||||
RefreshResourceSyncPending { sync: sync.id },
|
||||
|
||||
@@ -31,15 +31,20 @@ pub async fn handle_build_webhook(
|
||||
let lock = build_locks().get_or_insert_default(&build_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &build.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !build.config.webhook_enabled {
|
||||
return Err(anyhow!("build does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != build.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req = ExecuteRequest::RunBuild(RunBuild { build: build_id });
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
|
||||
@@ -216,6 +216,7 @@ pub fn router() -> Router {
|
||||
async fn verify_gh_signature(
|
||||
headers: HeaderMap,
|
||||
body: &str,
|
||||
custom_secret: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
// wait random amount of time
|
||||
tokio::time::sleep(random_duration(0, 500)).await;
|
||||
@@ -229,10 +230,13 @@ async fn verify_gh_signature(
|
||||
return Err(anyhow!("failed to unwrap signature"));
|
||||
}
|
||||
let signature = signature.unwrap().replace("sha256=", "");
|
||||
let mut mac = HmacSha256::new_from_slice(
|
||||
core_config().webhook_secret.as_bytes(),
|
||||
)
|
||||
.expect("github webhook | failed to create hmac sha256");
|
||||
let secret_bytes = if custom_secret.is_empty() {
|
||||
core_config().webhook_secret.as_bytes()
|
||||
} else {
|
||||
custom_secret.as_bytes()
|
||||
};
|
||||
let mut mac = HmacSha256::new_from_slice(secret_bytes)
|
||||
.expect("github webhook | failed to create hmac sha256");
|
||||
mac.update(body.as_bytes());
|
||||
let expected = mac.finalize().into_bytes().encode_hex::<String>();
|
||||
if signature == expected {
|
||||
|
||||
@@ -33,15 +33,24 @@ pub async fn handle_procedure_webhook(
|
||||
procedure_locks().get_or_insert_default(&procedure_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let procedure = resource::get::<Procedure>(&procedure_id).await?;
|
||||
|
||||
verify_gh_signature(
|
||||
headers,
|
||||
&body,
|
||||
&procedure.config.webhook_secret,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !procedure.config.webhook_enabled {
|
||||
return Err(anyhow!("procedure does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != target_branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
let procedure = resource::get::<Procedure>(&procedure_id).await?;
|
||||
if !procedure.config.webhook_enabled {
|
||||
return Err(anyhow!("procedure does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req = ExecuteRequest::RunProcedure(RunProcedure {
|
||||
procedure: procedure_id,
|
||||
|
||||
@@ -30,15 +30,20 @@ pub async fn handle_repo_clone_webhook(
|
||||
let lock = repo_locks().get_or_insert_default(&repo_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let repo = resource::get::<Repo>(&repo_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &repo.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !repo.config.webhook_enabled {
|
||||
return Err(anyhow!("repo does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != repo.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req =
|
||||
crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo {
|
||||
@@ -64,15 +69,20 @@ pub async fn handle_repo_pull_webhook(
|
||||
let lock = repo_locks().get_or_insert_default(&repo_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let repo = resource::get::<Repo>(&repo_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &repo.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !repo.config.webhook_enabled {
|
||||
return Err(anyhow!("repo does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != repo.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req = crate::api::execute::ExecuteRequest::PullRepo(PullRepo {
|
||||
repo: repo_id,
|
||||
@@ -96,23 +106,30 @@ pub async fn handle_repo_build_webhook(
|
||||
let lock = repo_locks().get_or_insert_default(&repo_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let repo = resource::get::<Repo>(&repo_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &repo.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !repo.config.webhook_enabled {
|
||||
return Err(anyhow!("repo does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != repo.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req = crate::api::execute::ExecuteRequest::BuildRepo(BuildRepo {
|
||||
repo: repo_id,
|
||||
});
|
||||
let req =
|
||||
crate::api::execute::ExecuteRequest::BuildRepo(BuildRepo {
|
||||
repo: repo_id,
|
||||
});
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let crate::api::execute::ExecuteRequest::BuildRepo(req) = req else {
|
||||
let crate::api::execute::ExecuteRequest::BuildRepo(req) = req
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
State.resolve(req, (user, update)).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,15 +31,20 @@ pub async fn handle_stack_refresh_webhook(
|
||||
let lock = stack_locks().get_or_insert_default(&stack_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let stack = resource::get::<Stack>(&stack_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &stack.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !stack.config.webhook_enabled {
|
||||
return Err(anyhow!("stack does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != stack.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
State
|
||||
.resolve(RefreshStackCache { stack: stack.id }, user)
|
||||
@@ -58,15 +63,20 @@ pub async fn handle_stack_deploy_webhook(
|
||||
let lock = stack_locks().get_or_insert_default(&stack_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let stack = resource::get::<Stack>(&stack_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &stack.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !stack.config.webhook_enabled {
|
||||
return Err(anyhow!("stack does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != stack.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req = ExecuteRequest::DeployStack(DeployStack {
|
||||
stack: stack_id,
|
||||
|
||||
@@ -31,15 +31,20 @@ pub async fn handle_sync_refresh_webhook(
|
||||
let lock = sync_locks().get_or_insert_default(&sync_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let sync = resource::get::<ResourceSync>(&sync_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &sync.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !sync.config.webhook_enabled {
|
||||
return Err(anyhow!("sync does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != sync.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
State
|
||||
.resolve(RefreshResourceSyncPending { sync: sync_id }, user)
|
||||
@@ -58,15 +63,20 @@ pub async fn handle_sync_execute_webhook(
|
||||
let lock = sync_locks().get_or_insert_default(&sync_id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
verify_gh_signature(headers, &body).await?;
|
||||
let request_branch = extract_branch(&body)?;
|
||||
let sync = resource::get::<ResourceSync>(&sync_id).await?;
|
||||
|
||||
verify_gh_signature(headers, &body, &sync.config.webhook_secret)
|
||||
.await?;
|
||||
|
||||
if !sync.config.webhook_enabled {
|
||||
return Err(anyhow!("sync does not have webhook enabled"));
|
||||
}
|
||||
|
||||
let request_branch = extract_branch(&body)?;
|
||||
if request_branch != sync.config.branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req = ExecuteRequest::RunSync(RunSync { sync: sync_id });
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
|
||||
@@ -5,8 +5,6 @@ use std::{net::SocketAddr, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::Router;
|
||||
use helpers::startup_in_progress_update_cleanup;
|
||||
use state::jwt_client;
|
||||
use tower_http::{
|
||||
cors::{Any, CorsLayer},
|
||||
services::{ServeDir, ServeFile},
|
||||
@@ -34,9 +32,11 @@ async fn app() -> anyhow::Result<()> {
|
||||
info!("config: {:?}", config.sanitized());
|
||||
|
||||
// includes init db_client check to crash on db init failure
|
||||
startup_in_progress_update_cleanup().await;
|
||||
helpers::startup_cleanup().await;
|
||||
// Maybe initialize default server in All In One deployment.
|
||||
helpers::ensure_server().await;
|
||||
// init jwt client to crash on failure
|
||||
jwt_client();
|
||||
state::jwt_client();
|
||||
|
||||
// Spawn tasks
|
||||
monitor::spawn_monitor_loop();
|
||||
|
||||
@@ -339,6 +339,18 @@ pub async fn alert_servers(
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
// Need to close any open ones on disks no longer reported
|
||||
if let Some(disk_alerts) = server_disk_alerts {
|
||||
for (path, alert) in disk_alerts {
|
||||
if !health.disks.contains_key(path) {
|
||||
let mut alert = alert.clone();
|
||||
alert.level = SeverityLevel::Ok;
|
||||
alert_ids_to_close
|
||||
.push((alert, server.info.send_disk_alerts));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tokio::join!(
|
||||
|
||||
@@ -36,11 +36,9 @@ pub async fn alert_stacks(
|
||||
if status.curr.state != prev {
|
||||
// send alert
|
||||
let Ok(stack) =
|
||||
resource::get::<Stack>(&status.curr.id)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to get stack from db | {e:#?}")
|
||||
})
|
||||
resource::get::<Stack>(&status.curr.id).await.inspect_err(
|
||||
|e| error!("failed to get stack from db | {e:#?}"),
|
||||
)
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
|
||||
@@ -10,7 +10,7 @@ use monitor_client::entities::{
|
||||
stats::{ServerHealth, SystemStats},
|
||||
Server, ServerState,
|
||||
},
|
||||
stack::{ComposeProject, Stack, StackService, StackState},
|
||||
stack::{ComposeProject, StackService, StackState},
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use periphery_client::api::{self, git::GetLatestCommit};
|
||||
@@ -20,7 +20,6 @@ use crate::{
|
||||
config::core_config,
|
||||
helpers::periphery_client,
|
||||
monitor::{alert::check_alerts, record::record_server_stats},
|
||||
resource,
|
||||
state::{db_client, deployment_status_cache, repo_status_cache},
|
||||
};
|
||||
|
||||
@@ -189,7 +188,7 @@ pub async fn update_cache_for_server(server: &Server) {
|
||||
|
||||
let stats = if server.config.stats_monitoring {
|
||||
match periphery.request(api::stats::GetSystemStats {}).await {
|
||||
Ok(stats) => Some(stats),
|
||||
Ok(stats) => Some(filter_volumes(server, stats)),
|
||||
Err(e) => {
|
||||
insert_deployments_status_unknown(deployments).await;
|
||||
insert_repos_status_unknown(repos).await;
|
||||
@@ -214,7 +213,7 @@ pub async fn update_cache_for_server(server: &Server) {
|
||||
Ok((containers, networks, images, projects)) => {
|
||||
tokio::join!(
|
||||
resources::update_deployment_cache(deployments, &containers),
|
||||
resources::update_stack_cache(stacks, &containers, &projects),
|
||||
resources::update_stack_cache(stacks, &containers),
|
||||
);
|
||||
insert_server_status(
|
||||
server,
|
||||
@@ -272,18 +271,19 @@ pub async fn update_cache_for_server(server: &Server) {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn update_cache_for_stack(stack: &Stack) {
|
||||
if stack.config.server_id.is_empty() {
|
||||
return;
|
||||
}
|
||||
let Ok(server) = resource::get::<Server>(&stack.config.server_id)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
warn!("Failed to get server for stack {} | {e:#}", stack.name)
|
||||
})
|
||||
else {
|
||||
return;
|
||||
};
|
||||
update_cache_for_server(&server).await;
|
||||
fn filter_volumes(
|
||||
server: &Server,
|
||||
mut stats: SystemStats,
|
||||
) -> SystemStats {
|
||||
stats.disks.retain(|disk| {
|
||||
// Always filter out volume mounts
|
||||
!disk.mount.starts_with("/var/lib/docker/volumes")
|
||||
// Filter out any that were declared to ignore in server config
|
||||
&& !server
|
||||
.config
|
||||
.ignore_mounts
|
||||
.iter()
|
||||
.any(|mount| disk.mount.starts_with(mount))
|
||||
});
|
||||
stats
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::entities::{
|
||||
deployment::{ContainerSummary, Deployment, DeploymentState},
|
||||
stack::{ComposeProject, Stack, StackService, StackServiceNames},
|
||||
stack::{Stack, StackService, StackServiceNames},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
query::get_stack_state_from_projects,
|
||||
query::get_stack_state_from_containers,
|
||||
stack::{
|
||||
compose_container_match_regex,
|
||||
services::extract_services_from_stack,
|
||||
@@ -55,7 +55,6 @@ pub async fn update_deployment_cache(
|
||||
pub async fn update_stack_cache(
|
||||
stacks: Vec<Stack>,
|
||||
containers: &[ContainerSummary],
|
||||
projects: &[ComposeProject],
|
||||
) {
|
||||
let stack_status_cache = stack_status_cache();
|
||||
for stack in stacks {
|
||||
@@ -93,7 +92,11 @@ pub async fn update_stack_cache(
|
||||
.map(|s| s.curr.state);
|
||||
let status = CachedStackStatus {
|
||||
id: stack.id.clone(),
|
||||
state: get_stack_state_from_projects(&stack, projects),
|
||||
state: get_stack_state_from_containers(
|
||||
&stack.config.ignore_services,
|
||||
&services,
|
||||
containers,
|
||||
),
|
||||
services: services_with_containers,
|
||||
};
|
||||
stack_status_cache
|
||||
|
||||
@@ -780,13 +780,19 @@ where
|
||||
{
|
||||
let resource: ResourceTarget = resource.into();
|
||||
let (recent_field, id) = match resource {
|
||||
ResourceTarget::Server(id) => ("recent_servers", id),
|
||||
ResourceTarget::Deployment(id) => ("recent_deployments", id),
|
||||
ResourceTarget::Build(id) => ("recent_builds", id),
|
||||
ResourceTarget::Repo(id) => ("recent_repos", id),
|
||||
ResourceTarget::Procedure(id) => ("recent_procedures", id),
|
||||
// Don't need to do anything for others
|
||||
_ => return,
|
||||
ResourceTarget::Server(id) => ("recents.Server", id),
|
||||
ResourceTarget::Deployment(id) => ("recents.Deployment", id),
|
||||
ResourceTarget::Build(id) => ("recents.Build", id),
|
||||
ResourceTarget::Repo(id) => ("recents.Repo", id),
|
||||
ResourceTarget::Procedure(id) => ("recents.Procedure", id),
|
||||
ResourceTarget::Stack(id) => ("recents.Stack", id),
|
||||
ResourceTarget::Builder(id) => ("recents.Builder", id),
|
||||
ResourceTarget::Alerter(id) => ("recents.Alerter", id),
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
("recents.ServerTemplate", id)
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => ("recents.ResourceSync", id),
|
||||
ResourceTarget::System(_) => return,
|
||||
};
|
||||
if let Err(e) = db_client()
|
||||
.await
|
||||
|
||||
@@ -195,7 +195,7 @@ pub async fn refresh_repo_state_cache() {
|
||||
}
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to refresh repo state cache | {e:#}")
|
||||
warn!("failed to refresh repo state cache | {e:#}")
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -167,7 +167,7 @@ impl super::MonitorResource for Server {
|
||||
} },
|
||||
)
|
||||
.await
|
||||
.context("failed to detach server from repos")?;
|
||||
.context("failed to close deleted server alerts")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,27 +1,31 @@
|
||||
use anyhow::Context;
|
||||
use formatting::format_serror;
|
||||
use monitor_client::entities::{
|
||||
permission::PermissionLevel,
|
||||
resource::Resource,
|
||||
server::Server,
|
||||
stack::{
|
||||
PartialStackConfig, Stack, StackConfig, StackConfigDiff,
|
||||
StackInfo, StackListItem, StackListItemInfo, StackQuerySpecifics,
|
||||
StackState,
|
||||
use monitor_client::{
|
||||
api::write::RefreshStackCache,
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
resource::Resource,
|
||||
server::Server,
|
||||
stack::{
|
||||
PartialStackConfig, Stack, StackConfig, StackConfigDiff,
|
||||
StackInfo, StackListItem, StackListItemInfo,
|
||||
StackQuerySpecifics, StackState,
|
||||
},
|
||||
update::{ResourceTargetVariant, Update},
|
||||
user::{stack_user, User},
|
||||
Operation,
|
||||
},
|
||||
update::{ResourceTargetVariant, Update},
|
||||
user::User,
|
||||
Operation,
|
||||
};
|
||||
use mungos::mongodb::Collection;
|
||||
use periphery_client::api::compose::ComposeExecution;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{periphery_client, query::get_stack_state},
|
||||
monitor::update_cache_for_server,
|
||||
resource,
|
||||
state::{
|
||||
action_states, db_client, server_status_cache, stack_status_cache,
|
||||
action_states, db_client, server_status_cache,
|
||||
stack_status_cache, State,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -142,13 +146,37 @@ impl super::MonitorResource for Stack {
|
||||
|
||||
async fn post_create(
|
||||
created: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
if !created.config.server_id.is_empty() {
|
||||
let server =
|
||||
resource::get::<Server>(&created.config.server_id).await?;
|
||||
update_cache_for_server(&server).await;
|
||||
if let Err(e) = State
|
||||
.resolve(
|
||||
RefreshStackCache {
|
||||
stack: created.name.clone(),
|
||||
},
|
||||
stack_user().to_owned(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
update.push_error_log(
|
||||
"refresh stack cache",
|
||||
format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
|
||||
);
|
||||
};
|
||||
if created.config.server_id.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let Ok(server) = super::get::<Server>(&created.config.server_id)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
warn!(
|
||||
"Failed to get server for stack {} | {e:#}",
|
||||
created.name
|
||||
)
|
||||
})
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
update_cache_for_server(&server).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -168,14 +196,9 @@ impl super::MonitorResource for Stack {
|
||||
|
||||
async fn post_update(
|
||||
updated: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
if !updated.config.server_id.is_empty() {
|
||||
let server =
|
||||
resource::get::<Server>(&updated.config.server_id).await?;
|
||||
update_cache_for_server(&server).await;
|
||||
}
|
||||
Ok(())
|
||||
Self::post_create(updated, update).await
|
||||
}
|
||||
|
||||
// DELETE
|
||||
|
||||
@@ -3,6 +3,7 @@ use std::time::Duration;
|
||||
use anyhow::Context;
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::entities::{
|
||||
monitor_timestamp,
|
||||
resource::Resource,
|
||||
sync::{
|
||||
PartialResourceSyncConfig, PendingSyncUpdatesData, ResourceSync,
|
||||
@@ -126,9 +127,20 @@ impl super::MonitorResource for ResourceSync {
|
||||
}
|
||||
|
||||
async fn pre_delete(
|
||||
_resource: &Resource<Self::Config, Self::Info>,
|
||||
resource: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
db_client().await.alerts
|
||||
.update_many(
|
||||
doc! { "target.type": "ResourceSync", "target.id": &resource.id },
|
||||
doc! { "$set": {
|
||||
"resolved": true,
|
||||
"resolved_ts": monitor_timestamp()
|
||||
} },
|
||||
)
|
||||
.await
|
||||
.context("failed to close deleted sync alerts")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -212,6 +212,7 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
|
||||
builder_id: String::new(),
|
||||
skip_secret_interp: value.skip_secret_interp,
|
||||
version: value.version.into(),
|
||||
auto_increment_version: true,
|
||||
image_name: Default::default(),
|
||||
image_tag: Default::default(),
|
||||
git_provider: String::from("github.com"),
|
||||
@@ -243,6 +244,7 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
|
||||
use_buildx,
|
||||
labels: Default::default(),
|
||||
webhook_enabled: true,
|
||||
webhook_secret: Default::default(),
|
||||
commit: Default::default(),
|
||||
},
|
||||
};
|
||||
|
||||
@@ -300,6 +300,7 @@ impl TryFrom<Server> for monitor_client::entities::server::Server {
|
||||
config: monitor_client::entities::server::ServerConfig {
|
||||
address: value.address,
|
||||
enabled: value.enabled,
|
||||
ignore_mounts: Default::default(),
|
||||
auto_prune: value.auto_prune,
|
||||
send_unreachable_alerts: true,
|
||||
stats_monitoring: true,
|
||||
|
||||
@@ -139,6 +139,7 @@ impl From<BuildConfig>
|
||||
minor: value.version.minor,
|
||||
patch: value.version.patch,
|
||||
},
|
||||
auto_increment_version: true,
|
||||
image_name: Default::default(),
|
||||
image_tag: Default::default(),
|
||||
git_provider: String::from("github.com"),
|
||||
@@ -163,6 +164,7 @@ impl From<BuildConfig>
|
||||
extra_args: value.extra_args,
|
||||
use_buildx: value.use_buildx,
|
||||
webhook_enabled: value.webhook_enabled,
|
||||
webhook_secret: Default::default(),
|
||||
image_registry: value.image_registry.into(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -124,6 +124,7 @@ impl From<BuildConfig>
|
||||
minor: value.version.minor,
|
||||
patch: value.version.patch,
|
||||
},
|
||||
auto_increment_version: true,
|
||||
image_name: Default::default(),
|
||||
image_tag: Default::default(),
|
||||
git_provider: String::from("github.com"),
|
||||
@@ -157,6 +158,7 @@ impl From<BuildConfig>
|
||||
})
|
||||
},
|
||||
webhook_enabled: value.webhook_enabled,
|
||||
webhook_secret: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,12 +27,10 @@ merge_config_files.workspace = true
|
||||
async_timing_util.workspace = true
|
||||
resolver_api.workspace = true
|
||||
run_command.workspace = true
|
||||
parse_csl.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
axum-extra.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
futures.workspace = true
|
||||
tracing.workspace = true
|
||||
bollard.workspace = true
|
||||
|
||||
26
bin/periphery/Dockerfile
Normal file
26
bin/periphery/Dockerfile
Normal file
@@ -0,0 +1,26 @@
|
||||
# Build Periphery
|
||||
FROM rust:1.80.1-bookworm AS builder
|
||||
WORKDIR /builder
|
||||
COPY . .
|
||||
RUN cargo build -p monitor_periphery --release
|
||||
|
||||
# Final Image
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
# Install Deps
|
||||
RUN apt update && apt install -y git curl ca-certificates && \
|
||||
curl -fsSL https://get.docker.com | sh
|
||||
|
||||
# Copy
|
||||
COPY --from=builder /builder/target/release/periphery /
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 8120
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
|
||||
LABEL org.opencontainers.image.description="Monitor Periphery"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
# Using ENTRYPOINT allows cli args to be passed, eg using "command" in docker compose.
|
||||
ENTRYPOINT [ "./periphery" ]
|
||||
@@ -1,13 +1,21 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use command::run_monitor_command;
|
||||
use formatting::format_serror;
|
||||
use monitor_client::entities::{stack::ComposeProject, update::Log};
|
||||
use monitor_client::entities::{
|
||||
stack::{ComposeContents, ComposeProject},
|
||||
to_monitor_name,
|
||||
update::Log,
|
||||
};
|
||||
use periphery_client::api::compose::*;
|
||||
use resolver_api::Resolve;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
compose::{compose_up, docker_compose},
|
||||
config::periphery_config,
|
||||
helpers::log_grep,
|
||||
State,
|
||||
};
|
||||
@@ -70,6 +78,58 @@ pub struct DockerComposeLsItem {
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<GetComposeContentsOnHost, ()> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetComposeContentsOnHost {
|
||||
name,
|
||||
run_directory,
|
||||
file_paths,
|
||||
}: GetComposeContentsOnHost,
|
||||
_: (),
|
||||
) -> anyhow::Result<GetComposeContentsOnHostResponse> {
|
||||
let root =
|
||||
periphery_config().stack_dir.join(to_monitor_name(&name));
|
||||
let run_directory = root.join(&run_directory);
|
||||
let run_directory = run_directory.canonicalize().context(
|
||||
"failed to validate run directory on host (canonicalize error)",
|
||||
)?;
|
||||
|
||||
let file_paths = file_paths
|
||||
.iter()
|
||||
.map(|path| {
|
||||
run_directory.join(path).components().collect::<PathBuf>()
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut res = GetComposeContentsOnHostResponse::default();
|
||||
|
||||
for full_path in &file_paths {
|
||||
match fs::read_to_string(&full_path).await.with_context(|| {
|
||||
format!(
|
||||
"failed to read compose file contents at {full_path:?}"
|
||||
)
|
||||
}) {
|
||||
Ok(contents) => {
|
||||
res.contents.push(ComposeContents {
|
||||
path: full_path.display().to_string(),
|
||||
contents,
|
||||
});
|
||||
}
|
||||
Err(e) => {
|
||||
res.errors.push(ComposeContents {
|
||||
path: full_path.display().to_string(),
|
||||
contents: format_serror(&e.into()),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<GetComposeServiceLog> for State {
|
||||
#[instrument(
|
||||
name = "GetComposeServiceLog",
|
||||
|
||||
@@ -91,6 +91,7 @@ pub enum PeripheryRequest {
|
||||
|
||||
// Compose
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
GetComposeContentsOnHost(GetComposeContentsOnHost),
|
||||
GetComposeServiceLog(GetComposeServiceLog),
|
||||
GetComposeServiceLogSearch(GetComposeServiceLogSearch),
|
||||
ComposeUp(ComposeUp),
|
||||
|
||||
@@ -58,7 +58,13 @@ pub async fn compose_up(
|
||||
let file_paths = stack
|
||||
.file_paths()
|
||||
.iter()
|
||||
.map(|path| (path, run_directory.join(path)))
|
||||
.map(|path| {
|
||||
(
|
||||
path,
|
||||
// This will remove any intermediate uneeded '/./' in the path
|
||||
run_directory.join(path).components().collect::<PathBuf>(),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for (path, full_path) in &file_paths {
|
||||
@@ -70,7 +76,7 @@ pub async fn compose_up(
|
||||
return Err(anyhow!("A compose file doesn't exist after writing stack. Ensure the run_directory and file_paths are correct."));
|
||||
}
|
||||
|
||||
for (path, full_path) in &file_paths {
|
||||
for (_, full_path) in &file_paths {
|
||||
let file_contents =
|
||||
match fs::read_to_string(&full_path).await.with_context(|| {
|
||||
format!(
|
||||
@@ -85,7 +91,7 @@ pub async fn compose_up(
|
||||
.push(Log::error("read compose file", error.clone()));
|
||||
// This should only happen for repo stacks, ie remote error
|
||||
res.remote_errors.push(ComposeContents {
|
||||
path: path.to_string(),
|
||||
path: full_path.display().to_string(),
|
||||
contents: error,
|
||||
});
|
||||
return Err(anyhow!(
|
||||
@@ -116,22 +122,6 @@ pub async fn compose_up(
|
||||
let last_project_name = stack.project_name(false);
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
// Pull images before destroying to minimize downtime.
|
||||
// If this fails, do not continue.
|
||||
let log = run_monitor_command(
|
||||
"compose pull",
|
||||
format!(
|
||||
"cd {run_dir} && {docker_compose} -p {project_name} -f {file_args} pull{service_arg}",
|
||||
),
|
||||
)
|
||||
.await;
|
||||
if !log.success {
|
||||
res.logs.push(log);
|
||||
return Err(anyhow!(
|
||||
"Failed to pull required images, stopping the run."
|
||||
));
|
||||
}
|
||||
|
||||
// Login to the registry to pull private images, if account is set
|
||||
if !stack.config.registry_account.is_empty() {
|
||||
let registry = ImageRegistry::Standard(StandardRegistryConfig {
|
||||
@@ -151,6 +141,22 @@ pub async fn compose_up(
|
||||
.context("failed to login to image registry")?;
|
||||
}
|
||||
|
||||
// Pull images before destroying to minimize downtime.
|
||||
// If this fails, do not continue.
|
||||
let log = run_monitor_command(
|
||||
"compose pull",
|
||||
format!(
|
||||
"cd {run_dir} && {docker_compose} -p {project_name} -f {file_args} pull{service_arg}",
|
||||
),
|
||||
)
|
||||
.await;
|
||||
if !log.success {
|
||||
res.logs.push(log);
|
||||
return Err(anyhow!(
|
||||
"Failed to pull required images, stopping the run."
|
||||
));
|
||||
}
|
||||
|
||||
// Take down the existing containers.
|
||||
// This one tries to use the previously deployed service name, to ensure the right stack is taken down.
|
||||
destroy_existing_containers(&last_project_name, service, res)
|
||||
@@ -172,12 +178,17 @@ pub async fn compose_up(
|
||||
res.deployed = log.success;
|
||||
res.logs.push(log);
|
||||
|
||||
if let Err(e) = fs::remove_dir_all(&root).await.with_context(|| {
|
||||
format!("failed to clean up files after deploy | path: {root:?} | ensure all volumes are mounted outside the repo directory (preferably use absolute path for mounts)")
|
||||
}) {
|
||||
res
|
||||
.logs
|
||||
.push(Log::error("clean up files", format_serror(&e.into())))
|
||||
// Unless the files are supposed to be managed on the host,
|
||||
// clean up here, which will also let user know immediately if there will be a problem
|
||||
// with any accidental volumes mounted inside repo directory (just use absolute volumes anyways)
|
||||
if !stack.config.files_on_host {
|
||||
if let Err(e) = fs::remove_dir_all(&root).await.with_context(|| {
|
||||
format!("failed to clean up files after deploy | path: {root:?} | ensure all volumes are mounted outside the repo directory (preferably use absolute path for mounts)")
|
||||
}) {
|
||||
res
|
||||
.logs
|
||||
.push(Log::error("clean up files", format_serror(&e.into())))
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -198,8 +209,33 @@ async fn write_stack(
|
||||
// Cannot use canonicalize yet as directory may not exist.
|
||||
let run_directory = run_directory.components().collect::<PathBuf>();
|
||||
|
||||
if stack.config.file_contents.is_empty() {
|
||||
// Clone the repo
|
||||
if stack.config.files_on_host {
|
||||
// =============
|
||||
// FILES ON HOST
|
||||
// =============
|
||||
// Only need to write environment file here (which does nothing if not using this feature)
|
||||
let env_file_path = match write_environment_file(
|
||||
&stack.config.environment,
|
||||
&stack.config.env_file_path,
|
||||
stack
|
||||
.config
|
||||
.skip_secret_interp
|
||||
.then_some(&periphery_config().secrets),
|
||||
&run_directory,
|
||||
&mut res.logs,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(path) => path,
|
||||
Err(_) => {
|
||||
return Err(anyhow!("failed to write environment file"));
|
||||
}
|
||||
};
|
||||
Ok(env_file_path)
|
||||
} else if stack.config.file_contents.is_empty() {
|
||||
// ================
|
||||
// REPO BASED FILES
|
||||
// ================
|
||||
if stack.config.repo.is_empty() {
|
||||
// Err response will be written to return, no need to add it to log here
|
||||
return Err(anyhow!("Must either input compose file contents directly or provide a repo. Got neither."));
|
||||
@@ -284,6 +320,9 @@ async fn write_stack(
|
||||
|
||||
Ok(env_file_path)
|
||||
} else {
|
||||
// ==============
|
||||
// UI BASED FILES
|
||||
// ==============
|
||||
// Ensure run directory exists
|
||||
fs::create_dir_all(&run_directory).await.with_context(|| {
|
||||
format!(
|
||||
|
||||
@@ -15,53 +15,59 @@ pub fn periphery_config() -> &'static PeripheryConfig {
|
||||
.expect("failed to parse periphery environment");
|
||||
let args = CliArgs::parse();
|
||||
let config_paths =
|
||||
args.config_path.unwrap_or(env.monitor_config_paths);
|
||||
args.config_path.unwrap_or(env.periphery_config_paths);
|
||||
let config = if config_paths.is_empty() {
|
||||
PeripheryConfig::default()
|
||||
} else {
|
||||
parse_config_paths::<PeripheryConfig>(
|
||||
config_paths,
|
||||
args.config_keyword.unwrap_or(env.monitor_config_keywords),
|
||||
args.config_keyword.unwrap_or(env.periphery_config_keywords),
|
||||
args
|
||||
.merge_nested_config
|
||||
.unwrap_or(env.monitor_merge_nested_config),
|
||||
.unwrap_or(env.periphery_merge_nested_config),
|
||||
args
|
||||
.extend_config_arrays
|
||||
.unwrap_or(env.monitor_extend_config_arrays),
|
||||
.unwrap_or(env.periphery_extend_config_arrays),
|
||||
)
|
||||
.expect("failed at parsing config from paths")
|
||||
};
|
||||
|
||||
PeripheryConfig {
|
||||
port: env.monitor_port.unwrap_or(config.port),
|
||||
repo_dir: env.monitor_repo_dir.unwrap_or(config.repo_dir),
|
||||
stack_dir: env.monitor_stack_dir.unwrap_or(config.stack_dir),
|
||||
port: env.periphery_port.unwrap_or(config.port),
|
||||
repo_dir: env.periphery_repo_dir.unwrap_or(config.repo_dir),
|
||||
stack_dir: env.periphery_stack_dir.unwrap_or(config.stack_dir),
|
||||
stats_polling_rate: env
|
||||
.monitor_stats_polling_rate
|
||||
.periphery_stats_polling_rate
|
||||
.unwrap_or(config.stats_polling_rate),
|
||||
legacy_compose_cli: env
|
||||
.monitor_legacy_compose_cli
|
||||
.periphery_legacy_compose_cli
|
||||
.unwrap_or(config.legacy_compose_cli),
|
||||
logging: LogConfig {
|
||||
level: args
|
||||
.log_level
|
||||
.map(LogLevel::from)
|
||||
.or(env.monitor_logging_level)
|
||||
.or(env.periphery_logging_level)
|
||||
.unwrap_or(config.logging.level),
|
||||
stdio: env
|
||||
.monitor_logging_stdio
|
||||
.periphery_logging_stdio
|
||||
.unwrap_or(config.logging.stdio),
|
||||
otlp_endpoint: env
|
||||
.monitor_logging_otlp_endpoint
|
||||
.periphery_logging_otlp_endpoint
|
||||
.or(config.logging.otlp_endpoint),
|
||||
opentelemetry_service_name: env
|
||||
.monitor_logging_opentelemetry_service_name
|
||||
.periphery_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.logging.opentelemetry_service_name),
|
||||
},
|
||||
allowed_ips: env
|
||||
.monitor_allowed_ips
|
||||
.periphery_allowed_ips
|
||||
.unwrap_or(config.allowed_ips),
|
||||
passkeys: env.monitor_passkeys.unwrap_or(config.passkeys),
|
||||
passkeys: env.periphery_passkeys.unwrap_or(config.passkeys),
|
||||
include_disk_mounts: env
|
||||
.periphery_include_disk_mounts
|
||||
.unwrap_or(config.include_disk_mounts),
|
||||
exclude_disk_mounts: env
|
||||
.periphery_exclude_disk_mounts
|
||||
.unwrap_or(config.exclude_disk_mounts),
|
||||
secrets: config.secrets,
|
||||
git_providers: config.git_providers,
|
||||
docker_registries: config.docker_registries,
|
||||
|
||||
@@ -101,11 +101,31 @@ impl StatsClient {
|
||||
}
|
||||
|
||||
fn get_disks(&self) -> Vec<SingleDiskUsage> {
|
||||
let config = periphery_config();
|
||||
self
|
||||
.disks
|
||||
.list()
|
||||
.iter()
|
||||
.filter(|d| d.file_system() != "overlay")
|
||||
.filter(|d| {
|
||||
if d.file_system() == "overlay" {
|
||||
return false;
|
||||
}
|
||||
let path = d.mount_point();
|
||||
for mount in &config.exclude_disk_mounts {
|
||||
if path == mount {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if config.include_disk_mounts.is_empty() {
|
||||
return true;
|
||||
}
|
||||
for mount in &config.include_disk_mounts {
|
||||
if path == mount {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
})
|
||||
.map(|disk| {
|
||||
let file_system =
|
||||
disk.file_system().to_string_lossy().to_string();
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
[package]
|
||||
name = "tests"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
monitor_client.workspace = true
|
||||
logger.workspace = true
|
||||
tokio.workspace = true
|
||||
anyhow.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
partial_derive2.workspace = true
|
||||
mungos.workspace = true
|
||||
dotenvy.workspace = true
|
||||
envy.workspace = true
|
||||
rand.workspace = true
|
||||
tracing.workspace = true
|
||||
@@ -1,120 +0,0 @@
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateBuild, CreateBuilder, CreateDeployment, CreateServer,
|
||||
UpdateTagsOnResource,
|
||||
},
|
||||
entities::{
|
||||
build::BuildConfig,
|
||||
builder::{PartialBuilderConfig, ServerBuilderConfig},
|
||||
deployment::DeploymentConfig,
|
||||
server::ServerConfig,
|
||||
},
|
||||
MonitorClient,
|
||||
};
|
||||
use rand::Rng;
|
||||
|
||||
use crate::random_string;
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn tests() -> anyhow::Result<()> {
|
||||
dotenvy::dotenv().ok();
|
||||
|
||||
let monitor = MonitorClient::new_from_env().await?;
|
||||
|
||||
let tags = (0..6).map(|_| random_string(5)).collect::<Vec<_>>();
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut get_tags = || vec![tags[rng.gen_range(0..6)].to_string()];
|
||||
|
||||
let server_names = (0..20)
|
||||
.map(|i| format!("server-{}-{}", random_string(8), i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for name in &server_names {
|
||||
let resource = monitor
|
||||
.write(CreateServer {
|
||||
name: name.to_string(),
|
||||
config: ServerConfig::builder()
|
||||
.address(String::new())
|
||||
.build()?
|
||||
.into(),
|
||||
})
|
||||
.await?;
|
||||
info!("created server {}", resource.name);
|
||||
monitor
|
||||
.write(UpdateTagsOnResource {
|
||||
target: (&resource).into(),
|
||||
tags: get_tags(),
|
||||
})
|
||||
.await?;
|
||||
info!("updated tags on server {}", resource.name);
|
||||
}
|
||||
|
||||
for (i, server_name) in server_names.iter().enumerate() {
|
||||
let resource = monitor
|
||||
.write(CreateDeployment {
|
||||
name: format!("dep-{}-{}", random_string(8), i),
|
||||
config: DeploymentConfig::builder()
|
||||
.server_id(server_name.to_string())
|
||||
.build()?
|
||||
.into(),
|
||||
})
|
||||
.await?;
|
||||
info!("created deployment {}", resource.name);
|
||||
monitor
|
||||
.write(UpdateTagsOnResource {
|
||||
target: (&resource).into(),
|
||||
tags: get_tags(),
|
||||
})
|
||||
.await?;
|
||||
info!("updated tags on deployment {}", resource.name);
|
||||
}
|
||||
|
||||
let builder_names = (0..20)
|
||||
.map(|i| format!("builder-{}-{}", random_string(8), i))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for (i, server_name) in server_names.iter().enumerate() {
|
||||
let resource = monitor
|
||||
.write(CreateBuilder {
|
||||
name: builder_names[i].clone(),
|
||||
config: PartialBuilderConfig::Server(
|
||||
ServerBuilderConfig {
|
||||
server_id: server_name.to_string(),
|
||||
}
|
||||
.into(),
|
||||
),
|
||||
})
|
||||
.await?;
|
||||
info!("created builder {}", resource.name);
|
||||
monitor
|
||||
.write(UpdateTagsOnResource {
|
||||
target: (&resource).into(),
|
||||
tags: get_tags(),
|
||||
})
|
||||
.await?;
|
||||
info!("updated tags on builder {}", resource.name);
|
||||
}
|
||||
|
||||
for (i, builder_name) in builder_names.iter().enumerate() {
|
||||
let resource = monitor
|
||||
.write(CreateBuild {
|
||||
name: format!("build-{}-{}", random_string(8), i),
|
||||
config: BuildConfig::builder()
|
||||
.builder_id(builder_name.to_string())
|
||||
.build()?
|
||||
.into(),
|
||||
})
|
||||
.await?;
|
||||
info!("created build {}", resource.name);
|
||||
monitor
|
||||
.write(UpdateTagsOnResource {
|
||||
target: (&resource).into(),
|
||||
tags: get_tags(),
|
||||
})
|
||||
.await?;
|
||||
info!("updated tags on build {}", resource.name);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
use rand::{distributions::Alphanumeric, thread_rng, Rng};
|
||||
|
||||
mod core;
|
||||
// mod periphery;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
logger::init(&Default::default())?;
|
||||
// periphery::tests().await?;
|
||||
core::tests().await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn random_string(length: usize) -> String {
|
||||
thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(length)
|
||||
.map(char::from)
|
||||
.collect()
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
// use periphery_client::{requests, PeripheryClient};
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn tests() -> anyhow::Result<()> {
|
||||
let periphery =
|
||||
PeripheryClient::new("http://localhost:9001", "monitor_passkey");
|
||||
|
||||
let version = periphery.request(requests::GetVersion {}).await?;
|
||||
println!("{version:?}");
|
||||
|
||||
let system_info =
|
||||
periphery.request(requests::GetSystemInformation {}).await?;
|
||||
println!("{system_info:#?}");
|
||||
|
||||
let processes =
|
||||
periphery.request(requests::GetSystemProcesses {}).await?;
|
||||
// println!("{system_stats:#?}");
|
||||
|
||||
let periphery_process =
|
||||
processes.into_iter().find(|p| p.name.contains("periphery"));
|
||||
println!("{periphery_process:#?}");
|
||||
|
||||
let accounts = periphery.request(requests::GetAccounts {}).await?;
|
||||
println!("{accounts:#?}");
|
||||
|
||||
let secrets = periphery.request(requests::GetSecrets {}).await?;
|
||||
println!("{secrets:#?}");
|
||||
|
||||
let container_stats = periphery
|
||||
.request(requests::GetContainerStatsList {})
|
||||
.await?;
|
||||
println!("{container_stats:#?}");
|
||||
|
||||
let res = periphery.request(requests::GetNetworkList {}).await?;
|
||||
println!("{res:#?}");
|
||||
|
||||
let res = periphery
|
||||
.request(requests::GetContainerStats {
|
||||
name: "monitor-mongo".into(),
|
||||
})
|
||||
.await?;
|
||||
println!("{res:#?}");
|
||||
|
||||
let res = periphery
|
||||
.request(requests::GetContainerLog {
|
||||
name: "monitor-mongo".into(),
|
||||
tail: 50,
|
||||
})
|
||||
.await?;
|
||||
println!("{res:#?}");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -33,7 +33,7 @@ Users will have to manage their own versioning though.
|
||||
- **Sync**: Sync resources declared in toml files in Github repos.
|
||||
- Manage resources declaratively, with git history for configuration rollbacks.
|
||||
- See the actions which will be performed in the UI, and execute them upon manual confirmation.
|
||||
- Use a Github webhook to automatically execute syncs on git push.
|
||||
- Use a Git webhook to automatically execute syncs on git push.
|
||||
|
||||
- **Resource Tagging**
|
||||
- Attach multiple *tags* to resources, which can be used to group related resources together. These can be used to filter resources in the UI.
|
||||
|
||||
@@ -96,4 +96,4 @@ pub struct PruneImages {
|
||||
pub struct PruneContainers {
|
||||
/// Id or name
|
||||
pub server: String,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,7 +92,7 @@ pub type CreateDockerRegistryAccountResponse = DockerRegistryAccount;
|
||||
pub struct UpdateDockerRegistryAccount {
|
||||
/// The id of the docker registry to update
|
||||
pub id: String,
|
||||
/// The partial docker registry account.
|
||||
/// The partial docker registry account.
|
||||
pub account: _PartialDockerRegistryAccount,
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,9 @@ use serde::{Deserialize, Serialize};
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::entities::{
|
||||
stack::{Stack, _PartialStackConfig}, update::Update, NoData
|
||||
stack::{Stack, _PartialStackConfig},
|
||||
update::Update,
|
||||
NoData,
|
||||
};
|
||||
|
||||
use super::MonitorWriteRequest;
|
||||
|
||||
@@ -92,6 +92,13 @@ pub struct BuildConfig {
|
||||
#[builder(default)]
|
||||
pub version: Version,
|
||||
|
||||
/// Whether to automatically increment the patch on every build.
|
||||
/// Default is `true`
|
||||
#[serde(default = "default_auto_increment_version")]
|
||||
#[builder(default = "default_auto_increment_version()")]
|
||||
#[partial_default(default_auto_increment_version())]
|
||||
pub auto_increment_version: bool,
|
||||
|
||||
/// An alternate name for the image pushed to the repository.
|
||||
/// If this is empty, it will use the build name.
|
||||
///
|
||||
@@ -127,6 +134,15 @@ pub struct BuildConfig {
|
||||
#[partial_default(default_git_https())]
|
||||
pub git_https: bool,
|
||||
|
||||
/// The git account used to access private repos.
|
||||
/// Passing empty string can only clone public repos.
|
||||
///
|
||||
/// Note. A token for the account must be available in the core config or the builder server's periphery config
|
||||
/// for the configured git provider.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub git_account: String,
|
||||
|
||||
/// The repo used as the source of the build.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
@@ -143,14 +159,17 @@ pub struct BuildConfig {
|
||||
#[builder(default)]
|
||||
pub commit: String,
|
||||
|
||||
/// The git account used to access private repos.
|
||||
/// Passing empty string can only clone public repos.
|
||||
///
|
||||
/// Note. A token for the account must be available in the core config or the builder server's periphery config
|
||||
/// for the configured git provider.
|
||||
/// Whether incoming webhooks actually trigger action.
|
||||
#[serde(default = "default_webhook_enabled")]
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
|
||||
/// Optionally provide an alternate webhook secret for this build.
|
||||
/// If its an empty string, use the default secret from the config.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub git_account: String,
|
||||
pub webhook_secret: String,
|
||||
|
||||
/// The optional command run after repo clone and before docker build.
|
||||
#[serde(default)]
|
||||
@@ -185,12 +204,6 @@ pub struct BuildConfig {
|
||||
#[builder(default)]
|
||||
pub use_buildx: bool,
|
||||
|
||||
/// Whether incoming webhooks actually trigger action.
|
||||
#[serde(default = "default_webhook_enabled")]
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
|
||||
/// Any extra docker cli arguments to be included in the build command
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
@@ -250,6 +263,10 @@ impl BuildConfig {
|
||||
}
|
||||
}
|
||||
|
||||
fn default_auto_increment_version() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_git_provider() -> String {
|
||||
String::from("github.com")
|
||||
}
|
||||
@@ -280,6 +297,7 @@ impl Default for BuildConfig {
|
||||
builder_id: Default::default(),
|
||||
skip_secret_interp: Default::default(),
|
||||
version: Default::default(),
|
||||
auto_increment_version: default_auto_increment_version(),
|
||||
image_name: Default::default(),
|
||||
image_tag: Default::default(),
|
||||
git_provider: default_git_provider(),
|
||||
@@ -298,6 +316,7 @@ impl Default for BuildConfig {
|
||||
use_buildx: Default::default(),
|
||||
image_registry: Default::default(),
|
||||
webhook_enabled: default_webhook_enabled(),
|
||||
webhook_secret: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,6 +45,8 @@ pub struct Env {
|
||||
pub monitor_port: Option<u16>,
|
||||
/// Override `passkey`
|
||||
pub monitor_passkey: Option<String>,
|
||||
/// Override `ensure_server`
|
||||
pub monitor_ensure_server: Option<String>,
|
||||
/// Override `jwt_secret`
|
||||
pub monitor_jwt_secret: Option<String>,
|
||||
/// Override `jwt_ttl`
|
||||
@@ -157,6 +159,9 @@ fn default_config_path() -> String {
|
||||
/// Refer to the [example file](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml) for a full example.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct CoreConfig {
|
||||
// ===========
|
||||
// = General =
|
||||
// ===========
|
||||
/// The title of this monitor deployment. Will be used in the browser page title.
|
||||
/// Default: 'Monitor'
|
||||
#[serde(default = "default_title")]
|
||||
@@ -177,6 +182,41 @@ pub struct CoreConfig {
|
||||
/// Should be some secure hash, maybe 20-40 chars.
|
||||
pub passkey: String,
|
||||
|
||||
/// Disable user ability to use the UI to update resource configuration.
|
||||
#[serde(default)]
|
||||
pub ui_write_disabled: bool,
|
||||
|
||||
/// If defined, ensure an enabled server exists at this address.
|
||||
/// Use with All In One compose.
|
||||
/// Example: `http://monitor-periphery:8120`
|
||||
#[serde(default)]
|
||||
pub ensure_server: String,
|
||||
|
||||
// ============
|
||||
// = Database =
|
||||
// ============
|
||||
/// Configure core mongo connection.
|
||||
///
|
||||
/// An easy deployment method is to use Mongo Atlas to provide
|
||||
/// a reliable database.
|
||||
pub mongo: MongoConfig,
|
||||
|
||||
// ================
|
||||
// = Auth / Login =
|
||||
// ================
|
||||
/// enable login with local auth
|
||||
#[serde(default)]
|
||||
pub local_auth: bool,
|
||||
|
||||
/// Enable transparent mode, which gives all (enabled) users read access to all resources.
|
||||
#[serde(default)]
|
||||
pub transparent_mode: bool,
|
||||
|
||||
/// New users will be automatically enabled.
|
||||
/// Combined with transparent mode, this is suitable for a demo instance.
|
||||
#[serde(default)]
|
||||
pub enable_new_users: bool,
|
||||
|
||||
/// Optionally provide a specific jwt secret.
|
||||
/// Passing nothing or an empty string will cause one to be generated.
|
||||
/// Default: "" (empty string)
|
||||
@@ -188,13 +228,64 @@ pub struct CoreConfig {
|
||||
#[serde(default = "default_jwt_ttl")]
|
||||
pub jwt_ttl: Timelength,
|
||||
|
||||
/// Specify the directory used to clone stack / repo / build repos, for latest hash / contents.
|
||||
/// The default is fine when using a container.
|
||||
/// This directory has no need for persistence, so no need to mount it.
|
||||
/// Default: `/repos`
|
||||
#[serde(default = "default_repo_directory")]
|
||||
pub repo_directory: PathBuf,
|
||||
// =========
|
||||
// = Oauth =
|
||||
// =========
|
||||
/// Configure google oauth
|
||||
#[serde(default)]
|
||||
pub google_oauth: OauthCredentials,
|
||||
|
||||
/// Configure github oauth
|
||||
#[serde(default)]
|
||||
pub github_oauth: OauthCredentials,
|
||||
|
||||
// ============
|
||||
// = Webhooks =
|
||||
// ============
|
||||
/// Used to verify validity from webhooks.
|
||||
/// Should be some secure hash maybe 20-40 chars.
|
||||
/// It is given to git provider when configuring the webhook.
|
||||
#[serde(default)]
|
||||
pub webhook_secret: String,
|
||||
|
||||
/// Override the webhook listener base url, if None will use the address defined as 'host'.
|
||||
/// Example: `https://webhooks.mogh.tech`
|
||||
///
|
||||
/// This can be used if core sits on an internal network which is
|
||||
/// unreachable directly from the open internet.
|
||||
/// A reverse proxy in a public network can forward webhooks to the internal monitor.
|
||||
pub webhook_base_url: Option<String>,
|
||||
|
||||
/// Configure a Github Webhook app.
|
||||
/// Allows users to manage repo webhooks from within the Monitor UI.
|
||||
#[serde(default)]
|
||||
pub github_webhook_app: GithubWebhookAppConfig,
|
||||
|
||||
// ===========
|
||||
// = Logging =
|
||||
// ===========
|
||||
/// Configure logging
|
||||
#[serde(default)]
|
||||
pub logging: LogConfig,
|
||||
|
||||
// ===========
|
||||
// = Pruning =
|
||||
// ===========
|
||||
/// Number of days to keep stats, or 0 to disable pruning.
|
||||
/// Stats older than this number of days are deleted on a daily cycle
|
||||
/// Default: 14
|
||||
#[serde(default = "default_prune_days")]
|
||||
pub keep_stats_for_days: u64,
|
||||
|
||||
/// Number of days to keep alerts, or 0 to disable pruning.
|
||||
/// Alerts older than this number of days are deleted on a daily cycle
|
||||
/// Default: 14
|
||||
#[serde(default = "default_prune_days")]
|
||||
pub keep_alerts_for_days: u64,
|
||||
|
||||
// ==================
|
||||
// = Poll Intervals =
|
||||
// ==================
|
||||
/// Interval at which to poll stacks for any updates / automated actions.
|
||||
/// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`
|
||||
/// Default: `5-min`.
|
||||
@@ -224,72 +315,9 @@ pub struct CoreConfig {
|
||||
#[serde(default = "default_monitoring_interval")]
|
||||
pub monitoring_interval: Timelength,
|
||||
|
||||
/// Number of days to keep stats, or 0 to disable pruning.
|
||||
/// Stats older than this number of days are deleted on a daily cycle
|
||||
/// Default: 14
|
||||
#[serde(default = "default_prune_days")]
|
||||
pub keep_stats_for_days: u64,
|
||||
|
||||
/// Number of days to keep alerts, or 0 to disable pruning.
|
||||
/// Alerts older than this number of days are deleted on a daily cycle
|
||||
/// Default: 14
|
||||
#[serde(default = "default_prune_days")]
|
||||
pub keep_alerts_for_days: u64,
|
||||
|
||||
/// Configure logging
|
||||
#[serde(default)]
|
||||
pub logging: LogConfig,
|
||||
|
||||
/// Enable transparent mode, which gives all (enabled) users read access to all resources.
|
||||
#[serde(default)]
|
||||
pub transparent_mode: bool,
|
||||
|
||||
/// Disable user ability to use the UI to update resource configuration.
|
||||
#[serde(default)]
|
||||
pub ui_write_disabled: bool,
|
||||
|
||||
/// enable login with local auth
|
||||
#[serde(default)]
|
||||
pub local_auth: bool,
|
||||
|
||||
/// Configure google oauth
|
||||
#[serde(default)]
|
||||
pub google_oauth: OauthCredentials,
|
||||
|
||||
/// Configure github oauth
|
||||
#[serde(default)]
|
||||
pub github_oauth: OauthCredentials,
|
||||
|
||||
/// New users will be automatically enabled.
|
||||
/// Combined with transparent mode, this is suitable for a demo instance.
|
||||
#[serde(default)]
|
||||
pub enable_new_users: bool,
|
||||
|
||||
/// Used to verify validity from webhooks.
|
||||
/// Should be some secure hash maybe 20-40 chars.
|
||||
/// It is given to git provider when configuring the webhook.
|
||||
#[serde(default)]
|
||||
pub webhook_secret: String,
|
||||
|
||||
/// Override the webhook listener base url, if None will use the address defined as 'host'.
|
||||
/// Example: `https://webhooks.mogh.tech`
|
||||
///
|
||||
/// This can be used if core sits on an internal network which is
|
||||
/// unreachable directly from the open internet.
|
||||
/// A reverse proxy in a public network can forward webhooks to the internal monitor.
|
||||
pub webhook_base_url: Option<String>,
|
||||
|
||||
/// Configure a Github Webhook app.
|
||||
/// Allows users to manage repo webhooks from within the Monitor UI.
|
||||
#[serde(default)]
|
||||
pub github_webhook_app: GithubWebhookAppConfig,
|
||||
|
||||
/// Configure core mongo connection.
|
||||
///
|
||||
/// An easy deployment method is to use Mongo Atlas to provide
|
||||
/// a reliable database.
|
||||
pub mongo: MongoConfig,
|
||||
|
||||
// ===================
|
||||
// = Cloud Providers =
|
||||
// ===================
|
||||
/// Configure AWS credentials to use with AWS builds / server launches.
|
||||
#[serde(default)]
|
||||
pub aws: AwsCredentials,
|
||||
@@ -298,17 +326,17 @@ pub struct CoreConfig {
|
||||
#[serde(default)]
|
||||
pub hetzner: HetznerCredentials,
|
||||
|
||||
/// Configure core-based secrets. These will be preferentially interpolated into
|
||||
/// values if they contain a matching secret. Otherwise, the periphery will have to have the
|
||||
/// secret configured.
|
||||
#[serde(default)]
|
||||
pub secrets: HashMap<String, String>,
|
||||
|
||||
// =================
|
||||
// = Git Providers =
|
||||
// =================
|
||||
/// Configure git credentials used to clone private repos.
|
||||
/// Supports any git provider.
|
||||
#[serde(default, alias = "git_provider")]
|
||||
pub git_providers: Vec<GitProvider>,
|
||||
|
||||
// ======================
|
||||
// = Registry Providers =
|
||||
// ======================
|
||||
/// Configure docker credentials used to push / pull images.
|
||||
/// Supports any docker image repository.
|
||||
#[serde(default, alias = "docker_registry")]
|
||||
@@ -317,6 +345,25 @@ pub struct CoreConfig {
|
||||
/// Configure aws ecr registries, which are handled differently than other registries
|
||||
#[serde(default, alias = "aws_ecr_registry")]
|
||||
pub aws_ecr_registries: Vec<AwsEcrConfigWithCredentials>,
|
||||
|
||||
// ===========
|
||||
// = Secrets =
|
||||
// ===========
|
||||
/// Configure core-based secrets. These will be preferentially interpolated into
|
||||
/// values if they contain a matching secret. Otherwise, the periphery will have to have the
|
||||
/// secret configured.
|
||||
#[serde(default)]
|
||||
pub secrets: HashMap<String, String>,
|
||||
|
||||
// =========
|
||||
// = Other =
|
||||
// =========
|
||||
/// Specify the directory used to clone stack / repo / build repos, for latest hash / contents.
|
||||
/// The default is fine when using a container.
|
||||
/// This directory has no need for persistence, so no need to mount it.
|
||||
/// Default: `/repos`
|
||||
#[serde(default = "default_repo_directory")]
|
||||
pub repo_directory: PathBuf,
|
||||
}
|
||||
|
||||
fn default_title() -> String {
|
||||
@@ -356,6 +403,7 @@ impl CoreConfig {
|
||||
host: config.host,
|
||||
port: config.port,
|
||||
passkey: empty_or_redacted(&config.passkey),
|
||||
ensure_server: config.ensure_server,
|
||||
jwt_secret: empty_or_redacted(&config.jwt_secret),
|
||||
jwt_ttl: config.jwt_ttl,
|
||||
repo_directory: config.repo_directory,
|
||||
|
||||
@@ -86,7 +86,7 @@ pub struct Env {
|
||||
///
|
||||
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
|
||||
#[serde(default)]
|
||||
pub monitor_config_paths: Vec<String>,
|
||||
pub periphery_config_paths: Vec<String>,
|
||||
/// If specifying folders, use this to narrow down which
|
||||
/// files will be matched to parse into the final [PeripheryConfig].
|
||||
/// Only files inside the folders which have names containing all keywords
|
||||
@@ -94,120 +94,56 @@ pub struct Env {
|
||||
///
|
||||
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
|
||||
#[serde(default)]
|
||||
pub monitor_config_keywords: Vec<String>,
|
||||
pub periphery_config_keywords: Vec<String>,
|
||||
|
||||
/// Will merge nested config object (eg. secrets, providers) across multiple
|
||||
/// config files. Default: `false`
|
||||
///
|
||||
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
|
||||
#[serde(default)]
|
||||
pub monitor_merge_nested_config: bool,
|
||||
pub periphery_merge_nested_config: bool,
|
||||
|
||||
/// Will extend config arrays (eg. `allowed_ips`, `passkeys`) across multiple config files.
|
||||
/// Default: `false`
|
||||
///
|
||||
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
|
||||
#[serde(default)]
|
||||
pub monitor_extend_config_arrays: bool,
|
||||
pub periphery_extend_config_arrays: bool,
|
||||
|
||||
/// Override `port`
|
||||
pub monitor_port: Option<u16>,
|
||||
pub periphery_port: Option<u16>,
|
||||
/// Override `repo_dir`
|
||||
pub monitor_repo_dir: Option<PathBuf>,
|
||||
pub periphery_repo_dir: Option<PathBuf>,
|
||||
/// Override `stack_dir`
|
||||
pub monitor_stack_dir: Option<PathBuf>,
|
||||
pub periphery_stack_dir: Option<PathBuf>,
|
||||
/// Override `stats_polling_rate`
|
||||
pub monitor_stats_polling_rate: Option<Timelength>,
|
||||
pub periphery_stats_polling_rate: Option<Timelength>,
|
||||
/// Override `legacy_compose_cli`
|
||||
pub monitor_legacy_compose_cli: Option<bool>,
|
||||
pub periphery_legacy_compose_cli: Option<bool>,
|
||||
|
||||
// LOGGING
|
||||
/// Override `logging.level`
|
||||
pub monitor_logging_level: Option<LogLevel>,
|
||||
pub periphery_logging_level: Option<LogLevel>,
|
||||
/// Override `logging.stdio`
|
||||
pub monitor_logging_stdio: Option<StdioLogMode>,
|
||||
pub periphery_logging_stdio: Option<StdioLogMode>,
|
||||
/// Override `logging.otlp_endpoint`
|
||||
pub monitor_logging_otlp_endpoint: Option<String>,
|
||||
pub periphery_logging_otlp_endpoint: Option<String>,
|
||||
/// Override `logging.opentelemetry_service_name`
|
||||
pub monitor_logging_opentelemetry_service_name: Option<String>,
|
||||
pub periphery_logging_opentelemetry_service_name: Option<String>,
|
||||
|
||||
/// Override `allowed_ips`
|
||||
pub monitor_allowed_ips: Option<Vec<IpAddr>>,
|
||||
pub periphery_allowed_ips: Option<Vec<IpAddr>>,
|
||||
/// Override `passkeys`
|
||||
pub monitor_passkeys: Option<Vec<String>>,
|
||||
pub periphery_passkeys: Option<Vec<String>>,
|
||||
/// Override `include_disk_mounts`
|
||||
pub periphery_include_disk_mounts: Option<Vec<PathBuf>>,
|
||||
/// Override `exclude_disk_mounts`
|
||||
pub periphery_exclude_disk_mounts: Option<Vec<PathBuf>>,
|
||||
}
|
||||
|
||||
/// # Periphery Configuration File
|
||||
///
|
||||
/// The periphery agent initializes it's configuration by reading the environment,
|
||||
/// parsing the [PeripheryConfig] schema from the files specified by cli args (and falling back to `env.config_paths`),
|
||||
/// and then applying any config field overrides specified in the environment.
|
||||
///
|
||||
/// ## Example TOML
|
||||
/// ```toml
|
||||
/// ## optional. 8120 is default
|
||||
/// port = 8120
|
||||
///
|
||||
/// ## optional. `/etc/monitor/repos` is default.
|
||||
/// repo_dir = "/etc/monitor/repos"
|
||||
///
|
||||
/// ## optional. `/etc/monitor/stacks` is default.
|
||||
/// stack_dir = "/etc/monitor/stacks"
|
||||
///
|
||||
/// ## optional. 5-sec is default.
|
||||
/// ## can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min.
|
||||
/// ## controls granularity of system stats recorded
|
||||
/// stats_polling_rate = "5-sec"
|
||||
///
|
||||
/// ## Whether stack actions should use `docker-compose ...`
|
||||
/// ## instead of `docker compose ...`.
|
||||
/// ## default: false
|
||||
/// legacy_compose_cli = false
|
||||
///
|
||||
/// ## optional. default is empty, which will not block any request by ip.
|
||||
/// allowed_ips = ["127.0.0.1"]
|
||||
///
|
||||
/// ## optional. default is empty, which will not require any passkey to be passed by core.
|
||||
/// passkeys = ["abcdefghijk"]
|
||||
///
|
||||
/// ## specify the log level of the monitor core application
|
||||
/// ## default: info
|
||||
/// ## options: off, error, warn, info, debug, trace
|
||||
/// logging.level = "info"
|
||||
///
|
||||
/// ## specify the logging format for stdout / stderr.
|
||||
/// ## default: standard
|
||||
/// ## options: standard, json, none
|
||||
/// logging.stdio = "standard"
|
||||
///
|
||||
/// ## specify an otlp endpoint to send traces to
|
||||
/// ## optional, default unassigned
|
||||
/// # logging.otlp_endpoint = "http://localhost:4317"
|
||||
///
|
||||
/// ## specify the service name to send with otlp traces.
|
||||
/// ## optional, default 'Monitor'.
|
||||
/// # logging.opentelemetry_service_name = "Monitor"
|
||||
///
|
||||
/// ## configure perihery-based secrets
|
||||
/// [secrets]
|
||||
/// # SECRET_1 = "value_1"
|
||||
/// # SECRET_2 = "value_2"
|
||||
///
|
||||
/// ## configure periphery-based git providers
|
||||
/// # [[git_provider]]
|
||||
/// # domain = "git.mogh.tech" # use a custom provider, like self-hosted gitea
|
||||
/// # accounts = [
|
||||
/// # { username = "mbecker20", token = "access_token_for_account" },
|
||||
/// # ]
|
||||
///
|
||||
/// ## configure periphery-based docker registries
|
||||
/// # [[docker_registry]]
|
||||
/// # domain = "docker.io"
|
||||
/// # accounts = [
|
||||
/// # { username = "mbecker2020", token = "access_token_for_account" }
|
||||
/// # ]
|
||||
/// # organizations = ["DockerhubOrganization"]
|
||||
/// ```
|
||||
/// Refer to the [example file](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml) for a full example.
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct PeripheryConfig {
|
||||
/// The port periphery will run on.
|
||||
@@ -254,6 +190,14 @@ pub struct PeripheryConfig {
|
||||
#[serde(default)]
|
||||
pub passkeys: Vec<String>,
|
||||
|
||||
/// If non-empty, only includes specific mount paths in the disk report.
|
||||
#[serde(default)]
|
||||
pub include_disk_mounts: Vec<PathBuf>,
|
||||
|
||||
/// Exclude specific mount paths in the disk report.
|
||||
#[serde(default)]
|
||||
pub exclude_disk_mounts: Vec<PathBuf>,
|
||||
|
||||
/// Mapping on local periphery secrets. These can be interpolated into eg. Deployment environment variables.
|
||||
/// Default: none
|
||||
#[serde(default)]
|
||||
@@ -288,18 +232,20 @@ fn default_stats_polling_rate() -> Timelength {
|
||||
|
||||
impl Default for PeripheryConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
port: default_periphery_port(),
|
||||
repo_dir: default_repo_dir(),
|
||||
stack_dir: default_stack_dir(),
|
||||
stats_polling_rate: default_stats_polling_rate(),
|
||||
legacy_compose_cli: Default::default(),
|
||||
logging: Default::default(),
|
||||
allowed_ips: Default::default(),
|
||||
passkeys: Default::default(),
|
||||
secrets: Default::default(),
|
||||
git_providers: Default::default(),
|
||||
docker_registries: Default::default(),
|
||||
Self {
|
||||
port: default_periphery_port(),
|
||||
repo_dir: default_repo_dir(),
|
||||
stack_dir: default_stack_dir(),
|
||||
stats_polling_rate: default_stats_polling_rate(),
|
||||
legacy_compose_cli: Default::default(),
|
||||
logging: Default::default(),
|
||||
allowed_ips: Default::default(),
|
||||
passkeys: Default::default(),
|
||||
include_disk_mounts: Default::default(),
|
||||
exclude_disk_mounts: Default::default(),
|
||||
secrets: Default::default(),
|
||||
git_providers: Default::default(),
|
||||
docker_registries: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LogConfig {
|
||||
/// The logging level. default: info
|
||||
#[serde(default)]
|
||||
@@ -21,6 +21,18 @@ fn default_opentelemetry_service_name() -> String {
|
||||
String::from("Monitor")
|
||||
}
|
||||
|
||||
impl Default for LogConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
level: Default::default(),
|
||||
stdio: Default::default(),
|
||||
otlp_endpoint: None,
|
||||
opentelemetry_service_name: default_opentelemetry_service_name(
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(
|
||||
Debug,
|
||||
Clone,
|
||||
|
||||
@@ -66,6 +66,12 @@ pub struct ProcedureConfig {
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
|
||||
/// Optionally provide an alternate webhook secret for this procedure.
|
||||
/// If its an empty string, use the default secret from the config.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub webhook_secret: String,
|
||||
}
|
||||
|
||||
impl ProcedureConfig {
|
||||
@@ -83,6 +89,7 @@ impl Default for ProcedureConfig {
|
||||
Self {
|
||||
stages: Default::default(),
|
||||
webhook_enabled: default_webhook_enabled(),
|
||||
webhook_secret: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,9 +10,7 @@ pub type _PartialGitProviderAccount = PartialGitProviderAccount;
|
||||
/// Configuration to access private git repos from various git providers.
|
||||
/// Note. Cannot create two accounts with the same domain and username.
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Default, Partial,
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Default, Partial)]
|
||||
#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)]
|
||||
#[partial(skip_serializing_none, from, diff)]
|
||||
#[cfg_attr(
|
||||
@@ -66,9 +64,7 @@ pub type _PartialDockerRegistryAccount = PartialDockerRegistryAccount;
|
||||
|
||||
/// Configuration to access private image repositories on various registries.
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Default, Partial,
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Default, Partial)]
|
||||
#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)]
|
||||
#[partial(skip_serializing_none, from, diff)]
|
||||
#[cfg_attr(
|
||||
@@ -108,4 +104,4 @@ pub struct DockerRegistryAccount {
|
||||
|
||||
fn default_registry_domain() -> String {
|
||||
String::from("docker.io")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -111,6 +111,23 @@ pub struct RepoConfig {
|
||||
#[partial_default(default_git_provider())]
|
||||
pub git_provider: String,
|
||||
|
||||
/// Whether to use https to clone the repo (versus http). Default: true
|
||||
///
|
||||
/// Note. Monitor does not currently support cloning repos via ssh.
|
||||
#[serde(default = "default_git_https")]
|
||||
#[builder(default = "default_git_https()")]
|
||||
#[partial_default(default_git_https())]
|
||||
pub git_https: bool,
|
||||
|
||||
/// The git account used to access private repos.
|
||||
/// Passing empty string can only clone public repos.
|
||||
///
|
||||
/// Note. A token for the account must be available in the core config or the builder server's periphery config
|
||||
/// for the configured git provider.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub git_account: String,
|
||||
|
||||
/// The github repo to clone.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
@@ -127,28 +144,23 @@ pub struct RepoConfig {
|
||||
#[builder(default)]
|
||||
pub commit: String,
|
||||
|
||||
/// The git account used to access private repos.
|
||||
/// Passing empty string can only clone public repos.
|
||||
///
|
||||
/// Note. A token for the account must be available in the core config or the builder server's periphery config
|
||||
/// for the configured git provider.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub git_account: String,
|
||||
|
||||
/// Whether to use https to clone the repo (versus http). Default: true
|
||||
///
|
||||
/// Note. Monitor does not currently support cloning repos via ssh.
|
||||
#[serde(default = "default_git_https")]
|
||||
#[builder(default = "default_git_https()")]
|
||||
#[partial_default(default_git_https())]
|
||||
pub git_https: bool,
|
||||
|
||||
/// Explicitly specify the folder to clone the repo in.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub path: String,
|
||||
|
||||
/// Whether incoming webhooks actually trigger action.
|
||||
#[serde(default = "default_webhook_enabled")]
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
|
||||
/// Optionally provide an alternate webhook secret for this repo.
|
||||
/// If its an empty string, use the default secret from the config.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub webhook_secret: String,
|
||||
|
||||
/// Command to be run after the repo is cloned.
|
||||
/// The path is relative to the root of the repo.
|
||||
#[serde(default)]
|
||||
@@ -189,12 +201,6 @@ pub struct RepoConfig {
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub skip_secret_interp: bool,
|
||||
|
||||
/// Whether incoming webhooks actually trigger action.
|
||||
#[serde(default = "default_webhook_enabled")]
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
}
|
||||
|
||||
impl RepoConfig {
|
||||
@@ -241,6 +247,7 @@ impl Default for RepoConfig {
|
||||
env_file_path: default_env_file_path(),
|
||||
skip_secret_interp: Default::default(),
|
||||
webhook_enabled: default_webhook_enabled(),
|
||||
webhook_secret: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,6 +64,12 @@ pub struct ServerConfig {
|
||||
#[partial_default(default_enabled())]
|
||||
pub enabled: bool,
|
||||
|
||||
/// Sometimes the system stats reports a mount path that is not desired.
|
||||
/// Use this field to filter it out from the report.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub ignore_mounts: Vec<String>,
|
||||
|
||||
/// Whether to monitor any server stats beyond passing health check.
|
||||
/// default: true
|
||||
#[serde(default = "default_stats_monitoring")]
|
||||
@@ -194,6 +200,7 @@ impl Default for ServerConfig {
|
||||
Self {
|
||||
address: Default::default(),
|
||||
enabled: default_enabled(),
|
||||
ignore_mounts: Default::default(),
|
||||
stats_monitoring: default_stats_monitoring(),
|
||||
auto_prune: default_auto_prune(),
|
||||
send_unreachable_alerts: default_send_alerts(),
|
||||
|
||||
@@ -153,7 +153,8 @@ pub struct StackInfo {
|
||||
#[serde(default)]
|
||||
pub latest_services: Vec<StackServiceNames>,
|
||||
|
||||
/// The remote compose file contents. This is updated whenever Monitor refreshes the stack cache.
|
||||
/// The remote compose file contents, whether on host or in repo.
|
||||
/// This is updated whenever Monitor refreshes the stack cache.
|
||||
/// It will be empty if the file is defined directly in the stack config.
|
||||
pub remote_contents: Option<Vec<ComposeContents>>,
|
||||
/// If there was an error in getting the remote contents, it will be here.
|
||||
@@ -165,6 +166,230 @@ pub struct StackInfo {
|
||||
pub latest_message: Option<String>,
|
||||
}
|
||||
|
||||
#[typeshare(serialized_as = "Partial<StackConfig>")]
|
||||
pub type _PartialStackConfig = PartialStackConfig;
|
||||
|
||||
/// The compose file configuration.
|
||||
#[typeshare]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Builder, Partial)]
|
||||
#[partial_derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
#[partial(skip_serializing_none, from, diff)]
|
||||
pub struct StackConfig {
|
||||
/// The server to deploy the stack on.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub server_id: String,
|
||||
|
||||
/// Optionally specify a custom project name for the stack.
|
||||
/// If this is empty string, it will default to the stack name.
|
||||
/// Used with `docker compose -p {project_name}`.
|
||||
///
|
||||
/// Note. Can be used to import pre-existing stacks.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub project_name: String,
|
||||
|
||||
/// Directory to change to (`cd`) before running `docker compose up -d`.
|
||||
/// Default: `./` (the repo root)
|
||||
#[serde(default = "default_run_directory")]
|
||||
#[builder(default = "default_run_directory()")]
|
||||
#[partial_default(default_run_directory())]
|
||||
pub run_directory: String,
|
||||
|
||||
/// Add paths to compose files, relative to the run path.
|
||||
/// If this is empty, will use file `compose.yaml`.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub file_paths: Vec<String>,
|
||||
|
||||
/// If this is checked, the stack will source the files on the host.
|
||||
/// Use `run_directory` and `file_paths` to specify the path on the host.
|
||||
/// This is useful for those who wish to setup their files on the host using SSH or similar,
|
||||
/// rather than defining the contents in UI or in a git repo.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub files_on_host: bool,
|
||||
|
||||
/// Used with `registry_account` to login to a registry before docker compose up.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub registry_provider: String,
|
||||
|
||||
/// Used with `registry_provider` to login to a registry before docker compose up.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub registry_account: String,
|
||||
|
||||
/// The extra arguments to pass after `docker compose up -d`.
|
||||
/// If empty, no extra arguments will be passed.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub extra_args: Vec<String>,
|
||||
|
||||
/// The environment variables passed to the compose file.
|
||||
/// They will be written to path defined in env_file_path,
|
||||
/// which is given relative to the run directory.
|
||||
///
|
||||
/// If it is empty, no file will be written.
|
||||
#[serde(
|
||||
default,
|
||||
deserialize_with = "super::env_vars_deserializer"
|
||||
)]
|
||||
#[partial_attr(serde(
|
||||
default,
|
||||
deserialize_with = "super::option_env_vars_deserializer"
|
||||
))]
|
||||
#[builder(default)]
|
||||
pub environment: Vec<EnvironmentVar>,
|
||||
|
||||
/// The name of the written environment file before `docker compose up`.
|
||||
/// Relative to the repo root.
|
||||
/// Default: .env
|
||||
#[serde(default = "default_env_file_path")]
|
||||
#[builder(default = "default_env_file_path()")]
|
||||
#[partial_default(default_env_file_path())]
|
||||
pub env_file_path: String,
|
||||
|
||||
/// Whether to skip secret interpolation into the stack environment variables.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub skip_secret_interp: bool,
|
||||
|
||||
/// The contents of the file directly, for management in the UI.
|
||||
/// If this is empty, it will fall back to checking git config for
|
||||
/// repo based compose file.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub file_contents: String,
|
||||
|
||||
/// Ignore certain services declared in the compose file when checking
|
||||
/// the stack status. For example, an init service might be exited, but the
|
||||
/// stack should be healthy. This init service should be in `ignore_services`
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub ignore_services: Vec<String>,
|
||||
|
||||
/// The git provider domain. Default: github.com
|
||||
#[serde(default = "default_git_provider")]
|
||||
#[builder(default = "default_git_provider()")]
|
||||
#[partial_default(default_git_provider())]
|
||||
pub git_provider: String,
|
||||
|
||||
/// Whether to use https to clone the repo (versus http). Default: true
|
||||
///
|
||||
/// Note. Monitor does not currently support cloning repos via ssh.
|
||||
#[serde(default = "default_git_https")]
|
||||
#[builder(default = "default_git_https()")]
|
||||
#[partial_default(default_git_https())]
|
||||
pub git_https: bool,
|
||||
|
||||
/// The git account used to access private repos.
|
||||
/// Passing empty string can only clone public repos.
|
||||
///
|
||||
/// Note. A token for the account must be available in the core config or the builder server's periphery config
|
||||
/// for the configured git provider.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub git_account: String,
|
||||
|
||||
/// The Github repo used as the source of the build.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub repo: String,
|
||||
|
||||
/// The branch of the repo.
|
||||
#[serde(default = "default_branch")]
|
||||
#[builder(default = "default_branch()")]
|
||||
#[partial_default(default_branch())]
|
||||
pub branch: String,
|
||||
|
||||
/// Optionally set a specific commit hash.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub commit: String,
|
||||
|
||||
/// Whether incoming webhooks actually trigger action.
|
||||
#[serde(default = "default_webhook_enabled")]
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
|
||||
/// Optionally provide an alternate webhook secret for this stack.
|
||||
/// If its an empty string, use the default secret from the config.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub webhook_secret: String,
|
||||
|
||||
/// Whether to send StackStateChange alerts for this stack.
|
||||
#[serde(default = "default_send_alerts")]
|
||||
#[builder(default = "default_send_alerts()")]
|
||||
#[partial_default(default_send_alerts())]
|
||||
pub send_alerts: bool,
|
||||
}
|
||||
|
||||
impl StackConfig {
|
||||
pub fn builder() -> StackConfigBuilder {
|
||||
StackConfigBuilder::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn default_env_file_path() -> String {
|
||||
String::from(".env")
|
||||
}
|
||||
|
||||
fn default_git_provider() -> String {
|
||||
String::from("github.com")
|
||||
}
|
||||
|
||||
fn default_git_https() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_branch() -> String {
|
||||
String::from("main")
|
||||
}
|
||||
|
||||
fn default_run_directory() -> String {
|
||||
String::from("./")
|
||||
}
|
||||
|
||||
fn default_webhook_enabled() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_send_alerts() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
impl Default for StackConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
server_id: Default::default(),
|
||||
project_name: Default::default(),
|
||||
run_directory: default_run_directory(),
|
||||
file_paths: Default::default(),
|
||||
files_on_host: Default::default(),
|
||||
registry_provider: Default::default(),
|
||||
registry_account: Default::default(),
|
||||
file_contents: Default::default(),
|
||||
ignore_services: Default::default(),
|
||||
extra_args: Default::default(),
|
||||
environment: Default::default(),
|
||||
env_file_path: default_env_file_path(),
|
||||
skip_secret_interp: Default::default(),
|
||||
git_provider: default_git_provider(),
|
||||
git_https: default_git_https(),
|
||||
repo: Default::default(),
|
||||
branch: default_branch(),
|
||||
commit: Default::default(),
|
||||
git_account: Default::default(),
|
||||
webhook_enabled: default_webhook_enabled(),
|
||||
webhook_secret: Default::default(),
|
||||
send_alerts: default_send_alerts(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct ComposeProject {
|
||||
@@ -218,206 +443,6 @@ pub struct StackService {
|
||||
pub container: Option<ContainerSummary>,
|
||||
}
|
||||
|
||||
#[typeshare(serialized_as = "Partial<StackConfig>")]
|
||||
pub type _PartialStackConfig = PartialStackConfig;
|
||||
|
||||
/// The compose file configuration.
|
||||
#[typeshare]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Builder, Partial)]
|
||||
#[partial_derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
#[partial(skip_serializing_none, from, diff)]
|
||||
pub struct StackConfig {
|
||||
/// The server to deploy the stack on.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub server_id: String,
|
||||
|
||||
/// Optionally specify a custom project name for the stack.
|
||||
/// If this is empty string, it will default to the stack name.
|
||||
/// Used with `docker compose -p {project_name}`.
|
||||
///
|
||||
/// Note. Can be used to import pre-existing stacks.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub project_name: String,
|
||||
|
||||
/// Directory to change to (`cd`) before running `docker compose up -d`.
|
||||
/// Default: `./` (the repo root)
|
||||
#[serde(default = "default_run_directory")]
|
||||
#[builder(default = "default_run_directory()")]
|
||||
#[partial_default(default_run_directory())]
|
||||
pub run_directory: String,
|
||||
|
||||
/// Add paths to compose files, relative to the run path.
|
||||
/// If this is empty, will use file `compose.yaml`.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub file_paths: Vec<String>,
|
||||
|
||||
/// Used with `registry_account` to login to a registry before docker compose up.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub registry_provider: String,
|
||||
|
||||
/// Used with `registry_provider` to login to a registry before docker compose up.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub registry_account: String,
|
||||
|
||||
/// The extra arguments to pass after `docker compose up -d`.
|
||||
/// If empty, no extra arguments will be passed.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub extra_args: Vec<String>,
|
||||
|
||||
/// The environment variables passed to the compose file.
|
||||
/// They will be written to path defined in env_file_path,
|
||||
/// which is given relative to the run directory.
|
||||
///
|
||||
/// If it is empty, no file will be written.
|
||||
#[serde(
|
||||
default,
|
||||
deserialize_with = "super::env_vars_deserializer"
|
||||
)]
|
||||
#[partial_attr(serde(
|
||||
default,
|
||||
deserialize_with = "super::option_env_vars_deserializer"
|
||||
))]
|
||||
#[builder(default)]
|
||||
pub environment: Vec<EnvironmentVar>,
|
||||
|
||||
/// The name of the written environment file before `docker compose up`.
|
||||
/// Relative to the repo root.
|
||||
/// Default: .env
|
||||
#[serde(default = "default_env_file_path")]
|
||||
#[builder(default = "default_env_file_path()")]
|
||||
#[partial_default(default_env_file_path())]
|
||||
pub env_file_path: String,
|
||||
|
||||
/// Whether to skip secret interpolation into the stack environment variables.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub skip_secret_interp: bool,
|
||||
|
||||
/// The contents of the file directly, for management in the UI.
|
||||
/// If this is empty, it will fall back to checking git config for
|
||||
/// repo based compose file.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub file_contents: String,
|
||||
|
||||
/// The git provider domain. Default: github.com
|
||||
#[serde(default = "default_git_provider")]
|
||||
#[builder(default = "default_git_provider()")]
|
||||
#[partial_default(default_git_provider())]
|
||||
pub git_provider: String,
|
||||
|
||||
/// Whether to use https to clone the repo (versus http). Default: true
|
||||
///
|
||||
/// Note. Monitor does not currently support cloning repos via ssh.
|
||||
#[serde(default = "default_git_https")]
|
||||
#[builder(default = "default_git_https()")]
|
||||
#[partial_default(default_git_https())]
|
||||
pub git_https: bool,
|
||||
|
||||
/// The git account used to access private repos.
|
||||
/// Passing empty string can only clone public repos.
|
||||
///
|
||||
/// Note. A token for the account must be available in the core config or the builder server's periphery config
|
||||
/// for the configured git provider.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub git_account: String,
|
||||
|
||||
/// The Github repo used as the source of the build.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub repo: String,
|
||||
|
||||
/// The branch of the repo.
|
||||
#[serde(default = "default_branch")]
|
||||
#[builder(default = "default_branch()")]
|
||||
#[partial_default(default_branch())]
|
||||
pub branch: String,
|
||||
|
||||
/// Optionally set a specific commit hash.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub commit: String,
|
||||
|
||||
/// Whether incoming webhooks actually trigger action.
|
||||
#[serde(default = "default_webhook_enabled")]
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
|
||||
/// Whether to send StackStateChange alerts for this stack.
|
||||
#[serde(default = "default_send_alerts")]
|
||||
#[builder(default = "default_send_alerts()")]
|
||||
#[partial_default(default_send_alerts())]
|
||||
pub send_alerts: bool,
|
||||
}
|
||||
|
||||
impl StackConfig {
|
||||
pub fn builder() -> StackConfigBuilder {
|
||||
StackConfigBuilder::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn default_env_file_path() -> String {
|
||||
String::from(".env")
|
||||
}
|
||||
|
||||
fn default_git_provider() -> String {
|
||||
String::from("github.com")
|
||||
}
|
||||
|
||||
fn default_git_https() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_branch() -> String {
|
||||
String::from("main")
|
||||
}
|
||||
|
||||
fn default_run_directory() -> String {
|
||||
String::from("./")
|
||||
}
|
||||
|
||||
fn default_webhook_enabled() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_send_alerts() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
impl Default for StackConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
server_id: Default::default(),
|
||||
project_name: Default::default(),
|
||||
run_directory: default_run_directory(),
|
||||
file_paths: Default::default(),
|
||||
registry_provider: Default::default(),
|
||||
registry_account: Default::default(),
|
||||
file_contents: Default::default(),
|
||||
extra_args: Default::default(),
|
||||
environment: Default::default(),
|
||||
env_file_path: default_env_file_path(),
|
||||
skip_secret_interp: Default::default(),
|
||||
git_provider: default_git_provider(),
|
||||
git_https: default_git_https(),
|
||||
repo: Default::default(),
|
||||
branch: default_branch(),
|
||||
commit: Default::default(),
|
||||
git_account: Default::default(),
|
||||
webhook_enabled: default_webhook_enabled(),
|
||||
send_alerts: default_send_alerts(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]
|
||||
pub struct StackActionState {
|
||||
@@ -465,11 +490,4 @@ pub struct ComposeFile {
|
||||
pub struct ComposeService {
|
||||
pub image: Option<String>,
|
||||
pub container_name: Option<String>,
|
||||
pub deploy: Option<ComposeServiceDeploy>,
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct ComposeServiceDeploy {
|
||||
pub replicas: u8,
|
||||
}
|
||||
|
||||
@@ -240,6 +240,12 @@ pub struct ResourceSyncConfig {
|
||||
#[builder(default = "default_webhook_enabled()")]
|
||||
#[partial_default(default_webhook_enabled())]
|
||||
pub webhook_enabled: bool,
|
||||
|
||||
/// Optionally provide an alternate webhook secret for this sync.
|
||||
/// If its an empty string, use the default secret from the config.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub webhook_secret: String,
|
||||
}
|
||||
|
||||
impl ResourceSyncConfig {
|
||||
@@ -280,6 +286,7 @@ impl Default for ResourceSyncConfig {
|
||||
resource_path: default_resource_path(),
|
||||
delete: Default::default(),
|
||||
webhook_enabled: default_webhook_enabled(),
|
||||
webhook_secret: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -78,10 +78,14 @@ impl User {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether user is an inbuilt service user
|
||||
///
|
||||
/// NOTE: ALSO UPDATE `frontend/src/lib/utils/is_service_user` to match
|
||||
pub fn is_service_user(user_id: &str) -> bool {
|
||||
matches!(
|
||||
user_id,
|
||||
"Procedure"
|
||||
"System"
|
||||
| "Procedure"
|
||||
| "Github" // Github can be removed later, just keeping for backward compat.
|
||||
| "Git Webhook"
|
||||
| "Auto Redeploy"
|
||||
@@ -95,6 +99,7 @@ impl User {
|
||||
|
||||
pub fn admin_service_user(user_id: &str) -> Option<User> {
|
||||
match user_id {
|
||||
"System" => system_user().to_owned().into(),
|
||||
"Procedure" => procedure_user().to_owned().into(),
|
||||
// Github should be removed later, replaced by Git Webhook, just keeping for backward compat.
|
||||
"Github" => git_webhook_user().to_owned().into(),
|
||||
@@ -108,6 +113,19 @@ pub fn admin_service_user(user_id: &str) -> Option<User> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn system_user() -> &'static User {
|
||||
static SYSTEM_USER: OnceLock<User> = OnceLock::new();
|
||||
SYSTEM_USER.get_or_init(|| {
|
||||
let id_name = String::from("System");
|
||||
User {
|
||||
id: id_name.clone(),
|
||||
username: id_name,
|
||||
admin: true,
|
||||
..Default::default()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn procedure_user() -> &'static User {
|
||||
static PROCEDURE_USER: OnceLock<User> = OnceLock::new();
|
||||
PROCEDURE_USER.get_or_init(|| {
|
||||
|
||||
@@ -374,6 +374,11 @@ export interface BuildConfig {
|
||||
builder_id?: string;
|
||||
/** The current version of the build. */
|
||||
version?: Version;
|
||||
/**
|
||||
* Whether to automatically increment the patch on every build.
|
||||
* Default is `true`
|
||||
*/
|
||||
auto_increment_version: boolean;
|
||||
/**
|
||||
* An alternate name for the image pushed to the repository.
|
||||
* If this is empty, it will use the build name.
|
||||
@@ -401,12 +406,6 @@ export interface BuildConfig {
|
||||
* Note. Monitor does not currently support cloning repos via ssh.
|
||||
*/
|
||||
git_https: boolean;
|
||||
/** The repo used as the source of the build. */
|
||||
repo?: string;
|
||||
/** The branch of the repo. */
|
||||
branch: string;
|
||||
/** Optionally set a specific commit hash. */
|
||||
commit?: string;
|
||||
/**
|
||||
* The git account used to access private repos.
|
||||
* Passing empty string can only clone public repos.
|
||||
@@ -415,6 +414,19 @@ export interface BuildConfig {
|
||||
* for the configured git provider.
|
||||
*/
|
||||
git_account?: string;
|
||||
/** The repo used as the source of the build. */
|
||||
repo?: string;
|
||||
/** The branch of the repo. */
|
||||
branch: string;
|
||||
/** Optionally set a specific commit hash. */
|
||||
commit?: string;
|
||||
/** Whether incoming webhooks actually trigger action. */
|
||||
webhook_enabled: boolean;
|
||||
/**
|
||||
* Optionally provide an alternate webhook secret for this build.
|
||||
* If its an empty string, use the default secret from the config.
|
||||
*/
|
||||
webhook_secret?: string;
|
||||
/** The optional command run after repo clone and before docker build. */
|
||||
pre_build?: SystemCommand;
|
||||
/** Configuration for the registry to push the built image to. */
|
||||
@@ -430,8 +442,6 @@ export interface BuildConfig {
|
||||
skip_secret_interp?: boolean;
|
||||
/** Whether to use buildx to build (eg `docker buildx build ...`) */
|
||||
use_buildx?: boolean;
|
||||
/** Whether incoming webhooks actually trigger action. */
|
||||
webhook_enabled: boolean;
|
||||
/** Any extra docker cli arguments to be included in the build command */
|
||||
extra_args?: string[];
|
||||
/**
|
||||
@@ -871,6 +881,11 @@ export interface ProcedureConfig {
|
||||
stages?: ProcedureStage[];
|
||||
/** Whether incoming webhooks actually trigger action. */
|
||||
webhook_enabled: boolean;
|
||||
/**
|
||||
* Optionally provide an alternate webhook secret for this procedure.
|
||||
* If its an empty string, use the default secret from the config.
|
||||
*/
|
||||
webhook_secret?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -980,12 +995,12 @@ export interface RepoConfig {
|
||||
builder_id?: string;
|
||||
/** The git provider domain. Default: github.com */
|
||||
git_provider: string;
|
||||
/** The github repo to clone. */
|
||||
repo?: string;
|
||||
/** The repo branch. */
|
||||
branch: string;
|
||||
/** Optionally set a specific commit hash. */
|
||||
commit?: string;
|
||||
/**
|
||||
* Whether to use https to clone the repo (versus http). Default: true
|
||||
*
|
||||
* Note. Monitor does not currently support cloning repos via ssh.
|
||||
*/
|
||||
git_https: boolean;
|
||||
/**
|
||||
* The git account used to access private repos.
|
||||
* Passing empty string can only clone public repos.
|
||||
@@ -994,14 +1009,21 @@ export interface RepoConfig {
|
||||
* for the configured git provider.
|
||||
*/
|
||||
git_account?: string;
|
||||
/**
|
||||
* Whether to use https to clone the repo (versus http). Default: true
|
||||
*
|
||||
* Note. Monitor does not currently support cloning repos via ssh.
|
||||
*/
|
||||
git_https: boolean;
|
||||
/** The github repo to clone. */
|
||||
repo?: string;
|
||||
/** The repo branch. */
|
||||
branch: string;
|
||||
/** Optionally set a specific commit hash. */
|
||||
commit?: string;
|
||||
/** Explicitly specify the folder to clone the repo in. */
|
||||
path?: string;
|
||||
/** Whether incoming webhooks actually trigger action. */
|
||||
webhook_enabled: boolean;
|
||||
/**
|
||||
* Optionally provide an alternate webhook secret for this repo.
|
||||
* If its an empty string, use the default secret from the config.
|
||||
*/
|
||||
webhook_secret?: string;
|
||||
/**
|
||||
* Command to be run after the repo is cloned.
|
||||
* The path is relative to the root of the repo.
|
||||
@@ -1028,8 +1050,6 @@ export interface RepoConfig {
|
||||
env_file_path: string;
|
||||
/** Whether to skip secret interpolation into the repo environment variable file. */
|
||||
skip_secret_interp?: boolean;
|
||||
/** Whether incoming webhooks actually trigger action. */
|
||||
webhook_enabled: boolean;
|
||||
}
|
||||
|
||||
export interface RepoInfo {
|
||||
@@ -1126,6 +1146,11 @@ export interface ServerConfig {
|
||||
* default: true
|
||||
*/
|
||||
enabled: boolean;
|
||||
/**
|
||||
* Sometimes the system stats reports a mount path that is not desired.
|
||||
* Use this field to filter it out from the report.
|
||||
*/
|
||||
ignore_mounts?: string[];
|
||||
/**
|
||||
* Whether to monitor any server stats beyond passing health check.
|
||||
* default: true
|
||||
@@ -1461,6 +1486,13 @@ export interface StackConfig {
|
||||
* If this is empty, will use file `compose.yaml`.
|
||||
*/
|
||||
file_paths?: string[];
|
||||
/**
|
||||
* If this is checked, the stack will source the files on the host.
|
||||
* Use `run_directory` and `file_paths` to specify the path on the host.
|
||||
* This is useful for those who wish to setup their files on the host using SSH or similar,
|
||||
* rather than defining the contents in UI or in a git repo.
|
||||
*/
|
||||
files_on_host?: boolean;
|
||||
/** Used with `registry_account` to login to a registry before docker compose up. */
|
||||
registry_provider?: string;
|
||||
/** Used with `registry_provider` to login to a registry before docker compose up. */
|
||||
@@ -1492,6 +1524,12 @@ export interface StackConfig {
|
||||
* repo based compose file.
|
||||
*/
|
||||
file_contents?: string;
|
||||
/**
|
||||
* Ignore certain services declared in the compose file when checking
|
||||
* the stack status. For example, an init service might be exited, but the
|
||||
* stack should be healthy. This init service should be in `ignore_services`
|
||||
*/
|
||||
ignore_services?: string[];
|
||||
/** The git provider domain. Default: github.com */
|
||||
git_provider: string;
|
||||
/**
|
||||
@@ -1516,6 +1554,11 @@ export interface StackConfig {
|
||||
commit?: string;
|
||||
/** Whether incoming webhooks actually trigger action. */
|
||||
webhook_enabled: boolean;
|
||||
/**
|
||||
* Optionally provide an alternate webhook secret for this stack.
|
||||
* If its an empty string, use the default secret from the config.
|
||||
*/
|
||||
webhook_secret?: string;
|
||||
/** Whether to send StackStateChange alerts for this stack. */
|
||||
send_alerts: boolean;
|
||||
}
|
||||
@@ -1581,7 +1624,8 @@ export interface StackInfo {
|
||||
*/
|
||||
latest_services?: StackServiceNames[];
|
||||
/**
|
||||
* The remote compose file contents. This is updated whenever Monitor refreshes the stack cache.
|
||||
* The remote compose file contents, whether on host or in repo.
|
||||
* This is updated whenever Monitor refreshes the stack cache.
|
||||
* It will be empty if the file is defined directly in the stack config.
|
||||
*/
|
||||
remote_contents?: ComposeContents[];
|
||||
@@ -1726,6 +1770,11 @@ export interface ResourceSyncConfig {
|
||||
delete?: boolean;
|
||||
/** Whether incoming webhooks actually trigger action. */
|
||||
webhook_enabled: boolean;
|
||||
/**
|
||||
* Optionally provide an alternate webhook secret for this sync.
|
||||
* If its an empty string, use the default secret from the config.
|
||||
*/
|
||||
webhook_secret?: string;
|
||||
}
|
||||
|
||||
export type PendingSyncUpdatesData =
|
||||
@@ -5117,14 +5166,9 @@ export interface HetznerServerTemplateConfig {
|
||||
port: number;
|
||||
}
|
||||
|
||||
export interface ComposeServiceDeploy {
|
||||
replicas: number;
|
||||
}
|
||||
|
||||
export interface ComposeService {
|
||||
image?: string;
|
||||
container_name?: string;
|
||||
deploy?: ComposeServiceDeploy;
|
||||
}
|
||||
|
||||
/** Keeping this minimal for now as its only needed to parse the service names / container names */
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
use monitor_client::entities::{
|
||||
stack::{ComposeContents, ComposeProject, Stack}, update::Log, SearchCombinator,
|
||||
stack::{ComposeContents, ComposeProject, Stack},
|
||||
update::Log,
|
||||
SearchCombinator,
|
||||
};
|
||||
use resolver_api::derive::Request;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -15,6 +17,25 @@ pub struct ListComposeProjects {}
|
||||
|
||||
//
|
||||
|
||||
/// Get the compose contents on the host, for stacks using
|
||||
/// `files_on_host`.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Request)]
|
||||
#[response(GetComposeContentsOnHostResponse)]
|
||||
pub struct GetComposeContentsOnHost {
|
||||
/// The name of the stack
|
||||
pub name: String,
|
||||
pub run_directory: String,
|
||||
pub file_paths: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct GetComposeContentsOnHostResponse {
|
||||
pub contents: Vec<ComposeContents>,
|
||||
pub errors: Vec<ComposeContents>,
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// The stack folder must already exist for this to work
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Request)]
|
||||
#[response(Log)]
|
||||
|
||||
81
config_example/aio.compose.yaml
Normal file
81
config_example/aio.compose.yaml
Normal file
@@ -0,0 +1,81 @@
|
||||
######################
|
||||
# ALL IN ONE COMPOSE #
|
||||
######################
|
||||
|
||||
## This compose file will bring up both Core and Periphery in containers.
|
||||
## A "default" server pointing to the local Periphery will be waiting in the UI on first startup.
|
||||
|
||||
services:
|
||||
monitor-core:
|
||||
image: ghcr.io/mbecker20/monitor:latest ## use ghcr.io/mbecker20/monitor:latest-aarch64 for arm support
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- monitor-mongo
|
||||
logging:
|
||||
driver: local # enable log rotation by default. see `https://docs.docker.com/config/containers/logging/local/`
|
||||
networks:
|
||||
- monitor-network
|
||||
ports:
|
||||
- 9120:9120
|
||||
environment: # https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml
|
||||
MONITOR_HOST: https://demo.monitor.dev # CHANGEME
|
||||
MONITOR_TITLE: Monitor # Change the app title, displayed in the browser tab.
|
||||
MONITOR_ENSURE_SERVER: http://monitor-periphery:8120 # Created the "default" server.
|
||||
## MONGO
|
||||
MONITOR_MONGO_ADDRESS: monitor-mongo:27017
|
||||
MONITOR_MONGO_USERNAME: admin # match db credentials ones below
|
||||
MONITOR_MONGO_PASSWORD: admin
|
||||
## KEYS
|
||||
MONITOR_PASSKEY: a_random_passkey # used to auth against periphery
|
||||
MONITOR_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks
|
||||
MONITOR_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart.
|
||||
## AUTH
|
||||
MONITOR_LOCAL_AUTH: true # the default is false.
|
||||
# MONITOR_GITHUB_OAUTH_ENABLED: true # also support google oauth
|
||||
# MONITOR_GITHUB_OAUTH_ID: your_oauth_id
|
||||
# MONITOR_GITHUB_OAUTH_SECRET: your_oauth_secret
|
||||
## AWS
|
||||
# MONITOR_AWS_ACCESS_KEY_ID: your_aws_key_id
|
||||
# MONITOR_AWS_SECRET_ACCESS_KEY: your_secret_access_key
|
||||
## HETZNER
|
||||
# MONITOR_HETZNER_TOKEN: your_hetzner_token
|
||||
|
||||
## Deploy periphery container using this block,
|
||||
## or deploy it on the host directly using https://github.com/mbecker20/monitor/tree/main/scripts
|
||||
monitor-periphery:
|
||||
image: ghcr.io/mbecker20/periphery:latest # use ghcr.io/mbecker20/periphery:latest-aarch64 for arm support
|
||||
logging:
|
||||
driver: local
|
||||
networks:
|
||||
- monitor-network
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- monitor-repos:/etc/monitor/repos # manage repos in a docker volume, or change it to an accessible host directory.
|
||||
# environment:
|
||||
# # If the disk size is overreporting, can use one of these to
|
||||
# # whitelist / blacklist the disks to filter them, whichever is easier.
|
||||
# PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/monitor/repos
|
||||
# PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap
|
||||
|
||||
monitor-mongo:
|
||||
image: mongo
|
||||
command: --quiet # suppress mongo logs a bit
|
||||
restart: unless-stopped
|
||||
logging:
|
||||
driver: local
|
||||
networks:
|
||||
- monitor-network
|
||||
ports:
|
||||
- 27017:27017
|
||||
volumes:
|
||||
- db-data:/data/db
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: admin # change these
|
||||
MONGO_INITDB_ROOT_PASSWORD: admin
|
||||
|
||||
volumes:
|
||||
db-data:
|
||||
monitor-repos:
|
||||
|
||||
networks:
|
||||
monitor-network: {}
|
||||
@@ -14,6 +14,7 @@ services:
|
||||
- host.docker.internal:host-gateway
|
||||
environment: # https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml
|
||||
MONITOR_HOST: https://demo.monitor.dev
|
||||
MONITOR_TITLE: Monitor # Change the app title, displayed in the browser tab.
|
||||
## MONGO
|
||||
MONITOR_MONGO_ADDRESS: monitor-mongo:27017
|
||||
MONITOR_MONGO_USERNAME: admin # match ones below
|
||||
|
||||
@@ -36,6 +36,12 @@ host = "https://monitor.dev"
|
||||
## Required to start Monitor, no default
|
||||
passkey = "a_random_passkey"
|
||||
|
||||
## Ensure a server with this address exists on Core
|
||||
## upon first startup. Used with AIO compose.
|
||||
## Optional, no default.
|
||||
## Env: MONITOR_ENSURE_SERVER
|
||||
# ensure_server = "http://monitor-periphery:8120"
|
||||
|
||||
## Disables write support on resources in the UI.
|
||||
## This protects users that would normally have write privileges during their UI usage,
|
||||
## when they intend to fully rely on ResourceSyncs to manage config.
|
||||
@@ -205,12 +211,12 @@ webhook_secret = "a_random_webhook_secret"
|
||||
# logging.stdio = "standard"
|
||||
|
||||
## Optionally specify a opentelemetry otlp endpoint to send traces to.
|
||||
## Env: MONITOR_OTLP_ENDPOINT
|
||||
## Env: MONITOR_LOGGING_OTLP_ENDPOINT
|
||||
# logging.otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Set the opentelemetry service name.
|
||||
## This will be attached to the telemetry Monitor will send.
|
||||
## Env: MONITOR_OPENTELEMETRY_SERVICE_NAME
|
||||
## Env: MONITOR_LOGGING_OPENTELEMETRY_SERVICE_NAME
|
||||
## Default: "Monitor"
|
||||
# logging.opentelemetry_service_name = "Monitor-02"
|
||||
|
||||
|
||||
@@ -3,38 +3,55 @@
|
||||
############################
|
||||
|
||||
## Optional. The port the server runs on. 8120 is default
|
||||
## Env: PERIPHERY_PORT
|
||||
# port = 8120
|
||||
|
||||
## Optional. /etc/monitor/repos is default.
|
||||
## The directory periphery will use to manage repos.
|
||||
## The periphery user must have write access to this directory.
|
||||
## Env: PERIPHERY_REPO_DIR
|
||||
# repo_dir = "/home/ubuntu/monitor/repos"
|
||||
|
||||
## Optional. /etc/monitor/stacks is default.
|
||||
## The directory periphery will use to manage stacks.
|
||||
## The periphery user must have write access to this directory.
|
||||
## Env: PERIPHERY_STACK_DIR
|
||||
# stack_dir = "/home/ubuntu/monitor/stacks"
|
||||
|
||||
## Optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded
|
||||
## Env: PERIPHERY_STATS_POLLING_RATE
|
||||
# stats_polling_rate = "1-sec"
|
||||
|
||||
## Whether stack actions should use `docker-compose ...`
|
||||
## instead of `docker compose ...`.
|
||||
## default: false
|
||||
## Env: PERIPHERY_LEGACY_COMPOSE_CLI
|
||||
# legacy_compose_cli = true
|
||||
|
||||
## Optional. Only include mounts at specific paths in the disc report.
|
||||
## Env: PERIPHERY_INCLUDE_DISK_MOUNTS
|
||||
# include_disk_mounts = ["/etc/monitor/repos"]
|
||||
|
||||
## Optional. Don't include these mounts in the disk report.
|
||||
## Env: PERIPHERY_EXCLUDE_DISK_MOUNTS
|
||||
# exclude_disk_mounts = ["/etc/monitor/repos"]
|
||||
|
||||
########
|
||||
# AUTH #
|
||||
########
|
||||
|
||||
## Optional. Limit the ip addresses which can call the periphery api.
|
||||
## Default is empty, which will not block any request by ip.
|
||||
## Env: PERIPHERY_ALLOWED_IPS
|
||||
# allowed_ips = ["127.0.0.1"]
|
||||
|
||||
## Optional. Require callers to provide one of the configured passkeys to access the periphery api.
|
||||
## Default is empty, which will not require any passkey to be passed by core.
|
||||
## Env: PERIPHERY_PASSKEYS
|
||||
# passkeys = ["abcdefghijk"]
|
||||
|
||||
|
||||
|
||||
###########
|
||||
# SECRETS #
|
||||
###########
|
||||
@@ -95,17 +112,21 @@
|
||||
## Specify the log level of the monitor core application
|
||||
## Default: info
|
||||
## Options: off, error, warn, info, debug, trace
|
||||
## Env: PERIPHERY_LOGGING_LEVEL
|
||||
# logging.level = "debug"
|
||||
|
||||
## Specify the logging format for stdout / stderr.
|
||||
## Default: standard
|
||||
## Options: standard, json, none
|
||||
## Env: PERIPHERY_LOGGING_STDIO
|
||||
# logging.stdio = "json"
|
||||
|
||||
## Specify a opentelemetry otlp endpoint to send traces to
|
||||
## Optional, default unassigned
|
||||
## Env: PERIPHERY_LOGGING_OTLP_ENDPOINT
|
||||
# logging.otlp_endpoint = "http://localhost:4317"
|
||||
|
||||
## Set the opentelemetry service name attached to the telemetry this periphery will send.
|
||||
## Default: "Monitor"
|
||||
## Env: PERIPHERY_LOGGING_OPENTELEMETRY_SERVICE_NAME
|
||||
# logging.opentelemetry_service_name = "Periphery-02"
|
||||
@@ -7,10 +7,17 @@ Connecting a server to monitor has 2 steps:
|
||||
|
||||
Once step 1. is complete, you can just connect the server to Monitor Core from the UI.
|
||||
|
||||
## Install the Periphery agent
|
||||
## Install
|
||||
|
||||
The easiest way to setup and update periphery is to use the setup script (as root user):
|
||||
You can install Periphery as a systemd managed process, run it as a [docker container](https://github.com/mbecker20/monitor/pkgs/container/periphery), or do whatever you want with the binary.
|
||||
|
||||
Some Periphery actions interact with your hosts file system, like cloning repos, or accessing local compose files.
|
||||
For this reason, running periphery in a container can be a bit more complicated.
|
||||
Additionally, Periphery in a container tends to overreport the disks by default, but this can be fixed via some configuration.
|
||||
|
||||
### Install the Periphery agent - systemd
|
||||
|
||||
As root user:
|
||||
```sh
|
||||
curl -sSL https://raw.githubusercontent.com/mbecker20/monitor/main/scripts/setup-periphery.py | python3
|
||||
```
|
||||
@@ -27,7 +34,28 @@ You can find more information (and view the script) in the [readme](https://gith
|
||||
This script can be run multiple times without issue, and it won't change existing config after the first run. Just run it again after a Monitor version release, and it will update the periphery version.
|
||||
:::
|
||||
|
||||
## Manual install steps
|
||||
### Install the Periphery agent - container
|
||||
|
||||
You can use a docker compose file like this:
|
||||
```yaml
|
||||
services:
|
||||
monitor-periphery:
|
||||
image: ghcr.io/mbecker20/periphery:latest # use ghcr.io/mbecker20/periphery:latest-aarch64 for arm support
|
||||
logging:
|
||||
driver: local
|
||||
ports:
|
||||
- 8120:8120
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- monitor-repos:/etc/monitor/repos # manage repos in a docker volume, or change it to an accessible host directory.
|
||||
# environment:
|
||||
# # If the disk size is overreporting, can use one of these to
|
||||
# # whitelist / blacklist the disks to filter them, whichever is easier.
|
||||
# PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/monitor/repos
|
||||
# PERIPHERY_EXCLUDE_DISK_MOUNTS: /snap
|
||||
```
|
||||
|
||||
### Manual install steps - binaries
|
||||
|
||||
1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases).
|
||||
|
||||
|
||||
@@ -2,26 +2,32 @@
|
||||
|
||||
To run Monitor Core, you will need Docker. See [the docker install docs](https://docs.docker.com/engine/install/).
|
||||
|
||||
:::info
|
||||
Monitor Core itself can really only run remote builds.
|
||||
You also have to [**install the Monitor Periphery agent**](/docs/connecting-servers) on your hosts and connect them as **Servers**
|
||||
in order to alert / deploy etc.
|
||||
|
||||
You can currently and always will be able to **connect as many servers as you like** using the Periphery agent.
|
||||
:::
|
||||
|
||||
### Deploy Monitor Core with Docker Compose
|
||||
|
||||
There is an example compose file here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml](https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml).
|
||||
|
||||
Copy the contents to a `compose.yaml`, and deploy it with `docker compose up -d`.
|
||||
|
||||
:::info
|
||||
Monitor Core itself can really only run remote builds.
|
||||
You also have to [**install the Monitor Periphery agent**](/docs/connecting-servers) on your hosts and connect them as **Servers**
|
||||
in order to alert / deploy etc.
|
||||
|
||||
If you **only need to connect on one server** (the one you are deploying Monitor Core on), you can do it all dockerized,
|
||||
and use the [**all-in-one compose file**](https://github.com/mbecker20/monitor/blob/main/config_example/aio.compose.yaml).
|
||||
This will deploy Monitor Core and Periphery, and automatically add the local periphery as a connected server.
|
||||
|
||||
Deploying with the AIO compose file **will not** stop you from connecting more servers later, and is really just for setup convenience.
|
||||
|
||||
You can currently and always will be able to **connect as many servers as you like** using the Periphery agent.
|
||||
:::
|
||||
|
||||
### Configuration
|
||||
|
||||
You can configure Monitor with environment variables, or using a config file.
|
||||
|
||||
The example config file in the Monitor repo documents all the configuration options, along with the corresponding environment variables.
|
||||
It can be found here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml](https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml).
|
||||
It can be found here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml).
|
||||
|
||||
Note that configuration passed in environment variables will take precedent over what is given in the file.
|
||||
|
||||
|
||||
48
docsite/docs/docker-compose.md
Normal file
48
docsite/docs/docker-compose.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# Docker Compose
|
||||
|
||||
Monitor supports docker compose through the `Stack` resource.
|
||||
|
||||
## Define the compose file/s
|
||||
|
||||
Monitor supports 3 ways of defining the compose files:
|
||||
1. **Write them in the UI**, and Monitor will write them to your host at deploy-time.
|
||||
2. **Store them in a git repo**, and have Monitor clone it on the host to deploy.
|
||||
3. **Store the files anywhere on the host**, and Monitor will just run the compose commands on the existing files.
|
||||
|
||||
The recommended way to deploy Stacks is using compose files located in a git repo.
|
||||
|
||||
If you manage your compose files in git repos:
|
||||
|
||||
- All your files, across all servers, are available locally to edit in your favorite text editor.
|
||||
- All of your changes are tracked, and can be reverted.
|
||||
- You can use the git webhooks to do other automations when you change the compose file contents. Redeploying will be as easy as just `git push`.
|
||||
|
||||
:::info
|
||||
Many Monitor resources need access to git repos. There is an in-built token management system (managed in UI or in config file) to give resources access to credentials.
|
||||
All resources which depend on git repos are able to use these credentials to access private repos.
|
||||
:::
|
||||
|
||||
## Importing Existing Compose projects
|
||||
|
||||
First create the Stack in Monitor, and ensure it has access to the compose files using one
|
||||
of the three methods above. Make sure to attach the server you wish to deploy on.
|
||||
|
||||
In order for Monitor to pick up a running project, it has to know the compose "project name".
|
||||
You can find the project name by running `docker compose ls` on the host.
|
||||
|
||||
By default, Monitor will assume the Stack name is the compose project name.
|
||||
If this is different than the project name on the host, you can configure a custom "Project Name" in the config.
|
||||
|
||||
## Pass Environment Variables
|
||||
|
||||
Monitor is able to pass custom environment variables to the docker compose process.
|
||||
This works by:
|
||||
|
||||
1. Write the variables to a ".env" file on the host at deploy-time.
|
||||
2. Pass the file to docker compose using the `--env-file` flag.
|
||||
|
||||
:::info
|
||||
Just like all other resources with Environments (Deployments, Repos, Builds),
|
||||
Stack Environments support **Variable and Secret interpolation**. Define global variables
|
||||
in the UI and share the values across environments.
|
||||
:::
|
||||
@@ -20,6 +20,11 @@ With Monitor you can:
|
||||
|
||||
Monitor is opinionated by design, and uses [docker](https://docs.docker.com/) as the container engine for building and deploying.
|
||||
|
||||
:::info
|
||||
Monitor also supports [**podman**](https://podman.io/) instead of docker by utilizing the `podman` -> `docker` alias.
|
||||
For Stack / docker compose support with podman, check out [**podman-compose**](https://github.com/containers/podman-compose). Thanks to `u/pup_kit` for checking this.
|
||||
:::
|
||||
|
||||
## Architecture and Components
|
||||
|
||||
Monitor is composed of a single core and any amount of connected servers running the periphery application.
|
||||
|
||||
@@ -25,16 +25,18 @@ All resources which depend on git repos / docker registries are able to use thes
|
||||
|
||||
-- Deploy with docker compose.<br></br>
|
||||
-- Provide the compose file in UI, or move the files to a git repo and use a webhook for auto redeploy on push.<br></br>
|
||||
-- Supports composing multiple compose files using `docker compose -f ... -f ...`.
|
||||
-- Supports composing multiple compose files using `docker compose -f ... -f ...`.<br></br>
|
||||
-- Pass environment variables usable within the compose file. Interpolate in app-wide variables / secrets.
|
||||
|
||||
## Repo
|
||||
|
||||
-- Put scripts in git repos, and run them on a server every time they are pushed to.
|
||||
-- Put scripts in git repos, and run them on a Server, or using a Builder.<br></br>
|
||||
-- Can build binaries, perform automation, really whatever you can think of.
|
||||
|
||||
## Build
|
||||
|
||||
-- Build application source into docker images, and push them to
|
||||
-- Build application source into docker images, and push them to the configured registry.<br></br>
|
||||
-- The source can be any git repo containing a Dockerfile.
|
||||
|
||||
## Builder
|
||||
|
||||
@@ -49,11 +51,12 @@ All resources which depend on git repos / docker registries are able to use thes
|
||||
## ResourceSync
|
||||
|
||||
-- Orchestrate all your configuration declaratively by defining it in `toml` files, which are checked into a git repo.<br></br>
|
||||
-- Can deploy **Deployments** and **Stacks** if changes are suggested. Specify deploy ordering with `after` array. (like docker compose `depends_on` but can span across servers.).
|
||||
-- Can deploy **Deployments** and **Stacks** if changes are suggested.<br></br>
|
||||
-- Specify deploy ordering with `after` array. (like docker compose `depends_on` but can span across servers.).
|
||||
|
||||
## Alerter
|
||||
|
||||
-- Route alerts to various endpoints<br></br>
|
||||
-- Route alerts to various endpoints.<br></br>
|
||||
-- Can configure rules on each Alerter, such as resource whitelist, blacklist, or alert type filter.
|
||||
|
||||
## ServerTemplate
|
||||
|
||||
35
docsite/docs/webhooks.md
Normal file
35
docsite/docs/webhooks.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# Configuring Webhooks
|
||||
|
||||
Multiple Monitor resources can take advantage of webhooks from your git provider. Monitor supports incoming webhooks using the Github standard, which is also supported by other providers like Gitea.
|
||||
|
||||
:::note
|
||||
On Gitea, the default "Gitea" webhook type works with the Github standard 👍
|
||||
:::
|
||||
|
||||
## Copy the Resource Payload URL
|
||||
|
||||
Find the resource in UI, like a `Build`, `Repo`, or `Stack`.
|
||||
Scroll down to the bottom of Configuration area, and copy the webhook for the action you want.
|
||||
|
||||
## Create the webhook on the Git Provider
|
||||
|
||||
Navigate to the repo page on your git provider, and go to the settings for the Repo.
|
||||
Find Webhook settings, and click to create a new webhook.
|
||||
|
||||
You will have to input some information.
|
||||
|
||||
1. The `Payload URL` is the link that you copied in the step above, `Copy the Resource Payload URL`.
|
||||
2. For Content-type, choose `application/json`
|
||||
3. For Secret, input the secret you configured in the Monitor Core config (`MONITOR_WEBHOOK_SECRET`).
|
||||
4. Enable SSL Verification, if you have proper TLS setup to your git provider (recommended).
|
||||
5. For "events that trigger the webhook", just the push event is what most people want.
|
||||
6. Of course, make sure the webhook is "Active" and hit create.
|
||||
|
||||
## When does it trigger?
|
||||
|
||||
Your git provider will now push this webhook to Monitor on *every* push to *any* branch. However, your `Build`, `Repo`,
|
||||
etc. only cares about a specific branch of the repo.
|
||||
|
||||
Because of this, the webhook will trigger the action **only on pushes to the branch configured on the resource**.
|
||||
|
||||
For example, if I make a build, I may point the build to the `release` branch of a particular repo. If I set up a webhook, and push to the `main` branch, the action will *not trigger*. It will only trigger when the push is to the `release` branch.
|
||||
@@ -44,7 +44,9 @@ const sidebars: SidebarsConfig = {
|
||||
// "deploy-containers/versioning",
|
||||
],
|
||||
},
|
||||
"docker-compose",
|
||||
"sync-resources",
|
||||
"webhooks",
|
||||
"permissioning",
|
||||
"version-upgrades",
|
||||
"api",
|
||||
|
||||
@@ -125,7 +125,7 @@ export const Config = <T,>({
|
||||
titleOther?: ReactNode;
|
||||
components: Record<
|
||||
string, // sidebar key
|
||||
ConfigComponent<T>[]
|
||||
ConfigComponent<T>[] | false | undefined
|
||||
>;
|
||||
}) => {
|
||||
const [show, setShow] = useState(keys(components)[0]);
|
||||
@@ -163,80 +163,84 @@ export const Config = <T,>({
|
||||
<div className="flex gap-4">
|
||||
{/** The sidebar when large */}
|
||||
<div className="hidden xl:flex flex-col gap-4 w-[300px]">
|
||||
{keys(components).map((tab) => (
|
||||
<Button
|
||||
key={tab}
|
||||
variant={show === tab ? "secondary" : "outline"}
|
||||
onClick={() => setShow(tab)}
|
||||
className="capitalize"
|
||||
>
|
||||
{tab}
|
||||
</Button>
|
||||
))}
|
||||
{Object.entries(components)
|
||||
.filter(([_, val]) => val)
|
||||
.map(([tab, _]) => (
|
||||
<Button
|
||||
key={tab}
|
||||
variant={show === tab ? "secondary" : "outline"}
|
||||
onClick={() => setShow(tab)}
|
||||
className="capitalize"
|
||||
>
|
||||
{tab}
|
||||
</Button>
|
||||
))}
|
||||
</div>
|
||||
|
||||
<div className="flex flex-col gap-6 min-h-[500px] w-full">
|
||||
{components[show].map(
|
||||
({
|
||||
label,
|
||||
labelHidden,
|
||||
icon,
|
||||
actions,
|
||||
description,
|
||||
hidden,
|
||||
contentHidden,
|
||||
components,
|
||||
}) =>
|
||||
!hidden && (
|
||||
<Card className="w-full grid gap-2" key={label}>
|
||||
{!labelHidden && (
|
||||
<CardHeader
|
||||
className={cn(
|
||||
"flex-row items-center justify-between w-full py-0 h-[60px] space-y-0",
|
||||
!contentHidden && "border-b"
|
||||
)}
|
||||
>
|
||||
<div className="flex items-center gap-4">
|
||||
<CardTitle className="flex gap-4">
|
||||
{icon}
|
||||
{label}
|
||||
</CardTitle>
|
||||
{description && (
|
||||
<HoverCard openDelay={200}>
|
||||
<HoverCardTrigger asChild>
|
||||
<Card className="px-3 py-2 hover:bg-accent/50 transition-colors cursor-pointer">
|
||||
<Info className="w-4 h-4" />
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start" side="right">
|
||||
{description}
|
||||
</HoverCardContent>
|
||||
</HoverCard>
|
||||
{components[show] && (
|
||||
<div className="flex flex-col gap-6 min-h-[500px] w-full">
|
||||
{components[show].map(
|
||||
({
|
||||
label,
|
||||
labelHidden,
|
||||
icon,
|
||||
actions,
|
||||
description,
|
||||
hidden,
|
||||
contentHidden,
|
||||
components,
|
||||
}) =>
|
||||
!hidden && (
|
||||
<Card className="w-full grid gap-2" key={label}>
|
||||
{!labelHidden && (
|
||||
<CardHeader
|
||||
className={cn(
|
||||
"flex-row items-center justify-between w-full py-0 h-[60px] space-y-0",
|
||||
!contentHidden && "border-b"
|
||||
)}
|
||||
</div>
|
||||
{actions}
|
||||
</CardHeader>
|
||||
)}
|
||||
{!contentHidden && (
|
||||
<CardContent
|
||||
className={cn(
|
||||
"flex flex-col gap-1 pb-3",
|
||||
labelHidden && "pt-3"
|
||||
)}
|
||||
>
|
||||
<ConfigAgain
|
||||
config={config}
|
||||
update={update}
|
||||
set={(u) => set((p) => ({ ...p, ...u }))}
|
||||
components={components}
|
||||
disabled={disabled}
|
||||
/>
|
||||
</CardContent>
|
||||
)}
|
||||
</Card>
|
||||
)
|
||||
)}
|
||||
</div>
|
||||
>
|
||||
<div className="flex items-center gap-4">
|
||||
<CardTitle className="flex gap-4">
|
||||
{icon}
|
||||
{label}
|
||||
</CardTitle>
|
||||
{description && (
|
||||
<HoverCard openDelay={200}>
|
||||
<HoverCardTrigger asChild>
|
||||
<Card className="px-3 py-2 hover:bg-accent/50 transition-colors cursor-pointer">
|
||||
<Info className="w-4 h-4" />
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start" side="right">
|
||||
{description}
|
||||
</HoverCardContent>
|
||||
</HoverCard>
|
||||
)}
|
||||
</div>
|
||||
{actions}
|
||||
</CardHeader>
|
||||
)}
|
||||
{!contentHidden && (
|
||||
<CardContent
|
||||
className={cn(
|
||||
"flex flex-col gap-1 pb-3",
|
||||
labelHidden && "pt-3"
|
||||
)}
|
||||
>
|
||||
<ConfigAgain
|
||||
config={config}
|
||||
update={update}
|
||||
set={(u) => set((p) => ({ ...p, ...u }))}
|
||||
components={components}
|
||||
disabled={disabled}
|
||||
/>
|
||||
</CardContent>
|
||||
)}
|
||||
</Card>
|
||||
)
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</ConfigLayout>
|
||||
);
|
||||
@@ -280,8 +284,8 @@ export const ConfigAgain = <
|
||||
case "string":
|
||||
return (
|
||||
<ConfigInput
|
||||
key={args?.label ?? key.toString()}
|
||||
label={key.toString()}
|
||||
key={key.toString()}
|
||||
label={args?.label ?? key.toString()}
|
||||
value={value}
|
||||
onChange={(value) => set({ [key]: value } as Partial<T>)}
|
||||
disabled={disabled}
|
||||
|
||||
@@ -97,6 +97,9 @@ export const ConfigInput = ({
|
||||
placeholder,
|
||||
onChange,
|
||||
onBlur,
|
||||
className,
|
||||
inputLeft,
|
||||
inputRight,
|
||||
}: {
|
||||
label: string;
|
||||
boldLabel?: boolean;
|
||||
@@ -106,17 +109,36 @@ export const ConfigInput = ({
|
||||
placeholder?: string;
|
||||
onChange?: (value: string) => void;
|
||||
onBlur?: (value: string) => void;
|
||||
className?: string;
|
||||
inputLeft?: ReactNode;
|
||||
inputRight?: ReactNode;
|
||||
}) => (
|
||||
<ConfigItem label={label} boldLabel={boldLabel} description={description}>
|
||||
<Input
|
||||
className="max-w-[75%] lg:max-w-[400px]"
|
||||
type={typeof value === "number" ? "number" : undefined}
|
||||
value={value}
|
||||
onChange={(e) => onChange && onChange(e.target.value)}
|
||||
onBlur={(e) => onBlur && onBlur(e.target.value)}
|
||||
placeholder={placeholder}
|
||||
disabled={disabled}
|
||||
/>
|
||||
{inputLeft || inputRight ? (
|
||||
<div className="flex gap-2 items-center">
|
||||
{inputLeft}
|
||||
<Input
|
||||
className={cn("max-w-[75%] lg:max-w-[400px]", className)}
|
||||
type={typeof value === "number" ? "number" : undefined}
|
||||
value={value}
|
||||
onChange={(e) => onChange && onChange(e.target.value)}
|
||||
onBlur={(e) => onBlur && onBlur(e.target.value)}
|
||||
placeholder={placeholder}
|
||||
disabled={disabled}
|
||||
/>
|
||||
{inputRight}
|
||||
</div>
|
||||
) : (
|
||||
<Input
|
||||
className={cn("max-w-[75%] lg:max-w-[400px]", className)}
|
||||
type={typeof value === "number" ? "number" : undefined}
|
||||
value={value}
|
||||
onChange={(e) => onChange && onChange(e.target.value)}
|
||||
onBlur={(e) => onBlur && onBlur(e.target.value)}
|
||||
placeholder={placeholder}
|
||||
disabled={disabled}
|
||||
/>
|
||||
)}
|
||||
</ConfigItem>
|
||||
);
|
||||
|
||||
@@ -250,7 +272,7 @@ export const ProviderSelector = ({
|
||||
onBlur={() => setCustomMode(false)}
|
||||
onKeyDown={(e) => {
|
||||
if (e.key === "Enter") {
|
||||
setCustomMode(false)
|
||||
setCustomMode(false);
|
||||
}
|
||||
}}
|
||||
autoFocus
|
||||
|
||||
@@ -2,6 +2,7 @@ import { Config } from "@components/config";
|
||||
import {
|
||||
AccountSelectorConfig,
|
||||
AddExtraArgMenu,
|
||||
ConfigInput,
|
||||
ConfigItem,
|
||||
ImageRegistryConfig,
|
||||
InputList,
|
||||
@@ -68,49 +69,24 @@ export const BuildConfig = ({
|
||||
label: "Version",
|
||||
labelHidden: true,
|
||||
components: {
|
||||
version: (version, set) => {
|
||||
const { major, minor, patch } = version ?? {
|
||||
major: 0,
|
||||
minor: 0,
|
||||
patch: 0,
|
||||
};
|
||||
version: (_version, set) => {
|
||||
const version =
|
||||
typeof _version === "object"
|
||||
? `${_version.major}.${_version.minor}.${_version.patch}`
|
||||
: _version;
|
||||
return (
|
||||
<ConfigItem
|
||||
<ConfigInput
|
||||
className="text-lg w-[200px]"
|
||||
label="Version"
|
||||
description="Increment the build's major / minor version. The patch number will be incremented for every build."
|
||||
>
|
||||
<div className="flex gap-4 items-center">
|
||||
<div className="text-xl">
|
||||
v{major}.{minor}.{patch}
|
||||
</div>
|
||||
{!disabled && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
onClick={() =>
|
||||
set({
|
||||
version: { major: major + 1, minor: 0, patch: 0 },
|
||||
})
|
||||
}
|
||||
>
|
||||
+ Major
|
||||
</Button>
|
||||
)}
|
||||
{!disabled && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
onClick={() =>
|
||||
set({
|
||||
version: { major, minor: minor + 1, patch: 0 },
|
||||
})
|
||||
}
|
||||
>
|
||||
+ Minor
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</ConfigItem>
|
||||
placeholder="0.0.0"
|
||||
value={version}
|
||||
onChange={(version) => set({ version: version as any })}
|
||||
disabled={disabled}
|
||||
boldLabel
|
||||
/>
|
||||
);
|
||||
},
|
||||
auto_increment_version: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -160,6 +136,14 @@ export const BuildConfig = ({
|
||||
{
|
||||
label: "Image",
|
||||
components: {
|
||||
image_registry: (registry, set) => (
|
||||
<ImageRegistryConfig
|
||||
registry={registry}
|
||||
setRegistry={(image_registry) => set({ image_registry })}
|
||||
resource_id={update.builder_id ?? config.builder_id}
|
||||
disabled={disabled}
|
||||
/>
|
||||
),
|
||||
build_path: {
|
||||
placeholder: ".",
|
||||
description:
|
||||
@@ -170,14 +154,6 @@ export const BuildConfig = ({
|
||||
description:
|
||||
"The path to the dockerfile, relative to the build path.",
|
||||
},
|
||||
image_registry: (registry, set) => (
|
||||
<ImageRegistryConfig
|
||||
registry={registry}
|
||||
setRegistry={(image_registry) => set({ image_registry })}
|
||||
resource_id={update.builder_id ?? config.builder_id}
|
||||
disabled={disabled}
|
||||
/>
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -287,16 +263,31 @@ export const BuildConfig = ({
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Github Webhook",
|
||||
label: "Git Webhook",
|
||||
description:
|
||||
"Configure your repo provider to send webhooks to Monitor",
|
||||
components: {
|
||||
["Guard" as any]: () => {
|
||||
if (update.branch ?? config.branch) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<ConfigItem label="Configure Branch">
|
||||
<div>Must configure Branch before webhooks will work.</div>
|
||||
</ConfigItem>
|
||||
);
|
||||
},
|
||||
["build" as any]: () => (
|
||||
<ConfigItem label="Webhook Url">
|
||||
<CopyGithubWebhook path={`/build/${id}`} />
|
||||
</ConfigItem>
|
||||
),
|
||||
webhook_enabled: webhook !== undefined && !webhook.managed,
|
||||
webhook_secret: {
|
||||
description:
|
||||
"Provide a custom webhook secret for this resource, or use the global default.",
|
||||
placeholder: "Input custom secret",
|
||||
},
|
||||
["managed" as any]: () => {
|
||||
const inv = useInvalidate();
|
||||
const { toast } = useToast();
|
||||
|
||||
@@ -140,7 +140,7 @@ export const BuildComponents: RequiredResourceComponents = {
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start">
|
||||
<div className="grid">
|
||||
<div className="grid gap-2">
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className="w-fit text-muted-foreground"
|
||||
@@ -168,7 +168,7 @@ export const BuildComponents: RequiredResourceComponents = {
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start">
|
||||
<div className="grid">
|
||||
<div className="grid gap-2">
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className="w-fit text-muted-foreground"
|
||||
|
||||
@@ -192,7 +192,7 @@ const ProcedureConfigInner = ({
|
||||
<Section>
|
||||
<Card>
|
||||
<CardHeader className="p-4">
|
||||
<ConfigItem label="Github Webhook" className="items-start">
|
||||
<ConfigItem label="Git Webhook" className="items-start">
|
||||
<div className="flex flex-col gap-4">
|
||||
<div className="flex items-center gap-4">
|
||||
<div className="flex items-center gap-2">
|
||||
@@ -223,6 +223,19 @@ const ProcedureConfigInner = ({
|
||||
disabled={disabled}
|
||||
/>
|
||||
</div>
|
||||
<div className="flex items-center justify-end gap-4 w-full">
|
||||
<div className="text-muted-foreground">Custom Secret:</div>
|
||||
<Input
|
||||
value={
|
||||
config.webhook_secret ?? procedure.config?.webhook_secret
|
||||
}
|
||||
onChange={(e) =>
|
||||
setConfig({ ...config, webhook_secret: e.target.value })
|
||||
}
|
||||
disabled={disabled}
|
||||
className="w-[400px] max-w-full"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</ConfigItem>
|
||||
</CardHeader>
|
||||
|
||||
@@ -150,10 +150,20 @@ export const RepoConfig = ({ id }: { id: string }) => {
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Github Webhooks",
|
||||
label: "Git Webhooks",
|
||||
description:
|
||||
"Configure your repo provider to send webhooks to Monitor",
|
||||
components: {
|
||||
["Guard" as any]: () => {
|
||||
if (update.branch ?? config.branch) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<ConfigItem label="Configure Branch">
|
||||
<div>Must configure Branch before webhooks will work.</div>
|
||||
</ConfigItem>
|
||||
);
|
||||
},
|
||||
["pull" as any]: () => (
|
||||
<ConfigItem label="Pull">
|
||||
<CopyGithubWebhook path={`/repo/${id}/pull`} />
|
||||
@@ -170,6 +180,11 @@ export const RepoConfig = ({ id }: { id: string }) => {
|
||||
</ConfigItem>
|
||||
),
|
||||
webhook_enabled: webhooks !== undefined && !webhooks.managed,
|
||||
webhook_secret: {
|
||||
description:
|
||||
"Provide a custom webhook secret for this resource, or use the global default.",
|
||||
placeholder: "Input custom secret",
|
||||
},
|
||||
["managed" as any]: () => {
|
||||
const inv = useInvalidate();
|
||||
const { toast } = useToast();
|
||||
|
||||
@@ -126,7 +126,7 @@ export const RepoComponents: RequiredResourceComponents = {
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start">
|
||||
<div className="grid">
|
||||
<div className="grid gap-2">
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className="w-fit text-muted-foreground"
|
||||
@@ -155,7 +155,7 @@ export const RepoComponents: RequiredResourceComponents = {
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start">
|
||||
<div className="grid">
|
||||
<div className="grid gap-2">
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className="w-fit text-muted-foreground"
|
||||
|
||||
@@ -101,10 +101,20 @@ export const ResourceSyncConfig = ({
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Github Webhooks",
|
||||
label: "Git Webhooks",
|
||||
description:
|
||||
"Configure your repo provider to send webhooks to Monitor",
|
||||
components: {
|
||||
["Guard" as any]: () => {
|
||||
if (update.branch ?? config.branch) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<ConfigItem label="Configure Branch">
|
||||
<div>Must configure Branch before webhooks will work.</div>
|
||||
</ConfigItem>
|
||||
);
|
||||
},
|
||||
["refresh" as any]: () => (
|
||||
<ConfigItem
|
||||
label="Refresh Pending"
|
||||
@@ -122,6 +132,11 @@ export const ResourceSyncConfig = ({
|
||||
</ConfigItem>
|
||||
),
|
||||
webhook_enabled: webhooks !== undefined && !webhooks.managed,
|
||||
webhook_secret: {
|
||||
description:
|
||||
"Provide a custom webhook secret for this resource, or use the global default.",
|
||||
placeholder: "Input custom secret",
|
||||
},
|
||||
["managed" as any]: () => {
|
||||
const inv = useInvalidate();
|
||||
const { toast } = useToast();
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
import { Config } from "@components/config";
|
||||
import { InputList } from "@components/config/util";
|
||||
import { useInvalidate, useRead, useWrite } from "@lib/hooks";
|
||||
import { Types } from "@monitor/client";
|
||||
import { Button } from "@ui/button";
|
||||
import { PlusCircle } from "lucide-react";
|
||||
import { ReactNode, useState } from "react";
|
||||
|
||||
export const ServerConfig = ({
|
||||
@@ -61,12 +64,49 @@ export const ServerConfig = ({
|
||||
description:
|
||||
"Whether to store historical CPU, RAM, and disk usage.",
|
||||
},
|
||||
|
||||
auto_prune: {
|
||||
description:
|
||||
"Whether to prune unused images every day at UTC 00:00",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Ignore mounts",
|
||||
contentHidden:
|
||||
(update.ignore_mounts ?? config.ignore_mounts)?.length === 0,
|
||||
description:
|
||||
"If undesired mount points are coming through in server stats, filter them out here.",
|
||||
actions: !disabled && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
onClick={() =>
|
||||
set((update) => ({
|
||||
...update,
|
||||
ignore_mounts: [
|
||||
...(update.ignore_mounts ?? config.ignore_mounts ?? []),
|
||||
"",
|
||||
],
|
||||
}))
|
||||
}
|
||||
className="flex items-center gap-2 w-[200px]"
|
||||
>
|
||||
<PlusCircle className="w-4 h-4" />
|
||||
Add Ignore
|
||||
</Button>
|
||||
),
|
||||
components: {
|
||||
ignore_mounts: (values, set) => (
|
||||
<InputList
|
||||
field="ignore_mounts"
|
||||
values={values ?? []}
|
||||
set={set}
|
||||
disabled={disabled}
|
||||
placeholder="Ignore Mounts"
|
||||
/>
|
||||
),
|
||||
},
|
||||
},
|
||||
],
|
||||
alerts: [
|
||||
{
|
||||
|
||||
@@ -47,6 +47,9 @@ export const StackConfig = ({
|
||||
if (!config) return null;
|
||||
|
||||
const disabled = global_disabled || perms !== Types.PermissionLevel.Write;
|
||||
const files_on_host = update.files_on_host ?? config.files_on_host;
|
||||
const ui_file_contents =
|
||||
(update.file_contents ?? config.file_contents ?? "").length > 0;
|
||||
|
||||
return (
|
||||
<Config
|
||||
@@ -85,8 +88,71 @@ export const StackConfig = ({
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Files on Server",
|
||||
labelHidden: true,
|
||||
components: {
|
||||
files_on_host: {
|
||||
label: "Files on Server",
|
||||
boldLabel: true,
|
||||
description:
|
||||
"Manage the compose files on server yourself. Just configure the Run Directory and File Paths to your files.",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Run Path",
|
||||
labelHidden: true,
|
||||
hidden: !files_on_host,
|
||||
components: {
|
||||
run_directory: {
|
||||
placeholder: "/path/to/folder",
|
||||
description:
|
||||
"Set the cwd when running compose up command. Should usually be the parent folder of the compose files.",
|
||||
boldLabel: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "File Paths",
|
||||
hidden: !files_on_host,
|
||||
description:
|
||||
"Add files to include using 'docker compose -f'. If empty, uses 'compose.yaml'. Relative to 'Run Directory'.",
|
||||
contentHidden:
|
||||
(update.file_paths ?? config.file_paths)?.length === 0,
|
||||
actions: !disabled && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
onClick={() =>
|
||||
set((update) => ({
|
||||
...update,
|
||||
file_paths: [
|
||||
...(update.file_paths ?? config.file_paths ?? []),
|
||||
"",
|
||||
],
|
||||
}))
|
||||
}
|
||||
className="flex items-center gap-2 w-[200px]"
|
||||
>
|
||||
<PlusCircle className="w-4 h-4" />
|
||||
Add File
|
||||
</Button>
|
||||
),
|
||||
components: {
|
||||
file_paths: (value, set) => (
|
||||
<InputList
|
||||
field="file_paths"
|
||||
values={value ?? []}
|
||||
set={set}
|
||||
disabled={disabled}
|
||||
placeholder="compose.yaml"
|
||||
/>
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Compose File",
|
||||
hidden: files_on_host,
|
||||
description:
|
||||
"Paste the file contents here, or configure a git repo.",
|
||||
actions: (
|
||||
@@ -111,6 +177,7 @@ export const StackConfig = ({
|
||||
<Textarea
|
||||
ref={fileContentsRef}
|
||||
value={file_contents}
|
||||
disabled={disabled}
|
||||
onChange={(e) => set({ file_contents: e.target.value })}
|
||||
className="min-h-[300px] h-fit"
|
||||
placeholder="Paste compose file contents"
|
||||
@@ -147,7 +214,7 @@ export const StackConfig = ({
|
||||
label: "Extra Args",
|
||||
description: "Add extra args inserted after 'docker compose up -d'",
|
||||
contentHidden:
|
||||
(update.extra_args ?? config.extra_args)?.length === 0,
|
||||
((update.extra_args ?? config.extra_args)?.length ?? 0) === 0,
|
||||
actions: !disabled && (
|
||||
<AddExtraArgMenu
|
||||
type="Stack"
|
||||
@@ -212,83 +279,22 @@ export const StackConfig = ({
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
"Git Repo": [
|
||||
{
|
||||
label: "Git",
|
||||
label: "Ignore Services",
|
||||
description:
|
||||
"Provide config for repo-based compose files. Not necessary if file contents are configured in UI.",
|
||||
components: {
|
||||
git_provider: (provider, set) => {
|
||||
const https = update.git_https ?? config.git_https;
|
||||
return (
|
||||
<ProviderSelectorConfig
|
||||
account_type="git"
|
||||
selected={provider}
|
||||
disabled={disabled}
|
||||
onSelect={(git_provider) => set({ git_provider })}
|
||||
https={https}
|
||||
onHttpsSwitch={() => set({ git_https: !https })}
|
||||
/>
|
||||
);
|
||||
},
|
||||
git_account: (value, set) => {
|
||||
const server_id = update.server_id || config.server_id;
|
||||
return (
|
||||
<AccountSelectorConfig
|
||||
id={server_id}
|
||||
type={server_id ? "Server" : "None"}
|
||||
account_type="git"
|
||||
provider={update.git_provider ?? config.git_provider}
|
||||
selected={value}
|
||||
onSelect={(git_account) => set({ git_account })}
|
||||
disabled={disabled}
|
||||
placeholder="None"
|
||||
/>
|
||||
);
|
||||
},
|
||||
repo: {
|
||||
placeholder: "Enter repo",
|
||||
description:
|
||||
"The repo path on the provider. {namespace}/{repo_name}",
|
||||
},
|
||||
branch: {
|
||||
placeholder: "Enter branch",
|
||||
description: "Select a custom branch, or default to 'main'.",
|
||||
},
|
||||
commit: {
|
||||
placeholder: "Enter a specific commit hash. Optional.",
|
||||
description:
|
||||
"Switch to a specific hash after cloning the branch.",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Run Path",
|
||||
labelHidden: true,
|
||||
components: {
|
||||
run_directory: {
|
||||
placeholder: "Eg. './'",
|
||||
description:
|
||||
"Set the cwd when running compose up command. Relative to the repo root.",
|
||||
boldLabel: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "File Paths",
|
||||
description:
|
||||
"Add files to include using 'docker compose -f'. If empty, uses 'compose.yaml'.",
|
||||
"If your compose file has init services that exit early, ignore them here so your stack will report the correct health.",
|
||||
contentHidden:
|
||||
(update.file_paths ?? config.file_paths)?.length === 0,
|
||||
((update.ignore_services ?? config.ignore_services)?.length ?? 0) === 0,
|
||||
actions: !disabled && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
onClick={() =>
|
||||
set((update) => ({
|
||||
...update,
|
||||
file_paths: [
|
||||
...(update.file_paths ?? config.file_paths ?? []),
|
||||
ignore_services: [
|
||||
...(update.ignore_services ??
|
||||
config.ignore_services ??
|
||||
[]),
|
||||
"",
|
||||
],
|
||||
}))
|
||||
@@ -296,186 +302,298 @@ export const StackConfig = ({
|
||||
className="flex items-center gap-2 w-[200px]"
|
||||
>
|
||||
<PlusCircle className="w-4 h-4" />
|
||||
Add File
|
||||
Add Service
|
||||
</Button>
|
||||
),
|
||||
components: {
|
||||
file_paths: (value, set) => (
|
||||
ignore_services: (values, set) => (
|
||||
<InputList
|
||||
field="file_paths"
|
||||
values={value ?? []}
|
||||
field="ignore_services"
|
||||
values={values ?? []}
|
||||
set={set}
|
||||
disabled={disabled}
|
||||
placeholder="compose.yaml"
|
||||
placeholder="Input service name"
|
||||
/>
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Git Webhooks",
|
||||
description:
|
||||
"Configure your repo provider to send webhooks to Monitor",
|
||||
components: {
|
||||
["Guard" as any]: () => {
|
||||
if (update.branch ?? config.branch) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<ConfigItem label="Configure Branch">
|
||||
<div>Must configure Branch before webhooks will work.</div>
|
||||
</ConfigItem>
|
||||
);
|
||||
},
|
||||
["Refresh" as any]: () =>
|
||||
(update.branch ?? config.branch) && (
|
||||
<ConfigItem label="Refresh Cache">
|
||||
<CopyGithubWebhook path={`/stack/${id}/refresh`} />
|
||||
</ConfigItem>
|
||||
),
|
||||
["Deploy" as any]: () =>
|
||||
(update.branch ?? config.branch) && (
|
||||
<ConfigItem label="Auto Redeploy">
|
||||
<CopyGithubWebhook path={`/stack/${id}/deploy`} />
|
||||
</ConfigItem>
|
||||
),
|
||||
webhook_enabled:
|
||||
!!(update.branch ?? config.branch) &&
|
||||
webhooks !== undefined &&
|
||||
!webhooks.managed,
|
||||
["managed" as any]: () => {
|
||||
const inv = useInvalidate();
|
||||
const { toast } = useToast();
|
||||
const { mutate: createWebhook, isPending: createPending } =
|
||||
useWrite("CreateStackWebhook", {
|
||||
onSuccess: () => {
|
||||
toast({ title: "Webhook Created" });
|
||||
inv(["GetStackWebhooksEnabled", { stack: id }]);
|
||||
},
|
||||
});
|
||||
const { mutate: deleteWebhook, isPending: deletePending } =
|
||||
useWrite("DeleteStackWebhook", {
|
||||
onSuccess: () => {
|
||||
toast({ title: "Webhook Deleted" });
|
||||
inv(["GetStackWebhooksEnabled", { stack: id }]);
|
||||
},
|
||||
});
|
||||
|
||||
if (
|
||||
!(update.branch ?? config.branch) ||
|
||||
!webhooks ||
|
||||
!webhooks.managed
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<ConfigItem label="Manage Webhook">
|
||||
{webhooks.deploy_enabled && (
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-2">
|
||||
Incoming webhook is{" "}
|
||||
<div
|
||||
className={text_color_class_by_intention("Good")}
|
||||
>
|
||||
ENABLED
|
||||
</div>
|
||||
and will trigger
|
||||
<div
|
||||
className={text_color_class_by_intention("Neutral")}
|
||||
>
|
||||
DEPLOY
|
||||
</div>
|
||||
</div>
|
||||
<ConfirmButton
|
||||
title="Disable"
|
||||
icon={<Ban className="w-4 h-4" />}
|
||||
variant="destructive"
|
||||
onClick={() =>
|
||||
deleteWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Deploy,
|
||||
})
|
||||
}
|
||||
loading={deletePending}
|
||||
disabled={disabled || deletePending}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{!webhooks.deploy_enabled && webhooks.refresh_enabled && (
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-2">
|
||||
Incoming webhook is{" "}
|
||||
<div
|
||||
className={text_color_class_by_intention("Good")}
|
||||
>
|
||||
ENABLED
|
||||
</div>
|
||||
and will trigger
|
||||
<div
|
||||
className={text_color_class_by_intention("Neutral")}
|
||||
>
|
||||
REFRESH
|
||||
</div>
|
||||
</div>
|
||||
<ConfirmButton
|
||||
title="Disable"
|
||||
icon={<Ban className="w-4 h-4" />}
|
||||
variant="destructive"
|
||||
onClick={() =>
|
||||
deleteWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Refresh,
|
||||
})
|
||||
}
|
||||
loading={deletePending}
|
||||
disabled={disabled || deletePending}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{!webhooks.deploy_enabled && !webhooks.refresh_enabled && (
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-2">
|
||||
Incoming webhook is{" "}
|
||||
<div
|
||||
className={text_color_class_by_intention(
|
||||
"Critical"
|
||||
)}
|
||||
>
|
||||
DISABLED
|
||||
</div>
|
||||
</div>
|
||||
<ConfirmButton
|
||||
title="Enable Deploy"
|
||||
icon={<CirclePlus className="w-4 h-4" />}
|
||||
onClick={() =>
|
||||
createWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Deploy,
|
||||
})
|
||||
}
|
||||
loading={createPending}
|
||||
disabled={disabled || createPending}
|
||||
/>
|
||||
<ConfirmButton
|
||||
title="Enable Refresh"
|
||||
icon={<CirclePlus className="w-4 h-4" />}
|
||||
onClick={() =>
|
||||
createWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Refresh,
|
||||
})
|
||||
}
|
||||
loading={createPending}
|
||||
disabled={disabled || createPending}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</ConfigItem>
|
||||
);
|
||||
],
|
||||
"Git Repo": !files_on_host &&
|
||||
!ui_file_contents && [
|
||||
{
|
||||
label: "Git",
|
||||
description:
|
||||
"Provide config for repo-based compose files. Not necessary if file contents are configured in UI.",
|
||||
components: {
|
||||
git_provider: (provider, set) => {
|
||||
const https = update.git_https ?? config.git_https;
|
||||
return (
|
||||
<ProviderSelectorConfig
|
||||
account_type="git"
|
||||
selected={provider}
|
||||
disabled={disabled}
|
||||
onSelect={(git_provider) => set({ git_provider })}
|
||||
https={https}
|
||||
onHttpsSwitch={() => set({ git_https: !https })}
|
||||
/>
|
||||
);
|
||||
},
|
||||
git_account: (value, set) => {
|
||||
const server_id = update.server_id || config.server_id;
|
||||
return (
|
||||
<AccountSelectorConfig
|
||||
id={server_id}
|
||||
type={server_id ? "Server" : "None"}
|
||||
account_type="git"
|
||||
provider={update.git_provider ?? config.git_provider}
|
||||
selected={value}
|
||||
onSelect={(git_account) => set({ git_account })}
|
||||
disabled={disabled}
|
||||
placeholder="None"
|
||||
/>
|
||||
);
|
||||
},
|
||||
repo: {
|
||||
placeholder: "Enter repo",
|
||||
description:
|
||||
"The repo path on the provider. {namespace}/{repo_name}",
|
||||
},
|
||||
branch: {
|
||||
placeholder: "Enter branch",
|
||||
description: "Select a custom branch, or default to 'main'.",
|
||||
},
|
||||
commit: {
|
||||
placeholder: "Enter a specific commit hash. Optional.",
|
||||
description:
|
||||
"Switch to a specific hash after cloning the branch.",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
{
|
||||
label: "Run Path",
|
||||
labelHidden: true,
|
||||
components: {
|
||||
run_directory: {
|
||||
placeholder: "./",
|
||||
description:
|
||||
"Set the cwd when running compose up command. Relative to the repo root.",
|
||||
boldLabel: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "File Paths",
|
||||
description:
|
||||
"Add files to include using 'docker compose -f'. If empty, uses 'compose.yaml'. Relative to 'Run Directory'.",
|
||||
contentHidden:
|
||||
(update.file_paths ?? config.file_paths)?.length === 0,
|
||||
actions: !disabled && (
|
||||
<Button
|
||||
variant="secondary"
|
||||
onClick={() =>
|
||||
set((update) => ({
|
||||
...update,
|
||||
file_paths: [
|
||||
...(update.file_paths ?? config.file_paths ?? []),
|
||||
"",
|
||||
],
|
||||
}))
|
||||
}
|
||||
className="flex items-center gap-2 w-[200px]"
|
||||
>
|
||||
<PlusCircle className="w-4 h-4" />
|
||||
Add File
|
||||
</Button>
|
||||
),
|
||||
components: {
|
||||
file_paths: (value, set) => (
|
||||
<InputList
|
||||
field="file_paths"
|
||||
values={value ?? []}
|
||||
set={set}
|
||||
disabled={disabled}
|
||||
placeholder="compose.yaml"
|
||||
/>
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
label: "Git Webhooks",
|
||||
description:
|
||||
"Configure your repo provider to send webhooks to Monitor",
|
||||
components: {
|
||||
["Guard" as any]: () => {
|
||||
if (update.branch ?? config.branch) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<ConfigItem label="Configure Branch">
|
||||
<div>
|
||||
Must configure Branch before webhooks will work.
|
||||
</div>
|
||||
</ConfigItem>
|
||||
);
|
||||
},
|
||||
["Refresh" as any]: () =>
|
||||
(update.branch ?? config.branch) && (
|
||||
<ConfigItem label="Refresh Cache">
|
||||
<CopyGithubWebhook path={`/stack/${id}/refresh`} />
|
||||
</ConfigItem>
|
||||
),
|
||||
["Deploy" as any]: () =>
|
||||
(update.branch ?? config.branch) && (
|
||||
<ConfigItem label="Auto Redeploy">
|
||||
<CopyGithubWebhook path={`/stack/${id}/deploy`} />
|
||||
</ConfigItem>
|
||||
),
|
||||
webhook_enabled:
|
||||
!!(update.branch ?? config.branch) &&
|
||||
webhooks !== undefined &&
|
||||
!webhooks.managed,
|
||||
webhook_secret: {
|
||||
description:
|
||||
"Provide a custom webhook secret for this resource, or use the global default.",
|
||||
placeholder: "Input custom secret",
|
||||
},
|
||||
["managed" as any]: () => {
|
||||
const inv = useInvalidate();
|
||||
const { toast } = useToast();
|
||||
const { mutate: createWebhook, isPending: createPending } =
|
||||
useWrite("CreateStackWebhook", {
|
||||
onSuccess: () => {
|
||||
toast({ title: "Webhook Created" });
|
||||
inv(["GetStackWebhooksEnabled", { stack: id }]);
|
||||
},
|
||||
});
|
||||
const { mutate: deleteWebhook, isPending: deletePending } =
|
||||
useWrite("DeleteStackWebhook", {
|
||||
onSuccess: () => {
|
||||
toast({ title: "Webhook Deleted" });
|
||||
inv(["GetStackWebhooksEnabled", { stack: id }]);
|
||||
},
|
||||
});
|
||||
|
||||
if (
|
||||
!(update.branch ?? config.branch) ||
|
||||
!webhooks ||
|
||||
!webhooks.managed
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<ConfigItem label="Manage Webhook">
|
||||
{webhooks.deploy_enabled && (
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-2">
|
||||
Incoming webhook is{" "}
|
||||
<div
|
||||
className={text_color_class_by_intention("Good")}
|
||||
>
|
||||
ENABLED
|
||||
</div>
|
||||
and will trigger
|
||||
<div
|
||||
className={text_color_class_by_intention(
|
||||
"Neutral"
|
||||
)}
|
||||
>
|
||||
DEPLOY
|
||||
</div>
|
||||
</div>
|
||||
<ConfirmButton
|
||||
title="Disable"
|
||||
icon={<Ban className="w-4 h-4" />}
|
||||
variant="destructive"
|
||||
onClick={() =>
|
||||
deleteWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Deploy,
|
||||
})
|
||||
}
|
||||
loading={deletePending}
|
||||
disabled={disabled || deletePending}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{!webhooks.deploy_enabled && webhooks.refresh_enabled && (
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-2">
|
||||
Incoming webhook is{" "}
|
||||
<div
|
||||
className={text_color_class_by_intention("Good")}
|
||||
>
|
||||
ENABLED
|
||||
</div>
|
||||
and will trigger
|
||||
<div
|
||||
className={text_color_class_by_intention(
|
||||
"Neutral"
|
||||
)}
|
||||
>
|
||||
REFRESH
|
||||
</div>
|
||||
</div>
|
||||
<ConfirmButton
|
||||
title="Disable"
|
||||
icon={<Ban className="w-4 h-4" />}
|
||||
variant="destructive"
|
||||
onClick={() =>
|
||||
deleteWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Refresh,
|
||||
})
|
||||
}
|
||||
loading={deletePending}
|
||||
disabled={disabled || deletePending}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
{!webhooks.deploy_enabled &&
|
||||
!webhooks.refresh_enabled && (
|
||||
<div className="flex items-center gap-4 flex-wrap">
|
||||
<div className="flex items-center gap-2">
|
||||
Incoming webhook is{" "}
|
||||
<div
|
||||
className={text_color_class_by_intention(
|
||||
"Critical"
|
||||
)}
|
||||
>
|
||||
DISABLED
|
||||
</div>
|
||||
</div>
|
||||
<ConfirmButton
|
||||
title="Enable Deploy"
|
||||
icon={<CirclePlus className="w-4 h-4" />}
|
||||
onClick={() =>
|
||||
createWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Deploy,
|
||||
})
|
||||
}
|
||||
loading={createPending}
|
||||
disabled={disabled || createPending}
|
||||
/>
|
||||
<ConfirmButton
|
||||
title="Enable Refresh"
|
||||
icon={<CirclePlus className="w-4 h-4" />}
|
||||
onClick={() =>
|
||||
createWebhook({
|
||||
stack: id,
|
||||
action: Types.StackWebhookAction.Refresh,
|
||||
})
|
||||
}
|
||||
loading={createPending}
|
||||
disabled={disabled || createPending}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</ConfigItem>
|
||||
);
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
environment: [
|
||||
{
|
||||
label: "Environment",
|
||||
|
||||
@@ -53,7 +53,8 @@ const StackIcon = ({ id, size }: { id?: string; size: number }) => {
|
||||
|
||||
const ConfigInfoServices = ({ id }: { id: string }) => {
|
||||
const [view, setView] = useLocalStorage("stack-tabs-v1", "Config");
|
||||
const state = useStack(id)?.info.state;
|
||||
const info = useStack(id)?.info;
|
||||
const state = info?.state;
|
||||
const stackDown =
|
||||
state === undefined ||
|
||||
state === Types.StackState.Unknown ||
|
||||
@@ -134,7 +135,7 @@ export const StackComponents: RequiredResourceComponents = {
|
||||
State: ({ id }) => {
|
||||
const state = useStack(id)?.info.state ?? Types.StackState.Unknown;
|
||||
const config = useFullStack(id)?.config;
|
||||
if (!config?.file_contents && !config?.repo) {
|
||||
if (!config?.files_on_host && !config?.file_contents && !config?.repo) {
|
||||
return null;
|
||||
}
|
||||
return <StatusBadge text={state} intent={stack_state_intention(state)} />;
|
||||
@@ -150,7 +151,7 @@ export const StackComponents: RequiredResourceComponents = {
|
||||
},
|
||||
NoConfig: ({ id }) => {
|
||||
const config = useFullStack(id)?.config;
|
||||
if (config?.file_contents || config?.repo) {
|
||||
if (config?.files_on_host || config?.file_contents || config?.repo) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
@@ -200,6 +201,30 @@ export const StackComponents: RequiredResourceComponents = {
|
||||
</HoverCard>
|
||||
);
|
||||
},
|
||||
RemoteErrors: ({ id }) => {
|
||||
const info = useFullStack(id)?.info;
|
||||
const errors = info?.remote_errors;
|
||||
if (!errors || errors.length === 0) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<HoverCard openDelay={200}>
|
||||
<HoverCardTrigger asChild>
|
||||
<Card className="px-3 py-2 bg-destructive/75 hover:bg-destructive transition-colors cursor-pointer">
|
||||
<div className="text-sm text-nowrap overflow-hidden overflow-ellipsis">
|
||||
Remote Error
|
||||
</div>
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start">
|
||||
<div>
|
||||
There are errors reading the remote file contents. See{" "}
|
||||
<span className="font-bold">Info</span> tab for details.
|
||||
</div>
|
||||
</HoverCardContent>
|
||||
</HoverCard>
|
||||
);
|
||||
},
|
||||
Deployed: ({ id }) => {
|
||||
const info = useStack(id)?.info;
|
||||
const fullInfo = useFullStack(id)?.info;
|
||||
@@ -226,7 +251,7 @@ export const StackComponents: RequiredResourceComponents = {
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start">
|
||||
<div className="grid">
|
||||
<div className="grid gap-2">
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className="w-fit text-muted-foreground"
|
||||
@@ -260,7 +285,7 @@ export const StackComponents: RequiredResourceComponents = {
|
||||
</Card>
|
||||
</HoverCardTrigger>
|
||||
<HoverCardContent align="start">
|
||||
<div className="grid">
|
||||
<div className="grid gap-2">
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className="w-fit text-muted-foreground"
|
||||
|
||||
@@ -3,6 +3,7 @@ import { ReactNode } from "react";
|
||||
import { Card, CardHeader } from "@ui/card";
|
||||
import { useFullStack, useStack } from ".";
|
||||
import { Types } from "@monitor/client";
|
||||
import { updateLogToHtml } from "@lib/utils";
|
||||
|
||||
export const StackInfo = ({
|
||||
id,
|
||||
@@ -20,7 +21,7 @@ export const StackInfo = ({
|
||||
<Section titleOther={titleOther}>
|
||||
{!is_down && stack?.info?.deployed_contents && (
|
||||
<Card>
|
||||
<CardHeader>
|
||||
<CardHeader className="flex flex-col gap-2">
|
||||
deployed contents:{" "}
|
||||
{stack?.info?.deployed_contents?.map((content, i) => (
|
||||
<pre key={i} className="flex flex-col gap-2">
|
||||
@@ -34,7 +35,7 @@ export const StackInfo = ({
|
||||
|
||||
{stack?.config?.file_contents ? (
|
||||
<Card>
|
||||
<CardHeader>
|
||||
<CardHeader className="flex flex-col gap-2">
|
||||
latest contents:{" "}
|
||||
<pre className="flex flex-col gap-2">
|
||||
defined in UI:
|
||||
@@ -46,7 +47,7 @@ export const StackInfo = ({
|
||||
stack?.info?.remote_contents &&
|
||||
stack?.info?.remote_contents.length > 0 && (
|
||||
<Card>
|
||||
<CardHeader>
|
||||
<CardHeader className="flex flex-col gap-2">
|
||||
latest contents:{" "}
|
||||
{stack?.info?.remote_contents?.map((content, i) => (
|
||||
<pre key={i} className="flex flex-col gap-2">
|
||||
@@ -60,12 +61,17 @@ export const StackInfo = ({
|
||||
)}
|
||||
{stack?.info?.remote_errors && stack?.info?.remote_errors.length > 0 && (
|
||||
<Card>
|
||||
<CardHeader>
|
||||
<CardHeader className="flex flex-col gap-2">
|
||||
remote errors:{" "}
|
||||
{stack?.info?.remote_errors?.map((content, i) => (
|
||||
<pre key={i} className="flex flex-col gap-2">
|
||||
path: {content.path}
|
||||
<pre>{content.contents}</pre>
|
||||
<pre
|
||||
dangerouslySetInnerHTML={{
|
||||
__html: updateLogToHtml(content.contents),
|
||||
}}
|
||||
className="max-h-[500px] overflow-y-auto"
|
||||
/>
|
||||
</pre>
|
||||
))}
|
||||
</CardHeader>
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
import { Section } from "@components/layouts";
|
||||
import {
|
||||
bg_color_class_by_intention,
|
||||
deployment_state_intention,
|
||||
stroke_color_class_by_intention,
|
||||
} from "@lib/color";
|
||||
import { snake_case_to_upper_space_case } from "@lib/formatting";
|
||||
import { useRead } from "@lib/hooks";
|
||||
import { cn } from "@lib/utils";
|
||||
import { DataTable, SortableHeader } from "@ui/data-table";
|
||||
@@ -14,6 +12,7 @@ import { ReactNode } from "react";
|
||||
import { Link } from "react-router-dom";
|
||||
import { Button } from "@ui/button";
|
||||
import { Layers2 } from "lucide-react";
|
||||
import { StatusBadge } from "@components/util";
|
||||
|
||||
export const StackServices = ({
|
||||
id,
|
||||
@@ -23,7 +22,11 @@ export const StackServices = ({
|
||||
titleOther: ReactNode;
|
||||
}) => {
|
||||
const state = useStack(id)?.info.state ?? Types.StackState.Unknown;
|
||||
const services = useRead("ListStackServices", { stack: id }).data;
|
||||
const services = useRead(
|
||||
"ListStackServices",
|
||||
{ stack: id },
|
||||
{ refetchInterval: 5000 }
|
||||
).data;
|
||||
if (
|
||||
!services ||
|
||||
services.length === 0 ||
|
||||
@@ -80,20 +83,11 @@ export const StackServices = ({
|
||||
),
|
||||
cell: ({ row }) => {
|
||||
const state = row.original.container?.state;
|
||||
const color = bg_color_class_by_intention(
|
||||
deployment_state_intention(state)
|
||||
);
|
||||
return (
|
||||
<p
|
||||
className={cn(
|
||||
"p-1 w-fit text-[10px] text-white rounded-md",
|
||||
color
|
||||
)}
|
||||
>
|
||||
{snake_case_to_upper_space_case(
|
||||
state ?? "Unknown"
|
||||
).toUpperCase()}
|
||||
</p>
|
||||
<StatusBadge
|
||||
text={state}
|
||||
intent={deployment_state_intention(state)}
|
||||
/>
|
||||
);
|
||||
},
|
||||
// size: 120,
|
||||
|
||||
@@ -28,7 +28,8 @@ import { Link } from "react-router-dom";
|
||||
import { fmt_duration, fmt_operation, fmt_version } from "@lib/formatting";
|
||||
import {
|
||||
cn,
|
||||
sanitizeOnlySpan,
|
||||
is_service_user,
|
||||
updateLogToHtml,
|
||||
usableResourcePath,
|
||||
version_is_none,
|
||||
} from "@lib/utils";
|
||||
@@ -50,12 +51,7 @@ export const UpdateUser = ({
|
||||
defaultAvatar?: boolean;
|
||||
muted?: boolean;
|
||||
}) => {
|
||||
if (
|
||||
user_id === "Procedure" ||
|
||||
user_id === "Github" ||
|
||||
user_id === "Auto Redeploy" ||
|
||||
user_id === "Resource Sync"
|
||||
) {
|
||||
if (is_service_user(user_id)) {
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
@@ -275,7 +271,7 @@ const UpdateDetailsContent = ({
|
||||
<CardDescription>stdout</CardDescription>
|
||||
<pre
|
||||
dangerouslySetInnerHTML={{
|
||||
__html: sanitizeOnlySpan(log.stdout),
|
||||
__html: updateLogToHtml(log.stdout),
|
||||
}}
|
||||
className="max-h-[500px] overflow-y-auto"
|
||||
/>
|
||||
@@ -286,7 +282,7 @@ const UpdateDetailsContent = ({
|
||||
<CardDescription>stderr</CardDescription>
|
||||
<pre
|
||||
dangerouslySetInnerHTML={{
|
||||
__html: sanitizeOnlySpan(log.stderr),
|
||||
__html: updateLogToHtml(log.stderr),
|
||||
}}
|
||||
className="max-h-[500px] overflow-y-auto"
|
||||
/>
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
body {
|
||||
@apply bg-background text-foreground;
|
||||
font-family: Inter;
|
||||
overflow-y: hidden;
|
||||
}
|
||||
pre {
|
||||
@apply bg-card text-card-foreground border rounded-xl min-h-full text-xs p-4 whitespace-pre-wrap scroll-m-4 break-all;
|
||||
|
||||
@@ -112,6 +112,16 @@ export const sanitizeOnlySpan = (log: string) => {
|
||||
});
|
||||
};
|
||||
|
||||
/**
|
||||
* Converts the ansi colors in an Update log to html.
|
||||
* sanitizes incoming log first for any eg. script tags.
|
||||
* @param log incoming log string
|
||||
*/
|
||||
export const updateLogToHtml = (log: string) => {
|
||||
if (!log) return "No log.";
|
||||
return convert.toHtml(sanitizeOnlySpan(log));
|
||||
};
|
||||
|
||||
const convert = new Convert();
|
||||
/**
|
||||
* Converts the ansi colors in log to html.
|
||||
@@ -197,3 +207,17 @@ export const sync_no_changes = (sync: Types.ResourceSync) => {
|
||||
!pending.data.user_group_updates
|
||||
);
|
||||
};
|
||||
|
||||
export const is_service_user = (user_id: string) => {
|
||||
return (
|
||||
user_id === "System" ||
|
||||
user_id === "Procedure" ||
|
||||
user_id === "Github" ||
|
||||
user_id === "Git Webhook" ||
|
||||
user_id === "Auto Redeploy" ||
|
||||
user_id === "Resource Sync" ||
|
||||
user_id === "Stack Wizard" ||
|
||||
user_id === "Build Manager" ||
|
||||
user_id === "Repo Manager"
|
||||
);
|
||||
};
|
||||
|
||||
@@ -51,9 +51,13 @@ export const Dashboard = () => {
|
||||
};
|
||||
|
||||
const ResourceRow = ({ type }: { type: UsableResource }) => {
|
||||
const recents = useUser().data?.recents?.[type]?.slice(0, 6);
|
||||
const resources = useRead(`List${type}s`, {})
|
||||
.data?.filter((r) => !recents?.includes(r.id))
|
||||
const _recents = useUser().data?.recents?.[type]?.slice(0, 6);
|
||||
const _resources = useRead(`List${type}s`, {}).data;
|
||||
const recents = _recents?.filter(
|
||||
(recent) => !_resources?.every((resource) => resource.id !== recent)
|
||||
);
|
||||
const resources = _resources
|
||||
?.filter((r) => !recents?.includes(r.id))
|
||||
.map((r) => r.id);
|
||||
const ids = [
|
||||
...(recents ?? []),
|
||||
@@ -91,7 +95,7 @@ const ResourceRow = ({ type }: { type: UsableResource }) => {
|
||||
type={type}
|
||||
id={id}
|
||||
className={
|
||||
i > 4
|
||||
i > 3
|
||||
? "hidden 2xl:flex"
|
||||
: i > 1
|
||||
? "hidden sm:flex lg:hidden xl:flex"
|
||||
|
||||
@@ -129,7 +129,7 @@ const ResourceHeader = ({ type, id }: { type: UsableResource; id: string }) => {
|
||||
</div>
|
||||
|
||||
<div className="flex items-center gap-2">
|
||||
<p className="text-sm text-muted-foreground">Description: </p>
|
||||
{/* <p className="text-sm text-muted-foreground">Description: </p> */}
|
||||
<ResourceDescription type={type} id={id} disabled={!canWrite} />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -20,3 +20,4 @@ opentelemetry_sdk.workspace = true
|
||||
opentelemetry-otlp.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing-opentelemetry.workspace = true
|
||||
opentelemetry-semantic-conventions.workspace = true
|
||||
@@ -1,46 +1,58 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::entities::logger::{LogConfig, StdioLogMode};
|
||||
use tracing::level_filters::LevelFilter;
|
||||
use tracing_opentelemetry::OpenTelemetryLayer;
|
||||
use tracing_subscriber::{
|
||||
layer::SubscriberExt, util::SubscriberInitExt,
|
||||
layer::SubscriberExt, util::SubscriberInitExt, Registry,
|
||||
};
|
||||
|
||||
mod opentelemetry;
|
||||
mod otel;
|
||||
|
||||
pub fn init(config: &LogConfig) -> anyhow::Result<()> {
|
||||
let log_level: tracing::Level = config.level.into();
|
||||
|
||||
let registry =
|
||||
tracing_subscriber::registry().with(LevelFilter::from(log_level));
|
||||
Registry::default().with(LevelFilter::from(log_level));
|
||||
|
||||
match (config.stdio, &config.otlp_endpoint) {
|
||||
(StdioLogMode::Standard, Some(endpoint)) => registry
|
||||
.with(tracing_subscriber::fmt::layer())
|
||||
.with(opentelemetry::layer(
|
||||
(StdioLogMode::Standard, Some(endpoint)) => {
|
||||
let tracer = otel::tracer(
|
||||
endpoint,
|
||||
config.opentelemetry_service_name.clone(),
|
||||
))
|
||||
.try_init(),
|
||||
(StdioLogMode::Json, Some(endpoint)) => registry
|
||||
.with(tracing_subscriber::fmt::layer().json())
|
||||
.with(opentelemetry::layer(
|
||||
);
|
||||
registry
|
||||
.with(tracing_subscriber::fmt::layer())
|
||||
.with(OpenTelemetryLayer::new(tracer))
|
||||
.try_init()
|
||||
}
|
||||
|
||||
(StdioLogMode::Json, Some(endpoint)) => {
|
||||
let tracer = otel::tracer(
|
||||
endpoint,
|
||||
config.opentelemetry_service_name.clone(),
|
||||
))
|
||||
.try_init(),
|
||||
(StdioLogMode::None, Some(endpoint)) => registry
|
||||
.with(opentelemetry::layer(
|
||||
);
|
||||
registry
|
||||
.with(tracing_subscriber::fmt::layer().json())
|
||||
.with(OpenTelemetryLayer::new(tracer))
|
||||
.try_init()
|
||||
}
|
||||
|
||||
(StdioLogMode::None, Some(endpoint)) => {
|
||||
let tracer = otel::tracer(
|
||||
endpoint,
|
||||
config.opentelemetry_service_name.clone(),
|
||||
))
|
||||
.try_init(),
|
||||
);
|
||||
registry.with(OpenTelemetryLayer::new(tracer)).try_init()
|
||||
}
|
||||
|
||||
(StdioLogMode::Standard, None) => {
|
||||
registry.with(tracing_subscriber::fmt::layer()).try_init()
|
||||
}
|
||||
|
||||
(StdioLogMode::Json, None) => registry
|
||||
.with(tracing_subscriber::fmt::layer().json())
|
||||
.try_init(),
|
||||
|
||||
(StdioLogMode::None, None) => Ok(()),
|
||||
}
|
||||
.context("failed to init logger")
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use opentelemetry::KeyValue;
|
||||
use opentelemetry_otlp::WithExportConfig;
|
||||
use opentelemetry_sdk::{
|
||||
trace::{self, RandomIdGenerator, Sampler, Tracer},
|
||||
Resource,
|
||||
};
|
||||
use tracing_opentelemetry::OpenTelemetryLayer;
|
||||
|
||||
pub fn layer<S>(
|
||||
endpoint: &str,
|
||||
service_name: String,
|
||||
) -> OpenTelemetryLayer<S, Tracer>
|
||||
where
|
||||
S: tracing::Subscriber,
|
||||
for<'span> S: tracing_subscriber::registry::LookupSpan<'span>,
|
||||
{
|
||||
let tracer = opentelemetry_otlp::new_pipeline()
|
||||
.tracing()
|
||||
.with_exporter(
|
||||
opentelemetry_otlp::new_exporter()
|
||||
.tonic()
|
||||
.with_endpoint(endpoint)
|
||||
.with_timeout(Duration::from_secs(3)),
|
||||
)
|
||||
.with_trace_config(
|
||||
trace::config()
|
||||
.with_sampler(Sampler::AlwaysOn)
|
||||
.with_id_generator(RandomIdGenerator::default())
|
||||
.with_resource(Resource::new(vec![KeyValue::new(
|
||||
"service.name",
|
||||
service_name,
|
||||
)])),
|
||||
)
|
||||
.install_batch(opentelemetry_sdk::runtime::Tokio)
|
||||
.expect("failed to init opentelemetry tracer");
|
||||
tracing_opentelemetry::layer().with_tracer(tracer)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user