Compare commits

..

102 Commits

Author SHA1 Message Date
mbecker20
d99cf87da0 update client to 1.12 2024-07-29 18:36:42 -07:00
mbecker20
8e19eb7b0f versions 2024-07-29 18:33:14 -07:00
mbecker20
78a0b56c73 migrator readme 2024-07-29 18:32:11 -07:00
mbecker20
bf5dc52237 fix upgrades docs 2024-07-29 18:29:49 -07:00
mbecker20
482ea59d4c add docsite upgrades 2024-07-29 18:28:12 -07:00
Maxwell Becker
7740d36f49 v1.12 Custom Git Providers / Docker Registries (#8)
* update deps

* remove patch when 0 for deployments using specific build version

* implement custom git provider and image registry support

* common providers api

* toml array alias

* username alias account

* get fe to build

* http or https

* fix frontend build

* improve registry / provider config

* frontend build

* rework deployment / builds image registry

* frontend builds

* update build config fe

* configure builder additional accounts / secrets

* guard against managing non-github repo webhooks

* fmt

* md size dashboard

* lowercase organization in image name

* update config docs

* update example env

* provider configuration

* distribute migrator

* fix casing mismatch

* docs
2024-07-29 18:23:58 -07:00
mbecker20
820754deda roadmap 2024-07-24 00:11:58 -07:00
mbecker20
4219884198 roadmapx 2024-07-24 00:11:08 -07:00
mbecker20
d9e24cc35a add roadmap 2024-07-24 00:10:32 -07:00
mbecker20
8d2ce884d9 1.11.1 updated hetzner instances 2024-07-20 02:49:38 -07:00
mbecker20
313b000e64 update hetzner server types 2024-07-20 01:16:52 -07:00
mbecker20
c2f9e29605 close failed procedure execution updates 2024-07-19 23:21:21 -07:00
Maxwell Becker
8c6f38cafb v1.11 Improve permission management (#6)
* add "all permissions" feature on user and user group schema

* prepare support for group all

* implement user.all and user_group.all for broad base permissioning

* clean up unused deps

* sync support user group permissions regex

* 1.11

* fix fe ? issue

* this doesn't work

* sync handle user group all set

* retain above non earlier

* remove permissions that already exist

* update docs

* add user group docs

* minimize user group permissions for execute

* sync toml

* add sync name to slack alert title

* add syncs to alerter white/blacklist

* use \\ instead of $reg

* share resource type base permissions api users and user groups

* manage user / group base permissions ui

* manage user / group base resource type permissions

* update api permission handling

* manage all resource permissions in table

* user show group membership

* update client to 1.11
2024-07-19 02:11:36 -07:00
mbecker20
4a03eba99a granular invalidations 2024-07-17 14:51:51 -07:00
mbecker20
79fe078e3b 1.10.5 cpu/mem only update alert if severity increases (or resolved) 2024-07-17 14:36:22 -07:00
mbecker20
6be032fcd4 update client to 1.10.4 2024-07-16 16:06:38 -07:00
mbecker20
d0c94278ec 1.10.4 fix EnvVar parsing when value contains '=' 2024-07-16 16:05:11 -07:00
mbecker20
03ae7268fd fix server table search when sorting by deployments 2024-07-10 12:09:42 -07:00
mbecker20
f443294818 add clear link to api docs 2024-07-10 02:33:14 -07:00
mbecker20
2202835d86 improve core setup docs 2024-07-10 02:26:58 -07:00
mbecker20
98fbc7a506 improve migrator and add Dockerfile 2024-07-10 02:25:44 -07:00
mbecker20
8ee89296e1 frontend only invalidate on update Complete 2024-07-09 13:50:03 -07:00
mbecker20
989c3d2d01 more compact webhook button labels 2024-07-09 02:26:50 -07:00
mbecker20
dc72883b90 update config example 2024-07-09 02:09:17 -07:00
mbecker20
e99364430f update local client version 2024-07-09 02:06:30 -07:00
mbecker20
e106e38cd9 1.10.3 support multiple github webhook app installations 2024-07-09 02:05:38 -07:00
mbecker20
e4d0c56e49 debug git logs 2024-07-09 00:50:24 -07:00
mbecker20
7427a158f4 full err too large for alert 2024-07-09 00:40:11 -07:00
mbecker20
b926f89954 log on build unsuccessful and alerting 2024-07-09 00:20:03 -07:00
mbecker20
e666a22f08 debug instrument git calls 2024-07-09 00:09:06 -07:00
mbecker20
4107f779a5 fix build increment major version 2024-07-08 13:15:52 -07:00
mbecker20
828d6cdfed improve responsive 2024-07-05 20:19:20 -07:00
mbecker20
fe82400a99 1.10.2 ResourceSync manage repo webhooks 2024-07-05 20:02:20 -07:00
mbecker20
e37fc6adde publish 1.10.1 2024-07-05 03:32:24 -07:00
mbecker20
c21c8f99ae manage webhooks working 2024-07-05 03:29:23 -07:00
mbecker20
78a63f92bb build repo webhook management 2024-07-05 03:17:29 -07:00
mbecker20
ce67655021 core info provide owners 2024-07-05 02:26:18 -07:00
mbecker20
2ccecf38f2 default pk path /github/private-key.pem 2024-07-05 02:15:35 -07:00
mbecker20
1ddae31aad update config example 2024-07-05 02:06:27 -07:00
mbecker20
097fbefa63 1.10.1 2024-07-05 02:02:59 -07:00
mbecker20
b51442a661 ts types 2024-07-05 02:02:25 -07:00
mbecker20
a21d49d224 build / repo webhook write api 2024-07-05 02:02:03 -07:00
mbecker20
c99a33880e Create / Delete webhook api 2024-07-05 01:31:15 -07:00
mbecker20
6ee55262ba webhook management api aware if repo can be managed 2024-07-05 01:18:21 -07:00
mbecker20
878b9b55bb see whether webhooks enabled 2024-07-05 01:05:27 -07:00
mbecker20
af6193f83a update async_timing_util 2024-07-04 21:15:38 -07:00
mbecker20
b8fefddd8b EC2 2024-07-04 19:13:49 -07:00
mbecker20
7f490f5bf2 tweak 2024-07-04 19:12:02 -07:00
mbecker20
efa7c13286 docs 2024-07-04 19:08:48 -07:00
mbecker20
f913be7a0b builder setup guide 2024-07-04 19:03:43 -07:00
mbecker20
35901ef7ea actions can wrap 2024-07-04 17:53:24 -07:00
mbecker20
5b938490fc response 2024-07-04 17:29:45 -07:00
mbecker20
a7326a0116 user group toml export replace target ids with names 2024-07-04 17:10:36 -07:00
mbecker20
877bda91d7 improve log responsiveness 2024-07-04 16:49:08 -07:00
mbecker20
439a091e50 improve resource responsive 2024-07-04 16:29:13 -07:00
mbecker20
b0e89f4963 fix dashboard 2024-07-04 15:46:43 -07:00
mbecker20
b1e4b55ba1 more responsive 2024-07-04 14:41:40 -07:00
mbecker20
d4a1891c70 delete user group 2024-07-04 14:17:03 -07:00
mbecker20
9db7592d7e all_resources tables use right search 2024-07-04 01:25:40 -07:00
mbecker20
84fb603951 1.10 2024-07-01 03:18:26 -07:00
mbecker20
55bac0dd13 check right thing for empty 2024-07-01 03:12:22 -07:00
mbecker20
b143f42363 update mungos 2024-07-01 02:47:06 -07:00
mbecker20
007efd136a 1.10.0 pre 2024-07-01 02:38:24 -07:00
mbecker20
b329767f9e 1.10.0-pre-0 2024-07-01 02:33:01 -07:00
mbecker20
b4231957d5 config for secret args 2024-07-01 02:31:53 -07:00
mbecker20
b4dc446f95 interpolate core variables / secrets into build secret_args 2024-07-01 02:27:03 -07:00
mbecker20
c92515cecc combine into router 2024-07-01 01:44:07 -07:00
mbecker20
f3712feea2 finish periphery clean 2024-07-01 01:39:03 -07:00
mbecker20
0e81d17860 shrink periphery implementation 2024-07-01 01:19:25 -07:00
mbecker20
c3f1557b83 fix mem alert 2024-06-30 00:27:37 -07:00
mbecker20
5f88e4b436 separate webhook actions 2024-06-25 01:22:38 -07:00
mbecker20
473c6b3867 don't send failed build alert on build cancel 2024-06-24 16:59:34 -07:00
mbecker20
c10edaa5d1 fix builder toml export 2024-06-23 03:00:31 -07:00
mbecker20
9418a6d963 update client to 1.9.0 2024-06-23 02:30:50 -07:00
mbecker20
57646b750f clean up 2024-06-23 02:29:47 -07:00
mbecker20
0d57f9411c can deploy ecr 2024-06-23 02:27:19 -07:00
mbecker20
7d396dd539 clean up ecr 2024-06-23 02:22:14 -07:00
mbecker20
bfe762b71a install unzip 2024-06-23 01:37:12 -07:00
mbecker20
16ede84bac install aws cli core 2024-06-23 01:31:15 -07:00
mbecker20
4524db94db get ecr token using cli 2024-06-23 01:23:56 -07:00
mbecker20
580dab4acd improve error log formatting 2024-06-23 01:02:52 -07:00
mbecker20
645382856a update only flattens one level deep 2024-06-22 23:56:01 -07:00
mbecker20
5c4e6a6dbb select aws config 2024-06-22 23:33:35 -07:00
mbecker20
66810e1efb add method to get available aws ecr labels 2024-06-22 23:29:02 -07:00
mbecker20
69a84882f0 1.9.0 2024-06-22 23:06:53 -07:00
mbecker20
41648436a5 default periphery method fields 2024-06-22 22:59:51 -07:00
mbecker20
083a88aa7b implement aws ecr image registry 2024-06-22 22:57:26 -07:00
mbecker20
750f95c90d improve shortcut menu 2024-06-22 18:24:38 -07:00
mbecker20
129f3ecd82 add more kb shortcuts and shortcut menu 2024-06-22 02:56:57 -07:00
mbecker20
1b754f80ab fix double emojis 2024-06-22 01:54:45 -07:00
mbecker20
968a882012 fix alerter table 2024-06-22 01:29:31 -07:00
mbecker20
696ebdb26f label blacklist correctly 2024-06-22 01:25:38 -07:00
mbecker20
8fee04607d improve slack alerting 2024-06-22 01:10:13 -07:00
mbecker20
6fe250244b add alerter blacklist 2024-06-22 00:30:43 -07:00
mbecker20
b530af0eec send_alerts for sync alert 2024-06-21 23:09:38 -07:00
mbecker20
21e9361079 remove unused 2024-06-21 02:28:35 -07:00
mbecker20
524d2d956b fix alerts usage 2024-06-21 02:23:42 -07:00
mbecker20
aca9633941 add links and errors to slack messages 2024-06-21 01:12:46 -07:00
mbecker20
32e1bd2dda add badges for tag filter shortcuts 2024-06-21 00:15:40 -07:00
mbecker20
cb363d1559 add shift + T and shift + C to manage tags 2024-06-20 23:51:12 -07:00
mbecker20
63eb74b9c8 Add and configure build alerts 2024-06-20 23:41:28 -07:00
mbecker20
bbcc27704f bump rust builder version 2024-06-16 16:00:57 -07:00
209 changed files with 9854 additions and 4192 deletions
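The headline change in this range is the v1.12 move (PR #8 above) from the old per-provider account maps (github_accounts, docker_accounts) to generic provider lists: git_providers, docker_registries, and aws_ecr_registries, which the core diffs below look up by domain, account username, or label. A minimal sketch of the shapes those lookups imply; the field names are taken from the code in this diff, but the real type definitions live in the client crate and are not shown here:

// Hypothetical shapes inferred from the lookups in the core diffs below; not the real definitions.
struct ProviderAccount {
  username: String,       // matched against build.config.git_account / image_registry_account
  token: String,          // passed to periphery as git_token / registry_token
}

struct GitProvider {
  domain: String,         // matched against build.config.git_provider, e.g. "github.com"
  https: bool,            // copied into build.config.git_https before cloning
  accounts: Vec<ProviderAccount>,
}

struct DockerRegistry {
  domain: String,         // matched against the image registry domain
  accounts: Vec<ProviderAccount>,
}

struct AwsEcrConfigWithCredentials {
  label: String,          // referenced by ImageRegistry::AwsEcr(label)
  region: String,
  access_key_id: String,
  secret_access_key: String,
}

// The token lookup pattern repeated in build.rs, deploy.rs and the repo executors below:
fn registry_token(
  registries: &[DockerRegistry],
  domain: &str,
  account: &str,
) -> Option<String> {
  registries
    .iter()
    .find(|registry| registry.domain == domain)
    .and_then(|registry| {
      registry
        .accounts
        .iter()
        .find(|a| a.username == account)
        .map(|a| a.token.clone())
    })
}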

Cargo.lock (generated): 804 changed lines. File diff suppressed because it is too large.

View File

@@ -3,7 +3,7 @@ resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.8.0"
version = "1.12.0"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -15,7 +15,7 @@ monitor_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_client = "1.8.0"
monitor_client = "1.12.0"
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
@@ -24,24 +24,24 @@ git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.4.3", default-features = false }
serror = { version = "0.4.6", default-features = false }
slack = { version = "0.1.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
async_timing_util = "0.1.14"
async_timing_util = "1.0.0"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "0.3.0"
resolver_api = "1.1.0"
mongo_indexed = "1.0.0"
resolver_api = "1.1.1"
toml_pretty = "1.1.2"
parse_csl = "0.1.0"
mungos = "0.5.6"
mungos = "1.0.0"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.38.0", features = ["full"] }
reqwest = { version = "0.12.4", features = ["json"] }
tokio = { version = "1.38.1", features = ["full"] }
reqwest = { version = "0.12.5", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
futures-util = "0.3.30"
@@ -51,18 +51,18 @@ axum = { version = "0.7.5", features = ["ws", "json"] }
axum-extra = { version = "0.9.3", features = ["typed-header"] }
tower = { version = "0.4.13", features = ["timeout"] }
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
tokio-tungstenite = "0.23.0"
tokio-tungstenite = "0.23.1"
# SER/DE
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.203", features = ["derive"] }
strum = { version = "0.26.2", features = ["derive"] }
serde_json = "1.0.117"
toml = "0.8.13"
serde = { version = "1.0.204", features = ["derive"] }
strum = { version = "0.26.3", features = ["derive"] }
serde_json = "1.0.120"
toml = "0.8.15"
# ERROR
anyhow = "1.0.86"
thiserror = "1.0.61"
thiserror = "1.0.63"
# LOGGING
opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] }
@@ -73,13 +73,14 @@ opentelemetry = "0.23.0"
tracing = "0.1.40"
# CONFIG
clap = { version = "4.5.4", features = ["derive"] }
clap = { version = "4.5.9", features = ["derive"] }
dotenv = "0.15.0"
envy = "0.4.2"
# CRYPTO
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
uuid = { version = "1.10.0", features = ["v4", "fast-rng", "serde"] }
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.15.1"
base64 = "0.22.1"
hmac = "0.12.1"
@@ -90,14 +91,18 @@ hex = "0.4.3"
# SYSTEM
bollard = "0.16.1"
sysinfo = "0.30.12"
sysinfo = "0.30.13"
# CLOUD
aws-config = "1.5.0"
aws-sdk-ec2 = "1.46.0"
aws-config = "1.5.4"
aws-sdk-ec2 = "1.60.0"
aws-sdk-ecr = "1.35.0"
# MISC
derive_builder = "0.20.0"
typeshare = "1.0.3"
octorust = "0.7.0"
colored = "2.1.0"
bson = "2.10.0"
regex = "1.10.5"
bson = "2.11.0"

View File

@@ -20,7 +20,7 @@ pub fn name_to_build() -> &'static HashMap<String, Build> {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilds::default()),
)
.expect("failed to get builds from monitor")
.expect("failed to get builds")
.into_iter()
.map(|build| (build.name.clone(), build))
.collect()
@@ -34,7 +34,7 @@ pub fn id_to_build() -> &'static HashMap<String, Build> {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilds::default()),
)
.expect("failed to get builds from monitor")
.expect("failed to get builds")
.into_iter()
.map(|build| (build.id.clone(), build))
.collect()
@@ -48,7 +48,7 @@ pub fn name_to_deployment() -> &'static HashMap<String, Deployment> {
futures::executor::block_on(
monitor_client().read(read::ListFullDeployments::default()),
)
.expect("failed to get deployments from monitor")
.expect("failed to get deployments")
.into_iter()
.map(|deployment| (deployment.name.clone(), deployment))
.collect()
@@ -62,7 +62,7 @@ pub fn id_to_deployment() -> &'static HashMap<String, Deployment> {
futures::executor::block_on(
monitor_client().read(read::ListFullDeployments::default()),
)
.expect("failed to get deployments from monitor")
.expect("failed to get deployments")
.into_iter()
.map(|deployment| (deployment.id.clone(), deployment))
.collect()
@@ -76,7 +76,7 @@ pub fn name_to_server() -> &'static HashMap<String, Server> {
futures::executor::block_on(
monitor_client().read(read::ListFullServers::default()),
)
.expect("failed to get servers from monitor")
.expect("failed to get servers")
.into_iter()
.map(|server| (server.name.clone(), server))
.collect()
@@ -90,7 +90,7 @@ pub fn id_to_server() -> &'static HashMap<String, Server> {
futures::executor::block_on(
monitor_client().read(read::ListFullServers::default()),
)
.expect("failed to get servers from monitor")
.expect("failed to get servers")
.into_iter()
.map(|server| (server.id.clone(), server))
.collect()
@@ -104,7 +104,7 @@ pub fn name_to_builder() -> &'static HashMap<String, Builder> {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilders::default()),
)
.expect("failed to get builders from monitor")
.expect("failed to get builders")
.into_iter()
.map(|builder| (builder.name.clone(), builder))
.collect()
@@ -118,7 +118,7 @@ pub fn id_to_builder() -> &'static HashMap<String, Builder> {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilders::default()),
)
.expect("failed to get builders from monitor")
.expect("failed to get builders")
.into_iter()
.map(|builder| (builder.id.clone(), builder))
.collect()
@@ -132,7 +132,7 @@ pub fn name_to_alerter() -> &'static HashMap<String, Alerter> {
futures::executor::block_on(
monitor_client().read(read::ListFullAlerters::default()),
)
.expect("failed to get alerters from monitor")
.expect("failed to get alerters")
.into_iter()
.map(|alerter| (alerter.name.clone(), alerter))
.collect()
@@ -146,7 +146,7 @@ pub fn id_to_alerter() -> &'static HashMap<String, Alerter> {
futures::executor::block_on(
monitor_client().read(read::ListFullAlerters::default()),
)
.expect("failed to get alerters from monitor")
.expect("failed to get alerters")
.into_iter()
.map(|alerter| (alerter.id.clone(), alerter))
.collect()
@@ -160,7 +160,7 @@ pub fn name_to_repo() -> &'static HashMap<String, Repo> {
futures::executor::block_on(
monitor_client().read(read::ListFullRepos::default()),
)
.expect("failed to get repos from monitor")
.expect("failed to get repos")
.into_iter()
.map(|repo| (repo.name.clone(), repo))
.collect()
@@ -174,7 +174,7 @@ pub fn id_to_repo() -> &'static HashMap<String, Repo> {
futures::executor::block_on(
monitor_client().read(read::ListFullRepos::default()),
)
.expect("failed to get repos from monitor")
.expect("failed to get repos")
.into_iter()
.map(|repo| (repo.id.clone(), repo))
.collect()
@@ -188,7 +188,7 @@ pub fn name_to_procedure() -> &'static HashMap<String, Procedure> {
futures::executor::block_on(
monitor_client().read(read::ListFullProcedures::default()),
)
.expect("failed to get procedures from monitor")
.expect("failed to get procedures")
.into_iter()
.map(|procedure| (procedure.name.clone(), procedure))
.collect()
@@ -202,7 +202,7 @@ pub fn id_to_procedure() -> &'static HashMap<String, Procedure> {
futures::executor::block_on(
monitor_client().read(read::ListFullProcedures::default()),
)
.expect("failed to get procedures from monitor")
.expect("failed to get procedures")
.into_iter()
.map(|procedure| (procedure.id.clone(), procedure))
.collect()
@@ -218,7 +218,7 @@ pub fn name_to_server_template(
futures::executor::block_on(
monitor_client().read(read::ListFullServerTemplates::default()),
)
.expect("failed to get server templates from monitor")
.expect("failed to get server templates")
.into_iter()
.map(|procedure| (procedure.name.clone(), procedure))
.collect()
@@ -234,7 +234,7 @@ pub fn id_to_server_template(
futures::executor::block_on(
monitor_client().read(read::ListFullServerTemplates::default()),
)
.expect("failed to get server templates from monitor")
.expect("failed to get server templates")
.into_iter()
.map(|procedure| (procedure.id.clone(), procedure))
.collect()
@@ -249,7 +249,7 @@ pub fn name_to_resource_sync(
futures::executor::block_on(
monitor_client().read(read::ListFullResourceSyncs::default()),
)
.expect("failed to get syncs from monitor")
.expect("failed to get syncs")
.into_iter()
.map(|sync| (sync.name.clone(), sync))
.collect()
@@ -264,7 +264,7 @@ pub fn id_to_resource_sync() -> &'static HashMap<String, ResourceSync>
futures::executor::block_on(
monitor_client().read(read::ListFullResourceSyncs::default()),
)
.expect("failed to get syncs from monitor")
.expect("failed to get syncs")
.into_iter()
.map(|sync| (sync.id.clone(), sync))
.collect()
@@ -278,7 +278,7 @@ pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
futures::executor::block_on(
monitor_client().read(read::ListUserGroups::default()),
)
.expect("failed to get user groups from monitor")
.expect("failed to get user groups")
.into_iter()
.map(|user_group| (user_group.name.clone(), user_group))
.collect()
@@ -292,8 +292,7 @@ pub fn name_to_variable() -> &'static HashMap<String, Variable> {
futures::executor::block_on(
monitor_client().read(read::ListVariables::default()),
)
.expect("failed to get user groups from monitor")
.variables
.expect("failed to get variables")
.into_iter()
.map(|variable| (variable.name.clone(), variable))
.collect()
@@ -307,7 +306,7 @@ pub fn id_to_user() -> &'static HashMap<String, User> {
futures::executor::block_on(
monitor_client().read(read::ListUsers::default()),
)
.expect("failed to get users from monitor")
.expect("failed to get users")
.into_iter()
.map(|user| (user.id.clone(), user))
.collect()
@@ -320,7 +319,7 @@ pub fn id_to_tag() -> &'static HashMap<String, Tag> {
futures::executor::block_on(
monitor_client().read(read::ListTags::default()),
)
.expect("failed to get tags from monitor")
.expect("failed to get tags")
.into_iter()
.map(|tag| (tag.id.clone(), tag))
.collect()

View File

@@ -84,7 +84,7 @@ impl ResourceSync for Deployment {
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: version.clone(),
version: *version,
};
}

View File

@@ -29,6 +29,7 @@ derive_variants.workspace = true
mongo_indexed.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
run_command.workspace = true
parse_csl.workspace = true
mungos.workspace = true
slack.workspace = true
@@ -37,15 +38,18 @@ svi.workspace = true
ordered_hash_map.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-sdk-ecr.workspace = true
aws-config.workspace = true
tokio-util.workspace = true
axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
typeshare.workspace = true
octorust.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
nom_pem.workspace = true
anyhow.workspace = true
dotenv.workspace = true
bcrypt.workspace = true
@@ -54,6 +58,7 @@ tokio.workspace = true
tower.workspace = true
serde.workspace = true
strum.workspace = true
regex.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -1,5 +1,5 @@
# Build Core
FROM rust:1.78.0-bookworm as core-builder
FROM rust:1.79.0-bookworm as core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_core --release
@@ -16,8 +16,11 @@ RUN cd frontend && yarn link @monitor/client && yarn && yarn build
FROM debian:bookworm-slim
# Install Deps
RUN apt update && apt install -y git ca-certificates
RUN apt update && apt install -y git curl unzip ca-certificates && \
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install
# Copy
COPY ./config_example/core.config.example.toml /config/config.toml
COPY --from=core-builder /builder/target/release/core /
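The widened install line above (curl, unzip, then the AWS CLI) backs the "install aws cli core" and "get ecr token using cli" commits: ECR login passwords are fetched by shelling out to the CLI baked into the image. The actual helper (ecr::get_ecr_token in the diffs below) is not shown in this range; a rough sketch of that approach, assuming the standard aws ecr get-login-password command and a tokio runtime:

use tokio::process::Command;

// Rough sketch only; the real helper lives in core's cloud/aws module and may differ.
async fn get_ecr_token(
  region: &str,
  access_key_id: &str,
  secret_access_key: &str,
) -> anyhow::Result<String> {
  let output = Command::new("aws")
    .args(["ecr", "get-login-password", "--region", region])
    .env("AWS_ACCESS_KEY_ID", access_key_id)
    .env("AWS_SECRET_ACCESS_KEY", secret_access_key)
    .output()
    .await?;
  if !output.status.success() {
    anyhow::bail!(
      "aws ecr get-login-password failed: {}",
      String::from_utf8_lossy(&output.stderr)
    );
  }
  // stdout is the short-lived login password for the account's ECR registry.
  Ok(String::from_utf8(output.stdout)?.trim().to_string())
}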

View File

@@ -1,21 +1,24 @@
use std::{collections::HashSet, time::Duration};
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::muted;
use formatting::{format_serror, muted};
use futures::future::join_all;
use monitor_client::{
api::execute::{
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
},
entities::{
alert::{Alert, AlertData},
all_logs_success,
build::{Build, CloudRegistryConfig, ImageRegistry},
build::{Build, ImageRegistry, StandardRegistryConfig},
builder::{AwsBuilderConfig, Builder, BuilderConfig},
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
deployment::DeploymentState,
monitor_timestamp,
permission::PermissionLevel,
server::Server,
server::{stats::SeverityLevel, Server},
server_template::aws::AwsServerTemplateConfig,
to_monitor_name,
update::{Log, Update},
user::{auto_redeploy_user, User},
},
@@ -33,19 +36,22 @@ use periphery_client::{
PeripheryClient,
};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use tokio_util::sync::CancellationToken;
use crate::{
cloud::{
aws::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
ecr,
},
BuildCleanupData,
},
config::core_config,
helpers::{
alert::send_alerts,
channel::build_cancel_channel,
periphery_client,
query::{get_deployment_state, get_global_variables},
@@ -73,9 +79,6 @@ impl Resolve<RunBuild, (User, Update)> for State {
)
.await?;
let registry_token =
validate_account_extract_registry_token(&build)?;
// get the action state for the build (or insert default).
let action_state =
action_states().build.get_or_insert_default(&build.id).await;
@@ -85,8 +88,11 @@ impl Resolve<RunBuild, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.building = true)?;
let (registry_token, aws_ecr) =
validate_account_extract_registry_token_aws_ecr(&build).await?;
build.config.version.increment();
update.version = build.config.version.clone();
update.version = build.config.version;
update_update(update.clone()).await?;
let cancel = CancellationToken::new();
@@ -137,9 +143,12 @@ impl Resolve<RunBuild, (User, Update)> for State {
warn!("failed to get builder | {e:#}");
update.logs.push(Log::error(
"get builder",
serialize_error_pretty(&e),
format_serror(&e.context("failed to get builder").into()),
));
return handle_early_return(update).await;
return handle_early_return(
update, build.id, build.name, false,
)
.await;
}
};
@@ -147,17 +156,26 @@ impl Resolve<RunBuild, (User, Update)> for State {
let variables = get_global_variables().await?;
// CLONE REPO
let github_token = core_config
.github_accounts
.get(&build.config.github_account)
.cloned();
let git_token = core_config
.git_providers
.iter()
.find(|provider| provider.domain == build.config.git_provider)
.and_then(|provider| {
build.config.git_https = provider.https;
provider
.accounts
.iter()
.find(|account| {
account.username == build.config.git_account
})
.map(|account| account.token.clone())
});
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
args: (&build).into(),
github_token,
git_token,
}) => res,
_ = cancel.cancelled() => {
info!("build cancelled during clone, cleaning up builder");
@@ -165,7 +183,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update).await
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -176,8 +194,10 @@ impl Resolve<RunBuild, (User, Update)> for State {
}
Err(e) => {
warn!("failed build at clone repo | {e:#}");
update
.push_error_log("clone repo", serialize_error_pretty(&e));
update.push_error_log(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
}
}
@@ -187,6 +207,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut secret_replacers_for_log = HashSet::new();
// Interpolate into build args
for arg in &mut build.config.build_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
@@ -205,10 +228,40 @@ impl Resolve<RunBuild, (User, Update)> for State {
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.iter().map(|(_, variable)| variable.clone()),
);
secret_replacers.extend(more_replacers);
arg.value = res;
}
// Interpolate into secret args
for arg in &mut build.config.secret_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.into_iter().map(|(_, variable)| variable),
);
// Secret args don't need to be in replacers sent to periphery.
// The secret args don't end up in the command like build args do.
arg.value = res;
}
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
@@ -220,12 +273,12 @@ impl Resolve<RunBuild, (User, Update)> for State {
.join("\n"),
);
}
if !secret_replacers.is_empty() {
if !secret_replacers_for_log.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
secret_replacers_for_log
.into_iter()
.map(|variable| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
@@ -236,6 +289,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
.request(api::build::Build {
build: build.clone(),
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
}) => res.context("failed at call to periphery to build"),
_ = cancel.cancelled() => {
@@ -243,7 +297,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update).await
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -254,7 +308,10 @@ impl Resolve<RunBuild, (User, Update)> for State {
}
Err(e) => {
warn!("error in build | {e:#}");
update.push_error_log("build", serialize_error_pretty(&e))
update.push_error_log(
"build",
format_serror(&e.context("failed to build").into()),
)
}
};
}
@@ -275,7 +332,6 @@ impl Resolve<RunBuild, (User, Update)> for State {
"info.last_built_at": monitor_timestamp(),
}
},
None,
)
.await;
}
@@ -307,7 +363,26 @@ impl Resolve<RunBuild, (User, Update)> for State {
// don't hold response up for user
tokio::spawn(async move {
handle_post_build_redeploy(&build.id).await;
info!("post build redeploy handled");
});
} else {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build.id,
name: build.name,
version,
},
};
send_alerts(&[alert]).await
});
}
@@ -318,6 +393,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
#[instrument(skip(update))]
async fn handle_early_return(
mut update: Update,
build_id: String,
build_name: String,
is_cancel: bool,
) -> anyhow::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
@@ -335,6 +413,27 @@ async fn handle_early_return(
refresh_build_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build_id,
name: build_name,
version,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
@@ -348,24 +447,28 @@ pub async fn validate_cancel_build(
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates.find_one(
doc! {
db.updates
.find_one(doc! {
"operation": "RunBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
),
db.updates.find_one(
doc! {
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future(),
db.updates
.find_one(doc! {
"operation": "CancelBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
)
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future()
)?;
match (latest_build, latest_cancel) {
@@ -637,20 +740,12 @@ async fn handle_post_build_redeploy(build_id: &str) {
}
});
let redeploy_results = join_all(futures).await;
let mut redeploys = Vec::<String>::new();
let mut redeploy_failures = Vec::<String>::new();
for res in redeploy_results {
if res.is_none() {
for res in join_all(futures).await {
let Some((id, res)) = res else {
continue;
}
let (id, res) = res.unwrap();
match res {
Ok(_) => redeploys.push(id),
Err(e) => redeploy_failures
.push(format!("{id}: {}", serialize_error_pretty(&e))),
};
if let Err(e) = res {
warn!("failed post build redeploy for deployment {id}: {e:#}");
}
}
}
@@ -688,30 +783,73 @@ fn start_aws_builder_log(
}
/// This will make sure that a build with non-none image registry has an account attached,
/// and will check the core config for a token matching requirements (otherwise it is left to periphery)
fn validate_account_extract_registry_token(
/// and will check the core config for a token / aws ecr config matching requirements.
/// Otherwise it is left to periphery.
async fn validate_account_extract_registry_token_aws_ecr(
build: &Build,
) -> anyhow::Result<Option<String>> {
match &build.config.image_registry {
ImageRegistry::None(_) => Ok(None),
ImageRegistry::DockerHub(CloudRegistryConfig {
account, ..
}) => {
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use DockerHub image registry"
));
}
Ok(core_config().docker_accounts.get(account).cloned())
) -> anyhow::Result<(Option<String>, Option<AwsEcrConfig>)> {
let (domain, account) = match &build.config.image_registry {
// Early return for None
ImageRegistry::None(_) => return Ok((None, None)),
// Early return for AwsEcr
ImageRegistry::AwsEcr(label) => {
let config = core_config()
.aws_ecr_registries
.iter()
.find(|reg| &reg.label == label);
let token = match config {
Some(AwsEcrConfigWithCredentials {
region,
access_key_id,
secret_access_key,
..
}) => {
let token = ecr::get_ecr_token(
region,
access_key_id,
secret_access_key,
)
.await
.context("failed to get aws ecr token")?;
ecr::maybe_create_repo(
&to_monitor_name(&build.name),
region.to_string(),
access_key_id,
secret_access_key,
)
.await
.context("failed to create aws ecr repo")?;
Some(token)
}
None => None,
};
return Ok((token, config.map(AwsEcrConfig::from)));
}
ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use GithubContainerRegistry"
));
}
Ok(core_config().github_accounts.get(account).cloned())
}
ImageRegistry::Custom(_) => todo!(),
ImageRegistry::Standard(StandardRegistryConfig {
domain,
account,
..
}) => (domain.as_str(), account),
};
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use registry provider {domain}"
));
}
Ok((
core_config()
.docker_registries
.iter()
.find(|provider| provider.domain == domain)
.and_then(|provider| {
provider
.accounts
.iter()
.find(|_account| &_account.username == account)
.map(|account| account.token.clone())
}),
None,
))
}
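The two interpolation passes above (build args, then the new secret args) both run values through svi with the DoubleBrackets interpolator: global variables are substituted first, then core secrets, and only the build-arg secret replacers are forwarded to periphery so the values can be masked out of the command logs; per the new comment, secret args never reach the command, so their replacers are kept only for the "interpolate core secrets" log. A stripped-down sketch of a single pass, assuming the variable maps are plain String-to-String maps and that DoubleBrackets matches [[NAME]] tokens:

use std::collections::HashMap;
use anyhow::Context;

// Sketch of one interpolation pass; the call shape mirrors the diff above,
// but the concrete map type is an assumption.
fn interpolate(
  value: &str,
  variables: &HashMap<String, String>,
) -> anyhow::Result<(String, Vec<String>)> {
  let (res, replacers) = svi::interpolate_variables(
    value,
    variables,
    svi::Interpolator::DoubleBrackets,
    false,
  )
  .context("failed to interpolate variables")?;
  // `replacers` holds (replaced value, variable name) pairs; the names are what
  // end up in the "replaced: ..." lines of the update log.
  let replaced = replacers.into_iter().map(|(_, name)| name).collect();
  Ok((res, replaced))
}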

View File

@@ -1,12 +1,16 @@
use std::collections::HashSet;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::execute::*,
entities::{
build::{Build, ImageRegistry},
deployment::{Deployment, DeploymentImage},
config::core::AwsEcrConfig,
deployment::{
extract_registry_domain, Deployment, DeploymentImage,
},
get_image_name,
permission::PermissionLevel,
server::ServerState,
@@ -18,9 +22,9 @@ use monitor_client::{
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
cloud::aws::ecr,
config::core_config,
helpers::{
periphery_client,
@@ -78,33 +82,101 @@ impl Resolve<Deploy, (User, Update)> for State {
let periphery = periphery_client(&server)?;
// This block gets the version of the image to deploy in the Build case.
// It also gets the name of the image from the build and attaches it directly.
let version = match deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(&build_id).await?;
let image_name = get_image_name(&build);
let version = if version.is_none() {
build.config.version
} else {
version
};
// replace image with corresponding build image.
deployment.config.image = DeploymentImage::Image {
image: format!("{image_name}:{version}"),
};
// set image registry to match build docker account if it's not overridden by deployment
if matches!(
&deployment.config.image_registry,
ImageRegistry::None(_)
) {
deployment.config.image_registry =
build.config.image_registry;
let (version, registry_token, aws_ecr) =
match &deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(build_id).await?;
let image_name = get_image_name(&build, |label| {
core_config()
.aws_ecr_registries
.iter()
.find(|reg| &reg.label == label)
.map(AwsEcrConfig::from)
})
.context("failed to create image name")?;
let version = if version.is_none() {
build.config.version
} else {
*version
};
// Remove ending patch if it is 0, this means use latest patch.
let version_str = if version.patch == 0 {
format!("{}.{}", version.major, version.minor)
} else {
version.to_string()
};
// replace image with corresponding build image.
deployment.config.image = DeploymentImage::Image {
image: format!("{image_name}:{version_str}"),
};
match build.config.image_registry {
ImageRegistry::None(_) => (version, None, None),
ImageRegistry::AwsEcr(label) => {
let config = core_config()
.aws_ecr_registries
.iter()
.find(|reg| reg.label == label)
.with_context(|| {
format!(
"did not find config for aws ecr registry {label}"
)
})?;
let token = ecr::get_ecr_token(
&config.region,
&config.access_key_id,
&config.secret_access_key,
)
.await
.context("failed to create aws ecr login token")?;
(version, Some(token), Some(AwsEcrConfig::from(config)))
}
ImageRegistry::Standard(params) => {
if deployment.config.image_registry_account.is_empty() {
deployment.config.image_registry_account =
params.account
}
let token = core_config()
.docker_registries
.iter()
.find(|registry| registry.domain == params.domain)
.and_then(|provider| {
provider
.accounts
.iter()
.find(|account| {
account.username
== deployment.config.image_registry_account
})
.map(|account| account.token.clone())
});
(version, token, None)
}
}
}
version
}
DeploymentImage::Image { .. } => Version::default(),
};
DeploymentImage::Image { image } => {
let domain = extract_registry_domain(image)?;
let token =
(!deployment.config.image_registry_account.is_empty())
.then(|| {
core_config()
.docker_registries
.iter()
.find(|registry| registry.domain == domain)
.and_then(|provider| {
provider
.accounts
.iter()
.find(|account| {
account.username
== deployment.config.image_registry_account
})
.map(|account| account.token.clone())
})
})
.flatten();
(Version::default(), token, None)
}
};
let variables = get_global_variables().await?;
let core_config = core_config();
@@ -161,25 +233,13 @@ impl Resolve<Deploy, (User, Update)> for State {
update.version = version;
update_update(update.clone()).await?;
let registry_token = match &deployment.config.image_registry {
ImageRegistry::None(_) => None,
ImageRegistry::DockerHub(params) => {
core_config.docker_accounts.get(&params.account).cloned()
}
ImageRegistry::Ghcr(params) => {
core_config.github_accounts.get(&params.account).cloned()
}
ImageRegistry::Custom(_) => {
return Err(anyhow!("Custom ImageRegistry not yet supported"))
}
};
match periphery
.request(api::container::Deploy {
deployment,
stop_signal,
stop_time,
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
})
.await
@@ -188,7 +248,9 @@ impl Resolve<Deploy, (User, Update)> for State {
Err(e) => {
update.push_error_log(
"deploy container",
serialize_error_pretty(&e),
format_serror(
&e.context("failed to deploy container").into(),
),
);
}
};
@@ -248,9 +310,10 @@ impl Resolve<StartContainer, (User, Update)> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("start container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"start container",
format_serror(&e.context("failed to start container").into()),
),
};
update.logs.push(log);
@@ -318,9 +381,10 @@ impl Resolve<StopContainer, (User, Update)> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
@@ -403,9 +467,11 @@ impl Resolve<StopAllContainers, (User, Update)> for State {
if let Err(e) = res {
update.push_error_log(
"stop container failure",
format!(
"failed to stop container {name} ({id})\n\n{}",
serialize_error_pretty(&e)
format_serror(
&e.context(format!(
"failed to stop container {name} ({id})"
))
.into(),
),
);
}
@@ -474,9 +540,10 @@ impl Resolve<RemoveContainer, (User, Update)> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
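One detail in the Deploy hunk above implements the "remove patch when 0 for deployments using specific build version" commit: when resolving a Build image, a version whose patch component is 0 is tagged as just major.minor ("use latest patch"), while a non-zero patch pins the exact tag. Illustrative only:

// Mirrors the version_str logic in the Deploy hunk above.
fn image_tag(major: u64, minor: u64, patch: u64) -> String {
  if patch == 0 {
    format!("{major}.{minor}")          // 1.4.0 -> "1.4", floats to the latest 1.4.x
  } else {
    format!("{major}.{minor}.{patch}")  // 1.4.2 -> "1.4.2", pinned exactly
  }
}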

View File

@@ -2,6 +2,7 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -12,7 +13,7 @@ use monitor_client::{
use mungos::by_id::find_one_by_id;
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::{serialize_error_pretty, Json};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
@@ -92,7 +93,7 @@ async fn handler(
let log = match handle.await {
Ok(Err(e)) => {
warn!("/execute request {req_id} task error: {e:#}",);
Log::error("task error", serialize_error_pretty(&e))
Log::error("task error", format_serror(&e.into()))
}
Err(e) => {
warn!("/execute request {req_id} spawn error: {e:?}",);

View File

@@ -1,6 +1,6 @@
use std::pin::Pin;
use formatting::{bold, colored, muted, Color};
use formatting::{bold, colored, format_serror, muted, Color};
use monitor_client::{
api::execute::RunProcedure,
entities::{
@@ -10,7 +10,6 @@ use monitor_client::{
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use tokio::sync::Mutex;
use crate::{
@@ -87,14 +86,8 @@ fn resolve_inner(
),
);
}
Err(e) => update.push_error_log(
"execution error",
format!(
"{}: {}",
colored("ERROR", Color::Red),
serialize_error_pretty(&e)
),
),
Err(e) => update
.push_error_log("execution error", format_serror(&e.into())),
}
update.finalize();

View File

@@ -1,4 +1,5 @@
use anyhow::anyhow;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -16,7 +17,6 @@ use mungos::{
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
config::core_config,
@@ -32,7 +32,7 @@ impl Resolve<CloneRepo, (User, Update)> for State {
CloneRepo { repo }: CloneRepo,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
let mut repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
@@ -57,21 +57,32 @@ impl Resolve<CloneRepo, (User, Update)> for State {
let periphery = periphery_client(&server)?;
let github_token = core_config()
.github_accounts
.get(&repo.config.github_account)
.cloned();
let git_token = core_config()
.git_providers
.iter()
.find(|provider| provider.domain == repo.config.git_provider)
.and_then(|provider| {
repo.config.git_https = provider.https;
provider
.accounts
.iter()
.find(|account| account.username == repo.config.git_account)
.map(|account| account.token.clone())
});
let logs = match periphery
.request(api::git::CloneRepo {
args: (&repo).into(),
github_token,
git_token,
})
.await
{
Ok(logs) => logs,
Err(e) => {
vec![Log::error("clone repo", serialize_error_pretty(&e))]
vec![Log::error(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
)]
}
};
@@ -129,7 +140,10 @@ impl Resolve<PullRepo, (User, Update)> for State {
{
Ok(logs) => logs,
Err(e) => {
vec![Log::error("pull repo", serialize_error_pretty(&e))]
vec![Log::error(
"pull repo",
format_serror(&e.context("failed to pull repo").into()),
)]
}
};
@@ -175,7 +189,6 @@ async fn update_last_pulled_time(repo_name: &str) {
.update_one(
doc! { "name": repo_name },
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
None,
)
.await;
if let Err(e) = res {

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -11,7 +12,6 @@ use monitor_client::{
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{periphery_client, update::update_update},
@@ -54,9 +54,12 @@ impl Resolve<PruneContainers, (User, Update)> for State {
server.name
)) {
Ok(log) => log,
Err(e) => {
Log::error("prune containers", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"prune containers",
format_serror(
&e.context("failed to prune containers").into(),
),
),
};
update.success = log.success;
@@ -105,9 +108,10 @@ impl Resolve<PruneNetworks, (User, Update)> for State {
server.name
)) {
Ok(log) => log,
Err(e) => {
Log::error("prune networks", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"prune networks",
format_serror(&e.context("failed to prune networks").into()),
),
};
update.success = log.success;

View File

@@ -1,4 +1,5 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::{execute::LaunchServer, write::CreateServer},
entities::{
@@ -11,10 +12,11 @@ use monitor_client::{
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
cloud::{aws::launch_ec2_instance, hetzner::launch_hetzner_server},
cloud::{
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
},
helpers::update::update_update,
resource,
state::{db_client, State},
@@ -34,12 +36,9 @@ impl Resolve<LaunchServer, (User, Update)> for State {
if db_client()
.await
.servers
.find_one(
doc! {
"name": &name
},
None,
)
.find_one(doc! {
"name": &name
})
.await
.context("failed to query db for servers")?
.is_some()
@@ -130,10 +129,7 @@ impl Resolve<LaunchServer, (User, Update)> for State {
Err(e) => {
update.push_error_log(
"create server",
format!(
"failed to create server\n\n{}",
serialize_error_pretty(&e)
),
format_serror(&e.context("failed to create server").into()),
);
}
};

View File

@@ -1,5 +1,5 @@
use anyhow::{anyhow, Context};
use formatting::{colored, Color};
use formatting::{colored, format_serror, Color};
use mongo_indexed::doc;
use monitor_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
@@ -20,7 +20,6 @@ use monitor_client::{
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
@@ -359,9 +358,9 @@ impl Resolve<RunSync, (User, Update)> for State {
warn!("failed to refresh sync {} after run | {e:#}", sync.name);
update.push_error_log(
"refresh sync",
format!(
"failed to refresh sync pending after run | {}",
serialize_error_pretty(&e)
format_serror(
&e.context("failed to refresh sync pending after run")
.into(),
),
);
}

View File

@@ -14,7 +14,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
state::{db_client, State},
};
@@ -28,13 +28,13 @@ impl Resolve<ListAlerts, User> for State {
) -> anyhow::Result<ListAlertsResponse> {
let mut query = query.unwrap_or_default();
if !user.admin && !core_config().transparent_mode {
let server_ids = get_resource_ids_for_non_admin(
&user.id,
let server_ids = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Server,
)
.await?;
let deployment_ids = get_resource_ids_for_non_admin(
&user.id,
let deployment_ids = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Deployment,
)
.await?;

View File

@@ -1,6 +1,5 @@
use std::str::FromStr;
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::*,
entities::{
@@ -10,12 +9,11 @@ use monitor_client::{
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -61,26 +59,21 @@ impl Resolve<GetAlertersSummary, User> for State {
GetAlertersSummary {}: GetAlertersSummary,
user: User,
) -> anyhow::Result<GetAlertersSummaryResponse> {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
let query = match get_resource_ids_for_user(
&user,
ResourceTargetVariant::Alerter,
)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
};
Some(query)
},
None => Document::new(),
};
let total = db_client()
.await
.alerters
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all alerter documents")?;
let res = GetAlertersSummaryResponse {
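Several read handlers in this range (alerts, alerters, and builders below) now route permission filtering through get_resource_ids_for_user instead of get_resource_ids_for_non_admin. Judging from the Some/None match above, the new helper returns an Option: Some(ids) when the user is limited to specific resources, None when no filter is needed (admin, transparent mode, or the v1.11 user.all / user_group.all base permissions). A sketch of that inferred contract for building the Mongo filter; the id type is an assumption:

use mungos::mongodb::bson::{doc, oid::ObjectId, Document};

// Inferred from the match above; not the actual helper in helpers::query.
// None      => the user may see every resource of this type (empty filter).
// Some(ids) => restrict the count/query to these ids.
fn resource_filter(ids: Option<Vec<ObjectId>>) -> Document {
  match ids {
    Some(ids) => doc! { "_id": { "$in": ids } },
    None => Document::new(),
  }
}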

View File

@@ -1,7 +1,4 @@
use std::{
collections::{HashMap, HashSet},
sync::OnceLock,
};
use std::collections::{HashMap, HashSet};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
@@ -10,6 +7,7 @@ use monitor_client::{
api::read::*,
entities::{
build::{Build, BuildActionState, BuildListItem, BuildState},
config::core::CoreConfig,
permission::PermissionLevel,
update::UpdateStatus,
user::User,
@@ -20,12 +18,14 @@ use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::{Resolve, ResolveToString};
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, build_state_cache, db_client, State},
state::{
action_states, build_state_cache, db_client, github_client, State,
},
};
impl Resolve<GetBuild, User> for State {
@@ -147,16 +147,13 @@ impl Resolve<GetBuildMonthlyStats, User> for State {
let mut build_updates = db_client()
.await
.updates
.find(
doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
"operation": Operation::RunBuild.to_string(),
.find(doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
None,
)
"operation": Operation::RunBuild.to_string(),
})
.await
.context("failed to get updates cursor")?;
@@ -193,16 +190,16 @@ fn ms_to_hour(duration: i64) -> f64 {
duration as f64 / MS_TO_HOUR_DIVISOR
}
impl Resolve<GetBuildVersions, User> for State {
impl Resolve<ListBuildVersions, User> for State {
async fn resolve(
&self,
GetBuildVersions {
ListBuildVersions {
build,
major,
minor,
patch,
limit,
}: GetBuildVersions,
}: ListBuildVersions,
user: User,
) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
let build = resource::get_check_permissions::<Build>(
@@ -250,42 +247,6 @@ impl Resolve<GetBuildVersions, User> for State {
}
}
fn github_organizations() -> &'static String {
static GITHUB_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
GITHUB_ORGANIZATIONS.get_or_init(|| {
serde_json::to_string(&core_config().github_organizations)
.expect("failed to serialize github organizations")
})
}
impl ResolveToString<ListGithubOrganizations, User> for State {
async fn resolve_to_string(
&self,
ListGithubOrganizations {}: ListGithubOrganizations,
_: User,
) -> anyhow::Result<String> {
Ok(github_organizations().clone())
}
}
fn docker_organizations() -> &'static String {
static DOCKER_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
DOCKER_ORGANIZATIONS.get_or_init(|| {
serde_json::to_string(&core_config().docker_organizations)
.expect("failed to serialize docker organizations")
})
}
impl ResolveToString<ListDockerOrganizations, User> for State {
async fn resolve_to_string(
&self,
ListDockerOrganizations {}: ListDockerOrganizations,
_: User,
) -> anyhow::Result<String> {
Ok(docker_organizations().clone())
}
}
impl Resolve<ListCommonBuildExtraArgs, User> for State {
async fn resolve(
&self,
@@ -310,3 +271,78 @@ impl Resolve<ListCommonBuildExtraArgs, User> for State {
Ok(res)
}
}
impl Resolve<GetBuildWebhookEnabled, User> for State {
async fn resolve(
&self,
GetBuildWebhookEnabled { build }: GetBuildWebhookEnabled,
user: User,
) -> anyhow::Result<GetBuildWebhookEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Read,
)
.await?;
if build.config.git_provider != "github.com"
|| build.config.repo.is_empty()
{
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: true,
});
}
}
Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: false,
})
}
}

View File

@@ -1,21 +1,19 @@
use std::{collections::HashSet, str::FromStr};
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::{self, *},
api::read::*,
entities::{
builder::{Builder, BuilderConfig, BuilderListItem},
builder::{Builder, BuilderListItem},
permission::PermissionLevel,
update::ResourceTargetVariant,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -61,26 +59,21 @@ impl Resolve<GetBuildersSummary, User> for State {
GetBuildersSummary {}: GetBuildersSummary,
user: User,
) -> anyhow::Result<GetBuildersSummaryResponse> {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Builder,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
let query = match get_resource_ids_for_user(
&user,
ResourceTargetVariant::Builder,
)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
};
Some(query)
},
None => Document::new(),
};
let total = db_client()
.await
.builders
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all builder documents")?;
let res = GetBuildersSummaryResponse {
@@ -89,52 +82,3 @@ impl Resolve<GetBuildersSummary, User> for State {
Ok(res)
}
}
impl Resolve<GetBuilderAvailableAccounts, User> for State {
async fn resolve(
&self,
GetBuilderAvailableAccounts { builder }: GetBuilderAvailableAccounts,
user: User,
) -> anyhow::Result<GetBuilderAvailableAccountsResponse> {
let builder = resource::get_check_permissions::<Builder>(
&builder,
&user,
PermissionLevel::Read,
)
.await?;
let (github, docker) = match builder.config {
BuilderConfig::Aws(config) => {
(config.github_accounts, config.docker_accounts)
}
BuilderConfig::Server(config) => {
let res = self
.resolve(
read::GetAvailableAccounts {
server: Some(config.server_id),
},
user,
)
.await?;
(res.github, res.docker)
}
};
let mut github_set = HashSet::<String>::new();
github_set.extend(core_config().github_accounts.keys().cloned());
github_set.extend(github);
let mut github = github_set.into_iter().collect::<Vec<_>>();
github.sort();
let mut docker_set = HashSet::<String>::new();
docker_set.extend(core_config().docker_accounts.keys().cloned());
docker_set.extend(docker);
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
docker.sort();
Ok(GetBuilderAvailableAccountsResponse { github, docker })
}
}

View File

@@ -1,16 +1,33 @@
use std::time::Instant;
use std::{collections::HashSet, sync::OnceLock, time::Instant};
use anyhow::anyhow;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::read::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use monitor_client::{
api::read::*,
entities::{
build::Build,
builder::{Builder, BuilderConfig},
config::{DockerRegistry, GitProvider},
repo::Repo,
server::Server,
sync::ResourceSync,
update::ResourceTarget,
user::User,
},
};
use resolver_api::{
derive::Resolver, Resolve, ResolveToString, Resolver,
};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{auth::auth_request, config::core_config, state::State};
use crate::{
auth::auth_request, config::core_config, helpers::periphery_client,
resource, state::State,
};
mod alert;
mod alerter;
@@ -37,16 +54,24 @@ mod variable;
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum ReadRequest {
#[to_string_resolver]
GetVersion(GetVersion),
#[to_string_resolver]
GetCoreInfo(GetCoreInfo),
#[to_string_resolver]
ListAwsEcrLabels(ListAwsEcrLabels),
ListSecrets(ListSecrets),
ListGitProviders(ListGitProviders),
ListDockerRegistries(ListDockerRegistries),
// ==== USER ====
ListUsers(ListUsers),
GetUsername(GetUsername),
GetPermissionLevel(GetPermissionLevel),
FindUser(FindUser),
ListUsers(ListUsers),
ListApiKeys(ListApiKeys),
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
ListPermissions(ListPermissions),
GetPermissionLevel(GetPermissionLevel),
ListUserTargetPermissions(ListUserTargetPermissions),
// ==== USER GROUP ====
@@ -65,15 +90,13 @@ enum ReadRequest {
// ==== SERVER TEMPLATE ====
GetServerTemplate(GetServerTemplate),
GetServerTemplatesSummary(GetServerTemplatesSummary),
ListServerTemplates(ListServerTemplates),
ListFullServerTemplates(ListFullServerTemplates),
GetServerTemplatesSummary(GetServerTemplatesSummary),
// ==== SERVER ====
GetServersSummary(GetServersSummary),
GetServer(GetServer),
ListServers(ListServers),
ListFullServers(ListFullServers),
GetServerState(GetServerState),
GetPeripheryVersion(GetPeripheryVersion),
GetDockerContainers(GetDockerContainers),
@@ -81,55 +104,53 @@ enum ReadRequest {
GetDockerNetworks(GetDockerNetworks),
GetServerActionState(GetServerActionState),
GetHistoricalServerStats(GetHistoricalServerStats),
GetAvailableAccounts(GetAvailableAccounts),
GetAvailableSecrets(GetAvailableSecrets),
ListServers(ListServers),
ListFullServers(ListFullServers),
// ==== DEPLOYMENT ====
GetDeploymentsSummary(GetDeploymentsSummary),
GetDeployment(GetDeployment),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
GetDeploymentContainer(GetDeploymentContainer),
GetDeploymentActionState(GetDeploymentActionState),
GetDeploymentStats(GetDeploymentStats),
GetLog(GetLog),
SearchLog(SearchLog),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
// ==== BUILD ====
GetBuildsSummary(GetBuildsSummary),
GetBuild(GetBuild),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
GetBuildActionState(GetBuildActionState),
GetBuildMonthlyStats(GetBuildMonthlyStats),
GetBuildVersions(GetBuildVersions),
ListBuildVersions(ListBuildVersions),
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
#[to_string_resolver]
ListGithubOrganizations(ListGithubOrganizations),
#[to_string_resolver]
ListDockerOrganizations(ListDockerOrganizations),
// ==== REPO ====
GetReposSummary(GetReposSummary),
GetRepo(GetRepo),
GetRepoActionState(GetRepoActionState),
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
ListRepos(ListRepos),
ListFullRepos(ListFullRepos),
GetRepoActionState(GetRepoActionState),
// ==== SYNC ====
GetResourceSyncsSummary(GetResourceSyncsSummary),
GetResourceSync(GetResourceSync),
GetResourceSyncActionState(GetResourceSyncActionState),
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
ListResourceSyncs(ListResourceSyncs),
ListFullResourceSyncs(ListFullResourceSyncs),
GetResourceSyncActionState(GetResourceSyncActionState),
// ==== BUILDER ====
GetBuildersSummary(GetBuildersSummary),
GetBuilder(GetBuilder),
ListBuilders(ListBuilders),
ListFullBuilders(ListFullBuilders),
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
// ==== ALERTER ====
GetAlertersSummary(GetAlertersSummary),
@@ -198,36 +219,344 @@ async fn handler(
Ok((TypedHeader(ContentType::json()), res?))
}
impl Resolve<GetVersion, User> for State {
#[instrument(name = "GetVersion", level = "debug", skip(self))]
async fn resolve(
&self,
GetVersion {}: GetVersion,
_: User,
) -> anyhow::Result<GetVersionResponse> {
Ok(GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
}
}
fn version() -> &'static String {
static VERSION: OnceLock<String> = OnceLock::new();
VERSION.get_or_init(|| {
serde_json::to_string(&GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
.context("failed to serialize GetVersionResponse")
.unwrap()
})
}
impl ResolveToString<GetVersion, User> for State {
async fn resolve_to_string(
&self,
GetVersion {}: GetVersion,
_: User,
) -> anyhow::Result<String> {
Ok(version().to_string())
}
}
impl Resolve<GetCoreInfo, User> for State {
#[instrument(name = "GetCoreInfo", level = "debug", skip(self))]
async fn resolve(
&self,
GetCoreInfo {}: GetCoreInfo,
_: User,
) -> anyhow::Result<GetCoreInfoResponse> {
let config = core_config();
Ok(GetCoreInfoResponse {
title: config.title.clone(),
monitoring_interval: config.monitoring_interval,
github_webhook_base_url: config
.github_webhook_base_url
.clone()
.unwrap_or_else(|| config.host.clone()),
transparent_mode: config.transparent_mode,
ui_write_disabled: config.ui_write_disabled,
})
}
}
fn core_info() -> &'static String {
static CORE_INFO: OnceLock<String> = OnceLock::new();
CORE_INFO.get_or_init(|| {
let config = core_config();
let info = GetCoreInfoResponse {
title: config.title.clone(),
monitoring_interval: config.monitoring_interval,
webhook_base_url: config
.webhook_base_url
.clone()
.unwrap_or_else(|| config.host.clone()),
transparent_mode: config.transparent_mode,
ui_write_disabled: config.ui_write_disabled,
github_webhook_owners: config
.github_webhook_app
.installations
.iter()
.map(|i| i.namespace.to_string())
.collect(),
};
serde_json::to_string(&info)
.context("failed to serialize GetCoreInfoResponse")
.unwrap()
})
}
impl ResolveToString<GetCoreInfo, User> for State {
async fn resolve_to_string(
&self,
GetCoreInfo {}: GetCoreInfo,
_: User,
) -> anyhow::Result<String> {
Ok(core_info().to_string())
}
}
fn ecr_labels() -> &'static String {
static ECR_LABELS: OnceLock<String> = OnceLock::new();
ECR_LABELS.get_or_init(|| {
serde_json::to_string(
&core_config()
.aws_ecr_registries
.iter()
.map(|reg| reg.label.clone())
.collect::<Vec<_>>(),
)
.context("failed to serialize ecr registries")
.unwrap()
})
}
impl ResolveToString<ListAwsEcrLabels, User> for State {
async fn resolve_to_string(
&self,
ListAwsEcrLabels {}: ListAwsEcrLabels,
_: User,
) -> anyhow::Result<String> {
Ok(ecr_labels().to_string())
}
}
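The three resolvers above share one pattern: serialize the response once into a process-wide static guarded by OnceLock, then hand the cached string back on every request. A minimal, dependency-free sketch of that pattern (hypothetical names, plain format! standing in for the serde_json serialization):

use std::sync::OnceLock;

// Sketch only: build an expensive response string once and reuse it for the process lifetime.
fn cached_version_json() -> &'static String {
    static VERSION_JSON: OnceLock<String> = OnceLock::new();
    VERSION_JSON.get_or_init(|| {
        // The real resolvers call serde_json::to_string on the response struct here.
        format!("{{\"version\":\"{}\"}}", env!("CARGO_PKG_VERSION"))
    })
}

fn main() {
    let a = cached_version_json();
    let b = cached_version_json();
    // The init closure ran once; both calls return the same allocation.
    assert!(std::ptr::eq(a, b));
    println!("{a}");
}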
impl Resolve<ListSecrets, User> for State {
async fn resolve(
&self,
ListSecrets { target }: ListSecrets,
_: User,
) -> anyhow::Result<ListSecretsResponse> {
let mut secrets = core_config()
.secrets
.keys()
.cloned()
.collect::<HashSet<_>>();
if let Some(target) = target {
let server_id = match target {
ResourceTarget::Server(id) => Some(id),
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => Some(config.server_id),
BuilderConfig::Aws(config) => {
secrets.extend(config.secrets);
None
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
};
if let Some(id) = server_id {
let server = resource::get::<Server>(&id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListSecrets {})
.await
.with_context(|| {
format!(
"failed to get secrets from server {}",
server.name
)
})?;
secrets.extend(more);
}
}
let mut secrets = secrets.into_iter().collect::<Vec<_>>();
secrets.sort();
Ok(secrets)
}
}
impl Resolve<ListGitProviders, User> for State {
async fn resolve(
&self,
ListGitProviders { target }: ListGitProviders,
user: User,
) -> anyhow::Result<ListGitProvidersResponse> {
let mut providers = core_config().git_providers.clone();
if let Some(target) = target {
match target {
ResourceTarget::Server(id) => {
merge_git_providers_for_server(&mut providers, &id).await?;
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => {
merge_git_providers_for_server(
&mut providers,
&config.server_id,
)
.await?;
}
BuilderConfig::Aws(config) => {
merge_git_providers(
&mut providers,
config.git_providers,
);
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
}
}
let (builds, repos, syncs) = tokio::try_join!(
resource::list_full_for_user::<Build>(
Default::default(),
&user
),
resource::list_full_for_user::<Repo>(Default::default(), &user),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user
),
)?;
for build in builds {
if !providers
.iter()
.any(|provider| provider.domain == build.config.git_provider)
{
providers.push(GitProvider {
domain: build.config.git_provider,
https: build.config.git_https,
accounts: Default::default(),
});
}
}
for repo in repos {
if !providers
.iter()
.any(|provider| provider.domain == repo.config.git_provider)
{
providers.push(GitProvider {
domain: repo.config.git_provider,
https: repo.config.git_https,
accounts: Default::default(),
});
}
}
for sync in syncs {
if !providers
.iter()
.any(|provider| provider.domain == sync.config.git_provider)
{
providers.push(GitProvider {
domain: sync.config.git_provider,
https: sync.config.git_https,
accounts: Default::default(),
});
}
}
providers.sort();
Ok(providers)
}
}
impl Resolve<ListDockerRegistries, User> for State {
async fn resolve(
&self,
ListDockerRegistries { target }: ListDockerRegistries,
_: User,
) -> anyhow::Result<ListDockerRegistriesResponse> {
let mut registries = core_config().docker_registries.clone();
if let Some(target) = target {
match target {
ResourceTarget::Server(id) => {
merge_docker_registries_for_server(&mut registries, &id)
.await?;
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => {
merge_docker_registries_for_server(
&mut registries,
&config.server_id,
)
.await?;
}
BuilderConfig::Aws(config) => {
merge_docker_registries(
&mut registries,
config.docker_registries,
);
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
}
}
registries.sort();
Ok(registries)
}
}
async fn merge_git_providers_for_server(
providers: &mut Vec<GitProvider>,
server_id: &str,
) -> anyhow::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListGitProviders {})
.await
.with_context(|| {
format!(
"failed to get git providers from server {}",
server.name
)
})?;
merge_git_providers(providers, more);
Ok(())
}
fn merge_git_providers(
providers: &mut Vec<GitProvider>,
more: Vec<GitProvider>,
) {
for incoming_provider in more {
if let Some(provider) = providers
.iter_mut()
.find(|provider| provider.domain == incoming_provider.domain)
{
for account in incoming_provider.accounts {
if !provider.accounts.contains(&account) {
provider.accounts.push(account);
}
}
} else {
providers.push(incoming_provider);
}
}
}
async fn merge_docker_registries_for_server(
registries: &mut Vec<DockerRegistry>,
server_id: &str,
) -> anyhow::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListDockerRegistries {})
.await
.with_context(|| {
format!(
"failed to get docker registries from server {}",
server.name
)
})?;
merge_docker_registries(registries, more);
Ok(())
}
fn merge_docker_registries(
registries: &mut Vec<DockerRegistry>,
more: Vec<DockerRegistry>,
) {
for incoming_registry in more {
if let Some(registry) = registries
.iter_mut()
.find(|registry| registry.domain == incoming_registry.domain)
{
for account in incoming_registry.accounts {
if !registry.accounts.contains(&account) {
registry.accounts.push(account);
}
}
} else {
registries.push(incoming_registry);
}
}
}
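Both merge helpers above perform the same domain-keyed union: when an incoming provider's domain already exists, only the accounts not already present are appended; otherwise the whole provider is pushed. A small sketch of that merge with a simplified stand-in type (the real GitProvider / DockerRegistry structs carry more fields):

// Simplified stand-in for GitProvider / DockerRegistry, illustration only.
#[derive(Debug, Clone, PartialEq)]
struct Provider {
    domain: String,
    accounts: Vec<String>,
}

// Domain-keyed union: extend an existing domain's accounts without duplicates,
// otherwise push the incoming provider wholesale.
fn merge(providers: &mut Vec<Provider>, more: Vec<Provider>) {
    for incoming in more {
        if let Some(existing) =
            providers.iter_mut().find(|p| p.domain == incoming.domain)
        {
            for account in incoming.accounts {
                if !existing.accounts.contains(&account) {
                    existing.accounts.push(account);
                }
            }
        } else {
            providers.push(incoming);
        }
    }
}

fn main() {
    let mut providers = vec![Provider {
        domain: "github.com".into(),
        accounts: vec!["org-a".into()],
    }];
    merge(
        &mut providers,
        vec![Provider {
            domain: "github.com".into(),
            accounts: vec!["org-a".into(), "org-b".into()],
        }],
    );
    assert_eq!(
        providers[0].accounts,
        vec!["org-a".to_string(), "org-b".to_string()]
    );
}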

View File

@@ -44,7 +44,7 @@ impl Resolve<GetPermissionLevel, User> for State {
return Ok(PermissionLevel::Write);
}
let (variant, id) = target.extract_variant_id();
get_user_permission_on_resource(&user.id, variant, id).await
get_user_permission_on_resource(&user, variant, id).await
}
}

View File

@@ -2,6 +2,7 @@ use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{Repo, RepoActionState, RepoListItem, RepoState},
user::User,
@@ -10,8 +11,9 @@ use monitor_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, repo_state_cache, State},
state::{action_states, github_client, repo_state_cache, State},
};
impl Resolve<GetRepo, User> for State {
@@ -118,3 +120,88 @@ impl Resolve<GetReposSummary, User> for State {
Ok(res)
}
}
impl Resolve<GetRepoWebhooksEnabled, User> for State {
async fn resolve(
&self,
GetRepoWebhooksEnabled { repo }: GetRepoWebhooksEnabled,
user: User,
) -> anyhow::Result<GetRepoWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
});
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Read,
)
.await?;
if repo.config.git_provider != "github.com"
|| repo.config.repo.is_empty()
{
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
});
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
});
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let clone_url =
format!("{host}/listener/github/repo/{}/clone", repo.id);
let pull_url =
format!("{host}/listener/github/repo/{}/pull", repo.id);
let mut clone_enabled = false;
let mut pull_enabled = false;
for webhook in webhooks {
if webhook.active && webhook.config.url == clone_url {
clone_enabled = true
}
if webhook.active && webhook.config.url == pull_url {
pull_enabled = true
}
}
Ok(GetRepoWebhooksEnabledResponse {
managed: true,
clone_enabled,
pull_enabled,
})
}
}

View File

@@ -1,5 +1,5 @@
use std::{
collections::{HashMap, HashSet},
collections::HashMap,
sync::{Arc, OnceLock},
};
@@ -23,12 +23,11 @@ use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use periphery_client::api::{self, GetAccountsResponse};
use periphery_client::api as periphery;
use resolver_api::{Resolve, ResolveToString};
use tokio::sync::Mutex;
use crate::{
config::core_config,
helpers::periphery_client,
resource,
state::{action_states, db_client, server_status_cache, State},
@@ -192,7 +191,7 @@ impl ResolveToString<GetSystemInformation, User> for State {
}
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemInformation {})
.request(periphery::stats::GetSystemInformation {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
@@ -259,7 +258,7 @@ impl ResolveToString<GetSystemProcesses, User> for State {
}
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemProcesses {})
.request(periphery::stats::GetSystemProcesses {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
@@ -342,7 +341,7 @@ impl Resolve<GetDockerImages, User> for State {
)
.await?;
periphery_client(&server)?
.request(api::build::GetImageList {})
.request(periphery::build::GetImageList {})
.await
}
}
@@ -360,7 +359,7 @@ impl Resolve<GetDockerNetworks, User> for State {
)
.await?;
periphery_client(&server)?
.request(api::network::GetNetworkList {})
.request(periphery::network::GetNetworkList {})
.await
}
}
@@ -378,74 +377,7 @@ impl Resolve<GetDockerContainers, User> for State {
)
.await?;
periphery_client(&server)?
.request(api::container::GetContainerList {})
.request(periphery::container::GetContainerList {})
.await
}
}
impl Resolve<GetAvailableAccounts, User> for State {
async fn resolve(
&self,
GetAvailableAccounts { server }: GetAvailableAccounts,
user: User,
) -> anyhow::Result<GetAvailableAccountsResponse> {
let (github, docker) = match server {
Some(server) => {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let GetAccountsResponse { github, docker } =
periphery_client(&server)?
.request(api::GetAccounts {})
.await
.context("failed to get accounts from periphery")?;
(github, docker)
}
None => Default::default(),
};
let mut github_set = HashSet::<String>::new();
github_set.extend(core_config().github_accounts.keys().cloned());
github_set.extend(github);
let mut github = github_set.into_iter().collect::<Vec<_>>();
github.sort();
let mut docker_set = HashSet::<String>::new();
docker_set.extend(core_config().docker_accounts.keys().cloned());
docker_set.extend(docker);
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
docker.sort();
let res = GetAvailableAccountsResponse { github, docker };
Ok(res)
}
}
impl Resolve<GetAvailableSecrets, User> for State {
async fn resolve(
&self,
GetAvailableSecrets { server }: GetAvailableSecrets,
user: User,
) -> anyhow::Result<GetAvailableSecretsResponse> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let mut secrets = periphery_client(&server)?
.request(api::GetSecrets {})
.await
.context("failed to get accounts from periphery")?;
secrets.sort();
Ok(secrets)
}
}

View File

@@ -1,6 +1,5 @@
use std::str::FromStr;
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::*,
entities::{
@@ -8,11 +7,11 @@ use monitor_client::{
update::ResourceTargetVariant, user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -58,26 +57,21 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
GetServerTemplatesSummary {}: GetServerTemplatesSummary,
user: User,
) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::ServerTemplate,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
let query = match get_resource_ids_for_user(
&user,
ResourceTargetVariant::ServerTemplate,
)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
let total = db_client()
.await
.server_templates
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all server template documents")?;
let res = GetServerTemplatesSummaryResponse {

View File

@@ -2,6 +2,7 @@ use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
sync::{
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
@@ -13,8 +14,11 @@ use monitor_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, resource_sync_state_cache, State},
state::{
action_states, github_client, resource_sync_state_cache, State,
},
};
impl Resolve<GetResourceSync, User> for State {
@@ -137,3 +141,88 @@ impl Resolve<GetResourceSyncsSummary, User> for State {
Ok(res)
}
}
impl Resolve<GetSyncWebhooksEnabled, User> for State {
async fn resolve(
&self,
GetSyncWebhooksEnabled { sync }: GetSyncWebhooksEnabled,
user: User,
) -> anyhow::Result<GetSyncWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
};
let sync = resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Read,
)
.await?;
if sync.config.git_provider != "github.com"
|| sync.config.repo.is_empty()
{
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
}
let mut split = sync.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let refresh_url =
format!("{host}/listener/github/sync/{}/refresh", sync.id);
let sync_url =
format!("{host}/listener/github/sync/{}/sync", sync.id);
let mut refresh_enabled = false;
let mut sync_enabled = false;
for webhook in webhooks {
if webhook.active && webhook.config.url == refresh_url {
refresh_enabled = true
}
if webhook.active && webhook.config.url == sync_url {
sync_enabled = true
}
}
Ok(GetSyncWebhooksEnabledResponse {
managed: true,
refresh_enabled,
sync_enabled,
})
}
}

View File

@@ -230,7 +230,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
// replace server id of builder
if let BuilderConfig::Server(config) = &mut builder.config {
config.server_id.clone_from(
names.servers.get(&id).unwrap_or(&String::new()),
names
.servers
.get(&config.server_id)
.unwrap_or(&String::new()),
)
}
res
@@ -309,7 +312,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
};
}
add_user_groups(user_groups, &mut res, &user)
add_user_groups(user_groups, &mut res, &names, &user)
.await
.context("failed to add user groups")?;
@@ -421,6 +424,8 @@ struct ResourceNames {
deployments: HashMap<String, String>,
procedures: HashMap<String, String>,
syncs: HashMap<String, String>,
alerters: HashMap<String, String>,
templates: HashMap<String, String>,
}
impl ResourceNames {
@@ -475,6 +480,18 @@ impl ResourceNames {
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
alerters: find_collect(&db.alerters, None, None)
.await
.context("failed to get all alerters")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
templates: find_collect(&db.server_templates, None, None)
.await
.context("failed to get all server templates")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
})
}
}
@@ -482,6 +499,7 @@ impl ResourceNames {
async fn add_user_groups(
user_groups: Vec<String>,
res: &mut ResourcesToml,
names: &ResourceNames,
user: &User,
) -> anyhow::Result<()> {
let db = db_client().await;
@@ -509,9 +527,43 @@ async fn add_user_groups(
)
.await?
.into_iter()
.map(|permission| PermissionToml {
target: permission.resource_target,
level: permission.level,
.map(|mut permission| {
match &mut permission.resource_target {
ResourceTarget::Build(id) => {
*id = names.builds.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = names.builders.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id =
names.deployments.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = names.servers.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = names.repos.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = names.alerters.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id =
names.procedures.get(id).cloned().unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = names.templates.get(id).cloned().unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = names.syncs.get(id).cloned().unwrap_or_default()
}
ResourceTarget::System(_) => {}
}
PermissionToml {
target: permission.resource_target,
level: permission.level,
}
})
.collect();
res.user_groups.push(UserGroupToml {
@@ -521,6 +573,7 @@ async fn add_user_groups(
.into_iter()
.filter_map(|user_id| usernames.get(&user_id).cloned())
.collect(),
all: ug.all,
permissions,
});
}

View File

@@ -29,7 +29,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -45,58 +45,124 @@ impl Resolve<ListUpdates, User> for State {
let query = if user.admin || core_config().transparent_mode {
query
} else {
let server_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Server,
)
.await?;
let server_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Server,
)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Deployment,
)
.await?;
let deployment_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Deployment,
)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let build_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Build,
)
.await?;
let build_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Build,
)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Repo,
)
.await?;
let repo_query =
get_resource_ids_for_user(&user, ResourceTargetVariant::Repo)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Procedure,
)
.await?;
let procedure_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Procedure,
)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let builder_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Builder,
)
.await?;
let builder_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Builder,
)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?;
let alerter_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Alerter,
)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::ServerTemplate,
)
.await?;
let server_template_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::ServerTemplate,
)
.await?
.map(|ids| {
doc! {
"target.type": "ServerTemplate", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
let resource_sync_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::ResourceSync,
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = query.unwrap_or_default();
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
{ "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
{ "target.type": "Build", "target.id": { "$in": &build_ids } },
{ "target.type": "Repo", "target.id": { "$in": &repo_ids } },
{ "target.type": "Procedure", "target.id": { "$in": &procedure_ids } },
{ "target.type": "Builder", "target.id": { "$in": &builder_ids } },
{ "target.type": "Alerter", "target.id": { "$in": &alerter_ids } },
{ "target.type": "ServerTemplate", "target.id": { "$in": &server_template_ids } },
server_query,
build_query,
deployment_query,
repo_query,
procedure_query,
alerter_query,
builder_query,
server_template_query,
resource_sync_query,
]
});
query.into()
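The rework above replaces flat `$in` id arrays with one sub-filter per resource variant: an explicit id list narrows on target.id, while None (the user has blanket access to that variant, e.g. via `all` permissions) degrades to a type-only filter. A sketch of that per-variant filter, assuming the bson crate; the helper name is illustrative, not from this repo:

use bson::{doc, Document};

// Build the per-variant filter that feeds the $or above.
fn variant_filter(variant: &str, ids: Option<Vec<String>>) -> Document {
    match ids {
        Some(ids) => doc! { "target.type": variant, "target.id": { "$in": ids } },
        None => doc! { "target.type": variant },
    }
}

fn main() {
    let query = doc! {
        "$or": [
            variant_filter("Server", Some(vec!["id-a".into(), "id-b".into()])),
            variant_filter("Deployment", None),
        ]
    };
    println!("{query}");
}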

View File

@@ -1,9 +1,10 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::read::{
GetUsername, GetUsernameResponse, ListApiKeys,
ListApiKeysForServiceUser, ListApiKeysForServiceUserResponse,
ListApiKeysResponse, ListUsers, ListUsersResponse,
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
ListApiKeys, ListApiKeysForServiceUser,
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
ListUsers, ListUsersResponse,
},
entities::user::{User, UserConfig},
};
@@ -14,7 +15,10 @@ use mungos::{
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
use crate::{
helpers::query::get_user,
state::{db_client, State},
};
impl Resolve<GetUsername, User> for State {
async fn resolve(
@@ -40,6 +44,19 @@ impl Resolve<GetUsername, User> for State {
}
}
impl Resolve<FindUser, User> for State {
async fn resolve(
&self,
FindUser { user }: FindUser,
admin: User,
) -> anyhow::Result<FindUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
get_user(&user).await
}
}
impl Resolve<ListUsers, User> for State {
async fn resolve(
&self,
@@ -87,22 +104,21 @@ impl Resolve<ListApiKeys, User> for State {
impl Resolve<ListApiKeysForServiceUser, User> for State {
async fn resolve(
&self,
ListApiKeysForServiceUser { user_id }: ListApiKeysForServiceUser,
ListApiKeysForServiceUser { user }: ListApiKeysForServiceUser,
admin: User,
) -> anyhow::Result<ListApiKeysForServiceUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query db for users")?
.context("user at id not found")?;
let user = get_user(&user).await?;
let UserConfig::Service { .. } = user.config else {
return Err(anyhow!("Given user is not service user"));
};
let api_keys = find_collect(
&db_client().await.api_keys,
doc! { "user_id": user_id },
doc! { "user_id": &user.id },
None,
)
.await

View File

@@ -37,7 +37,7 @@ impl Resolve<GetUserGroup, User> for State {
db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user groups")?
.context("no UserGroup found with given name or id")

View File

@@ -11,7 +11,6 @@ use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_variable,
state::{db_client, State},
};
@@ -32,16 +31,12 @@ impl Resolve<ListVariables, User> for State {
ListVariables {}: ListVariables,
_: User,
) -> anyhow::Result<ListVariablesResponse> {
let variables = find_collect(
find_collect(
&db_client().await.variables,
None,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for variables")?;
Ok(ListVariablesResponse {
variables,
secrets: core_config().secrets.keys().cloned().collect(),
})
.context("failed to query db for variables")
}
}

View File

@@ -11,10 +11,7 @@ use monitor_client::{
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse,
},
entities::{
api_key::ApiKey, monitor_timestamp, update::ResourceTarget,
user::User,
},
entities::{api_key::ApiKey, monitor_timestamp, user::User},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::{derive::Resolver, Resolve, Resolver};
@@ -90,33 +87,21 @@ impl Resolve<PushRecentlyViewed, User> for State {
) -> anyhow::Result<PushRecentlyViewedResponse> {
let user = get_user(&user.id).await?;
let (recents, id, field) = match resource {
ResourceTarget::Server(id) => {
(user.recent_servers, id, "recent_servers")
}
ResourceTarget::Deployment(id) => {
(user.recent_deployments, id, "recent_deployments")
}
ResourceTarget::Build(id) => {
(user.recent_builds, id, "recent_builds")
}
ResourceTarget::Repo(id) => {
(user.recent_repos, id, "recent_repos")
}
ResourceTarget::Procedure(id) => {
(user.recent_procedures, id, "recent_procedures")
}
_ => return Ok(PushRecentlyViewedResponse {}),
};
let mut recents = recents
.into_iter()
.filter(|_id| !id.eq(_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
let update = doc! { field: to_bson(&recents)? };
let (resource_type, id) = resource.extract_variant_id();
let update = match user.recents.get(&resource_type) {
Some(recents) => {
let mut recents = recents
.iter()
.filter(|_id| !id.eq(*_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
doc! { format!("recents.{resource_type}"): to_bson(&recents)? }
}
None => {
doc! { format!("recents.{resource_type}"): [id] }
}
};
update_one_by_id(
&db_client().await.users,
&user.id,
@@ -124,7 +109,9 @@ impl Resolve<PushRecentlyViewed, User> for State {
None,
)
.await
.with_context(|| format!("failed to update {field}"))?;
.with_context(|| {
format!("failed to update recents.{resource_type}")
})?;
Ok(PushRecentlyViewedResponse {})
}
@@ -187,7 +174,7 @@ impl Resolve<CreateApiKey, User> for State {
db_client()
.await
.api_keys
.insert_one(api_key, None)
.insert_one(api_key)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
@@ -208,7 +195,7 @@ impl Resolve<DeleteApiKey, User> for State {
let client = db_client().await;
let key = client
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed at db query")?
.context("no api key with key found")?;
@@ -217,7 +204,7 @@ impl Resolve<DeleteApiKey, User> for State {
}
client
.api_keys
.delete_one(doc! { "key": key.key }, None)
.delete_one(doc! { "key": key.key })
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})
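The PushRecentlyViewed change above collapses the per-type recent_* vectors into a single recents map keyed by resource type; the update itself is a dedupe, cap, and push-to-front. A self-contained sketch of just that list update (the cap value here is assumed, the real constant lives in core):

use std::collections::VecDeque;

// Illustrative cap; the real RECENTLY_VIEWED_MAX may differ.
const RECENTLY_VIEWED_MAX: usize = 10;

// Drop any existing occurrence of the id, keep at most MAX - 1 of the rest,
// then push the new id to the front.
fn push_recent(recents: &[String], id: &str) -> VecDeque<String> {
    let mut recents = recents
        .iter()
        .filter(|existing| existing.as_str() != id)
        .take(RECENTLY_VIEWED_MAX - 1)
        .cloned()
        .collect::<VecDeque<_>>();
    recents.push_front(id.to_string());
    recents
}

fn main() {
    let current = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    let updated = push_recent(&current, "b");
    assert_eq!(
        updated,
        VecDeque::from(vec!["b".to_string(), "a".to_string(), "c".to_string()])
    );
}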

View File

@@ -1,10 +1,24 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::*,
entities::{build::Build, permission::PermissionLevel, user::User},
entities::{
build::{Build, PartialBuildConfig},
config::core::CoreConfig,
permission::PermissionLevel,
user::User,
NoData,
},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{resource, state::State};
use crate::{
config::core_config,
resource,
state::{github_client, State},
};
impl Resolve<CreateBuild, User> for State {
#[instrument(name = "CreateBuild", skip(self, user))]
@@ -56,3 +70,181 @@ impl Resolve<UpdateBuild, User> for State {
resource::update::<Build>(&id, config, &user).await
}
}
impl Resolve<CreateBuildWebhook, User> for State {
#[instrument(name = "CreateBuildWebhook", skip(self, user))]
async fn resolve(
&self,
CreateBuildWebhook { build }: CreateBuildWebhook,
user: User,
) -> anyhow::Result<CreateBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Write,
)
.await?;
if build.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo, &request)
.await
.context("failed to create webhook")?;
if !build.config.webhook_enabled {
self
.resolve(
UpdateBuild {
id: build.id,
config: PartialBuildConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update build to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteBuildWebhook, User> for State {
#[instrument(name = "DeleteBuildWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteBuildWebhook { build }: DeleteBuildWebhook,
user: User,
) -> anyhow::Result<DeleteBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Write,
)
.await?;
if build.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if build.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't delete webhook"
));
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}
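CreateBuildWebhook above (and the repo/sync variants further down) all follow the same shape: split the `owner/name` repo field, list the existing hooks, and only create one when no active hook already points at the listener URL. A tiny sketch of just that idempotency check, using a stand-in Webhook type rather than octorust's richer response objects:

// Stand-in for the webhook objects returned by the list call, illustration only.
struct Webhook {
    active: bool,
    url: String,
}

// Create only when no active hook already targets the listener URL.
fn needs_creation(existing: &[Webhook], listener_url: &str) -> bool {
    !existing
        .iter()
        .any(|hook| hook.active && hook.url == listener_url)
}

fn main() {
    let existing = vec![Webhook {
        active: true,
        url: "https://example.com/listener/github/build/123".to_string(),
    }];
    assert!(!needs_creation(
        &existing,
        "https://example.com/listener/github/build/123"
    ));
    assert!(needs_creation(
        &existing,
        "https://example.com/listener/github/build/456"
    ));
}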

View File

@@ -50,6 +50,7 @@ pub enum WriteRequest {
// ==== PERMISSIONS ====
UpdateUserBasePermissions(UpdateUserBasePermissions),
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
// ==== DESCRIPTION ====
@@ -75,6 +76,8 @@ pub enum WriteRequest {
CopyBuild(CopyBuild),
DeleteBuild(DeleteBuild),
UpdateBuild(UpdateBuild),
CreateBuildWebhook(CreateBuildWebhook),
DeleteBuildWebhook(DeleteBuildWebhook),
// ==== BUILDER ====
CreateBuilder(CreateBuilder),
@@ -93,6 +96,8 @@ pub enum WriteRequest {
CopyRepo(CopyRepo),
DeleteRepo(DeleteRepo),
UpdateRepo(UpdateRepo),
CreateRepoWebhook(CreateRepoWebhook),
DeleteRepoWebhook(DeleteRepoWebhook),
// ==== ALERTER ====
CreateAlerter(CreateAlerter),
@@ -112,6 +117,8 @@ pub enum WriteRequest {
DeleteResourceSync(DeleteResourceSync),
UpdateResourceSync(UpdateResourceSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),
// ==== TAG ====
CreateTag(CreateTag),

View File

@@ -3,8 +3,10 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
UpdatePermissionOnTarget, UpdatePermissionOnTargetResponse,
UpdateUserBasePermissions, UpdateUserBasePermissionsResponse,
UpdatePermissionOnResourceType,
UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget,
UpdatePermissionOnTargetResponse, UpdateUserBasePermissions,
UpdateUserBasePermissionsResponse,
},
entities::{
permission::{UserTarget, UserTargetVariant},
@@ -41,6 +43,7 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
if !admin.admin {
return Err(anyhow!("this method is admin only"));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query mongo for user")?
@@ -73,6 +76,73 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
}
}
impl Resolve<UpdatePermissionOnResourceType, User> for State {
#[instrument(
name = "UpdatePermissionOnResourceType",
skip(self, admin)
)]
async fn resolve(
&self,
UpdatePermissionOnResourceType {
user_target,
resource_type,
permission,
}: UpdatePermissionOnResourceType,
admin: User,
) -> anyhow::Result<UpdatePermissionOnResourceTypeResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only"));
}
// Some extra checks if user target is an actual User
if let UserTarget::User(user_id) = &user_target {
let user = get_user(user_id).await?;
if user.admin {
return Err(anyhow!(
"cannot use this method to update other admins permissions"
));
}
if !user.enabled {
return Err(anyhow!("user not enabled"));
}
}
let (user_target_variant, user_target_id) =
extract_user_target_with_validation(&user_target).await?;
let id = ObjectId::from_str(&user_target_id)
.context("id is not ObjectId")?;
let field = format!("all.{resource_type}");
let filter = doc! { "_id": id };
let update = doc! { "$set": { &field: permission.as_ref() } };
match user_target_variant {
UserTargetVariant::User => {
db_client()
.await
.users
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {permission} on db")
})?;
}
UserTargetVariant::UserGroup => {
db_client()
.await
.user_groups
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {permission} on db")
})?;
}
}
Ok(UpdatePermissionOnResourceTypeResponse {})
}
}
impl Resolve<UpdatePermissionOnTarget, User> for State {
#[instrument(name = "UpdatePermissionOnTarget", skip(self, admin))]
async fn resolve(
@@ -129,8 +199,8 @@ impl Resolve<UpdatePermissionOnTarget, User> for State {
"level": permission.as_ref(),
}
},
UpdateOptions::builder().upsert(true).build(),
)
.with_options(UpdateOptions::builder().upsert(true).build())
.await?;
Ok(UpdatePermissionOnTargetResponse {})
@@ -150,7 +220,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for users")?
.context("no matching user found")?
@@ -165,7 +235,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user_groups")?
.context("no matching user_group found")?
@@ -192,7 +262,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builds
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builds")?
.context("no matching build found")?
@@ -207,7 +277,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builders
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builders")?
.context("no matching builder found")?
@@ -222,7 +292,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.deployments
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for deployments")?
.context("no matching deployment found")?
@@ -237,7 +307,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.servers
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for servers")?
.context("no matching server found")?
@@ -252,7 +322,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.repos
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for repos")?
.context("no matching repo found")?
@@ -267,7 +337,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.alerters
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for alerters")?
.context("no matching alerter found")?
@@ -282,7 +352,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.procedures
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for procedures")?
.context("no matching procedure found")?
@@ -297,7 +367,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.server_templates
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for server templates")?
.context("no matching server template found")?
@@ -312,7 +382,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.resource_syncs
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?

View File

@@ -1,10 +1,24 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::*,
entities::{permission::PermissionLevel, repo::Repo, user::User},
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{PartialRepoConfig, Repo},
user::User,
NoData,
},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{resource, state::State};
use crate::{
config::core_config,
resource,
state::{github_client, State},
};
impl Resolve<CreateRepo, User> for State {
#[instrument(name = "CreateRepo", skip(self, user))]
@@ -56,3 +70,196 @@ impl Resolve<UpdateRepo, User> for State {
resource::update::<Repo>(&id, config, &user).await
}
}
impl Resolve<CreateRepoWebhook, User> for State {
#[instrument(name = "CreateRepoWebhook", skip(self, user))]
async fn resolve(
&self,
CreateRepoWebhook { repo, action }: CreateRepoWebhook,
user: User,
) -> anyhow::Result<CreateRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Write,
)
.await?;
if repo.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo_name, &request)
.await
.context("failed to create webhook")?;
if !repo.config.webhook_enabled {
self
.resolve(
UpdateRepo {
id: repo.id,
config: PartialRepoConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update repo to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteRepoWebhook, User> for State {
#[instrument(name = "DeleteRepoWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteRepoWebhook { repo, action }: DeleteRepoWebhook,
user: User,
) -> anyhow::Result<DeleteRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Write,
)
.await?;
if repo.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if repo.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo_name, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
@@ -13,7 +14,6 @@ use monitor_client::{
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
@@ -112,8 +112,10 @@ impl Resolve<CreateNetwork, User> for State {
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("create network", serialize_error_pretty(&e)),
Err(e) => update.push_error_log(
"create network",
format_serror(&e.context("failed to create network").into()),
),
};
update.finalize();
@@ -149,8 +151,10 @@ impl Resolve<DeleteNetwork, User> for State {
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("delete network", serialize_error_pretty(&e)),
Err(e) => update.push_error_log(
"delete network",
format_serror(&e.context("failed to delete network").into()),
),
};
update.finalize();

View File

@@ -51,17 +51,14 @@ impl Resolve<CreateServiceUser, User> for State {
create_server_permissions: false,
create_build_permissions: false,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
};
user.id = db_client()
.await
.users
.insert_one(&user, None)
.insert_one(&user)
.await
.context("failed to create service user on db")?
.inserted_id
@@ -91,7 +88,7 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
let db = db_client().await;
let service_user = db
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("no user with given username")?;
@@ -102,12 +99,11 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
.update_one(
doc! { "username": &username },
doc! { "$set": { "config.data.description": description } },
None,
)
.await
.context("failed to update user on db")?;
db.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("user with username not found")
@@ -155,7 +151,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
let db = db_client().await;
let api_key = db
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
@@ -168,7 +164,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
return Err(anyhow!("user is not service user"));
};
db.api_keys
.delete_one(doc! { "key": key }, None)
.delete_one(doc! { "key": key })
.await
.context("failed to delete api key on db")?;
Ok(DeleteApiKeyForServiceUserResponse {})

View File

@@ -1,4 +1,5 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
@@ -7,6 +8,7 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::Builder,
config::core::CoreConfig,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
@@ -14,23 +16,28 @@ use monitor_client::{
server::{stats::SeverityLevel, Server},
server_template::ServerTemplate,
sync::{
PendingSyncUpdates, PendingSyncUpdatesData,
PendingSyncUpdatesDataErr, PendingSyncUpdatesDataOk,
ResourceSync,
PartialResourceSyncConfig, PendingSyncUpdates,
PendingSyncUpdatesData, PendingSyncUpdatesDataErr,
PendingSyncUpdatesDataOk, ResourceSync,
},
update::ResourceTarget,
user::User,
NoData,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
config::core_config,
helpers::{
alert::send_alerts,
query::get_id_to_tags,
sync::{
deployment,
@@ -38,7 +45,7 @@ use crate::{
},
},
resource,
state::{db_client, State},
state::{db_client, github_client, State},
};
impl Resolve<CreateResourceSync, User> for State {
@@ -234,7 +241,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
message: None,
data: PendingSyncUpdatesData::Err(
PendingSyncUpdatesDataErr {
message: serialize_error_pretty(&e),
message: format_serror(&e.into()),
},
),
},
@@ -261,14 +268,11 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
let Some(existing) = db_client()
.await
.alerts
.find_one(
doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
},
None,
)
.find_one(doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
})
.await
.context("failed to query db for alert")
.inspect_err(|e| warn!("{e:#}"))
@@ -289,11 +293,12 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resolved_ts: None,
};
db.alerts
.insert_one(&alert, None)
.insert_one(&alert)
.await
.context("failed to open existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
.ok();
send_alerts(&[alert]).await;
}
// CLOSE ALERT
(Some(existing), false) => {
@@ -321,3 +326,196 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
crate::resource::get::<ResourceSync>(&sync.id).await
}
}
impl Resolve<CreateSyncWebhook, User> for State {
#[instrument(name = "CreateSyncWebhook", skip(self, user))]
async fn resolve(
&self,
CreateSyncWebhook { sync, action }: CreateSyncWebhook,
user: User,
) -> anyhow::Result<CreateSyncWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let sync = resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Write,
)
.await?;
if sync.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = sync.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
SyncWebhookAction::Refresh => {
format!("{host}/listener/github/sync/{}/refresh", sync.id)
}
SyncWebhookAction::Sync => {
format!("{host}/listener/github/sync/{}/sync", sync.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo, &request)
.await
.context("failed to create webhook")?;
if !sync.config.webhook_enabled {
self
.resolve(
UpdateResourceSync {
id: sync.id,
config: PartialResourceSyncConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update sync to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteSyncWebhook, User> for State {
#[instrument(name = "DeleteSyncWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteSyncWebhook { sync, action }: DeleteSyncWebhook,
user: User,
) -> anyhow::Result<DeleteSyncWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let sync = resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Write,
)
.await?;
if sync.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if sync.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = sync.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Sync repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
SyncWebhookAction::Refresh => {
format!("{host}/listener/github/sync/{}/refresh", sync.id)
}
SyncWebhookAction::Sync => {
format!("{host}/listener/github/sync/{}/sync", sync.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}
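Both handlers above key the GitHub webhook off its configured payload URL. A minimal sketch of how that URL is derived, with placeholder values (the real host comes from webhook_base_url, falling back to host in the core config):
let host = "https://monitor.example.com"; // placeholder: webhook_base_url or host
let sync_id = "6650aa0012345678"; // placeholder sync id
let refresh_url = format!("{host}/listener/github/sync/{sync_id}/refresh");
let sync_url = format!("{host}/listener/github/sync/{sync_id}/sync");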

View File

@@ -46,7 +46,7 @@ impl Resolve<CreateTag, User> for State {
tag.id = db_client()
.await
.tags
.insert_one(&tag, None)
.insert_one(&tag)
.await
.context("failed to create tag on db")?
.inserted_id
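This hunk, and the many like it below, tracks the newer mongodb / mungos driver API: collection methods no longer take a trailing options argument, and options are attached through a builder instead. A rough before/after sketch, assuming the newer fluent API:
// before: options passed positionally
// collection.find_one(doc! { "name": &name }, None).await?;
// after: filter only, options attached via the action builder
let latest = collection
  .find_one(doc! { "name": &name })
  .with_options(
    FindOneOptions::builder().sort(doc! { "start_ts": -1 }).build(),
  )
  .await?;
// an empty Document replaces the old find_one(None, None) "match anything" call
let any = collection.find_one(Document::new()).await?;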

View File

@@ -29,13 +29,14 @@ impl Resolve<CreateUserGroup, User> for State {
let user_group = UserGroup {
id: Default::default(),
users: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
name,
};
let db = db_client().await;
let id = db
.user_groups
.insert_one(user_group, None)
.insert_one(user_group)
.await
.context("failed to create UserGroup on db")?
.inserted_id
@@ -99,7 +100,7 @@ impl Resolve<DeleteUserGroup, User> for State {
.delete_many(doc! {
"user_target.type": "UserGroup",
"user_target.id": id,
}, None)
})
.await
.context("failed to clean up UserGroups permissions. User Group has been deleted")?;
@@ -125,7 +126,7 @@ impl Resolve<AddUserToUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -138,12 +139,11 @@ impl Resolve<AddUserToUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$addToSet": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -171,7 +171,7 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -184,12 +184,11 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$pull": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -229,15 +228,11 @@ impl Resolve<SetUsersInUserGroup, User> for State {
Err(_) => doc! { "name": &user_group },
};
db.user_groups
.update_one(
filter.clone(),
doc! { "$set": { "users": users } },
None,
)
.update_one(filter.clone(), doc! { "$set": { "users": users } })
.await
.context("failed to add user to group on db")?;
.context("failed to set users on user group")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")

View File

@@ -44,7 +44,7 @@ impl Resolve<CreateVariable, User> for State {
db_client()
.await
.variables
.insert_one(&variable, None)
.insert_one(&variable)
.await
.context("failed to create variable on db")?;
@@ -86,7 +86,6 @@ impl Resolve<UpdateVariableValue, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "value": &value } },
None,
)
.await
.context("failed to update variable value on db")?;
@@ -127,7 +126,6 @@ impl Resolve<UpdateVariableDescription, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "description": &description } },
None,
)
.await
.context("failed to update variable description on db")?;
@@ -148,7 +146,7 @@ impl Resolve<DeleteVariable, User> for State {
db_client()
.await
.variables
.delete_one(doc! { "name": &name }, None)
.delete_one(doc! { "name": &name })
.await
.context("failed to delete variable on db")?;

View File

@@ -2,6 +2,7 @@ use anyhow::{anyhow, Context};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::{
monitor_timestamp,
user::{User, UserConfig},
@@ -66,7 +67,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.github_id": &github_id }, None)
.find_one(doc! { "config.data.github_id": &github_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -76,7 +77,7 @@ async fn callback(
None => {
let ts = monitor_timestamp();
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: github_user.login,
@@ -86,11 +87,8 @@ async fn callback(
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Github {
github_id,
avatar: github_user.avatar_url,
@@ -98,7 +96,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -3,6 +3,7 @@ use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::user::{User, UserConfig};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
@@ -75,7 +76,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.google_id": &google_id }, None)
.find_one(doc! { "config.data.google_id": &google_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -85,7 +86,7 @@ async fn callback(
None => {
let ts = unix_timestamp_ms() as i64;
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: google_user
@@ -101,11 +102,8 @@ async fn callback(
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Google {
google_id,
avatar: google_user.picture,
@@ -113,7 +111,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -3,6 +3,7 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::http::HeaderMap;
use mongo_indexed::Document;
use monitor_client::{
api::auth::{
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
@@ -46,7 +47,7 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
let no_users_exist = db_client()
.await
.users
.find_one(None, None)
.find_one(Document::new())
.await?
.is_none();
@@ -61,18 +62,15 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Local { password },
};
let user_id = db_client()
.await
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user")?
.inserted_id
@@ -102,7 +100,7 @@ impl Resolve<LoginLocalUser, HeaderMap> for State {
let user = db_client()
.await
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed at db query for users")?
.with_context(|| {

View File

@@ -127,7 +127,7 @@ pub async fn auth_api_key_get_user_id(
let key = db_client()
.await
.api_keys
.find_one(doc! { "key": key }, None)
.find_one(doc! { "key": key })
.await
.context("failed to query db")?
.context("no api key matching key")?;

View File

@@ -1,9 +1,8 @@
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use aws_config::BehaviorVersion;
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ec2::{
config::Region,
types::{
BlockDeviceMapping, EbsBlockDevice,
InstanceNetworkInterfaceSpecification, InstanceStateChange,

View File

@@ -0,0 +1,82 @@
use anyhow::{anyhow, Context};
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ecr::Client as EcrClient;
use run_command::async_run_command;
#[tracing::instrument(skip(access_key_id, secret_access_key))]
async fn make_ecr_client(
region: String,
access_key_id: &str,
secret_access_key: &str,
) -> EcrClient {
std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
let region = Region::new(region);
let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
.region(region)
.load()
.await;
EcrClient::new(&config)
}
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn maybe_create_repo(
repo: &str,
region: String,
access_key_id: &str,
secret_access_key: &str,
) -> anyhow::Result<()> {
let client =
make_ecr_client(region, access_key_id, secret_access_key).await;
let existing = client
.describe_repositories()
.send()
.await
.context("failed to describe existing repositories")?
.repositories
.unwrap_or_default();
if existing.iter().any(|r| {
if let Some(name) = r.repository_name() {
name == repo
} else {
false
}
}) {
return Ok(());
};
client
.create_repository()
.repository_name(repo)
.send()
.await
.context("failed to create repository")?;
Ok(())
}
/// Gets a token for docker login.
///
/// Requires the aws cli to be installed on the host.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn get_ecr_token(
region: &str,
access_key_id: &str,
secret_access_key: &str,
) -> anyhow::Result<String> {
let log = async_run_command(&format!(
"AWS_ACCESS_KEY_ID={access_key_id} AWS_SECRET_ACCESS_KEY={secret_access_key} aws ecr get-login-password --region {region}"
))
.await;
if log.success() {
Ok(log.stdout)
} else {
Err(
anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr)
.context("failed to get aws ecr login token"),
)
}
}
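get_ecr_token only returns the password emitted by the aws cli; the caller is expected to feed it to a registry login. A hedged sketch of that consumption (the registry URL format is the standard ECR one; the account id and the actual call site are not part of this diff):
let token =
  get_ecr_token("us-east-1", &access_key_id, &secret_access_key).await?;
// <account-id>.dkr.ecr.<region>.amazonaws.com is the standard ECR registry host
let registry = format!("{account_id}.dkr.ecr.us-east-1.amazonaws.com");
let log = async_run_command(&format!(
  "docker login --username AWS --password {token} {registry}"
))
.await;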

View File

@@ -0,0 +1,2 @@
pub mod ec2;
pub mod ecr;

View File

@@ -233,32 +233,30 @@ pub enum HetznerActionStatus {
#[allow(clippy::enum_variant_names)]
pub enum HetznerServerType {
// Shared
#[serde(rename = "cx11")]
SharedIntel1Core2Ram20Disk,
#[serde(rename = "cpx11")]
SharedAmd2Core2Ram40Disk,
#[serde(rename = "cax11")]
SharedArm2Core4Ram40Disk,
#[serde(rename = "cx21")]
#[serde(rename = "cx22")]
SharedIntel2Core4Ram40Disk,
#[serde(rename = "cpx21")]
SharedAmd3Core4Ram80Disk,
#[serde(rename = "cax21")]
SharedArm4Core8Ram80Disk,
#[serde(rename = "cx31")]
SharedIntel2Core8Ram80Disk,
#[serde(rename = "cx32")]
SharedIntel4Core8Ram80Disk,
#[serde(rename = "cpx31")]
SharedAmd4Core8Ram160Disk,
#[serde(rename = "cax31")]
SharedArm8Core16Ram160Disk,
#[serde(rename = "cx41")]
SharedIntel4Core16Ram160Disk,
#[serde(rename = "cx42")]
SharedIntel8Core16Ram160Disk,
#[serde(rename = "cpx41")]
SharedAmd8Core16Ram240Disk,
#[serde(rename = "cax41")]
SharedArm16Core32Ram320Disk,
#[serde(rename = "cx51")]
SharedIntel8Core32Ram240Disk,
#[serde(rename = "cx52")]
SharedIntel16Core32Ram320Disk,
#[serde(rename = "cpx51")]
SharedAmd16Core32Ram360Disk,
// Dedicated

View File

@@ -218,9 +218,6 @@ fn hetzner_server_type(
server_type: HetznerServerType,
) -> common::HetznerServerType {
match server_type {
HetznerServerType::SharedIntel1Core2Ram20Disk => {
common::HetznerServerType::SharedIntel1Core2Ram20Disk
}
HetznerServerType::SharedAmd2Core2Ram40Disk => {
common::HetznerServerType::SharedAmd2Core2Ram40Disk
}
@@ -236,8 +233,8 @@ fn hetzner_server_type(
HetznerServerType::SharedArm4Core8Ram80Disk => {
common::HetznerServerType::SharedArm4Core8Ram80Disk
}
HetznerServerType::SharedIntel2Core8Ram80Disk => {
common::HetznerServerType::SharedIntel2Core8Ram80Disk
HetznerServerType::SharedIntel4Core8Ram80Disk => {
common::HetznerServerType::SharedIntel4Core8Ram80Disk
}
HetznerServerType::SharedAmd4Core8Ram160Disk => {
common::HetznerServerType::SharedAmd4Core8Ram160Disk
@@ -245,8 +242,8 @@ fn hetzner_server_type(
HetznerServerType::SharedArm8Core16Ram160Disk => {
common::HetznerServerType::SharedArm8Core16Ram160Disk
}
HetznerServerType::SharedIntel4Core16Ram160Disk => {
common::HetznerServerType::SharedIntel4Core16Ram160Disk
HetznerServerType::SharedIntel8Core16Ram160Disk => {
common::HetznerServerType::SharedIntel8Core16Ram160Disk
}
HetznerServerType::SharedAmd8Core16Ram240Disk => {
common::HetznerServerType::SharedAmd8Core16Ram240Disk
@@ -254,8 +251,8 @@ fn hetzner_server_type(
HetznerServerType::SharedArm16Core32Ram320Disk => {
common::HetznerServerType::SharedArm16Core32Ram320Disk
}
HetznerServerType::SharedIntel8Core32Ram240Disk => {
common::HetznerServerType::SharedIntel8Core32Ram240Disk
HetznerServerType::SharedIntel16Core32Ram320Disk => {
common::HetznerServerType::SharedIntel16Core32Ram320Disk
}
HetznerServerType::SharedAmd16Core32Ram360Disk => {
common::HetznerServerType::SharedAmd16Core32Ram360Disk

View File

@@ -4,8 +4,9 @@ use anyhow::Context;
use merge_config_files::parse_config_file;
use monitor_client::entities::{
config::core::{
AwsCredentials, CoreConfig, Env, HetznerCredentials, MongoConfig,
OauthCredentials,
AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig,
GithubWebhookAppInstallationConfig, HetznerCredentials,
MongoConfig, OauthCredentials,
},
logger::LogConfig,
};
@@ -36,15 +37,40 @@ pub fn frontend_path() -> &'static String {
pub fn core_config() -> &'static CoreConfig {
static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
CORE_CONFIG.get_or_init(|| {
let env: Env = envy::from_env()
.context("failed to parse core Env")
.unwrap();
let env: Env = match envy::from_env()
.context("failed to parse core Env") {
Ok(env) => env,
Err(e) => {
panic!("{e:#?}");
}
};
let config_path = &env.monitor_config_path;
let config =
parse_config_file::<CoreConfig>(config_path.as_str())
.unwrap_or_else(|e| {
panic!("failed at parsing config at {config_path} | {e:#}")
});
let installations = match (env.monitor_github_webhook_app_installations_ids, env.monitor_github_webhook_app_installations_namespaces) {
(Some(ids), Some(namespaces)) => {
if ids.len() != namespaces.len() {
panic!("MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
}
ids
.into_iter()
.zip(namespaces)
.map(|(id, namespace)| GithubWebhookAppInstallationConfig {
id,
namespace
})
.collect()
},
(Some(_), None) | (None, Some(_)) => {
panic!("Got only one of MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided");
}
(None, None) => {
config.github_webhook_app.installations
}
};
// recreating CoreConfig here makes sure we apply all env overrides.
CoreConfig {
title: env.monitor_title.unwrap_or(config.title),
@@ -69,17 +95,12 @@ pub fn core_config() -> &'static CoreConfig {
keep_alerts_for_days: env
.monitor_keep_alerts_for_days
.unwrap_or(config.keep_alerts_for_days),
github_webhook_secret: env
.monitor_github_webhook_secret
.unwrap_or(config.github_webhook_secret),
github_webhook_base_url: env
.monitor_github_webhook_base_url
.or(config.github_webhook_base_url),
github_organizations: env.monitor_github_organizations
.unwrap_or(config.github_organizations),
docker_organizations: env
.monitor_docker_organizations
.unwrap_or(config.docker_organizations),
webhook_secret: env
.monitor_webhook_secret
.unwrap_or(config.webhook_secret),
webhook_base_url: env
.monitor_webhook_base_url
.or(config.webhook_base_url),
transparent_mode: env
.monitor_transparent_mode
.unwrap_or(config.transparent_mode),
@@ -109,6 +130,15 @@ pub fn core_config() -> &'static CoreConfig {
.monitor_github_oauth_secret
.unwrap_or(config.github_oauth.secret),
},
github_webhook_app: GithubWebhookAppConfig {
app_id: env
.monitor_github_webhook_app_app_id
.unwrap_or(config.github_webhook_app.app_id),
pk_path: env
.monitor_github_webhook_app_pk_path
.unwrap_or(config.github_webhook_app.pk_path),
installations,
},
aws: AwsCredentials {
access_key_id: env
.monitor_aws_access_key_id
@@ -155,8 +185,9 @@ pub fn core_config() -> &'static CoreConfig {
// These can't be overridden on env
secrets: config.secrets,
github_accounts: config.github_accounts,
docker_accounts: config.docker_accounts,
git_providers: config.git_providers,
docker_registries: config.docker_registries,
aws_ecr_registries: config.aws_ecr_registries,
}
})
}
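The two installation env vars are parallel lists that get zipped into installation configs. A hypothetical example of the pairing (the list syntax envy accepts for these vars, and the id type, are assumptions):
// e.g. MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS        -> [1234, 5678]
//      MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES -> ["my-org", "my-user"]
let ids: Vec<i64> = vec![1234, 5678]; // id type assumed
let namespaces = vec!["my-org".to_string(), "my-user".to_string()];
let installations: Vec<GithubWebhookAppInstallationConfig> = ids
  .into_iter()
  .zip(namespaces)
  .map(|(id, namespace)| GithubWebhookAppInstallationConfig { id, namespace })
  .collect();
// a length mismatch between the two lists panics, as in the config loader above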

View File

@@ -112,7 +112,7 @@ impl DbClient {
}
}
async fn resource_collection<T>(
async fn resource_collection<T: Send + Sync>(
db: &Database,
collection_name: &str,
) -> anyhow::Result<Collection<T>> {

View File

@@ -6,11 +6,12 @@ use monitor_client::entities::{
alerter::*,
deployment::DeploymentState,
server::stats::SeverityLevel,
update::ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use slack::types::Block;
use crate::state::db_client;
use crate::{config::core_config, state::db_client};
#[instrument]
pub async fn send_alerts(alerts: &[Alert]) {
@@ -18,20 +19,18 @@ pub async fn send_alerts(alerts: &[Alert]) {
return;
}
let alerters = match find_collect(
let Ok(alerters) = find_collect(
&db_client().await.alerters,
doc! { "config.enabled": true },
None,
)
.await
{
Ok(alerters) => alerters,
Err(e) => {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
);
return;
}
.inspect_err(|e| {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
)
}) else {
return;
};
let handles =
@@ -61,7 +60,12 @@ async fn send_alert(alerters: &[Alerter], alert: &Alert) {
return Ok(());
}
// Don't send if resource target not configured on the alerter
// Don't send if resource is in the blacklist
if alerter.config.except_resources.contains(&alert.target) {
return Ok(());
}
// Don't send if whitelist configured and target is not included
if !alerter.config.resources.is_empty()
&& !alerter.config.resources.contains(&alert.target)
{
@@ -126,7 +130,12 @@ async fn send_slack_alert(
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let (text, blocks): (_, Option<_>) = match &alert.data {
AlertData::ServerUnreachable { name, region, .. } => {
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
match alert.level {
SeverityLevel::Ok => {
@@ -143,10 +152,18 @@ async fn send_slack_alert(
SeverityLevel::Critical => {
let text =
format!("{level} | *{name}*{region} is *unreachable* ❌");
let err = err
.as_ref()
.map(|e| format!("\nerror: {e:#?}"))
.unwrap_or_default();
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} is *unreachable* ❌"
"*{name}*{region} is *unreachable* ❌{err}"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
@@ -155,70 +172,136 @@ async fn send_slack_alert(
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
..
} => {
let region = fmt_region(region);
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨");
let blocks = vec![
Block::header(format!("{level} 🚨")),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%*");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text =
format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿 🚨"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
}
}
AlertData::ContainerStateChange {
name,
server_name,
from,
to,
id,
..
} => {
let to = fmt_docker_container_state(to);
@@ -226,7 +309,11 @@ async fn send_slack_alert(
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: {server_name}\nprevious: {from}"
"server: {server_name}\nprevious: {from}",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
)),
];
(text, blocks.into())
@@ -236,24 +323,39 @@ async fn send_slack_alert(
message,
} => {
let text = format!(
"{level} | Failed to terminated AWS builder instance"
"{level} | Failed to terminated AWS builder instance "
);
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"instance id: **{instance_id}**\n{message}"
"instance id: *{instance_id}*\n{message}"
)),
];
(text, blocks.into())
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let text =
format!("{level} | There are pending resource sync updates");
format!("{level} | Pending resource sync updates on {name}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"sync id: **{id}**\nsync name: **{name}**"
"sync id: *{id}*\nsync name: *{name}*",
)),
Block::section(resource_link(
ResourceTargetVariant::ResourceSync,
id,
)),
];
(text, blocks.into())
}
AlertData::BuildFailed { id, name, version } => {
let text = format!("{level} | Build {name} has failed");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"build id: *{id}*\nbuild name: *{name}*\nversion: v{version}",
)),
Block::section(resource_link(ResourceTargetVariant::Build, id))
];
(text, blocks.into())
}
@@ -286,7 +388,41 @@ fn fmt_docker_container_state(state: &DeploymentState) -> String {
fn fmt_level(level: SeverityLevel) -> &'static str {
match level {
SeverityLevel::Critical => "CRITICAL 🚨",
SeverityLevel::Warning => "WARNING 🚨",
SeverityLevel::Warning => "WARNING ‼️",
SeverityLevel::Ok => "OK ✅",
}
}
fn resource_link(
resource_type: ResourceTargetVariant,
id: &str,
) -> String {
let path = match resource_type {
ResourceTargetVariant::System => unreachable!(),
ResourceTargetVariant::Build => format!("/builds/{id}"),
ResourceTargetVariant::Builder => {
format!("/builders/{id}")
}
ResourceTargetVariant::Deployment => {
format!("/deployments/{id}")
}
ResourceTargetVariant::Server => {
format!("/servers/{id}")
}
ResourceTargetVariant::Repo => format!("/repos/{id}"),
ResourceTargetVariant::Alerter => {
format!("/alerters/{id}")
}
ResourceTargetVariant::Procedure => {
format!("/procedures/{id}")
}
ResourceTargetVariant::ServerTemplate => {
format!("/server-templates/{id}")
}
ResourceTargetVariant::ResourceSync => {
format!("/resource-syncs/{id}")
}
};
format!("{}{path}", core_config().host)
}
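For reference, a hypothetical output of resource_link, assuming a core host of https://monitor.example.com and a placeholder id:
let link =
  resource_link(ResourceTargetVariant::Server, "6615a2d8c71a1f0012345678");
// -> "https://monitor.example.com/servers/6615a2d8c71a1f0012345678"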

View File

@@ -1,13 +1,14 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use mongo_indexed::Document;
use monitor_client::entities::{
permission::{Permission, PermissionLevel, UserTarget},
server::Server,
update::ResourceTarget,
user::User,
};
use mungos::mongodb::bson::doc;
use mungos::mongodb::bson::{doc, Bson};
use periphery_client::PeripheryClient;
use rand::{thread_rng, Rng};
@@ -61,7 +62,6 @@ where
}
}
},
None,
)
.await
.context("failed to remove resource from users recently viewed")
@@ -103,17 +103,34 @@ pub async fn create_permission<T>(
if let Err(e) = db_client()
.await
.permissions
.insert_one(
Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
},
None,
)
.insert_one(Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
})
.await
{
error!("failed to create permission for {target:?} | {e:#}");
};
}
/// Flattens a document only one level deep
///
/// eg `{ config: { label: "yes", thing: { field1: "ok", field2: "ok" } } }` ->
/// `{ "config.label": "yes", "config.thing": { field1: "ok", field2: "ok" } }`
pub fn flatten_document(doc: Document) -> Document {
let mut target = Document::new();
for (outer_field, bson) in doc {
if let Bson::Document(doc) = bson {
for (inner_field, bson) in doc {
target.insert(format!("{outer_field}.{inner_field}"), bson);
}
} else {
target.insert(outer_field, bson);
}
}
target
}
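A small usage sketch for flatten_document, e.g. turning a partial config into an update that only touches the listed top-level fields:
let update = flatten_document(doc! {
  "config": { "label": "yes", "thing": { "field1": "ok", "field2": "ok" } }
});
// update == doc! { "config.label": "yes", "config.thing": { "field1": "ok", "field2": "ok" } }
// useful for update_one(filter, doc! { "$set": update }) so sibling fields
// under "config" are left untouched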

View File

@@ -1,18 +1,24 @@
use std::time::{Duration, Instant};
use anyhow::{anyhow, Context, Ok};
use formatting::{bold, colored, muted, Color};
use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::execute::Execution,
entities::{
procedure::Procedure, update::Update, user::procedure_user,
procedure::Procedure,
update::{Log, Update},
user::procedure_user,
},
};
use mungos::by_id::find_one_by_id;
use resolver_api::Resolve;
use tokio::sync::Mutex;
use crate::{api::execute::ExecuteRequest, state::State};
use crate::{
api::execute::ExecuteRequest,
state::{db_client, State},
};
use super::update::{init_execution_update, update_update};
@@ -49,8 +55,7 @@ pub async fn execute_procedure(
.await
.with_context(|| {
format!(
"{}: failed stage '{}' execution after {:?}",
colored("ERROR", Color::Red),
"failed stage '{}' execution after {:?}",
bold(&stage.name),
timer.elapsed(),
)
@@ -130,10 +135,15 @@ async fn execute_execution(
let ExecuteRequest::RunProcedure(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunProcedure")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RunProcedure"),
&update_id,
)
.await?
}
Execution::RunBuild(req) => {
let req = ExecuteRequest::RunBuild(req);
@@ -141,10 +151,15 @@ async fn execute_execution(
let ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunBuild")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RunBuild"),
&update_id,
)
.await?
}
Execution::Deploy(req) => {
let req = ExecuteRequest::Deploy(req);
@@ -152,10 +167,15 @@ async fn execute_execution(
let ExecuteRequest::Deploy(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at Deploy")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at Deploy"),
&update_id,
)
.await?
}
Execution::StartContainer(req) => {
let req = ExecuteRequest::StartContainer(req);
@@ -163,10 +183,15 @@ async fn execute_execution(
let ExecuteRequest::StartContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StartContainer")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StartContainer"),
&update_id,
)
.await?
}
Execution::StopContainer(req) => {
let req = ExecuteRequest::StopContainer(req);
@@ -174,10 +199,15 @@ async fn execute_execution(
let ExecuteRequest::StopContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StopContainer")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StopContainer"),
&update_id,
)
.await?
}
Execution::StopAllContainers(req) => {
let req = ExecuteRequest::StopAllContainers(req);
@@ -185,10 +215,15 @@ async fn execute_execution(
let ExecuteRequest::StopAllContainers(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StopAllContainers")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StopAllContainers"),
&update_id,
)
.await?
}
Execution::RemoveContainer(req) => {
let req = ExecuteRequest::RemoveContainer(req);
@@ -196,10 +231,15 @@ async fn execute_execution(
let ExecuteRequest::RemoveContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RemoveContainer")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RemoveContainer"),
&update_id,
)
.await?
}
Execution::CloneRepo(req) => {
let req = ExecuteRequest::CloneRepo(req);
@@ -207,10 +247,15 @@ async fn execute_execution(
let ExecuteRequest::CloneRepo(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at CloneRepo")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at CloneRepo"),
&update_id,
)
.await?
}
Execution::PullRepo(req) => {
let req = ExecuteRequest::PullRepo(req);
@@ -218,10 +263,15 @@ async fn execute_execution(
let ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PullRepo")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PullRepo"),
&update_id,
)
.await?
}
Execution::PruneNetworks(req) => {
let req = ExecuteRequest::PruneNetworks(req);
@@ -229,10 +279,15 @@ async fn execute_execution(
let ExecuteRequest::PruneNetworks(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneNetworks")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneNetworks"),
&update_id,
)
.await?
}
Execution::PruneImages(req) => {
let req = ExecuteRequest::PruneImages(req);
@@ -240,10 +295,15 @@ async fn execute_execution(
let ExecuteRequest::PruneImages(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneImages")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneImages"),
&update_id,
)
.await?
}
Execution::PruneContainers(req) => {
let req = ExecuteRequest::PruneContainers(req);
@@ -251,10 +311,15 @@ async fn execute_execution(
let ExecuteRequest::PruneContainers(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneContainers")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneContainers"),
&update_id,
)
.await?
}
Execution::RunSync(req) => {
let req = ExecuteRequest::RunSync(req);
@@ -262,10 +327,15 @@ async fn execute_execution(
let ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunSync")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RunSync"),
&update_id,
)
.await?
}
Execution::Sleep(req) => {
tokio::time::sleep(Duration::from_millis(
@@ -289,6 +359,30 @@ async fn execute_execution(
}
}
/// If the call to .resolve returns Err, the update may not be closed.
/// This will ensure it is closed with error log attached.
async fn handle_resolve_result(
res: anyhow::Result<Update>,
update_id: &str,
) -> anyhow::Result<Update> {
match res {
Ok(res) => Ok(res),
Err(e) => {
let log =
Log::error("execution error", format_serror(&e.into()));
let mut update =
find_one_by_id(&db_client().await.updates, update_id)
.await
.context("failed to query to db")?
.context("no update exists with given id")?;
update.logs.push(log);
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
}
/// ASSUMES FIRST LOG IS ALREADY CREATED
#[instrument(level = "debug")]
async fn add_line_to_update(update: &Mutex<Update>, line: &str) {

View File

@@ -68,12 +68,9 @@ async fn prune_stats() -> anyhow::Result<()> {
let res = db_client()
.await
.stats
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} stats from db", res.deleted_count);
Ok(())
@@ -89,12 +86,9 @@ async fn prune_alerts() -> anyhow::Result<()> {
let res = db_client()
.await
.alerts
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} alerts from db", res.deleted_count);
Ok(())

View File

@@ -11,11 +11,11 @@ use monitor_client::entities::{
tag::Tag,
update::{ResourceTargetVariant, Update},
user::{admin_service_user, User},
user_group::UserGroup,
variable::Variable,
Operation,
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId, Document},
@@ -26,14 +26,18 @@ use mungos::{
use crate::{config::core_config, resource, state::db_client};
#[instrument(level = "debug")]
pub async fn get_user(user_id: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user_id) {
// user: Id or username
pub async fn get_user(user: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user) {
return Ok(user);
}
find_one_by_id(&db_client().await.users, user_id)
db_client()
.await
.users
.find_one(id_or_username_filter(user))
.await
.context("failed to query mongo for user")?
.with_context(|| format!("no user found with id {user_id}"))
.with_context(|| format!("no user found with {user}"))
}
#[instrument(level = "debug")]
@@ -89,7 +93,7 @@ pub async fn get_tag(id_or_name: &str) -> anyhow::Result<Tag> {
db_client()
.await
.tags
.find_one(query, None)
.find_one(query)
.await
.context("failed to query mongo for tag")?
.with_context(|| format!("no tag found matching {id_or_name}"))
@@ -120,10 +124,10 @@ pub async fn get_id_to_tags(
}
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
pub async fn get_user_user_groups(
user_id: &str,
) -> anyhow::Result<Vec<String>> {
let res = find_collect(
) -> anyhow::Result<Vec<UserGroup>> {
find_collect(
&db_client().await.user_groups,
doc! {
"users": user_id
@@ -131,50 +135,84 @@ pub async fn get_user_user_group_ids(
None,
)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|ug| ug.id)
.collect();
.context("failed to query db for user groups")
}
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
user_id: &str,
) -> anyhow::Result<Vec<String>> {
let res = get_user_user_groups(user_id)
.await?
.into_iter()
.map(|ug| ug.id)
.collect();
Ok(res)
}
/// Returns Vec of all queries on permissions that match against the user
/// or any user groups that the user is a part of.
/// Result used with Mongodb '$or'.
#[instrument(level = "debug")]
pub async fn user_target_query(
pub fn user_target_query(
user_id: &str,
user_groups: &[UserGroup],
) -> anyhow::Result<Vec<Document>> {
let mut user_target_query = vec![
doc! { "user_target.type": "User", "user_target.id": user_id },
];
let user_groups = get_user_user_group_ids(user_id)
.await?
.into_iter()
.map(|ug_id| {
doc! {
"user_target.type": "UserGroup", "user_target.id": ug_id,
}
});
let user_groups = user_groups.iter().map(|ug| {
doc! {
"user_target.type": "UserGroup", "user_target.id": &ug.id,
}
});
user_target_query.extend(user_groups);
Ok(user_target_query)
}
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource(
user_id: &str,
user: &User,
resource_variant: ResourceTargetVariant,
resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
let lowest_permission = if core_config().transparent_mode {
if user.admin {
return Ok(PermissionLevel::Write);
}
// Start with base of Read or None
let mut base = if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
};
// Overlay users base on resource variant
if let Some(level) = user.all.get(&resource_variant).cloned() {
if level > base {
base = level;
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any user groups base on resource variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(level) = group.all.get(&resource_variant).cloned() {
if level > base {
base = level;
}
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any specific permissions
let permission = find_collect(
&db_client().await.permissions,
doc! {
"$or": user_target_query(user_id).await?,
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_variant.as_ref(),
"resource_target.id": resource_id
},
@@ -184,7 +222,7 @@ pub async fn get_user_permission_on_resource(
.context("failed to query db for permissions")?
.into_iter()
// get the max permission user has between personal / any user groups
.fold(lowest_permission, |level, permission| {
.fold(base, |level, permission| {
if permission.level > level {
permission.level
} else {
@@ -194,15 +232,39 @@ pub async fn get_user_permission_on_resource(
Ok(permission)
}
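Put together, the resolution above is a max over several layers. A hedged worked example, in evaluation order:
// 1. transparent_mode = false                 -> base = PermissionLevel::None
// 2. user.all[Deployment] = Read              -> base = Read
// 3. some user group's all[Deployment] = Execute -> base = Execute
// 4. a specific permission on the deployment = Write -> fold returns Write
// (user.admin short-circuits to Write before any of this runs)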
/// Returns None if there is no need to filter by resource id (eg transparent mode, or user / user group 'all' access).
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_non_admin(
user_id: &str,
pub async fn get_resource_ids_for_user(
user: &User,
resource_type: ResourceTargetVariant,
) -> anyhow::Result<Vec<String>> {
let permissions = find_collect(
) -> anyhow::Result<Option<Vec<ObjectId>>> {
// Check admin or transparent mode
if user.admin || core_config().transparent_mode {
return Ok(None);
}
// Check user 'all' on variant
if let Some(level) = user.all.get(&resource_type).cloned() {
if level > PermissionLevel::None {
return Ok(None);
}
}
// Check user groups 'all' on variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(level) = group.all.get(&resource_type).cloned() {
if level > PermissionLevel::None {
return Ok(None);
}
}
}
// Get specific ids
let ids = find_collect(
&db_client().await.permissions,
doc! {
"$or": user_target_query(user_id).await?,
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"level": { "$in": ["Read", "Execute", "Write"] }
},
@@ -213,8 +275,12 @@ pub async fn get_resource_ids_for_non_admin(
.into_iter()
.map(|p| p.resource_target.extract_variant_id().1.to_string())
// collect into hashset first to remove any duplicates
.collect::<HashSet<_>>();
Ok(permissions.into_iter().collect())
.collect::<HashSet<_>>()
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
Ok(Some(ids))
}
pub fn id_or_name_filter(id_or_name: &str) -> Document {
@@ -224,6 +290,13 @@ pub fn id_or_name_filter(id_or_name: &str) -> Document {
}
}
pub fn id_or_username_filter(id_or_username: &str) -> Document {
match ObjectId::from_str(id_or_username) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "username": id_or_username },
}
}
pub async fn get_global_variables(
) -> anyhow::Result<HashMap<String, String>> {
Ok(
@@ -240,7 +313,7 @@ pub async fn get_variable(name: &str) -> anyhow::Result<Variable> {
db_client()
.await
.variables
.find_one(doc! { "name": &name }, None)
.find_one(doc! { "name": &name })
.await
.context("failed at call to db")?
.with_context(|| {
@@ -256,12 +329,12 @@ pub async fn get_latest_update(
db_client()
.await
.updates
.find_one(
doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
},
.find_one(doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -4,7 +4,7 @@ use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::{execute::Deploy, read::GetBuildVersions},
api::{execute::Deploy, read::ListBuildVersions},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentImage, DeploymentState,
@@ -423,7 +423,7 @@ fn extract_to_deploy_and_state<'a>(
// Needs to only check config fields that affect docker run
let changed = diff.server_id.is_some()
|| diff.image.is_some()
|| diff.image_registry.is_some()
|| diff.image_registry_account.is_some()
|| diff.skip_secret_interp.is_some()
|| diff.network.is_some()
|| diff.restart.is_some()
@@ -467,7 +467,7 @@ fn extract_to_deploy_and_state<'a>(
None => {
let Some(version) = State
.resolve(
GetBuildVersions {
ListBuildVersions {
build: build_id.to_string(),
limit: Some(1),
..Default::default()
@@ -849,7 +849,7 @@ impl ResourceSync for Deployment {
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: version.clone(),
version: *version,
};
}

View File

@@ -19,20 +19,27 @@ pub async fn get_remote_resources(
String,
)> {
let name = to_monitor_name(&sync.name);
let clone_args: CloneArgs = sync.into();
let mut clone_args: CloneArgs = sync.into();
let config = core_config();
let github_token = clone_args
.github_account
.as_ref()
.map(|account| {
config.github_accounts.get(account).ok_or_else(|| {
anyhow!("did not find github token for account {account}")
let access_token = match (&clone_args.account, &clone_args.provider) {
(None, _) => None,
(Some(_), None) => return Err(anyhow!("Account is configured, but provider is empty")),
(Some(username), Some(provider)) => config
.git_providers
.iter()
.find(|_provider| {
&_provider.domain == provider
})
})
.transpose()?
.cloned();
.and_then(|provider| {
clone_args.https = provider.https;
provider.accounts.iter().find(|account| &account.username == username).map(|account| &account.token)
})
.with_context(|| format!("did not find git token for account {username} | provider: {provider}"))?
.to_owned()
.into(),
};
fs::create_dir_all(&config.sync_directory)
.context("failed to create sync directory")?;
@@ -44,7 +51,7 @@ pub async fn get_remote_resources(
let _lock = lock.lock().await;
let mut logs =
git::clone(clone_args, &config.sync_directory, github_token)
git::clone(clone_args, &config.sync_directory, access_token)
.await
.context("failed to clone resource repo")?;

View File

@@ -7,18 +7,19 @@ use monitor_client::{
read::ListUserTargetPermissions,
write::{
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
UpdatePermissionOnTarget,
UpdatePermissionOnResourceType, UpdatePermissionOnTarget,
},
},
entities::{
permission::UserTarget,
permission::{PermissionLevel, UserTarget},
sync::SyncUpdate,
toml::{PermissionToml, UserGroupToml},
update::{Log, ResourceTarget},
update::{Log, ResourceTarget, ResourceTargetVariant},
user::sync_user,
},
};
use mungos::find::find_collect;
use regex::Regex;
use resolver_api::Resolve;
use crate::state::{db_client, State};
@@ -28,7 +29,7 @@ use super::resource::AllResourcesById;
pub struct UpdateItem {
user_group: UserGroupToml,
update_users: bool,
update_permissions: bool,
all_diff: HashMap<ResourceTargetVariant, PermissionLevel>,
}
pub struct DeleteItem {
@@ -72,19 +73,49 @@ pub async fn get_updates_for_view(
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
user_group
.permissions
.retain(|p| p.level > PermissionLevel::None);
user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
update.to_create += 1;
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("permissions"),
user_group.permissions,
));
if user_group.all.is_empty() {
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("permissions"),
user_group.permissions,
));
} else {
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}\n{}: {:#?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("base permissions"),
user_group.all,
muted("permissions"),
user_group.permissions,
));
}
continue;
}
};
@@ -107,6 +138,7 @@ pub async fn get_updates_for_view(
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.filter(|p| p.level > PermissionLevel::None)
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
@@ -185,22 +217,27 @@ pub async fn get_updates_for_view(
original_users.sort();
user_group.users.sort();
let all_diff = diff_group_all(&original.all, &user_group.all);
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_all = !all_diff.is_empty();
let update_permissions =
user_group.permissions != original_permissions;
// only add log after diff detected
if update_users || update_permissions {
if update_users || update_all || update_permissions {
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: user group: '{}'\n-------------------",
colored("UPDATE", Color::Blue),
bold(&user_group.name),
));
let mut lines = Vec::<String>::new();
if update_users {
let adding = user_group
.users
@@ -230,12 +267,36 @@ pub async fn get_updates_for_view(
muted("adding"),
))
}
if update_all {
let updates = all_diff
.into_iter()
.map(|(variant, (orig, incoming))| {
format!(
"{}: {} {} {}",
bold(variant),
colored(orig, Color::Red),
muted("->"),
colored(incoming, Color::Green)
)
})
.collect::<Vec<_>>()
.join("\n");
lines.push(format!(
"{}: 'base permission'\n{updates}",
muted("field"),
))
}
if update_permissions {
let adding = user_group
.permissions
.iter()
.filter(|permission| {
!original_permissions.contains(permission)
// add if original has no existing permission on the target
!original_permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
@@ -244,10 +305,35 @@ pub async fn get_updates_for_view(
} else {
colored(&adding.join(", "), Color::Green)
};
let updating = user_group
.permissions
.iter()
.filter(|permission| {
// update if original has existing permission on the target with different level
let Some(level) = original_permissions
.iter()
.find(|p| p.target == permission.target)
.map(|p| p.level)
else {
return false;
};
permission.level != level
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let updating = if updating.is_empty() {
String::from("None")
} else {
colored(&updating.join(", "), Color::Blue)
};
let removing = original_permissions
.iter()
.filter(|permission| {
!user_group.permissions.contains(permission)
// remove if incoming has no permission on the target
!user_group
.permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
@@ -257,12 +343,14 @@ pub async fn get_updates_for_view(
colored(&removing.join(", "), Color::Red)
};
lines.push(format!(
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
"{}: 'permissions'\n{}: {removing}\n{}: {updating}\n{}: {adding}",
muted("field"),
muted("removing"),
muted("updating"),
muted("adding"),
))
}
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
@@ -326,6 +414,22 @@ pub async fn get_updates_for_execution(
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
user_group
.permissions
.retain(|p| p.level > PermissionLevel::None);
user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
@@ -352,6 +456,7 @@ pub async fn get_updates_for_execution(
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.filter(|p| p.level > PermissionLevel::None)
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
@@ -430,19 +535,55 @@ pub async fn get_updates_for_execution(
original_users.sort();
user_group.users.sort();
let all_diff = diff_group_all(&original.all, &user_group.all);
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_permissions =
user_group.permissions != original_permissions;
// only push update after failed diff
if update_users || update_permissions {
// Extend permissions with any existing that have no target in incoming
let to_remove = original_permissions
.iter()
.filter(|permission| {
!user_group
.permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| PermissionToml {
target: permission.target.clone(),
level: PermissionLevel::None,
})
.collect::<Vec<_>>();
user_group.permissions.extend(to_remove);
// remove any permissions that already exist on original
user_group.permissions.retain(|permission| {
let Some(level) = original_permissions
.iter()
.find(|p| p.target == permission.target)
.map(|p| p.level)
else {
// not in original, keep it
return true;
};
// keep it if level doesn't match
level != permission.level
});
// only push update after diff detected
if update_users
|| !all_diff.is_empty()
|| !user_group.permissions.is_empty()
{
to_update.push(UpdateItem {
user_group,
update_users,
update_permissions,
all_diff: all_diff
.into_iter()
.map(|(k, (_, v))| (k, v))
.collect(),
});
}
}
@@ -516,6 +657,13 @@ pub async fn run_updates(
&mut has_error,
)
.await;
run_update_all(
user_group.name.clone(),
user_group.all,
&mut log,
&mut has_error,
)
.await;
run_update_permissions(
user_group.name,
user_group.permissions,
@@ -529,7 +677,7 @@ pub async fn run_updates(
for UpdateItem {
user_group,
update_users,
update_permissions,
all_diff,
} in to_update
{
if update_users {
@@ -541,7 +689,16 @@ pub async fn run_updates(
)
.await;
}
if update_permissions {
if !all_diff.is_empty() {
run_update_all(
user_group.name.clone(),
all_diff,
&mut log,
&mut has_error,
)
.await;
}
if !user_group.permissions.is_empty() {
run_update_permissions(
user_group.name,
user_group.permissions,
@@ -616,6 +773,41 @@ async fn set_users(
}
}
async fn run_update_all(
user_group: String,
all_diff: HashMap<ResourceTargetVariant, PermissionLevel>,
log: &mut String,
has_error: &mut bool,
) {
for (resource_type, permission) in all_diff {
if let Err(e) = State
.resolve(
UpdatePermissionOnResourceType {
user_target: UserTarget::UserGroup(user_group.clone()),
resource_type,
permission,
},
sync_user().to_owned(),
)
.await
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set base permissions on {resource_type} in group {} | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' base permissions on {resource_type}",
muted("INFO"),
colored("updated", Color::Blue),
bold(&user_group)
))
}
}
}
async fn run_update_permissions(
user_group: String,
permissions: Vec<PermissionToml>,
@@ -636,17 +828,188 @@ async fn run_update_permissions(
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set permssion in group {} | target: {target:?} | {e:#}",
"\n{}: failed to set permission in group {} | target: {target:?} | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' permissions",
"\n{}: {} user group '{}' permissions | {}: {target:?} | {}: {level}",
muted("INFO"),
colored("updated", Color::Blue),
bold(&user_group)
bold(&user_group),
muted("target"),
muted("level")
))
}
}
}
/// Expands any regex defined targets into the full list
async fn expand_user_group_permissions(
permissions: Vec<PermissionToml>,
all_resources: &AllResourcesById,
) -> anyhow::Result<Vec<PermissionToml>> {
let mut expanded =
Vec::<PermissionToml>::with_capacity(permissions.capacity());
for permission in permissions {
let (variant, id) = permission.target.extract_variant_id();
if id.is_empty() {
continue;
}
if id.starts_with('\\') && id.ends_with('\\') {
let inner = &id[1..(id.len() - 1)];
let regex = Regex::new(inner)
.with_context(|| format!("invalid regex. got: {inner}"))?;
match variant {
ResourceTargetVariant::Build => {
let permissions = all_resources
.builds
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Build(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Builder => {
let permissions = all_resources
.builders
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Builder(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Deployment => {
let permissions = all_resources
.deployments
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Deployment(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Server => {
let permissions = all_resources
.servers
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Server(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Repo => {
let permissions = all_resources
.repos
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Repo(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Alerter => {
let permissions = all_resources
.alerters
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Alerter(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Procedure => {
let permissions = all_resources
.procedures
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Procedure(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::ServerTemplate => {
let permissions = all_resources
.templates
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::ServerTemplate(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::ResourceSync => {
let permissions = all_resources
.syncs
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::ResourceSync(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::System => {}
}
} else {
// No regex
expanded.push(permission);
}
}
Ok(expanded)
}
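A quick illustration of the regex convention handled above: a target id wrapped in backslashes (e.g. `\^api-\`) is treated as a regex over resource names rather than a literal name. A minimal, standalone sketch of that detection and expansion using the `regex` and `anyhow` crates; the `expand` helper and the resource names are made up for illustration:

```rust
use regex::Regex;

/// Made-up helper mirroring the expansion above: ids wrapped in
/// backslashes are treated as a regex over resource names.
fn expand(id: &str, names: &[&str]) -> anyhow::Result<Vec<String>> {
  if id.len() > 1 && id.starts_with('\\') && id.ends_with('\\') {
    let inner = &id[1..(id.len() - 1)];
    let regex = Regex::new(inner)?;
    Ok(
      names
        .iter()
        .filter(|name| regex.is_match(name))
        .map(|name| name.to_string())
        .collect(),
    )
  } else {
    // Not a regex target: pass the literal name through unchanged.
    Ok(vec![id.to_string()])
  }
}

fn main() -> anyhow::Result<()> {
  let deployments = ["api-prod", "api-staging", "worker"];
  // `\^api-\` expands to every deployment whose name starts with "api-".
  assert_eq!(
    expand("\\^api-\\", &deployments)?,
    ["api-prod", "api-staging"]
  );
  // A plain name stays a single literal target.
  assert_eq!(expand("worker", &deployments)?, ["worker"]);
  Ok(())
}
```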
type AllDiff =
HashMap<ResourceTargetVariant, (PermissionLevel, PermissionLevel)>;
/// diffs user_group.all
fn diff_group_all(
original: &HashMap<ResourceTargetVariant, PermissionLevel>,
incoming: &HashMap<ResourceTargetVariant, PermissionLevel>,
) -> AllDiff {
let mut to_update = HashMap::new();
// need to compare both forward and backward because either hashmap could be sparse.
// forward direction
for (variant, level) in incoming {
let original_level = original.get(variant).unwrap_or_default();
if level == original_level {
continue;
}
to_update.insert(*variant, (*original_level, *level));
}
// backward direction
for (variant, level) in original {
let incoming_level = incoming.get(variant).unwrap_or_default();
if level == incoming_level {
continue;
}
to_update.insert(*variant, (*level, *incoming_level));
}
to_update
}
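To see why `diff_group_all` has to walk both maps, here is a self-contained sketch with stand-in enums (the real `ResourceTargetVariant` and `PermissionLevel` have more variants; `Level::None` plays the role of the unset default):

```rust
use std::collections::HashMap;

// Stand-ins for the real enums, just to show the shape of the diff.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Default)]
enum Level {
  #[default]
  None,
  Read,
  Execute,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Variant {
  Server,
  Build,
}

fn diff(
  original: &HashMap<Variant, Level>,
  incoming: &HashMap<Variant, Level>,
) -> HashMap<Variant, (Level, Level)> {
  let mut out = HashMap::new();
  // Forward pass: variants that `incoming` adds or changes.
  for (v, l) in incoming {
    let o = original.get(v).copied().unwrap_or_default();
    if *l != o {
      out.insert(*v, (o, *l));
    }
  }
  // Backward pass: variants only `original` mentions, i.e. permissions
  // that have to be taken back down to the default.
  for (v, l) in original {
    let i = incoming.get(v).copied().unwrap_or_default();
    if *l != i {
      out.insert(*v, (*l, i));
    }
  }
  out
}

fn main() {
  let original = HashMap::from([(Variant::Server, Level::Read)]);
  let incoming = HashMap::from([(Variant::Build, Level::Execute)]);
  let d = diff(&original, &incoming);
  // Server drops Read -> None, Build gains None -> Execute.
  assert_eq!(d[&Variant::Server], (Level::Read, Level::None));
  assert_eq!(d[&Variant::Build], (Level::None, Level::Execute));
}
```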

View File

@@ -45,7 +45,7 @@ pub async fn add_update(
update.id = db_client()
.await
.updates
.insert_one(&update, None)
.insert_one(&update)
.await
.context("failed to insert update into db")?
.inserted_id

View File

@@ -1,425 +0,0 @@
use std::sync::{Arc, OnceLock};
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use hex::ToHex;
use hmac::{Hmac, Mac};
use monitor_client::{
api::{execute, write::RefreshResourceSyncPending},
entities::{
build::Build, procedure::Procedure, repo::Repo,
sync::ResourceSync, user::github_user,
},
};
use resolver_api::Resolve;
use serde::Deserialize;
use sha2::Sha256;
use tokio::sync::Mutex;
use tracing::Instrument;
use crate::{
config::core_config,
helpers::{
cache::Cache, random_duration, update::init_execution_update,
},
resource,
state::State,
};
type HmacSha256 = Hmac<Sha256>;
#[derive(Deserialize)]
struct Id {
id: String,
}
#[derive(Deserialize)]
struct IdBranch {
id: String,
branch: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/build/:id",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("build_webhook", id);
async {
let res = handle_build_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run build webook for build {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
),
)
.route(
"/repo/:id/clone",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_clone_webhook", id);
async {
let res = handle_repo_clone_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo clone webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/repo/:id/pull",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_pull_webhook", id);
async {
let res = handle_repo_pull_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo pull webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/procedure/:id/:branch",
post(
|Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("procedure_webhook", id, branch);
async {
let res = handle_procedure_webhook(
id.clone(),
branch,
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run procedure webook for procedure {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/refresh",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_refresh_webhook", id);
async {
let res = handle_sync_refresh_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/sync",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_execute_webhook", id);
async {
let res = handle_sync_execute_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
}
async fn handle_build_webhook(
build_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = build_locks().get_or_insert_default(&build_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let build = resource::get::<Build>(&build_id).await?;
if !build.config.webhook_enabled {
return Err(anyhow!("build does not have webhook enabled"));
}
if request_branch != build.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::RunBuild(
execute::RunBuild { build: build_id },
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_repo_clone_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::CloneRepo(
execute::CloneRepo { repo: repo_id },
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_repo_pull_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::PullRepo(
execute::PullRepo { repo: repo_id },
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_procedure_webhook(
procedure_id: String,
target_branch: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock =
procedure_locks().get_or_insert_default(&procedure_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
if request_branch != target_branch {
return Err(anyhow!("request branch does not match expected"));
}
let procedure = resource::get::<Procedure>(&procedure_id).await?;
if !procedure.config.webhook_enabled {
return Err(anyhow!("procedure does not have webhook enabled"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::RunProcedure(
execute::RunProcedure {
procedure: procedure_id,
},
);
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::RunProcedure(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
async fn handle_sync_refresh_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
State
.resolve(RefreshResourceSyncPending { sync: sync_id }, user)
.await?;
Ok(())
}
async fn handle_sync_execute_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::RunSync(execute::RunSync {
sync: sync_id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
#[instrument(skip_all)]
async fn verify_gh_signature(
headers: HeaderMap,
body: &str,
) -> anyhow::Result<()> {
// wait random amount of time
tokio::time::sleep(random_duration(0, 500)).await;
let signature = headers.get("x-hub-signature-256");
if signature.is_none() {
return Err(anyhow!("no signature in headers"));
}
let signature = signature.unwrap().to_str();
if signature.is_err() {
return Err(anyhow!("failed to unwrap signature"));
}
let signature = signature.unwrap().replace("sha256=", "");
let mut mac = HmacSha256::new_from_slice(
core_config().github_webhook_secret.as_bytes(),
)
.expect("github webhook | failed to create hmac sha256");
mac.update(body.as_bytes());
let expected = mac.finalize().into_bytes().encode_hex::<String>();
if signature == expected {
Ok(())
} else {
Err(anyhow!("signature does not equal expected"))
}
}
#[derive(Deserialize)]
struct GithubWebhookBody {
#[serde(rename = "ref")]
branch: String,
}
fn extract_branch(body: &str) -> anyhow::Result<String> {
let branch = serde_json::from_str::<GithubWebhookBody>(body)
.context("failed to parse github request body")?
.branch
.replace("refs/heads/", "");
Ok(branch)
}
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;
fn build_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
fn repo_locks() -> &'static ListenerLockCache {
static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
REPO_LOCKS.get_or_init(Default::default)
}
fn procedure_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
fn sync_locks() -> &'static ListenerLockCache {
static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
SYNC_LOCKS.get_or_init(Default::default)
}

View File

@@ -0,0 +1,51 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::RunBuild,
entities::{build::Build, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn build_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
pub async fn handle_build_webhook(
build_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold the lock to form a task queue for
// subsequent listener calls on the same resource.
// Otherwise, concurrent calls would fail because the action state is busy.
let lock = build_locks().get_or_insert_default(&build_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let build = resource::get::<Build>(&build_id).await?;
if !build.config.webhook_enabled {
return Err(anyhow!("build does not have webhook enabled"));
}
if request_branch != build.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = ExecuteRequest::RunBuild(RunBuild { build: build_id });
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
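The per-id lock above turns overlapping webhook deliveries for the same build into a queue instead of letting later ones fail while the action state is busy. A self-contained sketch of the same idea, with a plain `Mutex<HashMap<..>>` standing in for the project's `Cache` type (names are illustrative; assumes the tokio runtime features the crate already uses):

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::Mutex;

/// Simplified stand-in for `ListenerLockCache`: one Mutex per key.
#[derive(Default)]
struct LockCache(Mutex<HashMap<String, Arc<Mutex<()>>>>);

impl LockCache {
  async fn get_or_insert_default(&self, key: &str) -> Arc<Mutex<()>> {
    self.0.lock().await.entry(key.to_string()).or_default().clone()
  }
}

#[tokio::main]
async fn main() {
  let cache = Arc::new(LockCache::default());
  // Two "webhook deliveries" for the same build id: the second waits
  // for the first to release the per-id lock, forming a task queue.
  let (a, b) = (cache.clone(), cache.clone());
  let t1 = tokio::spawn(async move {
    let lock = a.get_or_insert_default("build-1").await;
    let _guard = lock.lock().await;
    // ... handle the first delivery ...
  });
  let t2 = tokio::spawn(async move {
    let lock = b.get_or_insert_default("build-1").await;
    let _guard = lock.lock().await;
    // ... handle the second delivery ...
  });
  let _ = tokio::join!(t1, t2);
}
```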

View File

@@ -0,0 +1,204 @@
use std::sync::Arc;
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use hex::ToHex;
use hmac::{Hmac, Mac};
use serde::Deserialize;
use sha2::Sha256;
use tokio::sync::Mutex;
use tracing::Instrument;
use crate::{
config::core_config,
helpers::{cache::Cache, random_duration},
};
mod build;
mod procedure;
mod repo;
mod sync;
type HmacSha256 = Hmac<Sha256>;
#[derive(Deserialize)]
struct Id {
id: String,
}
#[derive(Deserialize)]
struct IdBranch {
id: String,
branch: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/build/:id",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("build_webhook", id);
async {
let res = build::handle_build_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run build webook for build {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
),
)
.route(
"/repo/:id/clone",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_clone_webhook", id);
async {
let res = repo::handle_repo_clone_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo clone webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/repo/:id/pull",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("repo_pull_webhook", id);
async {
let res = repo::handle_repo_pull_webhook(id.clone(), headers, body).await;
if let Err(e) = res {
warn!("failed to run repo pull webook for repo {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/procedure/:id/:branch",
post(
|Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("procedure_webhook", id, branch);
async {
let res = procedure::handle_procedure_webhook(
id.clone(),
branch,
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run procedure webook for procedure {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/refresh",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_refresh_webhook", id);
async {
let res = sync::handle_sync_refresh_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
.route(
"/sync/:id/sync",
post(
|Path(Id { id }), headers: HeaderMap, body: String| async move {
tokio::spawn(async move {
let span = info_span!("sync_execute_webhook", id);
async {
let res = sync::handle_sync_execute_webhook(
id.clone(),
headers,
body
).await;
if let Err(e) = res {
warn!("failed to run sync webook for sync {id} | {e:#}");
}
}
.instrument(span)
.await
});
},
)
)
}
#[instrument(skip_all)]
async fn verify_gh_signature(
headers: HeaderMap,
body: &str,
) -> anyhow::Result<()> {
// wait random amount of time
tokio::time::sleep(random_duration(0, 500)).await;
let signature = headers.get("x-hub-signature-256");
if signature.is_none() {
return Err(anyhow!("no signature in headers"));
}
let signature = signature.unwrap().to_str();
if signature.is_err() {
return Err(anyhow!("failed to unwrap signature"));
}
let signature = signature.unwrap().replace("sha256=", "");
let mut mac = HmacSha256::new_from_slice(
core_config().webhook_secret.as_bytes(),
)
.expect("github webhook | failed to create hmac sha256");
mac.update(body.as_bytes());
let expected = mac.finalize().into_bytes().encode_hex::<String>();
if signature == expected {
Ok(())
} else {
Err(anyhow!("signature does not equal expected"))
}
}
#[derive(Deserialize)]
struct GithubWebhookBody {
#[serde(rename = "ref")]
branch: String,
}
fn extract_branch(body: &str) -> anyhow::Result<String> {
let branch = serde_json::from_str::<GithubWebhookBody>(body)
.context("failed to parse github request body")?
.branch
.replace("refs/heads/", "");
Ok(branch)
}
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;
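For reference, the header checked by `verify_gh_signature` follows GitHub's `X-Hub-Signature-256` scheme: an HMAC-SHA256 of the raw request body keyed with the webhook secret, hex-encoded and prefixed with `sha256=`. A small sketch of computing the expected value with the same `hmac`, `sha2`, and `hex` crates imported above (the secret and body are placeholders):

```rust
use hex::ToHex;
use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

fn main() {
  // Placeholder secret and payload, only for illustration.
  let secret = "example-webhook-secret";
  let body = r#"{"ref":"refs/heads/main"}"#;

  let mut mac = HmacSha256::new_from_slice(secret.as_bytes())
    .expect("HMAC accepts keys of any length");
  mac.update(body.as_bytes());
  let digest: String = mac.finalize().into_bytes().encode_hex();

  // This is what the `x-hub-signature-256` header is expected to contain.
  println!("sha256={digest}");
}
```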

View File

@@ -0,0 +1,55 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::RunProcedure,
entities::{procedure::Procedure, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn procedure_locks() -> &'static ListenerLockCache {
static PROCEDURE_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
PROCEDURE_LOCKS.get_or_init(Default::default)
}
pub async fn handle_procedure_webhook(
procedure_id: String,
target_branch: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold the lock to form a task queue for
// subsequent listener calls on the same resource.
// Otherwise, concurrent calls would fail because the action state is busy.
let lock =
procedure_locks().get_or_insert_default(&procedure_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
if request_branch != target_branch {
return Err(anyhow!("request branch does not match expected"));
}
let procedure = resource::get::<Procedure>(&procedure_id).await?;
if !procedure.config.webhook_enabled {
return Err(anyhow!("procedure does not have webhook enabled"));
}
let user = github_user().to_owned();
let req = ExecuteRequest::RunProcedure(RunProcedure {
procedure: procedure_id,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunProcedure(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
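Each handler wraps its concrete request in `ExecuteRequest`, hands the enum to `init_execution_update`, then moves the variant back out with `let ... else { unreachable!() }`. A toy sketch of that wrap-then-unwrap shape with made-up types, just to show why the `else` branch can never run:

```rust
// Toy illustration with made-up names of the wrap-then-unwrap pattern:
// the request is wrapped in the enum so a generic helper can inspect it,
// then the concrete variant is moved back out with let-else.
#[allow(dead_code)]
enum Request {
  RunBuild { build: String },
  RunSync { sync: String },
}

// Stand-in for a generic step like `init_execution_update`,
// which only needs the enum.
fn describe(req: &Request) -> &'static str {
  match req {
    Request::RunBuild { .. } => "RunBuild",
    Request::RunSync { .. } => "RunSync",
  }
}

fn main() {
  let req = Request::RunBuild { build: "build-1".into() };
  println!("operation: {}", describe(&req));
  // We just constructed the RunBuild variant, so the else branch is unreachable.
  let Request::RunBuild { build } = req else {
    unreachable!()
  };
  println!("running build {build}");
}
```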

View File

@@ -0,0 +1,86 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::{CloneRepo, PullRepo},
entities::{repo::Repo, user::github_user},
};
use resolver_api::Resolve;
use crate::{
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn repo_locks() -> &'static ListenerLockCache {
static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
REPO_LOCKS.get_or_init(Default::default)
}
pub async fn handle_repo_clone_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold the lock to form a task queue for
// subsequent listener calls on the same resource.
// Otherwise, concurrent calls would fail because the action state is busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo {
repo: repo_id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
pub async fn handle_repo_pull_webhook(
repo_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold the lock to form a task queue for
// subsequent listener calls on the same resource.
// Otherwise, concurrent calls would fail because the action state is busy.
let lock = repo_locks().get_or_insert_default(&repo_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let repo = resource::get::<Repo>(&repo_id).await?;
if !repo.config.webhook_enabled {
return Err(anyhow!("repo does not have webhook enabled"));
}
if request_branch != repo.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = crate::api::execute::ExecuteRequest::PullRepo(PullRepo {
repo: repo_id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}

View File

@@ -0,0 +1,78 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{sync::ResourceSync, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn sync_locks() -> &'static ListenerLockCache {
static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
SYNC_LOCKS.get_or_init(Default::default)
}
pub async fn handle_sync_refresh_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold the lock to form a task queue for
// subsequent listener calls on the same resource.
// Otherwise, concurrent calls would fail because the action state is busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
State
.resolve(RefreshResourceSyncPending { sync: sync_id }, user)
.await?;
Ok(())
}
pub async fn handle_sync_execute_webhook(
sync_id: String,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold the lock to form a task queue for
// subsequent listener calls on the same resource.
// Otherwise, concurrent calls would fail because the action state is busy.
let lock = sync_locks().get_or_insert_default(&sync_id).await;
let _lock = lock.lock().await;
verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let sync = resource::get::<ResourceSync>(&sync_id).await?;
if !sync.config.webhook_enabled {
return Err(anyhow!("sync does not have webhook enabled"));
}
if request_branch != sync.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = github_user().to_owned();
let req = ExecuteRequest::RunSync(RunSync { sync: sync_id });
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}

View File

@@ -74,7 +74,7 @@ pub async fn alert_deployments(
return;
}
send_alerts(&alerts).await;
let res = db_client().await.alerts.insert_many(alerts, None).await;
let res = db_client().await.alerts.insert_many(alerts).await;
if let Err(e) = res {
error!("failed to record deployment status alerts to db | {e:#}");
}

View File

@@ -42,7 +42,7 @@ pub async fn alert_servers(
let mut alerts_to_open = Vec::<(Alert, SendAlerts)>::new();
let mut alerts_to_update = Vec::<(Alert, SendAlerts)>::new();
let mut alert_ids_to_close = Vec::<(String, SendAlerts)>::new();
let mut alert_ids_to_close = Vec::<(Alert, SendAlerts)>::new();
for server_status in server_statuses {
let Some(server) = servers.remove(&server_status.id) else {
@@ -101,13 +101,10 @@ pub async fn alert_servers(
}
// Close an open alert
(
ServerState::Ok | ServerState::Disabled,
Some(health_alert),
) => alert_ids_to_close.push((
health_alert.id.clone(),
server.info.send_unreachable_alerts,
)),
(ServerState::Ok | ServerState::Disabled, Some(alert)) => {
alert_ids_to_close
.push((alert.clone(), server.info.send_unreachable_alerts));
}
_ => {}
}
@@ -149,8 +146,8 @@ pub async fn alert_servers(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
) => {
// modify alert level
if alert.level != health.cpu {
// modify alert level only if it has increased
if alert.level < health.cpu {
alert.level = health.cpu;
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
@@ -165,8 +162,20 @@ pub async fn alert_servers(
alerts_to_update.push((alert, server.info.send_cpu_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => alert_ids_to_close
.push((alert.id.clone(), server.info.send_cpu_alerts)),
(SeverityLevel::Ok, Some(alert)) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
percentage: server_status
.stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_cpu_alerts))
}
_ => {}
}
@@ -185,7 +194,7 @@ pub async fn alert_servers(
ts,
resolved: false,
resolved_ts: None,
level: health.cpu,
level: health.mem,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerMem {
id: server_status.id.clone(),
@@ -209,7 +218,8 @@ pub async fn alert_servers(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
) => {
if alert.level != health.mem {
// modify alert level only if it has increased
if alert.level < health.mem {
alert.level = health.mem;
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
@@ -229,8 +239,25 @@ pub async fn alert_servers(
alerts_to_update.push((alert, server.info.send_mem_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => alert_ids_to_close
.push((alert.id.clone(), server.info.send_mem_alerts)),
(SeverityLevel::Ok, Some(alert)) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
total_gb: server_status
.stats
.as_ref()
.map(|s| s.mem_total_gb)
.unwrap_or(0.0),
used_gb: server_status
.stats
.as_ref()
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_mem_alerts))
}
_ => {}
}
@@ -273,6 +300,7 @@ pub async fn alert_servers(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
) => {
// Disk is persistent, update alert if health changes regardless of direction
if *health != alert.level {
let disk =
server_status.stats.as_ref().and_then(|stats| {
@@ -291,8 +319,23 @@ pub async fn alert_servers(
.push((alert, server.info.send_disk_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => alert_ids_to_close
.push((alert.id.clone(), server.info.send_disk_alerts)),
(SeverityLevel::Ok, Some(alert)) => {
let mut alert = alert.clone();
let disk = server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = *health;
alert.data = AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
};
alert_ids_to_close
.push((alert, server.info.send_disk_alerts))
}
_ => {}
}
}
@@ -316,7 +359,7 @@ async fn open_alerts(alerts: &[(Alert, SendAlerts)]) {
let open = || async {
let ids = db
.alerts
.insert_many(alerts.iter().map(|(alert, _)| alert), None)
.insert_many(alerts.iter().map(|(alert, _)| alert))
.await?
.inserted_ids
.into_iter()
@@ -402,22 +445,20 @@ async fn update_alerts(alerts: &[(Alert, SendAlerts)]) {
}
#[instrument(level = "debug")]
async fn resolve_alerts(alert_ids: &[(String, SendAlerts)]) {
if alert_ids.is_empty() {
async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) {
if alerts.is_empty() {
return;
}
let send_alerts_map =
alert_ids.iter().cloned().collect::<HashMap<_, _>>();
let close = || async {
let alert_ids = alert_ids
let close = || async move {
let alert_ids = alerts
.iter()
.map(|(id, _)| {
ObjectId::from_str(id)
.map(|(alert, _)| {
ObjectId::from_str(&alert.id)
.context("failed to convert alert id to ObjectId")
})
.collect::<anyhow::Result<Vec<_>>>()?;
db_client()
.await
.alerts
@@ -429,31 +470,25 @@ async fn resolve_alerts(alert_ids: &[(String, SendAlerts)]) {
"resolved_ts": monitor_timestamp()
}
},
None,
)
.await
.context("failed to resolve alerts on db")?;
let mut closed = find_collect(
&db_client().await.alerts,
doc! { "_id": { "$in": &alert_ids } },
None,
)
.await
.context("failed to get closed alerts from db")?;
.context("failed to resolve alerts on db")
.inspect_err(|e| warn!("{e:#}"))
.ok();
for closed in &mut closed {
closed.level = SeverityLevel::Ok;
}
let ts = monitor_timestamp();
let closed = closed
.into_iter()
.filter(|closed| {
if let ResourceTarget::Server(id) = &closed.target {
send_alerts_map.get(id).cloned().unwrap_or(true)
} else {
error!("got resource target other than server in resolve_server_alerts");
true
}
let closed = alerts
.iter()
.filter(|(_, send)| *send)
.map(|(alert, _)| {
let mut alert = alert.clone();
alert.resolved = true;
alert.resolved_ts = Some(ts);
alert.level = SeverityLevel::Ok;
alert
})
.collect::<Vec<_>>();

View File

@@ -30,8 +30,7 @@ pub async fn record_server_stats(ts: i64) {
})
.collect::<Vec<_>>();
if !records.is_empty() {
let res =
db_client().await.stats.insert_many(records, None).await;
let res = db_client().await.stats.insert_many(records).await;
if let Err(e) = res {
error!("failed to record server stats | {e:#}");
}

View File

@@ -53,6 +53,7 @@ impl super::MonitorResource for Build {
info: BuildListItemInfo {
last_built_at: build.info.last_built_at,
version: build.config.version,
git_provider: build.config.git_provider,
repo: build.config.repo,
branch: build.config.branch,
state,

View File

@@ -137,7 +137,6 @@ impl super::MonitorResource for Builder {
mungos::update::Update::Set(
doc! { "config.builder.params.builder_id": "" },
),
None,
)
.await
.context("failed to update_many builds on database")?;

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::entities::{
build::Build,
deployment::{
@@ -16,7 +17,6 @@ use monitor_client::entities::{
};
use mungos::mongodb::Collection;
use periphery_client::api::container::RemoveContainer;
use serror::serialize_error_pretty;
use crate::{
helpers::{
@@ -187,10 +187,12 @@ impl super::MonitorResource for Deployment {
Err(e) => {
update.push_error_log(
"remove container",
format!(
"failed to retrieve server at {} from db.\n\nerror: {}",
deployment.config.server_id,
serialize_error_pretty(&e)
format_serror(
&e.context(format!(
"failed to retrieve server at {} from db.",
deployment.config.server_id
))
.into(),
),
);
return Ok(());
@@ -211,9 +213,8 @@ impl super::MonitorResource for Deployment {
// Leaving it for completeness sake
update.push_error_log(
"remove container",
format!(
"failed to remove container on periphery.\n\nerror: {}",
serialize_error_pretty(&e),
format_serror(
&e.context("failed to get periphery client").into(),
),
);
return Ok(());
@@ -230,9 +231,8 @@ impl super::MonitorResource for Deployment {
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"remove container",
format!(
"failed to remove container.\n\nerror: {}",
serialize_error_pretty(&e)
format_serror(
&e.context("failed to remove container").into(),
),
),
};
@@ -270,7 +270,7 @@ async fn validate_config(
.context("cannot create deployment with this build attached. user must have at least read permissions on the build to perform this action.")?;
config.image = Some(DeploymentImage::Build {
build_id: build.id,
version: version.clone(),
version: *version,
});
}
}

View File

@@ -1,6 +1,7 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::write::CreateTag,
@@ -26,14 +27,13 @@ use mungos::{
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use resolver_api::Resolve;
use serde::{de::DeserializeOwned, Serialize};
use serror::serialize_error_pretty;
use crate::{
config::core_config,
helpers::{
create_permission,
create_permission, flatten_document,
query::{
get_resource_ids_for_non_admin, get_tag,
get_resource_ids_for_user, get_tag,
get_user_permission_on_resource, id_or_name_filter,
},
update::{add_update, make_update},
@@ -187,7 +187,7 @@ pub async fn get<T: MonitorResource>(
) -> anyhow::Result<Resource<T::Config, T::Info>> {
T::coll()
.await
.find_one(id_or_name_filter(id_or_name), None)
.find_one(id_or_name_filter(id_or_name))
.await
.context("failed to query db for resource")?
.with_context(|| {
@@ -211,7 +211,7 @@ pub async fn get_check_permissions<T: MonitorResource>(
return Ok(resource);
}
let permissions = get_user_permission_on_resource(
&user.id,
user,
T::resource_type(),
&resource.id,
)
@@ -265,13 +265,9 @@ async fn list_full_for_user_using_document<T: MonitorResource>(
mut filters: Document,
user: &User,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
if !user.admin && !core_config().transparent_mode {
let ids =
get_resource_ids_for_non_admin(&user.id, T::resource_type())
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
if let Some(ids) =
get_resource_ids_for_user(user, T::resource_type()).await?
{
filters.insert("_id", doc! { "$in": ids });
}
find_collect(
@@ -336,7 +332,7 @@ pub async fn create<T: MonitorResource>(
let resource_id = T::coll()
.await
.insert_one(&resource, None)
.insert_one(&resource)
.await
.with_context(|| {
format!("failed to add {} to db", T::resource_type())
@@ -422,10 +418,12 @@ pub async fn update<T: MonitorResource>(
let config_doc = T::update_document(resource, config)
.context("failed to serialize config to bson document")?;
let update_doc = flatten_document(doc! { "config": config_doc });
update_one_by_id(
T::coll().await,
&id,
mungos::update::Update::FlattenSet(doc! { "config": config_doc }),
doc! { "$set": update_doc },
None,
)
.await
@@ -487,7 +485,6 @@ pub async fn update_description<T: MonitorResource>(
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "description": description } },
None,
)
.await?;
Ok(())
@@ -522,7 +519,6 @@ pub async fn update_tags<T: MonitorResource>(
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "tags": tags } },
None,
)
.await?;
Ok(())
@@ -533,7 +529,7 @@ pub async fn remove_tag_from_all<T: MonitorResource>(
) -> anyhow::Result<()> {
T::coll()
.await
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } }, None)
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } })
.await
.context("failed to remove tag from resources")?;
Ok(())
@@ -580,7 +576,7 @@ pub async fn delete<T: MonitorResource>(
);
if let Err(e) = T::post_delete(&resource, &mut update).await {
update.push_error_log("post delete", serialize_error_pretty(&e));
update.push_error_log("post delete", format_serror(&e.into()));
}
update.finalize();
@@ -612,13 +608,10 @@ where
if let Err(e) = db_client()
.await
.permissions
.delete_many(
doc! {
"resource_target.type": variant.as_ref(),
"resource_target.id": &id
},
None,
)
.delete_many(doc! {
"resource_target.type": variant.as_ref(),
"resource_target.id": &id
})
.await
{
warn!("failed to delete_many permissions matching target {target:?} | {e:#}");
@@ -650,7 +643,6 @@ where
recent_field: id
}
},
None,
)
.await
.context("failed to remove resource from users recently viewed")
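In the `update` hunk above, `flatten_document` replaces the old `Update::FlattenSet`, so the `$set` touches individual dotted keys instead of overwriting the whole `config` subdocument. A rough sketch of the flattening behavior this implies (the real helper in `crate::helpers` may differ in signature and edge cases):

```rust
use mungos::mongodb::bson::{doc, Bson, Document};

// Hypothetical flattening helper (the real one may differ): nested
// documents become dot-notation keys, which lets `$set` update single
// config fields without replacing the whole subdocument.
fn flatten(prefix: Option<&str>, doc: Document, out: &mut Document) {
  for (key, value) in doc {
    let key = match prefix {
      Some(prefix) => format!("{prefix}.{key}"),
      None => key,
    };
    match value {
      Bson::Document(inner) => flatten(Some(key.as_str()), inner, out),
      other => {
        out.insert(key, other);
      }
    }
  }
}

fn main() {
  let nested = doc! { "config": { "repo": "monitor", "version": { "major": 1 } } };
  let mut flat = Document::new();
  flatten(None, nested, &mut flat);
  assert_eq!(
    flat,
    doc! { "config.repo": "monitor", "config.version.major": 1 }
  );
}
```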

View File

@@ -337,12 +337,12 @@ async fn get_procedure_state_from_db(id: &str) -> ProcedureState {
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "Procedure",
"target.id": id,
"operation": "RunProcedure"
},
.find_one(doc! {
"target.type": "Procedure",
"target.id": id,
"operation": "RunProcedure"
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -1,6 +1,7 @@
use std::time::Duration;
use anyhow::Context;
use formatting::format_serror;
use monitor_client::entities::{
permission::PermissionLevel,
repo::{
@@ -18,7 +19,6 @@ use mungos::{
mongodb::{bson::doc, options::FindOneOptions, Collection},
};
use periphery_client::api::git::DeleteRepo;
use serror::serialize_error_pretty;
use crate::{
helpers::periphery_client,
@@ -60,6 +60,7 @@ impl super::MonitorResource for Repo {
info: RepoListItemInfo {
server_id: repo.config.server_id,
last_pulled_at: repo.info.last_pulled_at,
git_provider: repo.config.git_provider,
repo: repo.config.repo,
branch: repo.config.branch,
state,
@@ -158,7 +159,7 @@ impl super::MonitorResource for Repo {
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"delete repo on periphery",
serialize_error_pretty(&e),
format_serror(&e.context("failed to delete repo").into()),
),
}
@@ -244,15 +245,15 @@ async fn get_repo_state_from_db(id: &str) -> RepoState {
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "Repo",
"target.id": id,
"$or": [
{ "operation": "CloneRepo" },
{ "operation": "PullRepo" },
],
},
.find_one(doc! {
"target.type": "Repo",
"target.id": id,
"$or": [
{ "operation": "CloneRepo" },
{ "operation": "PullRepo" },
],
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -130,7 +130,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.params.server_id": &id },
doc! { "$set": { "config.params.server_id": "" } },
None,
)
.await
.context("failed to detach server from builders")?;
@@ -139,7 +138,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from deployments")?;
@@ -148,7 +146,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from repos")?;
@@ -160,7 +157,6 @@ impl super::MonitorResource for Server {
"resolved": true,
"resolved_ts": monitor_timestamp()
} },
None,
)
.await
.context("failed to detach server from repos")?;

View File

@@ -54,6 +54,7 @@ impl super::MonitorResource for ResourceSync {
tags: resource_sync.tags,
resource_type: ResourceTargetVariant::ResourceSync,
info: ResourceSyncListItemInfo {
git_provider: resource_sync.config.git_provider,
repo: resource_sync.config.repo,
branch: resource_sync.config.branch,
last_sync_ts: resource_sync.info.last_sync_ts,
@@ -213,12 +214,12 @@ async fn get_resource_sync_state_from_db(
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "ResourceSync",
"target.id": id,
"operation": "RunSync"
},
.find_one(doc! {
"target.type": "ResourceSync",
"target.id": id,
"operation": "RunSync"
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -1,10 +1,20 @@
use std::sync::{Arc, OnceLock};
use std::{
collections::HashMap,
sync::{Arc, OnceLock},
};
use anyhow::{anyhow, Context};
use monitor_client::entities::{
build::BuildState, deployment::DeploymentState,
procedure::ProcedureState, repo::RepoState,
build::BuildState,
config::core::{CoreConfig, GithubWebhookAppConfig},
deployment::DeploymentState,
procedure::ProcedureState,
repo::RepoState,
sync::ResourceSyncState,
};
use octorust::auth::{
Credentials, InstallationTokenGenerator, JWTCredentials,
};
use tokio::sync::{Mutex, OnceCell};
use crate::{
@@ -36,6 +46,63 @@ pub fn jwt_client() -> &'static JwtClient {
JWT_CLIENT.get_or_init(|| JwtClient::new(core_config()))
}
pub fn github_client(
) -> Option<&'static HashMap<String, octorust::Client>> {
static GITHUB_CLIENT: OnceLock<
Option<HashMap<String, octorust::Client>>,
> = OnceLock::new();
GITHUB_CLIENT
.get_or_init(|| {
let CoreConfig {
github_webhook_app:
GithubWebhookAppConfig {
app_id,
installations,
pk_path,
..
},
..
} = core_config();
if *app_id == 0 || installations.is_empty() {
return None;
}
let private_key = std::fs::read(pk_path)
.context("github webhook app | failed to load private key")
.unwrap();
let private_key = nom_pem::decode_block(&private_key)
.map_err(|e| anyhow!("{e:?}"))
.context("github webhook app | failed to decode private key")
.unwrap();
let jwt = JWTCredentials::new(*app_id, private_key.data)
.context(
"github webhook app | failed to make github JWTCredentials",
)
.unwrap();
let mut clients =
HashMap::with_capacity(installations.capacity());
for installation in installations {
let token_generator = InstallationTokenGenerator::new(
installation.id,
jwt.clone(),
);
let client = octorust::Client::new(
"github-app",
Credentials::InstallationToken(token_generator),
)
.context("failed to initialize github client")
.unwrap();
clients.insert(installation.namespace.to_string(), client);
}
Some(clients)
})
.as_ref()
}
pub fn action_states() -> &'static ActionStates {
static ACTION_STATES: OnceLock<ActionStates> = OnceLock::new();
ACTION_STATES.get_or_init(ActionStates::default)

View File

@@ -208,7 +208,7 @@ async fn user_can_see_update(
}
let (variant, id) = update_target.extract_variant_id();
let permissions =
get_user_permission_on_resource(&user.id, variant, id).await?;
get_user_permission_on_resource(user, variant, id).await?;
if permissions > PermissionLevel::None {
Ok(())
} else {

bin/migrator/Dockerfile
View File

@@ -0,0 +1,16 @@
FROM rust:1.79.0-bookworm AS builder
WORKDIR /builder
COPY . .
RUN cargo build -p migrator --release
# Final Image
FROM gcr.io/distroless/cc-debian12
COPY --from=builder /builder/target/release/migrator /
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
LABEL org.opencontainers.image.description="Database migrator for Monitor version upgrades"
LABEL org.opencontainers.image.licenses=GPL-3.0
CMD ["./migrator"]

View File

@@ -1,5 +1,39 @@
# Migrator
Upgrade data between periphery versions.
Performs schema changes on the Monitor database.
Supports v0.x -> v1.x migration.
## v1.7 - v1.11 migration
Run this before upgrading to latest from versions 1.7 to 1.11.
```sh
docker run --rm --name monitor-migrator \
--env MIGRATION="v1.11" \
--env TARGET_URI="mongodb://<USERNAME>:<PASSWORD>@<ADDRESS>" \
--env TARGET_DB_NAME="<DB_NAME>" \
ghcr.io/mbecker20/monitor_migrator
```
## v1.0 - v1.6 migration
Run this before upgrading to latest from versions 1.0 to 1.6.
```sh
docker run --rm --name monitor-migrator \
--env MIGRATION="v1.6" \
--env TARGET_URI="mongodb://<USERNAME>:<PASSWORD>@<ADDRESS>" \
--env TARGET_DB_NAME="<DB_NAME>" \
ghcr.io/mbecker20/monitor_migrator
```
## v0.X migration
Run this before upgrading to latest from version 0.X.
Note: as this is a major upgrade, this migration is not performed "in place".
It will create another database (TARGET) and migrate resources over, leaving the original database (LEGACY) unchanged.
```sh
docker run --rm --name monitor-migrator \
--env MIGRATION="v0" \
--env TARGET_URI="mongodb://<USERNAME>:<PASSWORD>@<ADDRESS>" \
--env TARGET_DB_NAME="<TARGET_DB_NAME>" \
--env LEGACY_URI="mongodb://<USERNAME>:<PASSWORD>@<ADDRESS>" \
--env LEGACY_DB_NAME="<LEGACY_DB_NAME>" \
ghcr.io/mbecker20/monitor_migrator
```

View File

@@ -1,3 +1,4 @@
#[allow(unused)]
pub mod v0;
pub mod v1_11;
pub mod v1_6;

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use monitor_client::entities::build::{
BuildConfig, BuildInfo, CloudRegistryConfig, ImageRegistry,
BuildConfig, BuildInfo, ImageRegistry, StandardRegistryConfig,
};
use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id;
use serde::{Deserialize, Serialize};
@@ -198,11 +198,6 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
id: value.id,
name: value.name,
description: value.description,
// permissions: value
// .permissions
// .into_iter()
// .map(|(id, p)| (id, p.into()))
// .collect(),
updated_at: unix_from_monitor_ts(&value.updated_at)?,
tags: Vec::new(),
info: BuildInfo {
@@ -212,11 +207,14 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
builder_id: String::new(),
skip_secret_interp: value.skip_secret_interp,
version: value.version.into(),
git_provider: String::from("github.com"),
git_https: true,
repo: value.repo.unwrap_or_default(),
branch: value.branch.unwrap_or_default(),
github_account: value.github_account.unwrap_or_default(),
image_registry: ImageRegistry::DockerHub(
CloudRegistryConfig {
git_account: value.github_account.unwrap_or_default(),
image_registry: ImageRegistry::Standard(
StandardRegistryConfig {
domain: String::from("docker.io"),
account: value.docker_account.unwrap_or_default(),
organization: value
.docker_organization
@@ -233,6 +231,7 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
build_path,
dockerfile_path,
build_args,
secret_args: Default::default(),
extra_args,
use_buildx,
labels: Default::default(),

View File

@@ -1,5 +1,5 @@
use monitor_client::entities::{
build::{CloudRegistryConfig, ImageRegistry},
build::{ImageRegistry, StandardRegistryConfig},
NoData,
};
use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id;
@@ -378,15 +378,10 @@ impl TryFrom<Deployment>
.post_image
.unwrap_or_default(),
extra_args: value.docker_run_args.extra_args,
image_registry: match value.docker_run_args.docker_account {
Some(account) => {
ImageRegistry::DockerHub(CloudRegistryConfig {
account,
..Default::default()
})
}
None => ImageRegistry::None(NoData {}),
},
image_registry_account: value
.docker_run_args
.docker_account
.unwrap_or_default(),
labels: Default::default(),
},
};

View File

@@ -106,11 +106,8 @@ impl TryFrom<User> for monitor_client::entities::user::User {
create_server_permissions: value.create_server_permissions,
create_build_permissions: value.create_build_permissions,
last_update_view: Default::default(),
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
updated_at: unix_from_monitor_ts(&value.updated_at)?,
};
Ok(user)

View File

@@ -0,0 +1,253 @@
use monitor_client::entities::{
build::StandardRegistryConfig, EnvironmentVar, NoData,
SystemCommand, Version, I64,
};
use serde::{Deserialize, Serialize};
use super::resource::Resource;
pub type Build = Resource<BuildConfig, BuildInfo>;
impl From<Build> for monitor_client::entities::build::Build {
fn from(value: Build) -> Self {
monitor_client::entities::build::Build {
id: value.id,
name: value.name,
description: value.description,
updated_at: value.updated_at,
tags: value.tags,
info: monitor_client::entities::build::BuildInfo {
last_built_at: value.info.last_built_at,
},
config: value.config.into(),
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct BuildInfo {
pub last_built_at: I64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BuildConfig {
/// Which builder is used to build the image.
#[serde(default, alias = "builder")]
pub builder_id: String,
/// The current version of the build.
#[serde(default)]
pub version: Version,
/// The Github repo used as the source of the build.
#[serde(default)]
pub repo: String,
/// The branch of the repo.
#[serde(default = "default_branch")]
pub branch: String,
/// Optionally set a specific commit hash.
#[serde(default)]
pub commit: String,
/// The github account used to clone (used to access private repos).
/// Empty string is public clone (only public repos).
#[serde(default)]
pub github_account: String,
/// The optional command run after repo clone and before docker build.
#[serde(default)]
pub pre_build: SystemCommand,
/// Configuration for the registry to push the built image to.
#[serde(default)]
pub image_registry: ImageRegistry,
/// The path of the docker build context relative to the root of the repo.
/// Default: "." (the root of the repo).
#[serde(default = "default_build_path")]
pub build_path: String,
/// The path of the dockerfile relative to the build path.
#[serde(default = "default_dockerfile_path")]
pub dockerfile_path: String,
/// Whether to skip secret interpolation in the build_args.
#[serde(default)]
pub skip_secret_interp: bool,
/// Whether to use buildx to build (eg `docker buildx build ...`)
#[serde(default)]
pub use_buildx: bool,
/// Whether incoming webhooks actually trigger action.
#[serde(default = "default_webhook_enabled")]
pub webhook_enabled: bool,
/// Any extra docker cli arguments to be included in the build command
#[serde(default)]
pub extra_args: Vec<String>,
/// Docker build arguments.
///
/// These values are visible in the final image by running `docker inspect`.
#[serde(
default,
deserialize_with = "monitor_client::entities::env_vars_deserializer"
)]
pub build_args: Vec<EnvironmentVar>,
/// Secret arguments.
///
/// These values remain hidden in the final image by using
/// docker secret mounts. See `<https://docs.docker.com/build/building/secrets>`.
///
/// The values can be used in RUN commands:
/// ```
/// RUN --mount=type=secret,id=SECRET_KEY \
/// SECRET_KEY=$(cat /run/secrets/SECRET_KEY) ...
/// ```
#[serde(
default,
deserialize_with = "monitor_client::entities::env_vars_deserializer"
)]
pub secret_args: Vec<EnvironmentVar>,
/// Docker labels
#[serde(
default,
deserialize_with = "monitor_client::entities::env_vars_deserializer"
)]
pub labels: Vec<EnvironmentVar>,
}
impl From<BuildConfig>
for monitor_client::entities::build::BuildConfig
{
fn from(value: BuildConfig) -> Self {
monitor_client::entities::build::BuildConfig {
builder_id: value.builder_id,
skip_secret_interp: value.skip_secret_interp,
version: monitor_client::entities::Version {
major: value.version.major,
minor: value.version.minor,
patch: value.version.patch,
},
git_provider: String::from("github.com"),
git_https: true,
repo: value.repo,
branch: value.branch,
commit: value.commit,
git_account: value.github_account,
pre_build: monitor_client::entities::SystemCommand {
path: value.pre_build.path,
command: value.pre_build.command,
},
build_path: value.build_path,
dockerfile_path: value.dockerfile_path,
build_args: value
.build_args
.into_iter()
.map(Into::into)
.collect(),
secret_args: Default::default(),
labels: value.labels.into_iter().map(Into::into).collect(),
extra_args: value.extra_args,
use_buildx: value.use_buildx,
webhook_enabled: value.webhook_enabled,
image_registry: value.image_registry.into(),
}
}
}
fn default_branch() -> String {
String::from("main")
}
fn default_build_path() -> String {
String::from(".")
}
fn default_dockerfile_path() -> String {
String::from("Dockerfile")
}
fn default_webhook_enabled() -> bool {
true
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(tag = "type", content = "params")]
pub enum ImageRegistry {
/// Don't push the image to any registry
None(NoData),
/// Push the image to DockerHub
DockerHub(CloudRegistryConfig),
/// Push the image to the Github Container Registry.
///
/// See [the Github docs](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#pushing-container-images)
/// for information on creating an access token
Ghcr(CloudRegistryConfig),
/// Push the image to Aws Elastic Container Registry
///
/// The string held in 'params' should match a label of an `aws_ecr_registry` in the core config.
AwsEcr(String),
/// Todo. Will point to a custom "Registry" resource by id
Custom(String),
}
impl Default for ImageRegistry {
fn default() -> Self {
Self::None(NoData {})
}
}
impl From<ImageRegistry>
for monitor_client::entities::build::ImageRegistry
{
fn from(value: ImageRegistry) -> Self {
match value {
ImageRegistry::None(_) | ImageRegistry::Custom(_) => {
monitor_client::entities::build::ImageRegistry::None(
NoData {},
)
}
ImageRegistry::DockerHub(params) => {
monitor_client::entities::build::ImageRegistry::Standard(
StandardRegistryConfig {
domain: String::from("docker.io"),
account: params.account,
organization: params.organization,
},
)
}
ImageRegistry::Ghcr(params) => {
monitor_client::entities::build::ImageRegistry::Standard(
StandardRegistryConfig {
domain: String::from("ghcr.io"),
account: params.account,
organization: params.organization,
},
)
}
ImageRegistry::AwsEcr(label) => {
monitor_client::entities::build::ImageRegistry::AwsEcr(label)
}
}
}
}
#[derive(
Debug, Clone, Default, PartialEq, Serialize, Deserialize,
)]
pub struct CloudRegistryConfig {
/// Specify an account to use with the cloud registry.
#[serde(default)]
pub account: String,
/// Optional. Specify an organization to push the image under.
/// Empty string means no organization.
#[serde(default)]
pub organization: String,
}

View File

@@ -0,0 +1,167 @@
use monitor_client::entities::{
deployment::{
conversions_deserializer, term_labels_deserializer, Conversion,
DeploymentImage, RestartMode, TerminationSignal,
TerminationSignalLabel,
},
env_vars_deserializer, EnvironmentVar,
};
use serde::{Deserialize, Serialize};
use super::{build::ImageRegistry, resource::Resource};
pub type Deployment = Resource<DeploymentConfig, ()>;
impl From<Deployment>
for monitor_client::entities::deployment::Deployment
{
fn from(value: Deployment) -> Self {
monitor_client::entities::deployment::Deployment {
id: value.id,
name: value.name,
description: value.description,
updated_at: value.updated_at,
tags: value.tags,
info: (),
config: value.config.into(),
}
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DeploymentConfig {
/// The id of server the deployment is deployed on.
#[serde(default, alias = "server")]
pub server_id: String,
/// The image which the deployment deploys.
/// Can either be a user inputted image, or a Monitor build.
#[serde(default)]
pub image: DeploymentImage,
/// Configure the registry used to pull the image.
/// Used with `docker login`.
///
/// When using attached build as image source:
/// - If the field is `None` variant, will use the same ImageRegistry config as the build.
/// - Otherwise, it must match the variant of the ImageRegistry build config.
/// - Only the account is used, the organization is not needed here
#[serde(default)]
pub image_registry: ImageRegistry,
/// Whether to skip secret interpolation into the deployment environment variables.
#[serde(default)]
pub skip_secret_interp: bool,
/// Whether to redeploy the deployment whenever the attached build finishes.
#[serde(default)]
pub redeploy_on_build: bool,
/// Whether to send ContainerStateChange alerts for this deployment.
#[serde(default = "default_send_alerts")]
pub send_alerts: bool,
/// The network attached to the container.
/// Default is `host`.
#[serde(default = "default_network")]
pub network: String,
/// The restart mode given to the container.
#[serde(default)]
pub restart: RestartMode,
/// This is interpolated at the end of the `docker run` command,
/// which means it is either passed to the container's inner process
/// or replaces the container command, depending on the use of ENTRYPOINT or CMD in the Dockerfile.
/// Empty means no command.
#[serde(default)]
pub command: String,
/// The default termination signal to use to stop the deployment. Defaults to SigTerm (default docker signal).
#[serde(default)]
pub termination_signal: TerminationSignal,
/// The termination timeout.
#[serde(default = "default_termination_timeout")]
pub termination_timeout: i32,
/// Extra args which are interpolated into the `docker run` command,
/// and affect the container configuration.
#[serde(default)]
pub extra_args: Vec<String>,
/// Labels attached to various termination signal options.
/// Used to specify different shutdown functionality depending on the termination signal.
#[serde(
default = "default_term_signal_labels",
deserialize_with = "term_labels_deserializer"
)]
pub term_signal_labels: Vec<TerminationSignalLabel>,
/// The container port mapping.
/// Irrelevant if the container network is `host`.
/// Maps ports on the host to ports in the container.
#[serde(default, deserialize_with = "conversions_deserializer")]
pub ports: Vec<Conversion>,
/// The container volume mapping.
/// Maps files / folders on the host to files / folders in the container.
#[serde(default, deserialize_with = "conversions_deserializer")]
pub volumes: Vec<Conversion>,
/// The environment variables passed to the container.
#[serde(default, deserialize_with = "env_vars_deserializer")]
pub environment: Vec<EnvironmentVar>,
/// The docker labels given to the container.
#[serde(default, deserialize_with = "env_vars_deserializer")]
pub labels: Vec<EnvironmentVar>,
}
fn default_send_alerts() -> bool {
true
}
fn default_term_signal_labels() -> Vec<TerminationSignalLabel> {
vec![TerminationSignalLabel::default()]
}
fn default_termination_timeout() -> i32 {
10
}
fn default_network() -> String {
String::from("host")
}
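// Hypothetical sketch (not part of the migrator itself): deserializing an
// empty document exercises every serde default declared above. Assumes
// `serde_json` is available as a dev-dependency.
#[cfg(test)]
mod deployment_config_default_tests {
  use super::DeploymentConfig;

  #[test]
  fn empty_document_uses_declared_defaults() {
    let config: DeploymentConfig =
      serde_json::from_str("{}").expect("all fields have serde defaults");
    assert_eq!(config.network, "host");
    assert_eq!(config.termination_timeout, 10);
    assert!(config.send_alerts);
  }
}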
impl From<DeploymentConfig>
for monitor_client::entities::deployment::DeploymentConfig
{
fn from(value: DeploymentConfig) -> Self {
monitor_client::entities::deployment::DeploymentConfig {
server_id: value.server_id,
image: value.image,
image_registry_account: match value.image_registry {
ImageRegistry::None(_)
| ImageRegistry::AwsEcr(_)
| ImageRegistry::Custom(_) => String::new(),
ImageRegistry::DockerHub(params) => params.account,
ImageRegistry::Ghcr(params) => params.account,
},
skip_secret_interp: value.skip_secret_interp,
redeploy_on_build: value.redeploy_on_build,
send_alerts: value.send_alerts,
network: value.network,
restart: value.restart,
command: value.command,
termination_signal: value.termination_signal,
termination_timeout: value.termination_timeout,
extra_args: value.extra_args,
term_signal_labels: value.term_signal_labels,
ports: value.ports,
volumes: value.volumes,
environment: value.environment,
labels: value.labels,
}
}
}

View File

@@ -0,0 +1,48 @@
use mungos::{init::MongoBuilder, mongodb::Collection};
use serde::{Deserialize, Serialize};
pub mod build;
pub mod deployment;
pub mod resource;
pub struct DbClient {
pub builds: Collection<build::Build>,
pub deployments: Collection<deployment::Deployment>,
}
impl DbClient {
pub async fn new(
legacy_uri: &str,
legacy_db_name: &str,
) -> DbClient {
let client = MongoBuilder::default()
.uri(legacy_uri)
.build()
.await
.expect("failed to init legacy mongo client");
let db = client.database(legacy_db_name);
DbClient {
builds: db.collection("Build"),
deployments: db.collection("Deployment"),
}
}
}
#[derive(
Serialize, Deserialize, Debug, Clone, Default, PartialEq,
)]
pub struct Version {
pub major: i32,
pub minor: i32,
pub patch: i32,
}
#[derive(
Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq,
)]
pub struct SystemCommand {
#[serde(default)]
pub path: String,
#[serde(default)]
pub command: String,
}

View File

@@ -0,0 +1,54 @@
use mungos::mongodb::bson::serde_helpers::hex_string_as_object_id;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Resource<Config, Info: Default = ()> {
/// The Mongo ID of the resource.
/// This field is de/serialized from/to JSON as
/// `{ "_id": { "$oid": "..." }, ...(rest of serialized Resource<T>) }`
#[serde(
default,
rename = "_id",
skip_serializing_if = "String::is_empty",
with = "hex_string_as_object_id"
)]
pub id: String,
/// The resource name.
/// This is guaranteed unique among others of the same resource type.
pub name: String,
/// A description for the resource.
#[serde(default)]
pub description: String,
/// When the description was last updated.
#[serde(default)]
pub updated_at: i64,
/// Tag Ids
#[serde(default)]
pub tags: Vec<String>,
/// Resource-specific information (not user configurable).
#[serde(default)]
pub info: Info,
/// Resource-specific configuration.
pub config: Config,
}
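// Sketch of the wire format described above (the id value is illustrative):
// a Resource with a non-empty id serializes as
//   { "_id": { "$oid": "662f2a9e8f1b2c3d4e5f6a7b" }, "name": "...", ... }
// while an empty id is omitted entirely via `skip_serializing_if`.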
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ResourceListItem<Info> {
/// The resource id
pub id: String,
/// The resource type, i.e. `Server` or `Deployment`
// #[serde(rename = "type")]
// pub resource_type: ResourceTargetVariant,
/// The resource name
pub name: String,
/// Tag Ids
pub tags: Vec<String>,
/// Resource specific info
pub info: Info,
}

View File

@@ -1,5 +1,5 @@
use monitor_client::entities::{
build::{CloudRegistryConfig, ImageRegistry},
build::{ImageRegistry, StandardRegistryConfig},
NoData,
};
use serde::{Deserialize, Serialize};
@@ -119,10 +119,12 @@ impl From<BuildConfig>
minor: value.version.minor,
patch: value.version.patch,
},
git_provider: String::from("github.com"),
git_https: true,
repo: value.repo,
branch: value.branch,
commit: value.commit,
github_account: value.github_account,
git_account: value.github_account,
pre_build: monitor_client::entities::SystemCommand {
path: value.pre_build.path,
command: value.pre_build.command,
@@ -134,13 +136,15 @@ impl From<BuildConfig>
.into_iter()
.map(Into::into)
.collect(),
secret_args: Default::default(),
labels: value.labels.into_iter().map(Into::into).collect(),
extra_args: value.extra_args,
use_buildx: value.use_buildx,
image_registry: if value.docker_account.is_empty() {
ImageRegistry::None(NoData {})
} else {
ImageRegistry::DockerHub(CloudRegistryConfig {
ImageRegistry::Standard(StandardRegistryConfig {
domain: String::from("docker.io"),
account: value.docker_account,
organization: value.docker_organization,
})

View File

@@ -1,10 +1,6 @@
use monitor_client::entities::{
build::{CloudRegistryConfig, ImageRegistry},
deployment::{
Conversion, DeploymentImage, RestartMode, TerminationSignal,
TerminationSignalLabel,
},
NoData,
use monitor_client::entities::deployment::{
Conversion, DeploymentImage, RestartMode, TerminationSignal,
TerminationSignalLabel,
};
use serde::{Deserialize, Serialize};
@@ -118,14 +114,7 @@ impl From<DeploymentConfig>
server_id: value.server_id,
send_alerts: value.send_alerts,
image: value.image,
image_registry: if value.docker_account.is_empty() {
ImageRegistry::None(NoData {})
} else {
ImageRegistry::DockerHub(CloudRegistryConfig {
account: value.docker_account,
..Default::default()
})
},
image_registry_account: value.docker_account,
skip_secret_interp: value.skip_secret_interp,
redeploy_on_build: value.redeploy_on_build,
term_signal_labels: value.term_signal_labels,

View File

@@ -12,14 +12,18 @@ mod legacy;
mod migrate;
#[derive(Deserialize)]
enum AppMode {
enum Migration {
#[serde(alias = "v0")]
V0,
#[serde(alias = "v1.6")]
V1_6,
#[serde(alias = "v1.11")]
V1_11,
}
#[derive(Deserialize)]
struct Env {
app_mode: AppMode,
migration: Migration,
legacy_uri: String,
legacy_db_name: String,
target_uri: String,
@@ -35,8 +39,8 @@ async fn main() -> anyhow::Result<()> {
let env: Env = envy::from_env()?;
match env.app_mode {
AppMode::V0 => {
match env.migration {
Migration::V0 => {
let legacy_db = legacy::v0::DbClient::new(
&env.legacy_uri,
&env.legacy_db_name,
@@ -46,7 +50,7 @@ async fn main() -> anyhow::Result<()> {
DbClient::new(&env.target_uri, &env.target_db_name).await?;
migrate::v0::migrate_all(&legacy_db, &target_db).await?
}
AppMode::V1_6 => {
Migration::V1_6 => {
let db = legacy::v1_6::DbClient::new(
&env.target_uri,
&env.target_db_name,
@@ -54,6 +58,14 @@ async fn main() -> anyhow::Result<()> {
.await;
migrate::v1_6::migrate_all_in_place(&db).await?
}
Migration::V1_11 => {
let db = legacy::v1_11::DbClient::new(
&env.target_uri,
&env.target_db_name,
)
.await;
migrate::v1_11::migrate_all_in_place(&db).await?
}
}
info!("finished!");
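// Illustrative environment for the migrator (read by `envy`, which maps
// uppercase variables onto the `Env` fields above; values are examples only):
//
//   MIGRATION=v1.11
//   LEGACY_URI=mongodb://localhost:27017
//   LEGACY_DB_NAME=monitor-legacy
//   TARGET_URI=mongodb://localhost:27017
//   TARGET_DB_NAME=monitor
//
// The v1.6 and v1.11 migrations run in place against the target database;
// only the v0 migration actually reads from the legacy connection.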

View File

@@ -1,2 +1,3 @@
pub mod v0;
pub mod v1_11;
pub mod v1_6;

View File

@@ -7,7 +7,9 @@ use monitor_client::entities::{
update::{ResourceTarget, Update},
user::User,
};
use mungos::find::find_collect;
use mungos::{
find::find_collect, mongodb::options::InsertManyOptions,
};
use crate::legacy::v0;
@@ -28,13 +30,22 @@ pub async fn migrate_users(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.users, None, None)
.await
.context("failed to get existing target users")?;
let users = find_collect(&legacy_db.users, None, None)
.await
.context("failed to get legacy users")?
.into_iter()
.filter_map(|s| {
let username = s.username.clone();
s.try_into()
.filter_map(|user| {
if existing.iter().any(|u| u.username == user.username) {
return None;
}
let username = user.username.clone();
user
.try_into()
.inspect_err(|e| {
warn!("failed to convert user {username} | {e:#}")
})
@@ -46,7 +57,7 @@ pub async fn migrate_users(
target_db
.users
.insert_many(users, None)
.insert_many(users)
.await
.context("failed to insert users on target")?;
@@ -59,6 +70,10 @@ pub async fn migrate_servers(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.servers, None, None)
.await
.context("failed to get existing target servers")?;
let servers = find_collect(&legacy_db.servers, None, None)
.await
.context("failed to get legacy servers")?;
@@ -67,6 +82,10 @@ pub async fn migrate_servers(
let mut permissions = Vec::<Permission>::new();
for server in servers {
if existing.iter().any(|s| s.name == server.name) {
continue;
}
for (user_id, level) in &server.permissions {
let permission = Permission {
id: Default::default(),
@@ -91,7 +110,10 @@ pub async fn migrate_servers(
if !new_servers.is_empty() {
target_db
.servers
.insert_many(new_servers, None)
.insert_many(new_servers)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert servers on target")?;
}
@@ -99,7 +121,10 @@ pub async fn migrate_servers(
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert server permissions on target")?;
}
@@ -113,6 +138,10 @@ pub async fn migrate_deployments(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.deployments, None, None)
.await
.context("failed to get existing target deployments")?;
let deployments = find_collect(&legacy_db.deployments, None, None)
.await
.context("failed to get legacy deployments")?;
@@ -121,6 +150,10 @@ pub async fn migrate_deployments(
let mut permissions = Vec::<Permission>::new();
for deployment in deployments {
if existing.iter().any(|d| d.name == deployment.name) {
continue;
}
for (user_id, level) in &deployment.permissions {
let permission = Permission {
id: Default::default(),
@@ -147,7 +180,10 @@ pub async fn migrate_deployments(
if !new_deployments.is_empty() {
target_db
.deployments
.insert_many(new_deployments, None)
.insert_many(new_deployments)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert deployments on target")?;
}
@@ -155,7 +191,10 @@ pub async fn migrate_deployments(
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert deployment permissions on target")?;
}
@@ -169,6 +208,10 @@ pub async fn migrate_builds(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.builds, None, None)
.await
.context("failed to get existing target builds")?;
let builds = find_collect(&legacy_db.builds, None, None)
.await
.context("failed to get legacy builds")?;
@@ -177,6 +220,10 @@ pub async fn migrate_builds(
let mut permissions = Vec::<Permission>::new();
for build in builds {
if existing.iter().any(|b| b.name == build.name) {
continue;
}
for (user_id, level) in &build.permissions {
let permission = Permission {
id: Default::default(),
@@ -201,17 +248,29 @@ pub async fn migrate_builds(
if !new_builds.is_empty() {
target_db
.builds
.insert_many(new_builds, None)
.insert_many(new_builds)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert builds on target")?;
.inspect_err(|e| {
warn!("failed to insert builds on target | {e}")
})
.ok();
}
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert build permissions on target")?;
.inspect_err(|e| {
warn!("failed to insert build permissions on target | {e}")
})
.ok();
}
info!("builds have been migrated\n");
@@ -239,7 +298,8 @@ pub async fn migrate_updates(
target_db
.updates
.insert_many(updates, None)
.insert_many(updates)
.with_options(InsertManyOptions::builder().ordered(false).build())
.await
.context("failed to insert updates on target")?;

View File

@@ -0,0 +1,72 @@
use anyhow::Context;
use monitor_client::entities::{
build::Build, deployment::Deployment,
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, to_document},
};
use crate::legacy::v1_11;
pub async fn migrate_all_in_place(
db: &v1_11::DbClient,
) -> anyhow::Result<()> {
migrate_builds_in_place(db).await?;
migrate_deployments_in_place(db).await?;
Ok(())
}
pub async fn migrate_builds_in_place(
db: &v1_11::DbClient,
) -> anyhow::Result<()> {
let builds = find_collect(&db.builds, None, None)
.await
.context("failed to get builds")?
.into_iter()
.map(Into::into)
.collect::<Vec<Build>>();
info!("migrating {} builds...", builds.len());
for build in builds {
db.builds
.update_one(
doc! { "name": &build.name },
doc! { "$set": to_document(&build)? },
)
.await
.context("failed to insert builds on target")?;
}
info!("builds have been migrated\n");
Ok(())
}
pub async fn migrate_deployments_in_place(
db: &v1_11::DbClient,
) -> anyhow::Result<()> {
let deployments = find_collect(&db.deployments, None, None)
.await
.context("failed to get deployments")?
.into_iter()
.map(Into::into)
.collect::<Vec<Deployment>>();
info!("migrating {} deployments...", deployments.len());
for deployment in deployments {
db.deployments
.update_one(
doc! { "name": &deployment.name },
doc! { "$set": to_document(&deployment)? },
)
.await
.context("failed to insert deployments on target")?;
}
info!("deployments have been migrated\n");
Ok(())
}

View File

@@ -34,7 +34,6 @@ pub async fn migrate_deployments_in_place(
.update_one(
doc! { "name": &deployment.name },
doc! { "$set": to_document(&deployment)? },
None,
)
.await
.context("failed to insert deployments on target")?;
@@ -62,7 +61,6 @@ pub async fn migrate_builds_in_place(
.update_one(
doc! { "name": &build.name },
doc! { "$set": to_document(&build)? },
None,
)
.await
.context("failed to insert builds on target")?;

View File

@@ -17,28 +17,29 @@ path = "src/main.rs"
# local
monitor_client = { workspace = true, features = ["docker"] }
periphery_client.workspace = true
formatting.workspace = true
command.workspace = true
logger.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
async_timing_util.workspace = true
merge_config_files.workspace = true
parse_csl.workspace = true
run_command.workspace = true
svi.workspace = true
async_timing_util.workspace = true
resolver_api.workspace = true
run_command.workspace = true
parse_csl.workspace = true
svi.workspace = true
# external
tokio.workspace = true
axum.workspace = true
axum-extra.workspace = true
dotenv.workspace = true
envy.workspace = true
serde.workspace = true
serde_json.workspace = true
anyhow.workspace = true
bollard.workspace = true
clap.workspace = true
tracing.workspace = true
uuid.workspace = true
bollard.workspace = true
sysinfo.workspace = true
dotenv.workspace = true
anyhow.workspace = true
tokio.workspace = true
serde.workspace = true
axum.workspace = true
envy.workspace = true
clap.workspace = true
uuid.workspace = true

View File

@@ -1,31 +1,200 @@
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
server::docker_image::ImageSummary, update::Log,
};
use periphery_client::api::build::{
Build, GetImageList, PruneImages,
build::{Build, BuildConfig},
get_image_name, optional_string,
server::docker_image::ImageSummary,
to_monitor_name,
update::Log,
EnvironmentVar, Version,
};
use periphery_client::api::build::{self, GetImageList, PruneImages};
use resolver_api::Resolve;
use crate::{
docker::{self, client::docker_client},
config::periphery_config,
docker::docker_client,
helpers::{docker_login, parse_extra_args, parse_labels},
State,
};
impl Resolve<Build> for State {
#[instrument(name = "Build", skip(self, replacers))]
impl Resolve<build::Build> for State {
#[instrument(name = "Build", skip_all)]
async fn resolve(
&self,
Build {
build::Build {
build,
aws_ecr,
registry_token,
replacers,
}: Build,
replacers: core_replacers,
}: build::Build,
_: (),
) -> anyhow::Result<Vec<Log>> {
docker::build::build(&build, registry_token, replacers).await
let Build {
name,
config:
BuildConfig {
version,
skip_secret_interp,
build_path,
dockerfile_path,
build_args,
secret_args,
labels,
extra_args,
use_buildx,
image_registry,
..
},
..
} = &build;
let mut logs = Vec::new();
// Maybe docker login
let should_push = match docker_login(
image_registry,
registry_token.as_deref(),
aws_ecr.as_ref(),
)
.await
{
Ok(should_push) => should_push,
Err(e) => {
logs.push(Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
));
return Ok(logs);
}
};
let name = to_monitor_name(name);
// Get paths
let build_dir =
periphery_config().repo_dir.join(&name).join(build_path);
let dockerfile_path = match optional_string(dockerfile_path) {
Some(dockerfile_path) => dockerfile_path.to_owned(),
None => "Dockerfile".to_owned(),
};
// Get command parts
let image_name = get_image_name(&build, |_| aws_ecr)
.context("failed to make image name")?;
let build_args = parse_build_args(build_args);
let _secret_args =
parse_secret_args(secret_args, *skip_secret_interp)?;
let labels = parse_labels(labels);
let extra_args = parse_extra_args(extra_args);
let buildx = if *use_buildx { " buildx" } else { "" };
let image_tags = image_tags(&image_name, version);
let push_command = should_push
.then(|| {
format!(" && docker image push --all-tags {image_name}")
})
.unwrap_or_default();
// Construct command
let command = format!(
"cd {} && docker{buildx} build{build_args}{_secret_args}{extra_args}{labels}{image_tags} -f {dockerfile_path} .{push_command}",
build_dir.display()
);
if *skip_secret_interp {
let build_log =
run_monitor_command("docker build", command).await;
info!("finished building docker image");
logs.push(build_log);
} else {
// Interpolate any missing secrets
let (command, mut replacers) = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into docker build command",
)?;
replacers.extend(core_replacers);
let mut build_log =
run_monitor_command("docker build", command).await;
build_log.command =
svi::replace_in_string(&build_log.command, &replacers);
build_log.stdout =
svi::replace_in_string(&build_log.stdout, &replacers);
build_log.stderr =
svi::replace_in_string(&build_log.stderr, &replacers);
logs.push(build_log);
}
cleanup_secret_env_vars(secret_args);
Ok(logs)
}
}
fn image_tags(image_name: &str, version: &Version) -> String {
let Version { major, minor, .. } = version;
format!(
" -t {image_name}:latest -t {image_name}:{version} -t {image_name}:{major}.{minor} -t {image_name}:{major}",
)
}
fn parse_build_args(build_args: &[EnvironmentVar]) -> String {
build_args
.iter()
.map(|p| format!(" --build-arg {}=\"{}\"", p.variable, p.value))
.collect::<Vec<_>>()
.join("")
}
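// Illustrative result of the command assembled in `resolve` above, for a
// build named "my-build" pushing ghcr.io/my-org/my-build at version 1.2.3
// with buildx and push enabled (labels / extra args elided; the repo dir,
// arg names, and Version's Display output are assumptions):
//
//   cd /repos/my-build && docker buildx build --build-arg FOO="bar"
//     --secret id=TOKEN -t ghcr.io/my-org/my-build:latest
//     -t ghcr.io/my-org/my-build:1.2.3 -t ghcr.io/my-org/my-build:1.2
//     -t ghcr.io/my-org/my-build:1 -f Dockerfile .
//     && docker image push --all-tags ghcr.io/my-org/my-build
//
// (wrapped here for readability; the generated command is a single line)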
fn parse_secret_args(
secret_args: &[EnvironmentVar],
skip_secret_interp: bool,
) -> anyhow::Result<String> {
let periphery_config = periphery_config();
Ok(
secret_args
.iter()
.map(|EnvironmentVar { variable, value }| {
if variable.is_empty() {
return Err(anyhow!("secret variable cannot be empty string"))
} else if variable.contains('=') {
return Err(anyhow!("invalid variable {variable}. variable cannot contain '='"))
}
let value = if skip_secret_interp {
value.to_string()
} else {
svi::interpolate_variables(
value,
&periphery_config.secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate periphery secrets into build secrets",
)?.0
};
std::env::set_var(variable, value);
anyhow::Ok(format!(" --secret id={variable}"))
})
.collect::<anyhow::Result<Vec<_>>>()?
.join(""),
)
}
fn cleanup_secret_env_vars(secret_args: &[EnvironmentVar]) {
secret_args.iter().for_each(
|EnvironmentVar { variable, .. }| std::env::remove_var(variable),
)
}
//
impl Resolve<GetImageList> for State {
@@ -48,6 +217,7 @@ impl Resolve<PruneImages> for State {
_: PruneImages,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::build::prune_images().await)
let command = String::from("docker image prune -a -f");
Ok(run_monitor_command("prune images", command).await)
}
}

View File

@@ -1,13 +1,25 @@
use anyhow::anyhow;
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
deployment::{ContainerSummary, DockerContainerStats},
build::{ImageRegistry, StandardRegistryConfig},
deployment::{
extract_registry_domain, ContainerSummary, Conversion,
Deployment, DeploymentConfig, DeploymentImage,
DockerContainerStats, RestartMode, TerminationSignal,
},
to_monitor_name,
update::Log,
EnvironmentVar, NoData, SearchCombinator,
};
use periphery_client::api::container::*;
use resolver_api::Resolve;
use run_command::async_run_command;
use crate::{
docker::{self, client::docker_client},
config::periphery_config,
docker::docker_client,
helpers::{docker_login, parse_extra_args, parse_labels},
State,
};
@@ -34,10 +46,11 @@ impl Resolve<GetContainerLog> for State {
#[instrument(name = "GetContainerLog", level = "debug", skip(self))]
async fn resolve(
&self,
req: GetContainerLog,
GetContainerLog { name, tail }: GetContainerLog,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::container_log(&req.name, req.tail).await)
let command = format!("docker logs {name} --tail {tail}");
Ok(run_monitor_command("get container log", command).await)
}
}
@@ -59,12 +72,21 @@ impl Resolve<GetContainerLogSearch> for State {
}: GetContainerLogSearch,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::container_log_search(
&name, &terms, combinator, invert,
)
.await,
)
let maybe_invert = invert.then_some(" -v").unwrap_or_default();
let grep = match combinator {
SearchCombinator::Or => {
format!("grep{maybe_invert} -E '{}'", terms.join("|"))
}
SearchCombinator::And => {
format!(
"grep{maybe_invert} -P '^(?=.*{})'",
terms.join(")(?=.*")
)
}
};
let command =
format!("docker logs {name} --tail 5000 2>&1 | {grep}");
Ok(run_monitor_command("get container log grep", command).await)
}
}
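// Illustrative commands produced by the log search above, for name = "my-app"
// and terms = ["error", "panic"]:
//
//   Or:  docker logs my-app --tail 5000 2>&1 | grep -E 'error|panic'
//   And: docker logs my-app --tail 5000 2>&1 | grep -P '^(?=.*error)(?=.*panic)'
//
// `invert: true` inserts ` -v` after `grep`, dropping matching lines instead.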
@@ -82,8 +104,7 @@ impl Resolve<GetContainerStats> for State {
_: (),
) -> anyhow::Result<DockerContainerStats> {
let error = anyhow!("no stats matching {}", req.name);
let mut stats =
docker::container::container_stats(Some(req.name)).await?;
let mut stats = container_stats(Some(req.name)).await?;
let stats = stats.pop().ok_or(error)?;
Ok(stats)
}
@@ -102,7 +123,7 @@ impl Resolve<GetContainerStatsList> for State {
_: GetContainerStatsList,
_: (),
) -> anyhow::Result<Vec<DockerContainerStats>> {
docker::container::container_stats(None).await
container_stats(None).await
}
}
@@ -112,10 +133,16 @@ impl Resolve<StartContainer> for State {
#[instrument(name = "StartContainer", skip(self))]
async fn resolve(
&self,
req: StartContainer,
StartContainer { name }: StartContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::start_container(&req.name).await)
Ok(
run_monitor_command(
"docker start",
format!("docker start {name}"),
)
.await,
)
}
}
@@ -125,15 +152,26 @@ impl Resolve<StopContainer> for State {
#[instrument(name = "StopContainer", skip(self))]
async fn resolve(
&self,
req: StopContainer,
StopContainer { name, signal, time }: StopContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::stop_container(
&req.name, req.signal, req.time,
)
.await,
)
let command = stop_container_command(&name, signal, time);
let log = run_monitor_command("docker stop", command).await;
if log.stderr.contains("unknown flag: --signal") {
let command = stop_container_command(&name, None, time);
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
Ok(log)
} else {
Ok(log)
}
}
}
@@ -143,15 +181,31 @@ impl Resolve<RemoveContainer> for State {
#[instrument(name = "RemoveContainer", skip(self))]
async fn resolve(
&self,
req: RemoveContainer,
RemoveContainer { name, signal, time }: RemoveContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::stop_and_remove_container(
&req.name, req.signal, req.time,
)
.await,
)
let stop_command = stop_container_command(&name, signal, time);
let command =
format!("{stop_command} && docker container rm {name}");
let log =
run_monitor_command("docker stop and remove", command).await;
if log.stderr.contains("unknown flag: --signal") {
let stop_command = stop_container_command(&name, None, time);
let command =
format!("{stop_command} && docker container rm {name}");
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
Ok(log)
} else {
Ok(log)
}
}
}
@@ -161,16 +215,15 @@ impl Resolve<RenameContainer> for State {
#[instrument(name = "RenameContainer", skip(self))]
async fn resolve(
&self,
req: RenameContainer,
RenameContainer {
curr_name,
new_name,
}: RenameContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::rename_container(
&req.curr_name,
&req.new_name,
)
.await,
)
let new = to_monitor_name(&new_name);
let command = format!("docker rename {curr_name} {new}");
Ok(run_monitor_command("docker rename", command).await)
}
}
@@ -183,14 +236,18 @@ impl Resolve<PruneContainers> for State {
_: PruneContainers,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::prune_containers().await)
let command = String::from("docker container prune -f");
Ok(run_monitor_command("prune containers", command).await)
}
}
//
impl Resolve<Deploy> for State {
#[instrument(name = "Deploy", skip(self, replacers))]
#[instrument(
name = "Deploy",
skip(self, core_replacers, aws_ecr, registry_token)
)]
async fn resolve(
&self,
Deploy {
@@ -198,22 +255,218 @@ impl Resolve<Deploy> for State {
stop_signal,
stop_time,
registry_token,
replacers,
replacers: core_replacers,
aws_ecr,
}: Deploy,
_: (),
) -> anyhow::Result<Log> {
let res = docker::container::deploy(
&deployment,
stop_signal
.unwrap_or(deployment.config.termination_signal)
.into(),
stop_time
.unwrap_or(deployment.config.termination_timeout)
.into(),
registry_token,
replacers,
let image = if let DeploymentImage::Image { image } =
&deployment.config.image
{
if image.is_empty() {
return Ok(Log::error(
"get image",
String::from("deployment does not have image attached"),
));
}
image
} else {
return Ok(Log::error(
"get image",
String::from("deployment does not have image attached"),
));
};
let image_registry = if aws_ecr.is_some() {
ImageRegistry::AwsEcr(String::new())
} else if deployment.config.image_registry_account.is_empty() {
ImageRegistry::None(NoData {})
} else {
ImageRegistry::Standard(StandardRegistryConfig {
account: deployment.config.image_registry_account.clone(),
domain: extract_registry_domain(image)?,
..Default::default()
})
};
if let Err(e) = docker_login(
&image_registry,
registry_token.as_deref(),
aws_ecr.as_ref(),
)
.await;
Ok(res)
.await
{
return Ok(Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
));
}
let _ = pull_image(image).await;
debug!("image pulled");
let _ = State
.resolve(
RemoveContainer {
name: deployment.name.clone(),
signal: stop_signal,
time: stop_time,
},
(),
)
.await;
debug!("container stopped and removed");
let command = docker_run_command(&deployment, image);
debug!("docker run command: {command}");
if deployment.config.skip_secret_interp {
Ok(run_monitor_command("docker run", command).await)
} else {
let command = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into docker run command",
);
if let Err(e) = command {
return Ok(Log::error("docker run", format!("{e:?}")));
}
let (command, mut replacers) = command.unwrap();
replacers.extend(core_replacers);
let mut log = run_monitor_command("docker run", command).await;
log.command = svi::replace_in_string(&log.command, &replacers);
log.stdout = svi::replace_in_string(&log.stdout, &replacers);
log.stderr = svi::replace_in_string(&log.stderr, &replacers);
Ok(log)
}
}
}
//
fn docker_run_command(
Deployment {
name,
config:
DeploymentConfig {
volumes,
ports,
network,
command,
restart,
environment,
labels,
extra_args,
..
},
..
}: &Deployment,
image: &str,
) -> String {
let name = to_monitor_name(name);
let ports = parse_conversions(ports, "-p");
let volumes = volumes.to_owned();
let volumes = parse_conversions(&volumes, "-v");
let network = parse_network(network);
let restart = parse_restart(restart);
let environment = parse_environment(environment);
let labels = parse_labels(labels);
let command = parse_command(command);
let extra_args = parse_extra_args(extra_args);
format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}")
}
fn parse_conversions(
conversions: &[Conversion],
flag: &str,
) -> String {
conversions
.iter()
.map(|p| format!(" {flag} {}:{}", p.local, p.container))
.collect::<Vec<_>>()
.join("")
}
fn parse_environment(environment: &[EnvironmentVar]) -> String {
environment
.iter()
.map(|p| format!(" --env {}=\"{}\"", p.variable, p.value))
.collect::<Vec<_>>()
.join("")
}
fn parse_network(network: &str) -> String {
format!(" --network {network}")
}
fn parse_restart(restart: &RestartMode) -> String {
let restart = match restart {
RestartMode::OnFailure => "on-failure:10".to_string(),
_ => restart.to_string(),
};
format!(" --restart {restart}")
}
fn parse_command(command: &str) -> String {
if command.is_empty() {
String::new()
} else {
format!(" {command}")
}
}
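// Illustrative `docker run` produced by `docker_run_command` above, for a
// deployment named "my-app" with one port, one volume, the default `host`
// network, `on-failure` restart, and one env var (labels and extra args
// elided; all values are examples only):
//
//   docker run -d --name my-app -p 8080:80 -v /data:/data --network host
//     --restart on-failure:10 --env KEY="value" my-image:1.2.3
//
// (wrapped here for readability; the generated command is a single line)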
//
async fn container_stats(
container_name: Option<String>,
) -> anyhow::Result<Vec<DockerContainerStats>> {
let format = "--format \"{{ json . }}\"";
let container_name = match container_name {
Some(name) => format!(" {name}"),
None => "".to_string(),
};
let command =
format!("docker stats{container_name} --no-stream {format}");
let output = async_run_command(&command).await;
if output.success() {
let res = output
.stdout
.split('\n')
.filter(|e| !e.is_empty())
.map(|e| {
let parsed = serde_json::from_str(e)
.context(format!("failed at parsing entry {e}"))?;
Ok(parsed)
})
.collect::<anyhow::Result<Vec<DockerContainerStats>>>()?;
Ok(res)
} else {
Err(anyhow!("{}", output.stderr.replace('\n', "")))
}
}
#[instrument]
async fn pull_image(image: &str) -> Log {
let command = format!("docker pull {image}");
run_monitor_command("docker pull", command).await
}
fn stop_container_command(
container_name: &str,
signal: Option<TerminationSignal>,
time: Option<i32>,
) -> String {
let container_name = to_monitor_name(container_name);
let signal = signal
.map(|signal| format!(" --signal {signal}"))
.unwrap_or_default();
let time = time
.map(|time| format!(" --time {time}"))
.unwrap_or_default();
format!("docker stop{signal}{time} {container_name}")
}
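// Illustrative output of `stop_container_command` (the exact signal spelling
// depends on TerminationSignal's Display impl, and the name is passed
// through `to_monitor_name`):
//
//   stop_container_command("my-app", Some(signal), Some(30))
//     -> "docker stop --signal <signal> --time 30 my-app"
//   stop_container_command("my-app", None, None)
//     -> "docker stop my-app"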

View File

@@ -8,7 +8,7 @@ use periphery_client::api::git::{
use resolver_api::Resolve;
use crate::{
config::periphery_config, helpers::get_github_token, State,
config::periphery_config, helpers::get_git_token, State,
};
impl Resolve<GetLatestCommit, ()> for State {
@@ -31,22 +31,29 @@ impl Resolve<CloneRepo> for State {
#[instrument(name = "CloneRepo", skip(self))]
async fn resolve(
&self,
CloneRepo { args, github_token }: CloneRepo,
CloneRepo { args, git_token }: CloneRepo,
_: (),
) -> anyhow::Result<Vec<Log>> {
let CloneArgs { github_account, .. } = &args;
let github_token = match (github_account, github_token) {
(None, _) => None,
(Some(_), Some(token)) => Some(token),
(Some(account), None) => Some(
get_github_token(account)
.context(
"failed to get github token from periphery config",
let CloneArgs {
provider, account, ..
} = &args;
let token = match (account, provider, git_token) {
(None, _, _) => None,
(Some(_), None, _) => {
return Err(anyhow!(
"got incoming git account but no git provider"
))
}
(Some(_), Some(_), Some(token)) => Some(token),
(Some(account), Some(provider), None) => Some(
get_git_token(provider, account)
.with_context(
|| format!("failed to get git token from periphery config | provider: {provider} | account: {account}")
)?
.clone(),
),
};
git::clone(args, &periphery_config().repo_dir, github_token).await
git::clone(args, &periphery_config().repo_dir, token).await
}
}
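// Summary of the token resolution in the match above:
//   no account                         -> clone without a token
//   account but no provider            -> error
//   account + provider + passed token  -> use the token sent from core
//   account + provider, no token       -> look it up in the periphery config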

Some files were not shown because too many files have changed in this diff.