Compare commits

...

148 Commits

Author SHA1 Message Date
Maxwell Becker
8c6f38cafb v1.11 Improve permission management (#6)
* add "all permissions" feature on user and user group schema

* prepare support for group all

* implement user.all and user_group.all for broad base permissioning

* clean up unused deps

* sync support user group permissions regex

* 1.11

* fix fe ? issue

* this doesn't work

* sync handle user group all set

* retain above non earlier

* remove permissions that already exist

* update docs

* add user group docs

* minimize user group permissions for execute

* sync toml

* add sync name to slack alert title

* add syncs to alerter white/blacklist

* use \\ instead of $reg

* share resource type base permissions api users and user groups

* manage user / group base permissions ui

* manage user / group base resource type permissions

* update api permission handling

* manage all resource permissions in table

* user show group membership

* update client to 1.11
2024-07-19 02:11:36 -07:00
mbecker20
4a03eba99a granular invalidations 2024-07-17 14:51:51 -07:00
mbecker20
79fe078e3b 1.10.5 cpu/mem only update alert if severity increases (or resolved) 2024-07-17 14:36:22 -07:00
mbecker20
6be032fcd4 update client to 1.10.4 2024-07-16 16:06:38 -07:00
mbecker20
d0c94278ec 1.10.4 fix EnvVar parsing when value contains '=' 2024-07-16 16:05:11 -07:00
mbecker20
03ae7268fd fix server table search when sorting by deployments 2024-07-10 12:09:42 -07:00
mbecker20
f443294818 add clear link to api docs 2024-07-10 02:33:14 -07:00
mbecker20
2202835d86 improve core setup docs 2024-07-10 02:26:58 -07:00
mbecker20
98fbc7a506 improve migrator and add Dockerfile 2024-07-10 02:25:44 -07:00
mbecker20
8ee89296e1 frontend only invalidate on update Complete 2024-07-09 13:50:03 -07:00
mbecker20
989c3d2d01 more compact webhook button labels 2024-07-09 02:26:50 -07:00
mbecker20
dc72883b90 update config example 2024-07-09 02:09:17 -07:00
mbecker20
e99364430f update local client version 2024-07-09 02:06:30 -07:00
mbecker20
e106e38cd9 1.10.3 support multiple github webhook app installations 2024-07-09 02:05:38 -07:00
mbecker20
e4d0c56e49 debug git logs 2024-07-09 00:50:24 -07:00
mbecker20
7427a158f4 full err too large for alert 2024-07-09 00:40:11 -07:00
mbecker20
b926f89954 log on build unsuccessful and alerting 2024-07-09 00:20:03 -07:00
mbecker20
e666a22f08 debug instrument git calls 2024-07-09 00:09:06 -07:00
mbecker20
4107f779a5 fix build increment major version 2024-07-08 13:15:52 -07:00
mbecker20
828d6cdfed improve responsive 2024-07-05 20:19:20 -07:00
mbecker20
fe82400a99 1.10.2 ResourceSync manage repo webhooks 2024-07-05 20:02:20 -07:00
mbecker20
e37fc6adde publish 1.10.1 2024-07-05 03:32:24 -07:00
mbecker20
c21c8f99ae manage webhooks working 2024-07-05 03:29:23 -07:00
mbecker20
78a63f92bb build repo webhook management 2024-07-05 03:17:29 -07:00
mbecker20
ce67655021 core info provide owners 2024-07-05 02:26:18 -07:00
mbecker20
2ccecf38f2 default pk path /github/private-key.pem 2024-07-05 02:15:35 -07:00
mbecker20
1ddae31aad update config example 2024-07-05 02:06:27 -07:00
mbecker20
097fbefa63 1.10.1 2024-07-05 02:02:59 -07:00
mbecker20
b51442a661 ts types 2024-07-05 02:02:25 -07:00
mbecker20
a21d49d224 build / repo webhook write api 2024-07-05 02:02:03 -07:00
mbecker20
c99a33880e Create / Delete webhook api 2024-07-05 01:31:15 -07:00
mbecker20
6ee55262ba webhook management api aware if repo can be managed 2024-07-05 01:18:21 -07:00
mbecker20
878b9b55bb see whether webhooks enabled 2024-07-05 01:05:27 -07:00
mbecker20
af6193f83a update async_timing_util 2024-07-04 21:15:38 -07:00
mbecker20
b8fefddd8b EC2 2024-07-04 19:13:49 -07:00
mbecker20
7f490f5bf2 tweak 2024-07-04 19:12:02 -07:00
mbecker20
efa7c13286 docs 2024-07-04 19:08:48 -07:00
mbecker20
f913be7a0b builder setup guide 2024-07-04 19:03:43 -07:00
mbecker20
35901ef7ea actions can wrap 2024-07-04 17:53:24 -07:00
mbecker20
5b938490fc response 2024-07-04 17:29:45 -07:00
mbecker20
a7326a0116 user group toml export replace target ids with names 2024-07-04 17:10:36 -07:00
mbecker20
877bda91d7 improve log responsiveness 2024-07-04 16:49:08 -07:00
mbecker20
439a091e50 improve resource responsive 2024-07-04 16:29:13 -07:00
mbecker20
b0e89f4963 fix dashboard 2024-07-04 15:46:43 -07:00
mbecker20
b1e4b55ba1 more responsive 2024-07-04 14:41:40 -07:00
mbecker20
d4a1891c70 delete user group 2024-07-04 14:17:03 -07:00
mbecker20
9db7592d7e all_resources tables use right search 2024-07-04 01:25:40 -07:00
mbecker20
84fb603951 1.10 2024-07-01 03:18:26 -07:00
mbecker20
55bac0dd13 check right thing for empty 2024-07-01 03:12:22 -07:00
mbecker20
b143f42363 update mungos 2024-07-01 02:47:06 -07:00
mbecker20
007efd136a 1.10.0 pre 2024-07-01 02:38:24 -07:00
mbecker20
b329767f9e 1.10.0-pre-0 2024-07-01 02:33:01 -07:00
mbecker20
b4231957d5 config for secret args 2024-07-01 02:31:53 -07:00
mbecker20
b4dc446f95 interpolate core variables / secrets into build secret_args 2024-07-01 02:27:03 -07:00
mbecker20
c92515cecc combine into router 2024-07-01 01:44:07 -07:00
mbecker20
f3712feea2 finish periphery clean 2024-07-01 01:39:03 -07:00
mbecker20
0e81d17860 shrink periphery implementation 2024-07-01 01:19:25 -07:00
mbecker20
c3f1557b83 fix mem alert 2024-06-30 00:27:37 -07:00
mbecker20
5f88e4b436 seperate webhook actions 2024-06-25 01:22:38 -07:00
mbecker20
473c6b3867 dont send failed build alert on build cancel 2024-06-24 16:59:34 -07:00
mbecker20
c10edaa5d1 fix builder toml export 2024-06-23 03:00:31 -07:00
mbecker20
9418a6d963 update client to 1.9.0 2024-06-23 02:30:50 -07:00
mbecker20
57646b750f clean up 2024-06-23 02:29:47 -07:00
mbecker20
0d57f9411c can deploy ecr 2024-06-23 02:27:19 -07:00
mbecker20
7d396dd539 clean up ecr 2024-06-23 02:22:14 -07:00
mbecker20
bfe762b71a install unzip 2024-06-23 01:37:12 -07:00
mbecker20
16ede84bac install aws cli core 2024-06-23 01:31:15 -07:00
mbecker20
4524db94db get ecr token using cli 2024-06-23 01:23:56 -07:00
mbecker20
580dab4acd improve error log formatting 2024-06-23 01:02:52 -07:00
mbecker20
645382856a update only flattens one level deep 2024-06-22 23:56:01 -07:00
mbecker20
5c4e6a6dbb select aws config 2024-06-22 23:33:35 -07:00
mbecker20
66810e1efb add method to get availabel aws ecr labels 2024-06-22 23:29:02 -07:00
mbecker20
69a84882f0 1.9.0 2024-06-22 23:06:53 -07:00
mbecker20
41648436a5 default periphery method fields 2024-06-22 22:59:51 -07:00
mbecker20
083a88aa7b implement aws ecr image registry 2024-06-22 22:57:26 -07:00
mbecker20
750f95c90d improve shortcut menu 2024-06-22 18:24:38 -07:00
mbecker20
129f3ecd82 add more kb shortcuts and shortcut menu 2024-06-22 02:56:57 -07:00
mbecker20
1b754f80ab fix double emojis 2024-06-22 01:54:45 -07:00
mbecker20
968a882012 fix alerter table 2024-06-22 01:29:31 -07:00
mbecker20
696ebdb26f label blacklist correctly 2024-06-22 01:25:38 -07:00
mbecker20
8fee04607d imporve slack alerting 2024-06-22 01:10:13 -07:00
mbecker20
6fe250244b add alerter blacklist 2024-06-22 00:30:43 -07:00
mbecker20
b530af0eec send_alerts for sync alert 2024-06-21 23:09:38 -07:00
mbecker20
21e9361079 remove unused 2024-06-21 02:28:35 -07:00
mbecker20
524d2d956b fix alerts usage 2024-06-21 02:23:42 -07:00
mbecker20
aca9633941 add links and errors to slack messages 2024-06-21 01:12:46 -07:00
mbecker20
32e1bd2dda add badges for tag filter shortcuts 2024-06-21 00:15:40 -07:00
mbecker20
cb363d1559 add shift + T and shift + C to manage tags 2024-06-20 23:51:12 -07:00
mbecker20
63eb74b9c8 Add and configure build alerts 2024-06-20 23:41:28 -07:00
mbecker20
bbcc27704f bump rust builder version 2024-06-16 16:00:57 -07:00
mbecker20
0aa9513dd0 1.8.0 2024-06-16 15:36:51 -07:00
mbecker20
26b216b478 add resources page 2024-06-16 15:33:31 -07:00
mbecker20
166299bb57 sync docs 2024-06-16 14:35:09 -07:00
mbecker20
03c47eb3dc remove cli sync 2024-06-16 01:41:54 -07:00
mbecker20
1fcb4ad085 move / update changelog 2024-06-16 01:41:15 -07:00
mbecker20
f51af8fbe1 docs 2024-06-16 01:34:08 -07:00
mbecker20
4a975e1b92 update resource sync docs 2024-06-16 01:33:05 -07:00
mbecker20
ba556e3284 fix doc link 2024-06-16 00:31:23 -07:00
mbecker20
299a326942 log build has new version 2024-06-16 00:20:22 -07:00
mbecker20
a5d4b9aefb add cached results reasons 2024-06-16 00:04:05 -07:00
mbecker20
40b820ae42 add reason to deploy logs 2024-06-15 22:01:14 -07:00
mbecker20
7028bf2996 remove termination_signal for tokio signal 2024-06-15 21:48:54 -07:00
mbecker20
75ebd0e6c0 fix fe cancel logic error 2024-06-15 21:36:26 -07:00
mbecker20
426153df66 try improve toml parse error message 2024-06-15 21:33:53 -07:00
mbecker20
5bd423a6a6 sync deploy new build 2024-06-15 21:15:17 -07:00
mbecker20
c24131d383 nested propogate read resources error 2024-06-15 20:37:29 -07:00
mbecker20
9f54b6c26a 1.8.0. improve env config UI, add sync deploy state management 2024-06-15 20:15:33 -07:00
mbecker20
ab8ae51ece slight more colors 2024-06-15 20:14:25 -07:00
mbecker20
ef2a83ff16 add colors to procedure logs 2024-06-15 20:06:34 -07:00
mbecker20
7872771aee clean up sync log 2024-06-15 19:45:53 -07:00
mbecker20
b12cf858d8 sync deploy logs need \n 2024-06-15 19:36:46 -07:00
mbecker20
38dba91c3a sync deploy accounts for any dependencies in 'after' need deploy 2024-06-15 19:20:45 -07:00
mbecker20
ea8136aa57 add sync deployment state log 2024-06-15 17:31:49 -07:00
mbecker20
f956e12e28 move formatting to shared lib 2024-06-15 17:15:05 -07:00
mbecker20
207ea52b95 add finished log 2024-06-15 17:12:02 -07:00
mbecker20
caf28d3a26 sync deploy 2024-06-15 17:03:16 -07:00
mbecker20
8fff45649d implement sync deployment get updates for view with deploy action 2024-06-15 15:50:10 -07:00
mbecker20
de5df70e11 invert search FE 2024-06-15 00:58:03 -07:00
mbecker20
3df010ac2a read req error debug 2024-06-15 00:54:11 -07:00
mbecker20
2d3beb708e invert logs 2024-06-15 00:28:04 -07:00
mbecker20
1dc22d01c4 improve execute instrumentation 2024-06-15 00:20:28 -07:00
mbecker20
eb029d0408 clone repo to specific directory on host 2024-06-14 23:43:47 -07:00
mbecker20
f926932181 build / deployment env variable / secret selectors 2024-06-14 23:28:08 -07:00
mbecker20
cc96d80c6a string deser filter empty lines 2024-06-14 22:20:39 -07:00
mbecker20
144b49495c string deser can handle empty string 2024-06-14 22:15:02 -07:00
mbecker20
de9354bdc7 frontend manage env with string 2024-06-14 22:10:07 -07:00
mbecker20
38bfee84d7 read resources propogate error 2024-06-14 21:53:13 -07:00
mbecker20
ec33d9fb9e trim incoming value env var string, conversion string, before deserialize 2024-06-14 21:42:59 -07:00
mbecker20
0a66937b1d fix unused liniting 2024-06-14 21:30:10 -07:00
mbecker20
43cc0c3bc1 remove @ in format date 2024-06-14 14:48:22 -07:00
mbecker20
c14b395c70 quick copy variable value 2024-06-12 12:15:29 -07:00
mbecker20
7b8529a7c6 tweak colors 2024-06-12 11:55:06 -07:00
mbecker20
547c089581 update colors 2024-06-12 11:53:39 -07:00
mbecker20
4fe5e461b3 use stroke for icons 2024-06-12 03:48:47 -07:00
mbecker20
edfb873f7c improve error logs 2024-06-12 03:22:51 -07:00
mbecker20
5ef5294c44 remove onkeydown causing redundant create 2024-06-12 03:15:07 -07:00
mbecker20
5d3c50e04f reorder procedure config table 2024-06-12 02:47:41 -07:00
mbecker20
f10efbb5ba add bg to body 2024-06-12 02:39:26 -07:00
mbecker20
39ce98161b add the colors, always plz 2024-06-12 02:21:49 -07:00
mbecker20
cff6e79eee fix omnibar all resource types 2024-06-12 01:46:30 -07:00
mbecker20
dedf22ede8 continue on disabled stage 2024-06-12 01:25:10 -07:00
mbecker20
6955b92a99 add same colors in update 2024-06-12 01:15:39 -07:00
mbecker20
5c63eeab02 better sync coloring 2024-06-12 01:13:33 -07:00
mbecker20
4c14a4ae20 create variable log skip description line if it's empty 2024-06-12 00:39:23 -07:00
mbecker20
29fd856a2d deal with deployment build version 2024-06-11 03:07:56 -07:00
mbecker20
195bdbd94a fix " to \" 2024-06-11 02:14:57 -07:00
mbecker20
298ccd945c improve export dialog sizing 2024-06-11 01:42:06 -07:00
mbecker20
436e4e79e9 toml include ResourceSync 2024-06-11 01:09:37 -07:00
212 changed files with 9160 additions and 3989 deletions

773
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,7 @@ resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.7.3"
version = "1.11.0"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -15,8 +15,9 @@ monitor_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_client = "1.7.3"
monitor_client = "1.11.0"
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
logger = { path = "lib/logger" }
git = { path = "lib/git" }
@@ -28,20 +29,19 @@ slack = { version = "0.1.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
termination_signal = "0.1.3"
async_timing_util = "0.1.14"
async_timing_util = "1.0.0"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "0.3.0"
resolver_api = "1.1.0"
toml_pretty = "1.1.1"
mongo_indexed = "1.0.0"
resolver_api = "1.1.1"
toml_pretty = "1.1.2"
parse_csl = "0.1.0"
mungos = "0.5.6"
mungos = "1.0.0"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.38.0", features = ["full"] }
reqwest = { version = "0.12.4", features = ["json"] }
reqwest = { version = "0.12.5", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
futures-util = "0.3.30"
@@ -51,14 +51,14 @@ axum = { version = "0.7.5", features = ["ws", "json"] }
axum-extra = { version = "0.9.3", features = ["typed-header"] }
tower = { version = "0.4.13", features = ["timeout"] }
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
tokio-tungstenite = "0.23.0"
tokio-tungstenite = "0.23.1"
# SER/DE
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.203", features = ["derive"] }
strum = { version = "0.26.2", features = ["derive"] }
serde_json = "1.0.117"
toml = "0.8.13"
serde_json = "1.0.118"
toml = "0.8.14"
# ERROR
anyhow = "1.0.86"
@@ -73,13 +73,14 @@ opentelemetry = "0.23.0"
tracing = "0.1.40"
# CONFIG
clap = { version = "4.5.4", features = ["derive"] }
clap = { version = "4.5.7", features = ["derive"] }
dotenv = "0.15.0"
envy = "0.4.2"
# CRYPTO
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
uuid = { version = "1.9.1", features = ["v4", "fast-rng", "serde"] }
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.15.1"
base64 = "0.22.1"
hmac = "0.12.1"
@@ -93,11 +94,15 @@ bollard = "0.16.1"
sysinfo = "0.30.12"
# CLOUD
aws-config = "1.5.0"
aws-sdk-ec2 = "1.46.0"
aws-config = "1.5.3"
aws-sdk-ec2 = "1.53.0"
aws-sdk-ecr = "1.33.0"
# MISC
derive_builder = "0.20.0"
typeshare = "1.0.3"
octorust = "0.7.0"
colored = "2.1.0"
bson = "2.10.0"
regex = "1.10.5"
bson = "2.11.0"

View File

@@ -13,8 +13,6 @@ repository.workspace = true
# local
monitor_client.workspace = true
logger.workspace = true
# mogh
termination_signal.workspace = true
# external
tokio.workspace = true
tracing.workspace = true

View File

@@ -9,7 +9,6 @@ use monitor_client::entities::{
alert::Alert, server::stats::SeverityLevel,
};
use serde::Deserialize;
use termination_signal::tokio::immediate_term_handle;
#[derive(Deserialize)]
struct Env {
@@ -57,13 +56,15 @@ async fn app() -> anyhow::Result<()> {
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let term_signal = immediate_term_handle()?;
let mut term_signal = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::terminate(),
)?;
let app = tokio::spawn(app());
tokio::select! {
res = app => return res?,
_ = term_signal => {},
res = app => return res?,
_ = term_signal.recv() => {},
}
Ok(())

View File

@@ -84,7 +84,7 @@ impl ResourceSync for Deployment {
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: version.clone(),
version: *version,
};
}

View File

@@ -80,15 +80,25 @@ pub fn get_updates(
to_update.push(item);
}
None => {
println!(
"\n{}: variable: {}\n{}: {}\n{}: {}",
"CREATE".green(),
variable.name.bold().green(),
"description".dimmed(),
variable.description,
"value".dimmed(),
variable.value,
);
if variable.description.is_empty() {
println!(
"\n{}: variable: {}\n{}: {}",
"CREATE".green(),
variable.name.bold().green(),
"value".dimmed(),
variable.value,
);
} else {
println!(
"\n{}: variable: {}\n{}: {}\n{}: {}",
"CREATE".green(),
variable.name.bold().green(),
"description".dimmed(),
variable.description,
"value".dimmed(),
variable.value,
);
}
to_create.push(variable)
}
}

View File

@@ -17,18 +17,19 @@ path = "src/main.rs"
# local
monitor_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
formatting.workspace = true
logger.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
merge_config_files.workspace = true
termination_signal.workspace = true
async_timing_util.workspace = true
partial_derive2.workspace = true
derive_variants.workspace = true
mongo_indexed.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
run_command.workspace = true
parse_csl.workspace = true
mungos.workspace = true
slack.workspace = true
@@ -37,15 +38,18 @@ svi.workspace = true
ordered_hash_map.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-sdk-ecr.workspace = true
aws-config.workspace = true
tokio-util.workspace = true
axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
typeshare.workspace = true
octorust.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
nom_pem.workspace = true
anyhow.workspace = true
dotenv.workspace = true
bcrypt.workspace = true
@@ -53,6 +57,8 @@ base64.workspace = true
tokio.workspace = true
tower.workspace = true
serde.workspace = true
strum.workspace = true
regex.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -1,5 +1,5 @@
# Build Core
FROM rust:1.78.0-bookworm as core-builder
FROM rust:1.79.0-bookworm as core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_core --release
@@ -16,8 +16,11 @@ RUN cd frontend && yarn link @monitor/client && yarn && yarn build
FROM debian:bookworm-slim
# Install Deps
RUN apt update && apt install -y git ca-certificates
RUN apt update && apt install -y git curl unzip ca-certificates && \
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install
# Copy
COPY ./config_example/core.config.example.toml /config/config.toml
COPY --from=core-builder /builder/target/release/core /

View File

@@ -1,20 +1,24 @@
use std::{collections::HashSet, time::Duration};
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::{format_serror, muted};
use futures::future::join_all;
use monitor_client::{
api::execute::{
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
},
entities::{
alert::{Alert, AlertData},
all_logs_success,
build::{Build, CloudRegistryConfig, ImageRegistry},
builder::{AwsBuilderConfig, Builder, BuilderConfig},
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
deployment::DeploymentState,
monitor_timestamp,
permission::PermissionLevel,
server::Server,
server::{stats::SeverityLevel, Server},
server_template::aws::AwsServerTemplateConfig,
to_monitor_name,
update::{Log, Update},
user::{auto_redeploy_user, User},
},
@@ -32,19 +36,22 @@ use periphery_client::{
PeripheryClient,
};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use tokio_util::sync::CancellationToken;
use crate::{
cloud::{
aws::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
ecr,
},
BuildCleanupData,
},
config::core_config,
helpers::{
alert::send_alerts,
channel::build_cancel_channel,
periphery_client,
query::{get_deployment_state, get_global_variables},
@@ -59,7 +66,7 @@ use crate::helpers::update::init_execution_update;
use super::ExecuteRequest;
impl Resolve<RunBuild, (User, Update)> for State {
#[instrument(name = "RunBuild", skip(self, user))]
#[instrument(name = "RunBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunBuild { build }: RunBuild,
@@ -72,8 +79,8 @@ impl Resolve<RunBuild, (User, Update)> for State {
)
.await?;
let registry_token =
validate_account_extract_registry_token(&build)?;
let (registry_token, aws_ecr) =
validate_account_extract_registry_token_aws_ecr(&build).await?;
// get the action state for the build (or insert default).
let action_state =
@@ -85,7 +92,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
action_state.update(|state| state.building = true)?;
build.config.version.increment();
update.version = build.config.version.clone();
update.version = build.config.version;
update_update(update.clone()).await?;
let cancel = CancellationToken::new();
@@ -102,10 +109,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
id = cancel_recv.recv() => id?
};
if incoming_build_id == build_id {
info!("build cancel acknowledged");
update.push_simple_log(
"cancel acknowledged",
"the build cancellation has been queud, it may still take some time",
"the build cancellation has been queued, it may still take some time",
);
update.finalize();
let id = update.id.clone();
@@ -137,9 +143,12 @@ impl Resolve<RunBuild, (User, Update)> for State {
warn!("failed to get builder | {e:#}");
update.logs.push(Log::error(
"get builder",
serialize_error_pretty(&e),
format_serror(&e.context("failed to get builder").into()),
));
return handle_early_return(update).await;
return handle_early_return(
update, build.id, build.name, false,
)
.await;
}
};
@@ -165,7 +174,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update).await
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -176,8 +185,10 @@ impl Resolve<RunBuild, (User, Update)> for State {
}
Err(e) => {
warn!("failed build at clone repo | {e:#}");
update
.push_error_log("clone repo", serialize_error_pretty(&e));
update.push_error_log(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
}
}
@@ -187,6 +198,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut secret_replacers_for_log = HashSet::new();
// Interpolate into build args
for arg in &mut build.config.build_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
@@ -205,10 +219,40 @@ impl Resolve<RunBuild, (User, Update)> for State {
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.iter().map(|(_, variable)| variable.clone()),
);
secret_replacers.extend(more_replacers);
arg.value = res;
}
// Interpolate into secret args
for arg in &mut build.config.secret_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.into_iter().map(|(_, variable)| variable),
);
// Secret args don't need to be in replacers sent to periphery.
// The secret args don't end up in the command like build args do.
arg.value = res;
}
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
@@ -220,12 +264,12 @@ impl Resolve<RunBuild, (User, Update)> for State {
.join("\n"),
);
}
if !secret_replacers.is_empty() {
if !secret_replacers_for_log.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
secret_replacers_for_log
.into_iter()
.map(|variable| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
@@ -236,6 +280,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
.request(api::build::Build {
build: build.clone(),
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
}) => res.context("failed at call to periphery to build"),
_ = cancel.cancelled() => {
@@ -243,7 +288,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update).await
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -254,7 +299,10 @@ impl Resolve<RunBuild, (User, Update)> for State {
}
Err(e) => {
warn!("error in build | {e:#}");
update.push_error_log("build", serialize_error_pretty(&e))
update.push_error_log(
"build",
format_serror(&e.context("failed to build").into()),
)
}
};
}
@@ -275,7 +323,6 @@ impl Resolve<RunBuild, (User, Update)> for State {
"info.last_built_at": monitor_timestamp(),
}
},
None,
)
.await;
}
@@ -307,7 +354,26 @@ impl Resolve<RunBuild, (User, Update)> for State {
// don't hold response up for user
tokio::spawn(async move {
handle_post_build_redeploy(&build.id).await;
info!("post build redeploy handled");
});
} else {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build.id,
name: build.name,
version,
},
};
send_alerts(&[alert]).await
});
}
@@ -318,6 +384,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
#[instrument(skip(update))]
async fn handle_early_return(
mut update: Update,
build_id: String,
build_name: String,
is_cancel: bool,
) -> anyhow::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
@@ -335,9 +404,31 @@ async fn handle_early_return(
refresh_build_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build_id,
name: build_name,
version,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
@@ -347,24 +438,28 @@ pub async fn validate_cancel_build(
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates.find_one(
doc! {
db.updates
.find_one(doc! {
"operation": "RunBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
),
db.updates.find_one(
doc! {
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future(),
db.updates
.find_one(doc! {
"operation": "CancelBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
)
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future()
)?;
match (latest_build, latest_cancel) {
@@ -381,7 +476,7 @@ pub async fn validate_cancel_build(
}
impl Resolve<CancelBuild, (User, Update)> for State {
#[instrument(name = "CancelBuild", skip(self, user))]
#[instrument(name = "CancelBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
CancelBuild { build }: CancelBuild,
@@ -636,20 +731,12 @@ async fn handle_post_build_redeploy(build_id: &str) {
}
});
let redeploy_results = join_all(futures).await;
let mut redeploys = Vec::<String>::new();
let mut redeploy_failures = Vec::<String>::new();
for res in redeploy_results {
if res.is_none() {
for res in join_all(futures).await {
let Some((id, res)) = res else {
continue;
}
let (id, res) = res.unwrap();
match res {
Ok(_) => redeploys.push(id),
Err(e) => redeploy_failures
.push(format!("{id}: {}", serialize_error_pretty(&e))),
};
if let Err(e) = res {
warn!("failed post build redeploy for deployment {id}: {e:#}");
}
}
}
@@ -672,16 +759,28 @@ fn start_aws_builder_log(
let readable_sec_group_ids = security_group_ids.join(", ");
format!("instance id: {instance_id}\nip: {ip}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}\nassign public ip: {assign_public_ip}\nuse public ip: {use_public_ip}")
[
format!("{}: {instance_id}", muted("instance id")),
format!("{}: {ip}", muted("ip")),
format!("{}: {ami_id}", muted("ami id")),
format!("{}: {instance_type}", muted("instance type")),
format!("{}: {volume_gb} GB", muted("volume size")),
format!("{}: {subnet_id}", muted("subnet id")),
format!("{}: {readable_sec_group_ids}", muted("security groups")),
format!("{}: {assign_public_ip}", muted("assign public ip")),
format!("{}: {use_public_ip}", muted("use public ip")),
]
.join("\n")
}
/// This will make sure that a build with non-none image registry has an account attached,
/// and will check the core config for a token matching requirements (otherwise it is left to periphery)
fn validate_account_extract_registry_token(
/// and will check the core config for a token / aws ecr config matching requirements.
/// Otherwise it is left to periphery.
async fn validate_account_extract_registry_token_aws_ecr(
build: &Build,
) -> anyhow::Result<Option<String>> {
) -> anyhow::Result<(Option<String>, Option<AwsEcrConfig>)> {
match &build.config.image_registry {
ImageRegistry::None(_) => Ok(None),
ImageRegistry::None(_) => Ok((None, None)),
ImageRegistry::DockerHub(CloudRegistryConfig {
account, ..
}) => {
@@ -690,7 +789,7 @@ fn validate_account_extract_registry_token(
"Must attach account to use DockerHub image registry"
));
}
Ok(core_config().docker_accounts.get(account).cloned())
Ok((core_config().docker_accounts.get(account).cloned(), None))
}
ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
if account.is_empty() {
@@ -698,8 +797,40 @@ fn validate_account_extract_registry_token(
"Must attach account to use GithubContainerRegistry"
));
}
Ok(core_config().github_accounts.get(account).cloned())
Ok((core_config().github_accounts.get(account).cloned(), None))
}
ImageRegistry::AwsEcr(label) => {
let config = core_config().aws_ecr_registries.get(label);
let token = match config {
Some(AwsEcrConfigWithCredentials {
region,
access_key_id,
secret_access_key,
..
}) => {
let token = ecr::get_ecr_token(
region,
access_key_id,
secret_access_key,
)
.await
.context("failed to get aws ecr token")?;
ecr::maybe_create_repo(
&to_monitor_name(&build.name),
region.to_string(),
access_key_id,
secret_access_key,
)
.await
.context("failed to create aws ecr repo")?;
Some(token)
}
None => None,
};
Ok((token, config.map(AwsEcrConfig::from)))
}
ImageRegistry::Custom(_) => {
Err(anyhow!("Custom image registry is not implemented"))
}
ImageRegistry::Custom(_) => todo!(),
}
}

View File

@@ -1,11 +1,13 @@
use std::collections::HashSet;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::execute::*,
entities::{
build::{Build, ImageRegistry},
config::core::AwsEcrConfig,
deployment::{Deployment, DeploymentImage},
get_image_name,
permission::PermissionLevel,
@@ -18,9 +20,9 @@ use monitor_client::{
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
cloud::aws::ecr,
config::core_config,
helpers::{
periphery_client,
@@ -35,7 +37,7 @@ use crate::{
use crate::helpers::update::init_execution_update;
impl Resolve<Deploy, (User, Update)> for State {
#[instrument(name = "Deploy", skip(self, user))]
#[instrument(name = "Deploy", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
Deploy {
@@ -83,7 +85,13 @@ impl Resolve<Deploy, (User, Update)> for State {
let version = match deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(&build_id).await?;
let image_name = get_image_name(&build);
let image_name = get_image_name(&build, |label| {
core_config()
.aws_ecr_registries
.get(label)
.map(AwsEcrConfig::from)
})
.context("failed to create image name")?;
let version = if version.is_none() {
build.config.version
} else {
@@ -161,13 +169,40 @@ impl Resolve<Deploy, (User, Update)> for State {
update.version = version;
update_update(update.clone()).await?;
let registry_token = match &deployment.config.image_registry {
ImageRegistry::None(_) => None,
ImageRegistry::DockerHub(params) => {
core_config.docker_accounts.get(&params.account).cloned()
}
ImageRegistry::Ghcr(params) => {
core_config.github_accounts.get(&params.account).cloned()
let (registry_token, aws_ecr) = match &deployment
.config
.image_registry
{
ImageRegistry::None(_) => (None, None),
ImageRegistry::DockerHub(params) => (
core_config.docker_accounts.get(&params.account).cloned(),
None,
),
ImageRegistry::Ghcr(params) => (
core_config.github_accounts.get(&params.account).cloned(),
None,
),
ImageRegistry::AwsEcr(label) => {
let config = core_config
.aws_ecr_registries
.get(label)
.with_context(|| {
format!(
"did not find config for aws ecr registry {label}"
)
})?;
(
Some(
ecr::get_ecr_token(
&config.region,
&config.access_key_id,
&config.secret_access_key,
)
.await
.context("failed to create aws ecr login token")?,
),
Some(AwsEcrConfig::from(config)),
)
}
ImageRegistry::Custom(_) => {
return Err(anyhow!("Custom ImageRegistry not yet supported"))
@@ -180,6 +215,7 @@ impl Resolve<Deploy, (User, Update)> for State {
stop_signal,
stop_time,
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
})
.await
@@ -188,7 +224,9 @@ impl Resolve<Deploy, (User, Update)> for State {
Err(e) => {
update.push_error_log(
"deploy container",
serialize_error_pretty(&e),
format_serror(
&e.context("failed to deploy container").into(),
),
);
}
};
@@ -203,7 +241,7 @@ impl Resolve<Deploy, (User, Update)> for State {
}
impl Resolve<StartContainer, (User, Update)> for State {
#[instrument(name = "StartContainer", skip(self, user))]
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartContainer { deployment }: StartContainer,
@@ -248,9 +286,10 @@ impl Resolve<StartContainer, (User, Update)> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("start container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"start container",
format_serror(&e.context("failed to start container").into()),
),
};
update.logs.push(log);
@@ -263,7 +302,7 @@ impl Resolve<StartContainer, (User, Update)> for State {
}
impl Resolve<StopContainer, (User, Update)> for State {
#[instrument(name = "StopContainer", skip(self, user))]
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopContainer {
@@ -318,9 +357,10 @@ impl Resolve<StopContainer, (User, Update)> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
@@ -333,7 +373,7 @@ impl Resolve<StopContainer, (User, Update)> for State {
}
impl Resolve<StopAllContainers, (User, Update)> for State {
#[instrument(name = "StopAllContainers", skip(self, user))]
#[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopAllContainers { server }: StopAllContainers,
@@ -403,9 +443,11 @@ impl Resolve<StopAllContainers, (User, Update)> for State {
if let Err(e) = res {
update.push_error_log(
"stop container failure",
format!(
"failed to stop container {name} ({id})\n\n{}",
serialize_error_pretty(&e)
format_serror(
&e.context(format!(
"failed to stop container {name} ({id})"
))
.into(),
),
);
}
@@ -419,7 +461,7 @@ impl Resolve<StopAllContainers, (User, Update)> for State {
}
impl Resolve<RemoveContainer, (User, Update)> for State {
#[instrument(name = "RemoveContainer", skip(self, user))]
#[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RemoveContainer {
@@ -474,9 +516,10 @@ impl Resolve<RemoveContainer, (User, Update)> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);

View File

@@ -2,6 +2,7 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -12,7 +13,7 @@ use monitor_client::{
use mungos::by_id::find_one_by_id;
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::{serialize_error_pretty, Json};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
@@ -92,7 +93,7 @@ async fn handler(
let log = match handle.await {
Ok(Err(e)) => {
warn!("/execute request {req_id} task error: {e:#}",);
Log::error("task error", serialize_error_pretty(&e))
Log::error("task error", format_serror(&e.into()))
}
Err(e) => {
warn!("/execute request {req_id} spawn error: {e:?}",);
@@ -121,7 +122,7 @@ async fn handler(
Ok(Json(update))
}
#[instrument(name = "ExecuteRequest", skip(user, update))]
#[instrument(name = "ExecuteRequest", skip(user, update), fields(user_id = user.id, update_id = update.id))]
async fn task(
req_id: Uuid,
request: ExecuteRequest,
@@ -149,7 +150,7 @@ async fn task(
}
let elapsed = timer.elapsed();
info!("/execute request {req_id} | resolve time: {elapsed:?}");
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
res
}

View File

@@ -1,5 +1,6 @@
use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use monitor_client::{
api::execute::RunProcedure,
entities::{
@@ -9,7 +10,6 @@ use monitor_client::{
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use tokio::sync::Mutex;
use crate::{
@@ -19,7 +19,7 @@ use crate::{
};
impl Resolve<RunProcedure, (User, Update)> for State {
#[instrument(name = "RunProcedure", skip(self, user))]
#[instrument(name = "RunProcedure", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunProcedure { procedure }: RunProcedure,
@@ -51,7 +51,11 @@ fn resolve_inner(
// and will panic otherwise.
update.push_simple_log(
"execute_procedure",
format!("executing procedure {}", procedure.name),
format!(
"{}: executing procedure '{}'",
muted("INFO"),
bold(&procedure.name)
),
);
// get the action state for the procedure (or insert default).
@@ -75,13 +79,15 @@ fn resolve_inner(
Ok(_) => {
update.push_simple_log(
"execution ok",
"the procedure has completed with no errors",
format!(
"{}: the procedure has {} with no errors",
muted("INFO"),
colored("completed", Color::Green)
),
);
}
Err(e) => update.push_error_log(
"execution error",
serialize_error_pretty(&e),
),
Err(e) => update
.push_error_log("execution error", format_serror(&e.into())),
}
update.finalize();

View File

@@ -1,4 +1,5 @@
use anyhow::anyhow;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -16,7 +17,6 @@ use mungos::{
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
config::core_config,
@@ -26,7 +26,7 @@ use crate::{
};
impl Resolve<CloneRepo, (User, Update)> for State {
#[instrument(name = "CloneRepo", skip(self, user))]
#[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
CloneRepo { repo }: CloneRepo,
@@ -71,7 +71,10 @@ impl Resolve<CloneRepo, (User, Update)> for State {
{
Ok(logs) => logs,
Err(e) => {
vec![Log::error("clone repo", serialize_error_pretty(&e))]
vec![Log::error(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
)]
}
};
@@ -87,7 +90,7 @@ impl Resolve<CloneRepo, (User, Update)> for State {
}
impl Resolve<PullRepo, (User, Update)> for State {
#[instrument(name = "PullRepo", skip(self, user))]
#[instrument(name = "PullRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PullRepo { repo }: PullRepo,
@@ -129,7 +132,10 @@ impl Resolve<PullRepo, (User, Update)> for State {
{
Ok(logs) => logs,
Err(e) => {
vec![Log::error("pull repo", serialize_error_pretty(&e))]
vec![Log::error(
"pull repo",
format_serror(&e.context("failed to pull repo").into()),
)]
}
};
@@ -175,7 +181,6 @@ async fn update_last_pulled_time(repo_name: &str) {
.update_one(
doc! { "name": repo_name },
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
None,
)
.await;
if let Err(e) = res {

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -11,7 +12,6 @@ use monitor_client::{
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{periphery_client, update::update_update},
@@ -20,7 +20,7 @@ use crate::{
};
impl Resolve<PruneContainers, (User, Update)> for State {
#[instrument(name = "PruneContainers", skip(self, user))]
#[instrument(name = "PruneContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneContainers { server }: PruneContainers,
@@ -54,9 +54,12 @@ impl Resolve<PruneContainers, (User, Update)> for State {
server.name
)) {
Ok(log) => log,
Err(e) => {
Log::error("prune containers", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"prune containers",
format_serror(
&e.context("failed to prune containers").into(),
),
),
};
update.success = log.success;
@@ -71,7 +74,7 @@ impl Resolve<PruneContainers, (User, Update)> for State {
}
impl Resolve<PruneNetworks, (User, Update)> for State {
#[instrument(name = "PruneNetworks", skip(self, user))]
#[instrument(name = "PruneNetworks", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneNetworks { server }: PruneNetworks,
@@ -105,9 +108,10 @@ impl Resolve<PruneNetworks, (User, Update)> for State {
server.name
)) {
Ok(log) => log,
Err(e) => {
Log::error("prune networks", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"prune networks",
format_serror(&e.context("failed to prune networks").into()),
),
};
update.success = log.success;
@@ -122,7 +126,7 @@ impl Resolve<PruneNetworks, (User, Update)> for State {
}
impl Resolve<PruneImages, (User, Update)> for State {
#[instrument(name = "PruneImages", skip(self, user))]
#[instrument(name = "PruneImages", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneImages { server }: PruneImages,

View File

@@ -1,4 +1,5 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::{execute::LaunchServer, write::CreateServer},
entities::{
@@ -11,17 +12,18 @@ use monitor_client::{
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
cloud::{aws::launch_ec2_instance, hetzner::launch_hetzner_server},
cloud::{
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
},
helpers::update::update_update,
resource,
state::{db_client, State},
};
impl Resolve<LaunchServer, (User, Update)> for State {
#[instrument(name = "LaunchServer", skip(self, user))]
#[instrument(name = "LaunchServer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
LaunchServer {
@@ -34,12 +36,9 @@ impl Resolve<LaunchServer, (User, Update)> for State {
if db_client()
.await
.servers
.find_one(
doc! {
"name": &name
},
None,
)
.find_one(doc! {
"name": &name
})
.await
.context("failed to query db for servers")?
.is_some()
@@ -130,10 +129,7 @@ impl Resolve<LaunchServer, (User, Update)> for State {
Err(e) => {
update.push_error_log(
"create server",
format!(
"failed to create server\n\n{}",
serialize_error_pretty(&e)
),
format_serror(&e.context("failed to create server").into()),
);
}
};

View File

@@ -1,4 +1,5 @@
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use mongo_indexed::doc;
use monitor_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
@@ -7,7 +8,6 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
@@ -20,13 +20,12 @@ use monitor_client::{
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
query::get_id_to_tags,
sync::{
colored,
deployment,
resource::{
get_updates_for_execution, AllResourcesById, ResourceSync,
},
@@ -38,6 +37,7 @@ use crate::{
};
impl Resolve<RunSync, (User, Update)> for State {
#[instrument(name = "RunSync", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunSync { sync }: RunSync,
@@ -77,7 +77,7 @@ impl Resolve<RunSync, (User, Update)> for State {
deployments_to_create,
deployments_to_update,
deployments_to_delete,
) = get_updates_for_execution::<Deployment>(
) = deployment::get_updates_for_execution(
resources.deployments,
sync.config.delete,
&all_resources,
@@ -205,7 +205,10 @@ impl Resolve<RunSync, (User, Update)> for State {
{
update.push_simple_log(
"No Changes",
format!("{}. exiting.", colored("nothing to do", "green")),
format!(
"{}. exiting.",
colored("nothing to do", Color::Green)
),
);
update.finalize();
update_update(update.clone()).await?;
@@ -302,15 +305,15 @@ impl Resolve<RunSync, (User, Update)> for State {
);
// Dependant on server / build
maybe_extend(
&mut update.logs,
Deployment::run_updates(
deployments_to_create,
deployments_to_update,
deployments_to_delete,
)
.await,
);
if let Some(res) = deployment::run_updates(
deployments_to_create,
deployments_to_update,
deployments_to_delete,
)
.await
{
update.logs.extend(res);
}
// Dependant on everything
maybe_extend(
@@ -355,9 +358,9 @@ impl Resolve<RunSync, (User, Update)> for State {
warn!("failed to refresh sync {} after run | {e:#}", sync.name);
update.push_error_log(
"refresh sync",
format!(
"failed to refresh sync pending after run | {}",
serialize_error_pretty(&e)
format_serror(
&e.context("failed to refresh sync pending after run")
.into(),
),
);
}

View File

@@ -14,7 +14,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
state::{db_client, State},
};
@@ -28,13 +28,13 @@ impl Resolve<ListAlerts, User> for State {
) -> anyhow::Result<ListAlertsResponse> {
let mut query = query.unwrap_or_default();
if !user.admin && !core_config().transparent_mode {
let server_ids = get_resource_ids_for_non_admin(
&user.id,
let server_ids = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Server,
)
.await?;
let deployment_ids = get_resource_ids_for_non_admin(
&user.id,
let deployment_ids = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Deployment,
)
.await?;

View File

@@ -1,6 +1,5 @@
use std::str::FromStr;
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::*,
entities::{
@@ -10,12 +9,11 @@ use monitor_client::{
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -61,26 +59,21 @@ impl Resolve<GetAlertersSummary, User> for State {
GetAlertersSummary {}: GetAlertersSummary,
user: User,
) -> anyhow::Result<GetAlertersSummaryResponse> {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
let query = match get_resource_ids_for_user(
&user,
ResourceTargetVariant::Alerter,
)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
};
Some(query)
},
None => Document::new(),
};
let total = db_client()
.await
.alerters
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all alerter documents")?;
let res = GetAlertersSummaryResponse {

View File

@@ -10,6 +10,7 @@ use monitor_client::{
api::read::*,
entities::{
build::{Build, BuildActionState, BuildListItem, BuildState},
config::core::CoreConfig,
permission::PermissionLevel,
update::UpdateStatus,
user::User,
@@ -25,7 +26,9 @@ use resolver_api::{Resolve, ResolveToString};
use crate::{
config::core_config,
resource,
state::{action_states, build_state_cache, db_client, State},
state::{
action_states, build_state_cache, db_client, github_client, State,
},
};
impl Resolve<GetBuild, User> for State {
@@ -147,16 +150,13 @@ impl Resolve<GetBuildMonthlyStats, User> for State {
let mut build_updates = db_client()
.await
.updates
.find(
doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
"operation": Operation::RunBuild.to_string(),
.find(doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
None,
)
"operation": Operation::RunBuild.to_string(),
})
.await
.context("failed to get updates cursor")?;
@@ -201,6 +201,7 @@ impl Resolve<GetBuildVersions, User> for State {
major,
minor,
patch,
limit,
}: GetBuildVersions,
user: User,
) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
@@ -233,7 +234,10 @@ impl Resolve<GetBuildVersions, User> for State {
let versions = find_collect(
&db_client().await.updates,
filter,
FindOptions::builder().sort(doc! { "_id": -1 }).build(),
FindOptions::builder()
.sort(doc! { "_id": -1 })
.limit(limit)
.build(),
)
.await
.context("failed to pull versions from mongo")?
@@ -306,3 +310,76 @@ impl Resolve<ListCommonBuildExtraArgs, User> for State {
Ok(res)
}
}
impl Resolve<GetBuildWebhookEnabled, User> for State {
async fn resolve(
&self,
GetBuildWebhookEnabled { build }: GetBuildWebhookEnabled,
user: User,
) -> anyhow::Result<GetBuildWebhookEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Read,
)
.await?;
if build.config.repo.is_empty() {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
github_webhook_base_url,
..
} = core_config();
let host = github_webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: true,
});
}
}
Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: false,
})
}
}

View File

@@ -1,6 +1,7 @@
use std::{collections::HashSet, str::FromStr};
use std::collections::HashSet;
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::{self, *},
entities::{
@@ -10,12 +11,12 @@ use monitor_client::{
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -61,26 +62,21 @@ impl Resolve<GetBuildersSummary, User> for State {
GetBuildersSummary {}: GetBuildersSummary,
user: User,
) -> anyhow::Result<GetBuildersSummaryResponse> {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Builder,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
let query = match get_resource_ids_for_user(
&user,
ResourceTargetVariant::Builder,
)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
};
Some(query)
},
None => Document::new(),
};
let total = db_client()
.await
.builders
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all builder documents")?;
let res = GetBuildersSummaryResponse {

View File

@@ -121,6 +121,7 @@ impl Resolve<SearchLog, User> for State {
deployment,
terms,
combinator,
invert,
}: SearchLog,
user: User,
) -> anyhow::Result<Log> {
@@ -143,6 +144,7 @@ impl Resolve<SearchLog, User> for State {
name,
terms,
combinator,
invert,
})
.await
.context("failed at call to periphery")

View File

@@ -1,10 +1,10 @@
use std::time::Instant;
use std::{sync::OnceLock, time::Instant};
use anyhow::anyhow;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::read::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use resolver_api::{derive::Resolver, ResolveToString, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
@@ -37,16 +37,21 @@ mod variable;
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum ReadRequest {
#[to_string_resolver]
GetVersion(GetVersion),
#[to_string_resolver]
GetCoreInfo(GetCoreInfo),
#[to_string_resolver]
GetAvailableAwsEcrLabels(GetAvailableAwsEcrLabels),
// ==== USER ====
ListUsers(ListUsers),
GetUsername(GetUsername),
GetPermissionLevel(GetPermissionLevel),
FindUser(FindUser),
ListUsers(ListUsers),
ListApiKeys(ListApiKeys),
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
ListPermissions(ListPermissions),
GetPermissionLevel(GetPermissionLevel),
ListUserTargetPermissions(ListUserTargetPermissions),
// ==== USER GROUP ====
@@ -65,15 +70,13 @@ enum ReadRequest {
// ==== SERVER TEMPLATE ====
GetServerTemplate(GetServerTemplate),
GetServerTemplatesSummary(GetServerTemplatesSummary),
ListServerTemplates(ListServerTemplates),
ListFullServerTemplates(ListFullServerTemplates),
GetServerTemplatesSummary(GetServerTemplatesSummary),
// ==== SERVER ====
GetServersSummary(GetServersSummary),
GetServer(GetServer),
ListServers(ListServers),
ListFullServers(ListFullServers),
GetServerState(GetServerState),
GetPeripheryVersion(GetPeripheryVersion),
GetDockerContainers(GetDockerContainers),
@@ -83,27 +86,30 @@ enum ReadRequest {
GetHistoricalServerStats(GetHistoricalServerStats),
GetAvailableAccounts(GetAvailableAccounts),
GetAvailableSecrets(GetAvailableSecrets),
ListServers(ListServers),
ListFullServers(ListFullServers),
// ==== DEPLOYMENT ====
GetDeploymentsSummary(GetDeploymentsSummary),
GetDeployment(GetDeployment),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
GetDeploymentContainer(GetDeploymentContainer),
GetDeploymentActionState(GetDeploymentActionState),
GetDeploymentStats(GetDeploymentStats),
GetLog(GetLog),
SearchLog(SearchLog),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
// ==== BUILD ====
GetBuildsSummary(GetBuildsSummary),
GetBuild(GetBuild),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
GetBuildActionState(GetBuildActionState),
GetBuildMonthlyStats(GetBuildMonthlyStats),
GetBuildVersions(GetBuildVersions),
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
#[to_string_resolver]
ListGithubOrganizations(ListGithubOrganizations),
@@ -113,23 +119,25 @@ enum ReadRequest {
// ==== REPO ====
GetReposSummary(GetReposSummary),
GetRepo(GetRepo),
GetRepoActionState(GetRepoActionState),
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
ListRepos(ListRepos),
ListFullRepos(ListFullRepos),
GetRepoActionState(GetRepoActionState),
// ==== SYNC ====
GetResourceSyncsSummary(GetResourceSyncsSummary),
GetResourceSync(GetResourceSync),
GetResourceSyncActionState(GetResourceSyncActionState),
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
ListResourceSyncs(ListResourceSyncs),
ListFullResourceSyncs(ListFullResourceSyncs),
GetResourceSyncActionState(GetResourceSyncActionState),
// ==== BUILDER ====
GetBuildersSummary(GetBuildersSummary),
GetBuilder(GetBuilder),
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
ListBuilders(ListBuilders),
ListFullBuilders(ListFullBuilders),
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
// ==== ALERTER ====
GetAlertersSummary(GetAlertersSummary),
@@ -172,17 +180,14 @@ pub fn router() -> Router {
.layer(middleware::from_fn(auth_request))
}
#[instrument(name = "ReadHandler", level = "debug", skip(user))]
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ReadRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
debug!(
"/read request {req_id} | user: {} ({})",
user.username, user.id
);
debug!("/read request | user: {}", user.username);
let res =
State
.resolve_request(request, user)
@@ -194,35 +199,39 @@ async fn handler(
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/read request {req_id} error: {e:#}");
debug!("/read request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/read request {req_id} | resolve time: {elapsed:?}");
Ok((TypedHeader(ContentType::json()), res?))
}
impl Resolve<GetVersion, User> for State {
#[instrument(name = "GetVersion", level = "debug", skip(self))]
async fn resolve(
fn version() -> &'static String {
static VERSION: OnceLock<String> = OnceLock::new();
VERSION.get_or_init(|| {
serde_json::to_string(&GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
.context("failed to serialize GetVersionResponse")
.unwrap()
})
}
impl ResolveToString<GetVersion, User> for State {
async fn resolve_to_string(
&self,
GetVersion {}: GetVersion,
_: User,
) -> anyhow::Result<GetVersionResponse> {
Ok(GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
) -> anyhow::Result<String> {
Ok(version().to_string())
}
}
impl Resolve<GetCoreInfo, User> for State {
#[instrument(name = "GetCoreInfo", level = "debug", skip(self))]
async fn resolve(
&self,
GetCoreInfo {}: GetCoreInfo,
_: User,
) -> anyhow::Result<GetCoreInfoResponse> {
fn core_info() -> &'static String {
static CORE_INFO: OnceLock<String> = OnceLock::new();
CORE_INFO.get_or_init(|| {
let config = core_config();
Ok(GetCoreInfoResponse {
let info = GetCoreInfoResponse {
title: config.title.clone(),
monitoring_interval: config.monitoring_interval,
github_webhook_base_url: config
@@ -231,6 +240,50 @@ impl Resolve<GetCoreInfo, User> for State {
.unwrap_or_else(|| config.host.clone()),
transparent_mode: config.transparent_mode,
ui_write_disabled: config.ui_write_disabled,
})
github_webhook_owners: config
.github_webhook_app
.installations
.iter()
.map(|i| i.namespace.to_string())
.collect(),
};
serde_json::to_string(&info)
.context("failed to serialize GetCoreInfoResponse")
.unwrap()
})
}
impl ResolveToString<GetCoreInfo, User> for State {
async fn resolve_to_string(
&self,
GetCoreInfo {}: GetCoreInfo,
_: User,
) -> anyhow::Result<String> {
Ok(core_info().to_string())
}
}
fn ecr_labels() -> &'static String {
static ECR_LABELS: OnceLock<String> = OnceLock::new();
ECR_LABELS.get_or_init(|| {
serde_json::to_string(
&core_config()
.aws_ecr_registries
.keys()
.cloned()
.collect::<Vec<_>>(),
)
.context("failed to serialize ecr registries")
.unwrap()
})
}
impl ResolveToString<GetAvailableAwsEcrLabels, User> for State {
async fn resolve_to_string(
&self,
GetAvailableAwsEcrLabels {}: GetAvailableAwsEcrLabels,
_: User,
) -> anyhow::Result<String> {
Ok(ecr_labels().to_string())
}
}

View File

@@ -44,7 +44,7 @@ impl Resolve<GetPermissionLevel, User> for State {
return Ok(PermissionLevel::Write);
}
let (variant, id) = target.extract_variant_id();
get_user_permission_on_resource(&user.id, variant, id).await
get_user_permission_on_resource(&user, variant, id).await
}
}

View File

@@ -2,6 +2,7 @@ use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{Repo, RepoActionState, RepoListItem, RepoState},
user::User,
@@ -10,8 +11,9 @@ use monitor_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, repo_state_cache, State},
state::{action_states, github_client, repo_state_cache, State},
};
impl Resolve<GetRepo, User> for State {
@@ -118,3 +120,86 @@ impl Resolve<GetReposSummary, User> for State {
Ok(res)
}
}
// Reports whether the managed GitHub webhooks for a Repo's `clone` and
// `pull` listener endpoints exist and are active on the configured
// GitHub repository. Returns `managed: false` when webhook management
// is not possible for this repo (no github app, no repo configured,
// or owner not covered by the app installation).
impl Resolve<GetRepoWebhooksEnabled, User> for State {
  async fn resolve(
    &self,
    GetRepoWebhooksEnabled { repo }: GetRepoWebhooksEnabled,
    user: User,
  ) -> anyhow::Result<GetRepoWebhooksEnabledResponse> {
    // No github webhook app configured -> webhooks are not managed here.
    let Some(github) = github_client() else {
      return Ok(GetRepoWebhooksEnabledResponse {
        managed: false,
        clone_enabled: false,
        pull_enabled: false,
      });
    };
    // Reading webhook state requires at least Read on the Repo resource.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // No git repo configured on the resource -> nothing to inspect.
    if repo.config.repo.is_empty() {
      return Ok(GetRepoWebhooksEnabledResponse {
        managed: false,
        clone_enabled: false,
        pull_enabled: false,
      });
    }
    // `config.repo` is expected in "{owner}/{repo_name}" form.
    let mut split = repo.config.repo.split('/');
    let owner = split.next().context("Repo repo has no owner")?;
    // Only owners the webhook app is installed for can be managed.
    let Some(github) = github.get(owner) else {
      return Ok(GetRepoWebhooksEnabledResponse {
        managed: false,
        clone_enabled: false,
        pull_enabled: false,
      });
    };
    let repo_name =
      split.next().context("Repo repo has no repo after the /")?;
    let github_repos = github.repos();
    let webhooks = github_repos
      .list_all_webhooks(owner, repo_name)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url over the core host when set.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    let clone_url =
      format!("{host}/listener/github/repo/{}/clone", repo.id);
    let pull_url =
      format!("{host}/listener/github/repo/{}/pull", repo.id);
    let mut clone_enabled = false;
    let mut pull_enabled = false;
    // A hook only counts as enabled if it is active AND points at the
    // exact expected listener url.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == clone_url {
        clone_enabled = true
      }
      if webhook.active && webhook.config.url == pull_url {
        pull_enabled = true
      }
    }
    Ok(GetRepoWebhooksEnabledResponse {
      managed: true,
      clone_enabled,
      pull_enabled,
    })
  }
}

View File

@@ -1,6 +1,5 @@
use std::str::FromStr;
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::*,
entities::{
@@ -8,11 +7,11 @@ use monitor_client::{
update::ResourceTargetVariant, user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -58,26 +57,21 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
GetServerTemplatesSummary {}: GetServerTemplatesSummary,
user: User,
) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::ServerTemplate,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
let query = match get_resource_ids_for_user(
&user,
ResourceTargetVariant::ServerTemplate,
)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
};
Some(query)
},
None => Document::new(),
};
let total = db_client()
.await
.server_templates
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all server template documents")?;
let res = GetServerTemplatesSummaryResponse {

View File

@@ -2,6 +2,7 @@ use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
sync::{
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
@@ -13,8 +14,11 @@ use monitor_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, resource_sync_state_cache, State},
state::{
action_states, github_client, resource_sync_state_cache, State,
},
};
impl Resolve<GetResourceSync, User> for State {
@@ -137,3 +141,86 @@ impl Resolve<GetResourceSyncsSummary, User> for State {
Ok(res)
}
}
// Reports whether the managed GitHub webhooks for a ResourceSync's
// `refresh` and `sync` listener endpoints exist and are active on the
// configured GitHub repository. Returns `managed: false` when webhook
// management is not possible (no github app, no repo configured, or
// owner not covered by the app installation).
impl Resolve<GetSyncWebhooksEnabled, User> for State {
  async fn resolve(
    &self,
    GetSyncWebhooksEnabled { sync }: GetSyncWebhooksEnabled,
    user: User,
  ) -> anyhow::Result<GetSyncWebhooksEnabledResponse> {
    // No github webhook app configured -> webhooks are not managed here.
    let Some(github) = github_client() else {
      return Ok(GetSyncWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        sync_enabled: false,
      });
    };
    // Reading webhook state requires at least Read on the sync resource.
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // No git repo configured on the resource -> nothing to inspect.
    if sync.config.repo.is_empty() {
      return Ok(GetSyncWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        sync_enabled: false,
      });
    }
    // `config.repo` is expected in "{owner}/{repo_name}" form.
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    // Only owners the webhook app is installed for can be managed.
    let Some(github) = github.get(owner) else {
      return Ok(GetSyncWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        sync_enabled: false,
      });
    };
    // Fix: error context previously said "Repo repo ..." (copy-paste from
    // the repo handler); this resolver operates on a Sync's repo.
    let repo_name =
      split.next().context("Sync repo has no repo after the /")?;
    let github_repos = github.repos();
    let webhooks = github_repos
      .list_all_webhooks(owner, repo_name)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url over the core host when set.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    let refresh_url =
      format!("{host}/listener/github/sync/{}/refresh", sync.id);
    let sync_url =
      format!("{host}/listener/github/sync/{}/sync", sync.id);
    let mut refresh_enabled = false;
    let mut sync_enabled = false;
    // A hook only counts as enabled if it is active AND points at the
    // exact expected listener url.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == refresh_url {
        refresh_enabled = true
      }
      if webhook.active && webhook.config.url == sync_url {
        sync_enabled = true
      }
    }
    Ok(GetSyncWebhooksEnabledResponse {
      managed: true,
      refresh_enabled,
      sync_enabled,
    })
  }
}

View File

@@ -118,13 +118,22 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(tags).build(),
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
let user_groups = if user.admin {
find_collect(&db_client().await.user_groups, None, None)
@@ -221,7 +230,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
// replace server id of builder
if let BuilderConfig::Server(config) = &mut builder.config {
config.server_id.clone_from(
names.servers.get(&id).unwrap_or(&String::new()),
names
.servers
.get(&config.server_id)
.unwrap_or(&String::new()),
)
}
res
@@ -300,7 +312,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
};
}
add_user_groups(user_groups, &mut res, &user)
add_user_groups(user_groups, &mut res, &names, &user)
.await
.context("failed to add user groups")?;
@@ -412,6 +424,8 @@ struct ResourceNames {
deployments: HashMap<String, String>,
procedures: HashMap<String, String>,
syncs: HashMap<String, String>,
alerters: HashMap<String, String>,
templates: HashMap<String, String>,
}
impl ResourceNames {
@@ -466,6 +480,18 @@ impl ResourceNames {
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
alerters: find_collect(&db.alerters, None, None)
.await
.context("failed to get all alerters")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
templates: find_collect(&db.server_templates, None, None)
.await
.context("failed to get all server templates")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
})
}
}
@@ -473,6 +499,7 @@ impl ResourceNames {
async fn add_user_groups(
user_groups: Vec<String>,
res: &mut ResourcesToml,
names: &ResourceNames,
user: &User,
) -> anyhow::Result<()> {
let db = db_client().await;
@@ -500,9 +527,43 @@ async fn add_user_groups(
)
.await?
.into_iter()
.map(|permission| PermissionToml {
target: permission.resource_target,
level: permission.level,
.map(|mut permission| {
match &mut permission.resource_target {
ResourceTarget::Build(id) => {
*id = names.builds.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = names.builders.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id =
names.deployments.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = names.servers.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = names.repos.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = names.alerters.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id =
names.procedures.get(id).cloned().unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = names.templates.get(id).cloned().unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = names.syncs.get(id).cloned().unwrap_or_default()
}
ResourceTarget::System(_) => {}
}
PermissionToml {
target: permission.resource_target,
level: permission.level,
}
})
.collect();
res.user_groups.push(UserGroupToml {
@@ -512,6 +573,7 @@ async fn add_user_groups(
.into_iter()
.filter_map(|user_id| usernames.get(&user_id).cloned())
.collect(),
all: ug.all,
permissions,
});
}
@@ -533,6 +595,8 @@ fn convert_resource<R: MonitorResource>(
.filter_map(|t| tag_names.get(t).cloned())
.collect(),
description: resource.description,
deploy: false,
after: Default::default(),
config,
}
}
@@ -570,6 +634,25 @@ fn serialize_resources_toml(
.context("deployment has no config?")?
.as_object_mut()
.context("config is not object?")?;
if let Some(DeploymentImage::Build { version, .. }) =
&deployment.config.image
{
let image = config
.get_mut("image")
.context("deployment has no image")?
.get_mut("params")
.context("deployment image has no params")?
.as_object_mut()
.context("deployment image params is not object")?;
if version.is_none() {
image.remove("version");
} else {
image.insert(
"version".to_string(),
Value::String(version.to_string()),
);
}
}
if let Some(term_signal_labels) =
&deployment.config.term_signal_labels
{

View File

@@ -29,7 +29,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
helpers::query::get_resource_ids_for_user,
resource,
state::{db_client, State},
};
@@ -45,58 +45,124 @@ impl Resolve<ListUpdates, User> for State {
let query = if user.admin || core_config().transparent_mode {
query
} else {
let server_ids = get_resource_ids_for_non_admin(
&user.id,
let server_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Server,
)
.await?;
let deployment_ids = get_resource_ids_for_non_admin(
&user.id,
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Deployment,
)
.await?;
let build_ids = get_resource_ids_for_non_admin(
&user.id,
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let build_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Build,
)
.await?;
let repo_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Repo,
)
.await?;
let procedure_ids = get_resource_ids_for_non_admin(
&user.id,
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query =
get_resource_ids_for_user(&user, ResourceTargetVariant::Repo)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Procedure,
)
.await?;
let builder_ids = get_resource_ids_for_non_admin(
&user.id,
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let builder_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Builder,
)
.await?;
let alerter_ids = get_resource_ids_for_non_admin(
&user.id,
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::Alerter,
)
.await?;
let server_template_ids = get_resource_ids_for_non_admin(
&user.id,
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::ServerTemplate,
)
.await?;
.await?
.map(|ids| {
doc! {
"target.type": "ServerTemplate", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
let resource_sync_query = get_resource_ids_for_user(
&user,
ResourceTargetVariant::ResourceSync,
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = query.unwrap_or_default();
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
{ "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
{ "target.type": "Build", "target.id": { "$in": &build_ids } },
{ "target.type": "Repo", "target.id": { "$in": &repo_ids } },
{ "target.type": "Procedure", "target.id": { "$in": &procedure_ids } },
{ "target.type": "Builder", "target.id": { "$in": &builder_ids } },
{ "target.type": "Alerter", "target.id": { "$in": &alerter_ids } },
{ "target.type": "ServerTemplate", "target.id": { "$in": &server_template_ids } },
server_query,
build_query,
deployment_query,
repo_query,
procedure_query,
alerter_query,
builder_query,
server_template_query,
resource_sync_query,
]
});
query.into()

View File

@@ -1,9 +1,10 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::read::{
GetUsername, GetUsernameResponse, ListApiKeys,
ListApiKeysForServiceUser, ListApiKeysForServiceUserResponse,
ListApiKeysResponse, ListUsers, ListUsersResponse,
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
ListApiKeys, ListApiKeysForServiceUser,
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
ListUsers, ListUsersResponse,
},
entities::user::{User, UserConfig},
};
@@ -14,7 +15,10 @@ use mungos::{
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
use crate::{
helpers::query::get_user,
state::{db_client, State},
};
impl Resolve<GetUsername, User> for State {
async fn resolve(
@@ -40,6 +44,19 @@ impl Resolve<GetUsername, User> for State {
}
}
// Admin-only lookup of a user by name or id, delegating the actual
// name-or-id resolution to the shared `get_user` query helper.
impl Resolve<FindUser, User> for State {
  async fn resolve(
    &self,
    FindUser { user }: FindUser,
    admin: User,
  ) -> anyhow::Result<FindUserResponse> {
    // Gate on the caller's admin flag before touching the db.
    if admin.admin {
      get_user(&user).await
    } else {
      Err(anyhow!("This method is admin only."))
    }
  }
}
impl Resolve<ListUsers, User> for State {
async fn resolve(
&self,
@@ -87,22 +104,21 @@ impl Resolve<ListApiKeys, User> for State {
impl Resolve<ListApiKeysForServiceUser, User> for State {
async fn resolve(
&self,
ListApiKeysForServiceUser { user_id }: ListApiKeysForServiceUser,
ListApiKeysForServiceUser { user }: ListApiKeysForServiceUser,
admin: User,
) -> anyhow::Result<ListApiKeysForServiceUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query db for users")?
.context("user at id not found")?;
let user = get_user(&user).await?;
let UserConfig::Service { .. } = user.config else {
return Err(anyhow!("Given user is not service user"));
};
let api_keys = find_collect(
&db_client().await.api_keys,
doc! { "user_id": user_id },
doc! { "user_id": &user.id },
None,
)
.await

View File

@@ -37,7 +37,7 @@ impl Resolve<GetUserGroup, User> for State {
db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user groups")?
.context("no UserGroup found with given name or id")

View File

@@ -11,10 +11,7 @@ use monitor_client::{
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse,
},
entities::{
api_key::ApiKey, monitor_timestamp, update::ResourceTarget,
user::User,
},
entities::{api_key::ApiKey, monitor_timestamp, user::User},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::{derive::Resolver, Resolve, Resolver};
@@ -90,33 +87,21 @@ impl Resolve<PushRecentlyViewed, User> for State {
) -> anyhow::Result<PushRecentlyViewedResponse> {
let user = get_user(&user.id).await?;
let (recents, id, field) = match resource {
ResourceTarget::Server(id) => {
(user.recent_servers, id, "recent_servers")
let (resource_type, id) = resource.extract_variant_id();
let update = match user.recents.get(&resource_type) {
Some(recents) => {
let mut recents = recents
.iter()
.filter(|_id| !id.eq(*_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
doc! { format!("recents.{resource_type}"): to_bson(&recents)? }
}
ResourceTarget::Deployment(id) => {
(user.recent_deployments, id, "recent_deployments")
None => {
doc! { format!("recents.{resource_type}"): [id] }
}
ResourceTarget::Build(id) => {
(user.recent_builds, id, "recent_builds")
}
ResourceTarget::Repo(id) => {
(user.recent_repos, id, "recent_repos")
}
ResourceTarget::Procedure(id) => {
(user.recent_procedures, id, "recent_procedures")
}
_ => return Ok(PushRecentlyViewedResponse {}),
};
let mut recents = recents
.into_iter()
.filter(|_id| !id.eq(_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
let update = doc! { field: to_bson(&recents)? };
update_one_by_id(
&db_client().await.users,
&user.id,
@@ -124,7 +109,9 @@ impl Resolve<PushRecentlyViewed, User> for State {
None,
)
.await
.with_context(|| format!("failed to update {field}"))?;
.with_context(|| {
format!("failed to update recents.{resource_type}")
})?;
Ok(PushRecentlyViewedResponse {})
}
@@ -187,7 +174,7 @@ impl Resolve<CreateApiKey, User> for State {
db_client()
.await
.api_keys
.insert_one(api_key, None)
.insert_one(api_key)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
@@ -208,7 +195,7 @@ impl Resolve<DeleteApiKey, User> for State {
let client = db_client().await;
let key = client
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed at db query")?
.context("no api key with key found")?;
@@ -217,7 +204,7 @@ impl Resolve<DeleteApiKey, User> for State {
}
client
.api_keys
.delete_one(doc! { "key": key.key }, None)
.delete_one(doc! { "key": key.key })
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})

View File

@@ -1,10 +1,24 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::*,
entities::{build::Build, permission::PermissionLevel, user::User},
entities::{
build::{Build, PartialBuildConfig},
config::core::CoreConfig,
permission::PermissionLevel,
user::User,
NoData,
},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{resource, state::State};
use crate::{
config::core_config,
resource,
state::{github_client, State},
};
impl Resolve<CreateBuild, User> for State {
#[instrument(name = "CreateBuild", skip(self, user))]
@@ -56,3 +70,175 @@ impl Resolve<UpdateBuild, User> for State {
resource::update::<Build>(&id, config, &user).await
}
}
// Creates the GitHub push webhook for a Build, pointing at this core's
// `/listener/github/build/{id}` endpoint. Idempotent: if an active hook
// with the expected url already exists, it is left alone. Also turns on
// the build's `webhook_enabled` config if it was off.
impl Resolve<CreateBuildWebhook, User> for State {
  #[instrument(name = "CreateBuildWebhook", skip(self, user))]
  async fn resolve(
    &self,
    CreateBuildWebhook { build }: CreateBuildWebhook,
    user: User,
  ) -> anyhow::Result<CreateBuildWebhookResponse> {
    // Managing webhooks requires the github webhook app in core config.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    // Creating a webhook requires Write permission on the Build.
    let build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if build.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't create webhook"
      ));
    }
    // `config.repo` is expected in "{owner}/{repo}" form.
    let mut split = build.config.repo.split('/');
    let owner = split.next().context("Build repo has no owner")?;
    // Only owners the webhook app is installed for can be managed.
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo =
      split.next().context("Build repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      github_webhook_secret,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url over the core host when set.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    let url = format!("{host}/listener/github/build/{}", build.id);
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        // Active hook already points at the listener: nothing to do.
        return Ok(NoData {});
      }
    }
    // Now good to create the webhook
    let request = ReposCreateWebhookRequest {
      active: Some(true),
      config: Some(ReposCreateWebhookRequestConfig {
        url,
        secret: github_webhook_secret.to_string(),
        content_type: String::from("json"),
        insecure_ssl: None,
        digest: Default::default(),
        token: Default::default(),
      }),
      events: vec![String::from("push")],
      name: String::from("web"),
    };
    github_repos
      .create_webhook(owner, repo, &request)
      .await
      .context("failed to create webhook")?;
    // Flip webhook_enabled on so incoming pushes actually trigger builds.
    if !build.config.webhook_enabled {
      self
        .resolve(
          UpdateBuild {
            id: build.id,
            config: PartialBuildConfig {
              webhook_enabled: Some(true),
              ..Default::default()
            },
          },
          user,
        )
        .await
        .context("failed to update build to enable webhook")?;
    }
    Ok(NoData {})
  }
}
// Deletes the GitHub push webhook for a Build, if an active hook
// pointing at this core's `/listener/github/build/{id}` endpoint
// exists. Succeeds (NoData) even when no matching hook is found.
impl Resolve<DeleteBuildWebhook, User> for State {
  #[instrument(name = "DeleteBuildWebhook", skip(self, user))]
  async fn resolve(
    &self,
    DeleteBuildWebhook { build }: DeleteBuildWebhook,
    user: User,
  ) -> anyhow::Result<DeleteBuildWebhookResponse> {
    // Managing webhooks requires the github webhook app in core config.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    // Deleting a webhook requires Write permission on the Build.
    let build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if build.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't delete webhook"
      ));
    }
    // `config.repo` is expected in "{owner}/{repo}" form.
    let mut split = build.config.repo.split('/');
    let owner = split.next().context("Build repo has no owner")?;
    // Only owners the webhook app is installed for can be managed.
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo =
      split.next().context("Build repo has no repo after the /")?;
    let github_repos = github.repos();
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url over the core host when set.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    let url = format!("{host}/listener/github/build/{}", build.id);
    // Delete the first active hook matching the expected listener url.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        github_repos
          .delete_webhook(owner, repo, webhook.id)
          .await
          .context("failed to delete webhook")?;
        return Ok(NoData {});
      }
    }
    // No webhook to delete, all good
    Ok(NoData {})
  }
}

View File

@@ -50,6 +50,7 @@ pub enum WriteRequest {
// ==== PERMISSIONS ====
UpdateUserBasePermissions(UpdateUserBasePermissions),
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
// ==== DESCRIPTION ====
@@ -75,6 +76,8 @@ pub enum WriteRequest {
CopyBuild(CopyBuild),
DeleteBuild(DeleteBuild),
UpdateBuild(UpdateBuild),
CreateBuildWebhook(CreateBuildWebhook),
DeleteBuildWebhook(DeleteBuildWebhook),
// ==== BUILDER ====
CreateBuilder(CreateBuilder),
@@ -93,6 +96,8 @@ pub enum WriteRequest {
CopyRepo(CopyRepo),
DeleteRepo(DeleteRepo),
UpdateRepo(UpdateRepo),
CreateRepoWebhook(CreateRepoWebhook),
DeleteRepoWebhook(DeleteRepoWebhook),
// ==== ALERTER ====
CreateAlerter(CreateAlerter),
@@ -112,6 +117,8 @@ pub enum WriteRequest {
DeleteResourceSync(DeleteResourceSync),
UpdateResourceSync(UpdateResourceSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),
// ==== TAG ====
CreateTag(CreateTag),
@@ -149,16 +156,13 @@ async fn handler(
Ok((TypedHeader(ContentType::json()), res??))
}
#[instrument(name = "WriteRequest", skip(user))]
#[instrument(name = "WriteRequest", skip(user), fields(user_id = user.id))]
async fn task(
req_id: Uuid,
request: WriteRequest,
user: User,
) -> anyhow::Result<String> {
info!(
"/write request {req_id} | user: {} ({})",
user.username, user.id
);
info!("/write request | user: {}", user.username);
let timer = Instant::now();
@@ -178,7 +182,7 @@ async fn task(
}
let elapsed = timer.elapsed();
info!("/write request {req_id} | resolve time: {elapsed:?}");
debug!("/write request {req_id} | resolve time: {elapsed:?}");
res
}

View File

@@ -3,8 +3,10 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
UpdatePermissionOnTarget, UpdatePermissionOnTargetResponse,
UpdateUserBasePermissions, UpdateUserBasePermissionsResponse,
UpdatePermissionOnResourceType,
UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget,
UpdatePermissionOnTargetResponse, UpdateUserBasePermissions,
UpdateUserBasePermissionsResponse,
},
entities::{
permission::{UserTarget, UserTargetVariant},
@@ -41,6 +43,7 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
if !admin.admin {
return Err(anyhow!("this method is admin only"));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query mongo for user")?
@@ -73,6 +76,73 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
}
}
// Sets the base permission level a user or user group has over an
// entire resource type (e.g. all Builds), stored in the target
// document under the `all.{resource_type}` field. Admin only.
impl Resolve<UpdatePermissionOnResourceType, User> for State {
  #[instrument(
    name = "UpdatePermissionOnResourceType",
    skip(self, admin)
  )]
  async fn resolve(
    &self,
    UpdatePermissionOnResourceType {
      user_target,
      resource_type,
      permission,
    }: UpdatePermissionOnResourceType,
    admin: User,
  ) -> anyhow::Result<UpdatePermissionOnResourceTypeResponse> {
    if !admin.admin {
      return Err(anyhow!("this method is admin only"));
    }
    // Some extra checks if user target is an actual User
    if let UserTarget::User(user_id) = &user_target {
      let user = get_user(user_id).await?;
      // Admins already hold full permissions; refuse to edit them here.
      if user.admin {
        return Err(anyhow!(
          "cannot use this method to update other admins permissions"
        ));
      }
      if !user.enabled {
        return Err(anyhow!("user not enabled"));
      }
    }
    // Resolve the name-or-id target into (variant, id), validating
    // that the user / user group actually exists.
    let (user_target_variant, user_target_id) =
      extract_user_target_with_validation(&user_target).await?;
    let id = ObjectId::from_str(&user_target_id)
      .context("id is not ObjectId")?;
    // Base permissions live under `all.{resource_type}` on the doc.
    let field = format!("all.{resource_type}");
    let filter = doc! { "_id": id };
    let update = doc! { "$set": { &field: permission.as_ref() } };
    // Write to `users` or `user_groups` depending on the target kind.
    match user_target_variant {
      UserTargetVariant::User => {
        db_client()
          .await
          .users
          .update_one(filter, update)
          .await
          .with_context(|| {
            format!("failed to set {field}: {permission} on db")
          })?;
      }
      UserTargetVariant::UserGroup => {
        db_client()
          .await
          .user_groups
          .update_one(filter, update)
          .await
          .with_context(|| {
            format!("failed to set {field}: {permission} on db")
          })?;
      }
    }
    Ok(UpdatePermissionOnResourceTypeResponse {})
  }
}
impl Resolve<UpdatePermissionOnTarget, User> for State {
#[instrument(name = "UpdatePermissionOnTarget", skip(self, admin))]
async fn resolve(
@@ -129,8 +199,8 @@ impl Resolve<UpdatePermissionOnTarget, User> for State {
"level": permission.as_ref(),
}
},
UpdateOptions::builder().upsert(true).build(),
)
.with_options(UpdateOptions::builder().upsert(true).build())
.await?;
Ok(UpdatePermissionOnTargetResponse {})
@@ -150,7 +220,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for users")?
.context("no matching user found")?
@@ -165,7 +235,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user_groups")?
.context("no matching user_group found")?
@@ -192,7 +262,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builds
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builds")?
.context("no matching build found")?
@@ -207,7 +277,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builders
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builders")?
.context("no matching builder found")?
@@ -222,7 +292,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.deployments
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for deployments")?
.context("no matching deployment found")?
@@ -237,7 +307,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.servers
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for servers")?
.context("no matching server found")?
@@ -252,7 +322,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.repos
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for repos")?
.context("no matching repo found")?
@@ -267,7 +337,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.alerters
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for alerters")?
.context("no matching alerter found")?
@@ -282,7 +352,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.procedures
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for procedures")?
.context("no matching procedure found")?
@@ -297,7 +367,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.server_templates
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for server templates")?
.context("no matching server template found")?
@@ -312,7 +382,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.resource_syncs
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?

View File

@@ -1,10 +1,24 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::*,
entities::{permission::PermissionLevel, repo::Repo, user::User},
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{PartialRepoConfig, Repo},
user::User,
NoData,
},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{resource, state::State};
use crate::{
config::core_config,
resource,
state::{github_client, State},
};
impl Resolve<CreateRepo, User> for State {
#[instrument(name = "CreateRepo", skip(self, user))]
@@ -56,3 +70,190 @@ impl Resolve<UpdateRepo, User> for State {
resource::update::<Repo>(&id, config, &user).await
}
}
// Creates the GitHub push webhook for a Repo resource, pointing at this
// core's `/listener/github/repo/{id}/clone` or `/pull` endpoint
// depending on `action`. Idempotent: an existing active hook with the
// expected url is left alone. Also turns the repo's `webhook_enabled`
// config on if it was off.
impl Resolve<CreateRepoWebhook, User> for State {
  #[instrument(name = "CreateRepoWebhook", skip(self, user))]
  async fn resolve(
    &self,
    CreateRepoWebhook { repo, action }: CreateRepoWebhook,
    user: User,
  ) -> anyhow::Result<CreateRepoWebhookResponse> {
    // Managing webhooks requires the github webhook app in core config.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    // Creating a webhook requires Write permission on the Repo.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if repo.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't create webhook"
      ));
    }
    // `config.repo` is expected in "{owner}/{repo_name}" form.
    let mut split = repo.config.repo.split('/');
    let owner = split.next().context("Repo repo has no owner")?;
    // Only owners the webhook app is installed for can be managed.
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo_name =
      split.next().context("Repo repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo_name)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      github_webhook_secret,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url over the core host when set.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    // The listener endpoint differs by which action the hook triggers.
    let url = match action {
      RepoWebhookAction::Clone => {
        format!("{host}/listener/github/repo/{}/clone", repo.id)
      }
      RepoWebhookAction::Pull => {
        format!("{host}/listener/github/repo/{}/pull", repo.id)
      }
    };
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        // Active hook already points at the listener: nothing to do.
        return Ok(NoData {});
      }
    }
    // Now good to create the webhook
    let request = ReposCreateWebhookRequest {
      active: Some(true),
      config: Some(ReposCreateWebhookRequestConfig {
        url,
        secret: github_webhook_secret.to_string(),
        content_type: String::from("json"),
        insecure_ssl: None,
        digest: Default::default(),
        token: Default::default(),
      }),
      events: vec![String::from("push")],
      name: String::from("web"),
    };
    github_repos
      .create_webhook(owner, repo_name, &request)
      .await
      .context("failed to create webhook")?;
    // Flip webhook_enabled on so incoming pushes actually act.
    if !repo.config.webhook_enabled {
      self
        .resolve(
          UpdateRepo {
            id: repo.id,
            config: PartialRepoConfig {
              webhook_enabled: Some(true),
              ..Default::default()
            },
          },
          user,
        )
        .await
        .context("failed to update repo to enable webhook")?;
    }
    Ok(NoData {})
  }
}
// Deletes the GitHub push webhook for a Repo resource (clone or pull
// variant per `action`), if an active hook pointing at this core's
// listener url exists. Succeeds (NoData) even when no hook is found.
impl Resolve<DeleteRepoWebhook, User> for State {
  #[instrument(name = "DeleteRepoWebhook", skip(self, user))]
  async fn resolve(
    &self,
    DeleteRepoWebhook { repo, action }: DeleteRepoWebhook,
    user: User,
  ) -> anyhow::Result<DeleteRepoWebhookResponse> {
    // Managing webhooks requires the github webhook app in core config.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    // Deleting a webhook requires Write permission on the Repo.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // Fix: message previously said "can't create webhook" (copy-paste
    // from CreateRepoWebhook); this is the delete path, matching
    // DeleteBuildWebhook's wording.
    if repo.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't delete webhook"
      ));
    }
    // `config.repo` is expected in "{owner}/{repo_name}" form.
    let mut split = repo.config.repo.split('/');
    let owner = split.next().context("Repo repo has no owner")?;
    // Only owners the webhook app is installed for can be managed.
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo_name =
      split.next().context("Repo repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo_name)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url over the core host when set.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    // The listener endpoint differs by which action the hook triggers.
    let url = match action {
      RepoWebhookAction::Clone => {
        format!("{host}/listener/github/repo/{}/clone", repo.id)
      }
      RepoWebhookAction::Pull => {
        format!("{host}/listener/github/repo/{}/pull", repo.id)
      }
    };
    // Delete the first active hook matching the expected listener url.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        github_repos
          .delete_webhook(owner, repo_name, webhook.id)
          .await
          .context("failed to delete webhook")?;
        return Ok(NoData {});
      }
    }
    // No webhook to delete, all good
    Ok(NoData {})
  }
}

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
@@ -13,7 +14,6 @@ use monitor_client::{
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
@@ -112,8 +112,10 @@ impl Resolve<CreateNetwork, User> for State {
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("create network", serialize_error_pretty(&e)),
Err(e) => update.push_error_log(
"create network",
format_serror(&e.context("failed to create network").into()),
),
};
update.finalize();
@@ -149,8 +151,10 @@ impl Resolve<DeleteNetwork, User> for State {
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("delete network", serialize_error_pretty(&e)),
Err(e) => update.push_error_log(
"delete network",
format_serror(&e.context("failed to delete network").into()),
),
};
update.finalize();

View File

@@ -51,17 +51,14 @@ impl Resolve<CreateServiceUser, User> for State {
create_server_permissions: false,
create_build_permissions: false,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
};
user.id = db_client()
.await
.users
.insert_one(&user, None)
.insert_one(&user)
.await
.context("failed to create service user on db")?
.inserted_id
@@ -91,7 +88,7 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
let db = db_client().await;
let service_user = db
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("no user with given username")?;
@@ -102,12 +99,11 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
.update_one(
doc! { "username": &username },
doc! { "$set": { "config.data.description": description } },
None,
)
.await
.context("failed to update user on db")?;
db.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("user with username not found")
@@ -155,7 +151,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
let db = db_client().await;
let api_key = db
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
@@ -168,7 +164,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
return Err(anyhow!("user is not service user"));
};
db.api_keys
.delete_one(doc! { "key": key }, None)
.delete_one(doc! { "key": key })
.await
.context("failed to delete api key on db")?;
Ok(DeleteApiKeyForServiceUserResponse {})

View File

@@ -1,4 +1,5 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
@@ -7,7 +8,7 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
config::core::CoreConfig,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
@@ -15,28 +16,36 @@ use monitor_client::{
server::{stats::SeverityLevel, Server},
server_template::ServerTemplate,
sync::{
PendingSyncUpdates, PendingSyncUpdatesData,
PendingSyncUpdatesDataErr, PendingSyncUpdatesDataOk,
ResourceSync,
PartialResourceSyncConfig, PendingSyncUpdates,
PendingSyncUpdatesData, PendingSyncUpdatesDataErr,
PendingSyncUpdatesDataOk, ResourceSync,
},
update::ResourceTarget,
user::User,
NoData,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
config::core_config,
helpers::{
alert::send_alerts,
query::get_id_to_tags,
sync::resource::{get_updates_for_view, AllResourcesById},
sync::{
deployment,
resource::{get_updates_for_view, AllResourcesById},
},
},
resource,
state::{db_client, State},
state::{db_client, github_client, State},
};
impl Resolve<CreateResourceSync, User> for State {
@@ -127,7 +136,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
)
.await
.context("failed to get server updates")?,
deployment_updates: get_updates_for_view::<Deployment>(
deployment_updates: deployment::get_updates_for_view(
resources.deployments,
sync.config.delete,
&all_resources,
@@ -232,7 +241,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
message: None,
data: PendingSyncUpdatesData::Err(
PendingSyncUpdatesDataErr {
message: serialize_error_pretty(&e),
message: format_serror(&e.into()),
},
),
},
@@ -259,14 +268,11 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
let Some(existing) = db_client()
.await
.alerts
.find_one(
doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
},
None,
)
.find_one(doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
})
.await
.context("failed to query db for alert")
.inspect_err(|e| warn!("{e:#}"))
@@ -287,11 +293,12 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resolved_ts: None,
};
db.alerts
.insert_one(&alert, None)
.insert_one(&alert)
.await
.context("failed to open existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
.ok();
send_alerts(&[alert]).await;
}
// CLOSE ALERT
(Some(existing), false) => {
@@ -319,3 +326,190 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
crate::resource::get::<ResourceSync>(&sync.id).await
}
}
impl Resolve<CreateSyncWebhook, User> for State {
  #[instrument(name = "CreateSyncWebhook", skip(self, user))]
  async fn resolve(
    &self,
    CreateSyncWebhook { sync, action }: CreateSyncWebhook,
    user: User,
  ) -> anyhow::Result<CreateSyncWebhookResponse> {
    // Webhook management requires the github app to be configured in core config.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    // Creating a webhook is a Write-level operation on the ResourceSync.
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if sync.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't create webhook"
      ));
    }
    // The configured repo is expected in "owner/name" format.
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    // Only owners covered by a github app installation can be managed.
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    // Fixed message for consistency: this is the Sync handler (was "Repo repo ...").
    let repo =
      split.next().context("Sync repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      github_webhook_secret,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url when configured, else core host.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    let url = match action {
      SyncWebhookAction::Refresh => {
        format!("{host}/listener/github/sync/{}/refresh", sync.id)
      }
      SyncWebhookAction::Sync => {
        format!("{host}/listener/github/sync/{}/sync", sync.id)
      }
    };
    // An active webhook already pointing at this url means nothing to do.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        return Ok(NoData {});
      }
    }
    // Now good to create the webhook
    let request = ReposCreateWebhookRequest {
      active: Some(true),
      config: Some(ReposCreateWebhookRequestConfig {
        url,
        secret: github_webhook_secret.to_string(),
        content_type: String::from("json"),
        insecure_ssl: None,
        digest: Default::default(),
        token: Default::default(),
      }),
      events: vec![String::from("push")],
      name: String::from("web"),
    };
    github_repos
      .create_webhook(owner, repo, &request)
      .await
      .context("failed to create webhook")?;
    // Flip webhook_enabled on so the listener will act on incoming events.
    if !sync.config.webhook_enabled {
      self
        .resolve(
          UpdateResourceSync {
            id: sync.id,
            config: PartialResourceSyncConfig {
              webhook_enabled: Some(true),
              ..Default::default()
            },
          },
          user,
        )
        .await
        .context("failed to update sync to enable webhook")?;
    }
    Ok(NoData {})
  }
}
impl Resolve<DeleteSyncWebhook, User> for State {
  #[instrument(name = "DeleteSyncWebhook", skip(self, user))]
  async fn resolve(
    &self,
    DeleteSyncWebhook { sync, action }: DeleteSyncWebhook,
    user: User,
  ) -> anyhow::Result<DeleteSyncWebhookResponse> {
    // Webhook management requires the github app to be configured in core config.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    // Deleting a webhook is a Write-level operation on the ResourceSync.
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if sync.config.repo.is_empty() {
      // Fixed message: this is the delete handler (was "can't create webhook").
      return Err(anyhow!(
        "No repo configured, can't delete webhook"
      ));
    }
    // The configured repo is expected in "owner/name" format.
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    // Only owners covered by a github app installation can be managed.
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo =
      split.next().context("Sync repo has no repo after the /")?;
    let github_repos = github.repos();
    // List existing webhooks to find one matching this action's listener url.
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      github_webhook_base_url,
      ..
    } = core_config();
    // Prefer the dedicated webhook base url when configured, else core host.
    let host = github_webhook_base_url.as_ref().unwrap_or(host);
    let url = match action {
      SyncWebhookAction::Refresh => {
        format!("{host}/listener/github/sync/{}/refresh", sync.id)
      }
      SyncWebhookAction::Sync => {
        format!("{host}/listener/github/sync/{}/sync", sync.id)
      }
    };
    // Delete the first active webhook pointing at the expected listener url.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        github_repos
          .delete_webhook(owner, repo, webhook.id)
          .await
          .context("failed to delete webhook")?;
        return Ok(NoData {});
      }
    }
    // No webhook to delete, all good
    Ok(NoData {})
  }
}

View File

@@ -46,7 +46,7 @@ impl Resolve<CreateTag, User> for State {
tag.id = db_client()
.await
.tags
.insert_one(&tag, None)
.insert_one(&tag)
.await
.context("failed to create tag on db")?
.inserted_id

View File

@@ -29,13 +29,14 @@ impl Resolve<CreateUserGroup, User> for State {
let user_group = UserGroup {
id: Default::default(),
users: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
name,
};
let db = db_client().await;
let id = db
.user_groups
.insert_one(user_group, None)
.insert_one(user_group)
.await
.context("failed to create UserGroup on db")?
.inserted_id
@@ -99,7 +100,7 @@ impl Resolve<DeleteUserGroup, User> for State {
.delete_many(doc! {
"user_target.type": "UserGroup",
"user_target.id": id,
}, None)
})
.await
.context("failed to clean up UserGroups permissions. User Group has been deleted")?;
@@ -125,7 +126,7 @@ impl Resolve<AddUserToUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -138,12 +139,11 @@ impl Resolve<AddUserToUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$addToSet": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -171,7 +171,7 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -184,12 +184,11 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$pull": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -229,15 +228,11 @@ impl Resolve<SetUsersInUserGroup, User> for State {
Err(_) => doc! { "name": &user_group },
};
db.user_groups
.update_one(
filter.clone(),
doc! { "$set": { "users": users } },
None,
)
.update_one(filter.clone(), doc! { "$set": { "users": users } })
.await
.context("failed to add user to group on db")?;
.context("failed to set users on user group")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")

View File

@@ -44,7 +44,7 @@ impl Resolve<CreateVariable, User> for State {
db_client()
.await
.variables
.insert_one(&variable, None)
.insert_one(&variable)
.await
.context("failed to create variable on db")?;
@@ -86,7 +86,6 @@ impl Resolve<UpdateVariableValue, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "value": &value } },
None,
)
.await
.context("failed to update variable value on db")?;
@@ -127,7 +126,6 @@ impl Resolve<UpdateVariableDescription, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "description": &description } },
None,
)
.await
.context("failed to update variable description on db")?;
@@ -148,7 +146,7 @@ impl Resolve<DeleteVariable, User> for State {
db_client()
.await
.variables
.delete_one(doc! { "name": &name }, None)
.delete_one(doc! { "name": &name })
.await
.context("failed to delete variable on db")?;

View File

@@ -216,8 +216,8 @@ impl GithubOauthClient {
#[derive(Deserialize)]
pub struct AccessTokenResponse {
pub access_token: String,
pub scope: String,
pub token_type: String,
// pub scope: String,
// pub token_type: String,
}
#[derive(Deserialize)]
@@ -225,5 +225,5 @@ pub struct GithubUserResponse {
pub login: String,
pub id: u128,
pub avatar_url: String,
pub email: Option<String>,
// pub email: Option<String>,
}

View File

@@ -2,6 +2,7 @@ use anyhow::{anyhow, Context};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::{
monitor_timestamp,
user::{User, UserConfig},
@@ -66,7 +67,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.github_id": &github_id }, None)
.find_one(doc! { "config.data.github_id": &github_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -76,7 +77,7 @@ async fn callback(
None => {
let ts = monitor_timestamp();
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: github_user.login,
@@ -86,11 +87,8 @@ async fn callback(
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Github {
github_id,
avatar: github_user.avatar_url,
@@ -98,7 +96,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -185,10 +185,10 @@ impl GoogleOauthClient {
#[derive(Deserialize)]
pub struct AccessTokenResponse {
pub access_token: String,
// pub access_token: String,
pub id_token: String,
pub scope: String,
pub token_type: String,
// pub scope: String,
// pub token_type: String,
}
#[derive(Deserialize, Clone)]

View File

@@ -3,6 +3,7 @@ use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::user::{User, UserConfig};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
@@ -75,7 +76,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.google_id": &google_id }, None)
.find_one(doc! { "config.data.google_id": &google_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -85,7 +86,7 @@ async fn callback(
None => {
let ts = unix_timestamp_ms() as i64;
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: google_user
@@ -101,11 +102,8 @@ async fn callback(
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Google {
google_id,
avatar: google_user.picture,
@@ -113,7 +111,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -3,6 +3,7 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::http::HeaderMap;
use mongo_indexed::Document;
use monitor_client::{
api::auth::{
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
@@ -46,7 +47,7 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
let no_users_exist = db_client()
.await
.users
.find_one(None, None)
.find_one(Document::new())
.await?
.is_none();
@@ -61,18 +62,15 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Local { password },
};
let user_id = db_client()
.await
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user")?
.inserted_id
@@ -102,7 +100,7 @@ impl Resolve<LoginLocalUser, HeaderMap> for State {
let user = db_client()
.await
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed at db query for users")?
.with_context(|| {

View File

@@ -127,7 +127,7 @@ pub async fn auth_api_key_get_user_id(
let key = db_client()
.await
.api_keys
.find_one(doc! { "key": key }, None)
.find_one(doc! { "key": key })
.await
.context("failed to query db")?
.context("no api key matching key")?;

View File

@@ -1,9 +1,8 @@
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use aws_config::BehaviorVersion;
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ec2::{
config::Region,
types::{
BlockDeviceMapping, EbsBlockDevice,
InstanceNetworkInterfaceSpecification, InstanceStateChange,

View File

@@ -0,0 +1,82 @@
use anyhow::{anyhow, Context};
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ecr::Client as EcrClient;
use run_command::async_run_command;
/// Construct an `aws_sdk_ecr` client for the given region using the
/// provided static credentials.
///
/// NOTE(review): credentials are passed by mutating the process-wide env
/// vars `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` before loading the
/// default config, which the SDK's default provider chain reads. This is
/// racy if clients with different credentials are ever built concurrently
/// — TODO confirm callers never do this in parallel, or switch to an
/// explicit credentials provider.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
async fn make_ecr_client(
  region: String,
  access_key_id: &str,
  secret_access_key: &str,
) -> EcrClient {
  // The env vars are the only credential channel used here.
  std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
  std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
  let region = Region::new(region);
  let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
    .region(region)
    .load()
    .await;
  EcrClient::new(&config)
}
/// Create the ECR repository named `repo` if it does not already exist.
///
/// Returns `Ok(())` whether the repository already existed or was created.
///
/// NOTE(review): `describe_repositories` is a paginated API and only the
/// first page of results is inspected here — TODO confirm accounts stay
/// under the page size, or paginate.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn maybe_create_repo(
  repo: &str,
  region: String,
  access_key_id: &str,
  secret_access_key: &str,
) -> anyhow::Result<()> {
  let client =
    make_ecr_client(region, access_key_id, secret_access_key).await;
  // Check whether a repository with this name already exists.
  let existing = client
    .describe_repositories()
    .send()
    .await
    .context("failed to describe existing repositories")?
    .repositories
    .unwrap_or_default();
  // Idiomatic Option comparison replaces the verbose if-let-else closure.
  if existing.iter().any(|r| r.repository_name() == Some(repo)) {
    return Ok(());
  }
  client
    .create_repository()
    .repository_name(repo)
    .send()
    .await
    .context("failed to create repository")?;
  Ok(())
}
/// Gets a token for docker login.
///
/// Requires the aws cli be installed on the host
///
/// NOTE(review): the credentials are interpolated directly into the shell
/// command line, which can be visible to other local processes (e.g. via
/// `ps`). Consider passing them through the child's environment instead —
/// TODO confirm whether this host model makes that a concern.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn get_ecr_token(
  region: &str,
  access_key_id: &str,
  secret_access_key: &str,
) -> anyhow::Result<String> {
  // Shell out to the aws cli; the leading env var assignments scope the
  // credentials to this single command invocation.
  let log = async_run_command(&format!(
    "AWS_ACCESS_KEY_ID={access_key_id} AWS_SECRET_ACCESS_KEY={secret_access_key} aws ecr get-login-password --region {region}"
  ))
  .await;
  if log.success() {
    // stdout holds the login password token on success.
    Ok(log.stdout)
  } else {
    // Surface both streams so the failure is diagnosable from the error.
    Err(
      anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr)
        .context("failed to get aws ecr login token"),
    )
  }
}

View File

@@ -0,0 +1,2 @@
pub mod ec2;
pub mod ecr;

View File

@@ -1,4 +1,6 @@
pub mod aws;
#[allow(unused)]
pub mod hetzner;
#[derive(Debug)]

View File

@@ -4,8 +4,9 @@ use anyhow::Context;
use merge_config_files::parse_config_file;
use monitor_client::entities::{
config::core::{
AwsCredentials, CoreConfig, Env, HetznerCredentials, MongoConfig,
OauthCredentials,
AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig,
GithubWebhookAppInstallationConfig, HetznerCredentials,
MongoConfig, OauthCredentials,
},
logger::LogConfig,
};
@@ -45,6 +46,27 @@ pub fn core_config() -> &'static CoreConfig {
.unwrap_or_else(|e| {
panic!("failed at parsing config at {config_path} | {e:#}")
});
let installations = match (env.monitor_github_webhook_app_installations_ids, env.monitor_github_webhook_app_installations_namespaces) {
(Some(ids), Some(namespaces)) => {
if ids.len() != namespaces.len() {
panic!("MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
}
ids
.into_iter()
.zip(namespaces)
.map(|(id, namespace)| GithubWebhookAppInstallationConfig {
id,
namespace
})
.collect()
},
(Some(_), None) | (None, Some(_)) => {
panic!("Got only one of MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided");
}
(None, None) => {
config.github_webhook_app.installations
}
};
// recreating CoreConfig here makes sure we apply all env overrides.
CoreConfig {
title: env.monitor_title.unwrap_or(config.title),
@@ -109,6 +131,15 @@ pub fn core_config() -> &'static CoreConfig {
.monitor_github_oauth_secret
.unwrap_or(config.github_oauth.secret),
},
github_webhook_app: GithubWebhookAppConfig {
app_id: env
.monitor_github_webhook_app_app_id
.unwrap_or(config.github_webhook_app.app_id),
pk_path: env
.monitor_github_webhook_app_pk_path
.unwrap_or(config.github_webhook_app.pk_path),
installations,
},
aws: AwsCredentials {
access_key_id: env
.monitor_aws_access_key_id
@@ -157,6 +188,7 @@ pub fn core_config() -> &'static CoreConfig {
secrets: config.secrets,
github_accounts: config.github_accounts,
docker_accounts: config.docker_accounts,
aws_ecr_registries: config.aws_ecr_registries,
}
})
}

View File

@@ -112,7 +112,7 @@ impl DbClient {
}
}
async fn resource_collection<T>(
async fn resource_collection<T: Send + Sync>(
db: &Database,
collection_name: &str,
) -> anyhow::Result<Collection<T>> {

View File

@@ -6,11 +6,12 @@ use monitor_client::entities::{
alerter::*,
deployment::DeploymentState,
server::stats::SeverityLevel,
update::ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use slack::types::Block;
use crate::state::db_client;
use crate::{config::core_config, state::db_client};
#[instrument]
pub async fn send_alerts(alerts: &[Alert]) {
@@ -18,20 +19,18 @@ pub async fn send_alerts(alerts: &[Alert]) {
return;
}
let alerters = match find_collect(
let Ok(alerters) = find_collect(
&db_client().await.alerters,
doc! { "config.enabled": true },
None,
)
.await
{
Ok(alerters) => alerters,
Err(e) => {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
);
return;
}
.inspect_err(|e| {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
)
}) else {
return;
};
let handles =
@@ -61,7 +60,12 @@ async fn send_alert(alerters: &[Alerter], alert: &Alert) {
return Ok(());
}
// Don't send if resource target not configured on the alerter
// Don't send if resource is in the blacklist
if alerter.config.except_resources.contains(&alert.target) {
return Ok(());
}
// Don't send if whitelist configured and target is not included
if !alerter.config.resources.is_empty()
&& !alerter.config.resources.contains(&alert.target)
{
@@ -126,7 +130,12 @@ async fn send_slack_alert(
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let (text, blocks): (_, Option<_>) = match &alert.data {
AlertData::ServerUnreachable { name, region, .. } => {
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
match alert.level {
SeverityLevel::Ok => {
@@ -143,10 +152,18 @@ async fn send_slack_alert(
SeverityLevel::Critical => {
let text =
format!("{level} | *{name}*{region} is *unreachable* ❌");
let err = err
.as_ref()
.map(|e| format!("\nerror: {e:#?}"))
.unwrap_or_default();
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} is *unreachable* ❌"
"*{name}*{region} is *unreachable* ❌{err}"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
@@ -155,70 +172,136 @@ async fn send_slack_alert(
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
..
} => {
let region = fmt_region(region);
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨");
let blocks = vec![
Block::header(format!("{level} 🚨")),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%*");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text =
format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿 🚨"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
}
}
AlertData::ContainerStateChange {
name,
server_name,
from,
to,
id,
..
} => {
let to = fmt_docker_container_state(to);
@@ -226,7 +309,11 @@ async fn send_slack_alert(
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: {server_name}\nprevious: {from}"
"server: {server_name}\nprevious: {from}",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
)),
];
(text, blocks.into())
@@ -236,24 +323,39 @@ async fn send_slack_alert(
message,
} => {
let text = format!(
"{level} | Failed to terminated AWS builder instance"
"{level} | Failed to terminated AWS builder instance "
);
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"instance id: **{instance_id}**\n{message}"
"instance id: *{instance_id}*\n{message}"
)),
];
(text, blocks.into())
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let text =
format!("{level} | There are pending resource sync updates");
format!("{level} | Pending resource sync updates on {name}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"sync id: **{id}**\nsync name: **{name}**"
"sync id: *{id}*\nsync name: *{name}*",
)),
Block::section(resource_link(
ResourceTargetVariant::ResourceSync,
id,
)),
];
(text, blocks.into())
}
AlertData::BuildFailed { id, name, version } => {
let text = format!("{level} | Build {name} has failed");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"build id: *{id}*\nbuild name: *{name}*\nversion: v{version}",
)),
Block::section(resource_link(ResourceTargetVariant::Build, id))
];
(text, blocks.into())
}
@@ -286,7 +388,41 @@ fn fmt_docker_container_state(state: &DeploymentState) -> String {
/// Formats a severity level as an uppercase label with an emoji,
/// for use in alert titles.
fn fmt_level(level: SeverityLevel) -> &'static str {
  match level {
    SeverityLevel::Critical => "CRITICAL 🚨",
    // Fixed: a stale duplicate `Warning` arm (unreachable pattern)
    // was left behind; only the '‼️' variant is kept.
    SeverityLevel::Warning => "WARNING ‼️",
    SeverityLevel::Ok => "OK ✅",
  }
}
/// Builds the full UI link for a resource by joining the configured
/// core host with the resource type's route segment and the id.
fn resource_link(
  resource_type: ResourceTargetVariant,
  id: &str,
) -> String {
  let segment = match resource_type {
    // 'System' is not a linkable resource.
    ResourceTargetVariant::System => unreachable!(),
    ResourceTargetVariant::Build => "builds",
    ResourceTargetVariant::Builder => "builders",
    ResourceTargetVariant::Deployment => "deployments",
    ResourceTargetVariant::Server => "servers",
    ResourceTargetVariant::Repo => "repos",
    ResourceTargetVariant::Alerter => "alerters",
    ResourceTargetVariant::Procedure => "procedures",
    ResourceTargetVariant::ServerTemplate => "server-templates",
    ResourceTargetVariant::ResourceSync => "resource-syncs",
  };
  format!("{}/{segment}/{id}", core_config().host)
}

View File

@@ -1,13 +1,14 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use mongo_indexed::Document;
use monitor_client::entities::{
permission::{Permission, PermissionLevel, UserTarget},
server::Server,
update::ResourceTarget,
user::User,
};
use mungos::mongodb::bson::doc;
use mungos::mongodb::bson::{doc, Bson};
use periphery_client::PeripheryClient;
use rand::{thread_rng, Rng};
@@ -61,7 +62,6 @@ where
}
}
},
None,
)
.await
.context("failed to remove resource from users recently viewed")
@@ -103,17 +103,34 @@ pub async fn create_permission<T>(
if let Err(e) = db_client()
.await
.permissions
.insert_one(
Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
},
None,
)
.insert_one(Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
})
.await
{
error!("failed to create permission for {target:?} | {e:#}");
};
}
/// Flattens a document only one level deep.
///
/// eg `{ config: { label: "yes", thing: { field1: "ok", field2: "ok" } } }` ->
/// `{ "config.label": "yes", "config.thing": { field1: "ok", field2: "ok" } }`
pub fn flatten_document(doc: Document) -> Document {
  doc.into_iter().fold(
    Document::new(),
    |mut flattened, (outer_field, bson)| {
      match bson {
        // Nested documents get their keys dotted onto the outer key.
        Bson::Document(inner) => {
          for (inner_field, bson) in inner {
            flattened
              .insert(format!("{outer_field}.{inner_field}"), bson);
          }
        }
        // Everything else passes through unchanged.
        other => {
          flattened.insert(outer_field, other);
        }
      }
      flattened
    },
  )
}

View File

@@ -1,6 +1,7 @@
use std::time::{Duration, Instant};
use anyhow::{anyhow, Context, Ok};
use formatting::{bold, colored, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::execute::Execution,
@@ -21,9 +22,16 @@ pub async fn execute_procedure(
update: &Mutex<Update>,
) -> anyhow::Result<()> {
for stage in &procedure.config.stages {
if !stage.enabled {
continue;
}
add_line_to_update(
update,
&format!("executing stage: {}", stage.name),
&format!(
"{}: executing stage: '{}'",
muted("INFO"),
bold(&stage.name)
),
)
.await;
let timer = Instant::now();
@@ -41,16 +49,19 @@ pub async fn execute_procedure(
.await
.with_context(|| {
format!(
"failed stage '{}' execution after {:?}",
stage.name,
"{}: failed stage '{}' execution after {:?}",
colored("ERROR", Color::Red),
bold(&stage.name),
timer.elapsed(),
)
})?;
add_line_to_update(
update,
&format!(
"finished stage '{}' execution in {:?}",
stage.name,
"{}: {} stage '{}' execution in {:?}",
muted("INFO"),
colored("finished", Color::Green),
bold(&stage.name),
timer.elapsed()
),
)
@@ -69,9 +80,15 @@ async fn execute_stage(
) -> anyhow::Result<()> {
let futures = executions.into_iter().map(|execution| async move {
let now = Instant::now();
add_line_to_update(update, &format!("executing: {execution:?}"))
.await;
let fail_log = format!("failed on {execution:?}");
add_line_to_update(
update,
&format!("{}: executing: {execution:?}", muted("INFO")),
)
.await;
let fail_log = format!(
"{}: failed on {execution:?}",
colored("ERROR", Color::Red)
);
let res =
execute_execution(execution.clone(), parent_id, parent_name)
.await
@@ -79,7 +96,9 @@ async fn execute_stage(
add_line_to_update(
update,
&format!(
"finished execution in {:?}: {execution:?}",
"{}: {} execution in {:?}: {execution:?}",
muted("INFO"),
colored("finished", Color::Green),
now.elapsed()
),
)
@@ -263,8 +282,9 @@ async fn execute_execution(
Ok(())
} else {
Err(anyhow!(
"execution not successful. see update {}",
update.id
"{}: execution not successful. see update '{}'",
colored("ERROR", Color::Red),
bold(&update.id),
))
}
}

View File

@@ -68,12 +68,9 @@ async fn prune_stats() -> anyhow::Result<()> {
let res = db_client()
.await
.stats
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} stats from db", res.deleted_count);
Ok(())
@@ -89,12 +86,9 @@ async fn prune_alerts() -> anyhow::Result<()> {
let res = db_client()
.await
.alerts
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} alerts from db", res.deleted_count);
Ok(())

View File

@@ -11,11 +11,11 @@ use monitor_client::entities::{
tag::Tag,
update::{ResourceTargetVariant, Update},
user::{admin_service_user, User},
user_group::UserGroup,
variable::Variable,
Operation,
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId, Document},
@@ -26,14 +26,18 @@ use mungos::{
use crate::{config::core_config, resource, state::db_client};
#[instrument(level = "debug")]
pub async fn get_user(user_id: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user_id) {
// user: Id or username
pub async fn get_user(user: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user) {
return Ok(user);
}
find_one_by_id(&db_client().await.users, user_id)
db_client()
.await
.users
.find_one(id_or_username_filter(user))
.await
.context("failed to query mongo for user")?
.with_context(|| format!("no user found with id {user_id}"))
.with_context(|| format!("no user found with {user}"))
}
#[instrument(level = "debug")]
@@ -89,7 +93,7 @@ pub async fn get_tag(id_or_name: &str) -> anyhow::Result<Tag> {
db_client()
.await
.tags
.find_one(query, None)
.find_one(query)
.await
.context("failed to query mongo for tag")?
.with_context(|| format!("no tag found matching {id_or_name}"))
@@ -120,10 +124,10 @@ pub async fn get_id_to_tags(
}
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
pub async fn get_user_user_groups(
user_id: &str,
) -> anyhow::Result<Vec<String>> {
let res = find_collect(
) -> anyhow::Result<Vec<UserGroup>> {
find_collect(
&db_client().await.user_groups,
doc! {
"users": user_id
@@ -131,50 +135,84 @@ pub async fn get_user_user_group_ids(
None,
)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|ug| ug.id)
.collect();
.context("failed to query db for user groups")
}
/// Collects just the ids of every user group the user belongs to.
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
  user_id: &str,
) -> anyhow::Result<Vec<String>> {
  let groups = get_user_user_groups(user_id).await?;
  Ok(groups.into_iter().map(|group| group.id).collect())
}
/// Returns Vec of all queries on permissions that match against the user
/// or any user groups that the user is a part of.
/// Result used with Mongodb '$or'.
#[instrument(level = "debug")]
pub async fn user_target_query(
pub fn user_target_query(
user_id: &str,
user_groups: &[UserGroup],
) -> anyhow::Result<Vec<Document>> {
let mut user_target_query = vec![
doc! { "user_target.type": "User", "user_target.id": user_id },
];
let user_groups = get_user_user_group_ids(user_id)
.await?
.into_iter()
.map(|ug_id| {
doc! {
"user_target.type": "UserGroup", "user_target.id": ug_id,
}
});
let user_groups = user_groups.iter().map(|ug| {
doc! {
"user_target.type": "UserGroup", "user_target.id": &ug.id,
}
});
user_target_query.extend(user_groups);
Ok(user_target_query)
}
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource(
user_id: &str,
user: &User,
resource_variant: ResourceTargetVariant,
resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
let lowest_permission = if core_config().transparent_mode {
if user.admin {
return Ok(PermissionLevel::Write);
}
// Start with base of Read or None
let mut base = if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
};
// Overlay users base on resource variant
if let Some(level) = user.all.get(&resource_variant).cloned() {
if level > base {
base = level;
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any user groups base on resource variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(level) = group.all.get(&resource_variant).cloned() {
if level > base {
base = level;
}
}
}
if base == PermissionLevel::Write {
// No reason to keep going if already Write at this point.
return Ok(PermissionLevel::Write);
}
// Overlay any specific permissions
let permission = find_collect(
&db_client().await.permissions,
doc! {
"$or": user_target_query(user_id).await?,
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_variant.as_ref(),
"resource_target.id": resource_id
},
@@ -184,7 +222,7 @@ pub async fn get_user_permission_on_resource(
.context("failed to query db for permissions")?
.into_iter()
// get the max permission user has between personal / any user groups
.fold(lowest_permission, |level, permission| {
.fold(base, |level, permission| {
if permission.level > level {
permission.level
} else {
@@ -194,15 +232,39 @@ pub async fn get_user_permission_on_resource(
Ok(permission)
}
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_non_admin(
user_id: &str,
pub async fn get_resource_ids_for_user(
user: &User,
resource_type: ResourceTargetVariant,
) -> anyhow::Result<Vec<String>> {
let permissions = find_collect(
) -> anyhow::Result<Option<Vec<ObjectId>>> {
// Check admin or transparent mode
if user.admin || core_config().transparent_mode {
return Ok(None);
}
// Check user 'all' on variant
if let Some(level) = user.all.get(&resource_type).cloned() {
if level > PermissionLevel::None {
return Ok(None);
}
}
// Check user groups 'all' on variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(level) = group.all.get(&resource_type).cloned() {
if level > PermissionLevel::None {
return Ok(None);
}
}
}
// Get specific ids
let ids = find_collect(
&db_client().await.permissions,
doc! {
"$or": user_target_query(user_id).await?,
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"level": { "$in": ["Read", "Execute", "Write"] }
},
@@ -213,8 +275,12 @@ pub async fn get_resource_ids_for_non_admin(
.into_iter()
.map(|p| p.resource_target.extract_variant_id().1.to_string())
// collect into hashset first to remove any duplicates
.collect::<HashSet<_>>();
Ok(permissions.into_iter().collect())
.collect::<HashSet<_>>()
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
Ok(Some(ids))
}
pub fn id_or_name_filter(id_or_name: &str) -> Document {
@@ -224,6 +290,13 @@ pub fn id_or_name_filter(id_or_name: &str) -> Document {
}
}
/// Builds a mongo filter matching a user by `_id` when the input
/// parses as an ObjectId, falling back to matching by `username`.
pub fn id_or_username_filter(id_or_username: &str) -> Document {
  if let Ok(id) = ObjectId::from_str(id_or_username) {
    doc! { "_id": id }
  } else {
    doc! { "username": id_or_username }
  }
}
pub async fn get_global_variables(
) -> anyhow::Result<HashMap<String, String>> {
Ok(
@@ -240,7 +313,7 @@ pub async fn get_variable(name: &str) -> anyhow::Result<Variable> {
db_client()
.await
.variables
.find_one(doc! { "name": &name }, None)
.find_one(doc! { "name": &name })
.await
.context("failed at call to db")?
.with_context(|| {
@@ -256,12 +329,12 @@ pub async fn get_latest_update(
db_client()
.await
.updates
.find_one(
doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
},
.find_one(doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -0,0 +1,858 @@
use std::{collections::HashMap, time::Duration};
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::{execute::Deploy, read::GetBuildVersions},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentImage, DeploymentState,
PartialDeploymentConfig,
},
sync::SyncUpdate,
tag::Tag,
toml::ResourceToml,
update::{Log, ResourceTarget},
user::sync_user,
},
};
use mungos::find::find_collect;
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update,
resource::MonitorResource,
state::{deployment_status_cache, State},
};
use super::resource::{
run_update_description, run_update_tags, AllResourcesById,
ResourceSync,
};
pub type ToUpdate = Vec<ToUpdateItem>;
pub type ToCreate = Vec<ResourceToml<PartialDeploymentConfig>>;
/// Vec of resource names
pub type ToDelete = Vec<String>;
type UpdatesResult = (ToCreate, ToUpdate, ToDelete);
pub struct ToUpdateItem {
pub id: String,
pub resource: ResourceToml<PartialDeploymentConfig>,
pub update_description: bool,
pub update_tags: bool,
pub deploy: bool,
}
/// Turns all the diffs into a readable string
///
/// Computes the create / update / delete sets for deployments and
/// renders them into a [SyncUpdate] log (HTML-ish spans via
/// `muted`/`bold`/`colored`) for display. Returns `Ok(None)` when
/// the sync would change nothing.
pub async fn get_updates_for_view(
  resources: Vec<ResourceToml<PartialDeploymentConfig>>,
  delete: bool,
  all_resources: &AllResourcesById,
  id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<Option<SyncUpdate>> {
  // name -> current db deployment
  let map = find_collect(Deployment::coll().await, None, None)
    .await
    .context("failed to get deployments from db")?
    .into_iter()
    .map(|r| (r.name.clone(), r))
    .collect::<HashMap<_, _>>();
  let mut update = SyncUpdate {
    log: format!("{} Updates", Deployment::resource_type()),
    ..Default::default()
  };
  let mut to_delete = Vec::<String>::new();
  if delete {
    // anything in the db but absent from the toml gets deleted
    for resource in map.values() {
      if !resources.iter().any(|r| r.name == resource.name) {
        update.to_delete += 1;
        to_delete.push(resource.name.clone())
      }
    }
  }
  // memoization caches shared across extract_to_deploy_and_state calls
  let mut to_deploy_cache = HashMap::<String, bool>::new();
  let mut to_deploy_build_cache = HashMap::<String, String>::new();
  for mut resource in resources.clone() {
    match map.get(&resource.name) {
      Some(original) => {
        // First merge toml resource config (partial) onto default resource config.
        // Makes sure things that aren't defined in toml (come through as None) actually get removed.
        let config: DeploymentConfig = resource.config.into();
        resource.config = config.into();
        Deployment::validate_partial_config(&mut resource.config);
        let mut diff = Deployment::get_diff(
          original.config.clone(),
          resource.config,
          all_resources,
        )?;
        Deployment::validate_diff(&mut diff);
        let original_tags = original
          .tags
          .iter()
          .filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
          .collect::<Vec<_>>();
        let (to_deploy, state, reason) = extract_to_deploy_and_state(
          all_resources,
          &map,
          &resources,
          resource.name.clone(),
          &mut to_deploy_cache,
          &mut to_deploy_build_cache,
        )
        .await?;
        // Only proceed if there are any fields to update,
        // or a change to tags / description
        if diff.is_none()
          && !to_deploy
          && resource.description == original.description
          && resource.tags == original_tags
        {
          if state == DeploymentState::Unknown {
            update.log.push_str(&format!(
              "\n\n{}: {}: '{}'\nDeployment sync actions could not be computed due to Unknown deployment state\n-------------------",
              colored("ERROR", Color::Red),
              Deployment::resource_type(),
              bold(&resource.name)
            ));
          }
          continue;
        }
        update.to_update += 1;
        update.log.push_str(&format!(
          "\n\n{}: {}: '{}'\n-------------------",
          colored("UPDATE", Color::Blue),
          Deployment::resource_type(),
          bold(&resource.name)
        ));
        // one entry per changed field, joined with separators below
        let mut lines = Vec::<String>::new();
        if resource.description != original.description {
          lines.push(format!(
            "{}: 'description'\n{}: {}\n{}: {}",
            muted("field"),
            muted("from"),
            colored(&original.description, Color::Red),
            muted("to"),
            colored(&resource.description, Color::Green)
          ));
        }
        if resource.tags != original_tags {
          let from =
            colored(&format!("{:?}", original_tags), Color::Red);
          let to =
            colored(&format!("{:?}", resource.tags), Color::Green);
          lines.push(format!(
            "{}: 'tags'\n{}: {from}\n{}: {to}",
            muted("field"),
            muted("from"),
            muted("to"),
          ));
        }
        lines.extend(diff.iter_field_diffs().map(
          |FieldDiff { field, from, to }| {
            format!(
              "{}: '{field}'\n{}: {}\n{}: {}",
              muted("field"),
              muted("from"),
              colored(from, Color::Red),
              muted("to"),
              colored(to, Color::Green)
            )
          },
        ));
        if state == DeploymentState::Unknown {
          lines.push(format!(
            "{}: Deployment sync actions {} due to Unknown deployment state",
            colored("ERROR", Color::Red),
            bold("could not be computed")
          ));
        } else if to_deploy {
          let mut line = if state == DeploymentState::Running {
            format!(
              "{}: {reason}, {}",
              muted("deploy"),
              bold("sync will trigger deploy")
            )
          } else {
            // not running: the state itself is the reason to deploy
            format!(
              "{}: deployment is currently in {} state, {}",
              muted("deploy"),
              colored(&state.to_string(), Color::Red),
              bold("sync will trigger deploy")
            )
          };
          if !resource.after.is_empty() {
            line.push_str(&format!(
              "\n{}: {:?}",
              muted("deploy after"),
              resource.after
            ));
          }
          lines.push(line);
        }
        update.log.push('\n');
        update.log.push_str(&lines.join("\n-------------------\n"));
      }
      None => {
        // not in db yet -> will be created
        update.to_create += 1;
        let mut lines = vec![
          format!(
            "{}: {}",
            muted("description"),
            resource.description,
          ),
          format!("{}: {:?}", muted("tags"), resource.tags,),
          format!(
            "{}: {}",
            muted("config"),
            serde_json::to_string_pretty(&resource.config)
              .context("failed to serialize config to json")?
          ),
        ];
        if resource.deploy {
          lines.push(format!(
            "{}: {}",
            muted("will deploy"),
            colored("true", Color::Green)
          ));
          if !resource.after.is_empty() {
            lines.push(format!(
              "{}: {:?}",
              muted("deploy after"),
              resource.after
            ));
          }
        }
        update.log.push_str(&format!(
          "\n\n{}: {}: {}\n{}",
          colored("CREATE", Color::Green),
          Deployment::resource_type(),
          bold(&resource.name),
          lines.join("\n")
        ))
      }
    }
  }
  for name in to_delete {
    update.log.push_str(&format!(
      "\n\n{}: {}: '{}'\n-------------------",
      colored("DELETE", Color::Red),
      Deployment::resource_type(),
      bold(&name)
    ));
  }
  // None signals "nothing to sync" to the caller
  let any_change = update.to_create > 0
    || update.to_update > 0
    || update.to_delete > 0;
  Ok(any_change.then_some(update))
}
/// Gets all the resources to update. For use in sync execution.
///
/// Same diffing logic as [get_updates_for_view], but instead of
/// rendering a log it returns the concrete (to_create, to_update,
/// to_delete) work lists consumed by [run_updates].
pub async fn get_updates_for_execution(
  resources: Vec<ResourceToml<PartialDeploymentConfig>>,
  delete: bool,
  all_resources: &AllResourcesById,
  id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<UpdatesResult> {
  // name -> current db deployment
  let map = find_collect(Deployment::coll().await, None, None)
    .await
    .context("failed to get deployments from db")?
    .into_iter()
    .map(|r| (r.name.clone(), r))
    .collect::<HashMap<_, _>>();
  let mut to_create = ToCreate::new();
  let mut to_update = ToUpdate::new();
  let mut to_delete = ToDelete::new();
  if delete {
    // anything in the db but absent from the toml gets deleted
    for resource in map.values() {
      if !resources.iter().any(|r| r.name == resource.name) {
        to_delete.push(resource.name.clone());
      }
    }
  }
  // memoization caches shared across extract_to_deploy_and_state calls
  let mut to_deploy_cache = HashMap::<String, bool>::new();
  let mut to_deploy_build_cache = HashMap::<String, String>::new();
  for mut resource in resources.clone() {
    match map.get(&resource.name) {
      Some(original) => {
        // First merge toml resource config (partial) onto default resource config.
        // Makes sure things that aren't defined in toml (come through as None) actually get removed.
        let config: DeploymentConfig = resource.config.into();
        resource.config = config.into();
        Deployment::validate_partial_config(&mut resource.config);
        let mut diff = Deployment::get_diff(
          original.config.clone(),
          resource.config,
          all_resources,
        )?;
        Deployment::validate_diff(&mut diff);
        let original_tags = original
          .tags
          .iter()
          .filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
          .collect::<Vec<_>>();
        let (to_deploy, _state, _reason) =
          extract_to_deploy_and_state(
            all_resources,
            &map,
            &resources,
            resource.name.clone(),
            &mut to_deploy_cache,
            &mut to_deploy_build_cache,
          )
          .await?;
        // Only proceed if there are any fields to update,
        // or a change to tags / description
        if diff.is_none()
          && !to_deploy
          && resource.description == original.description
          && resource.tags == original_tags
        {
          continue;
        }
        // Minimizes updates through diffing.
        resource.config = diff.into();
        let update = ToUpdateItem {
          id: original.id.clone(),
          update_description: resource.description
            != original.description,
          update_tags: resource.tags != original_tags,
          resource,
          deploy: to_deploy,
        };
        to_update.push(update);
      }
      None => to_create.push(resource),
    }
  }
  Ok((to_create, to_update, to_delete))
}
/// Boxed future alias — needed because the function below recurses
/// through its 'after' dependencies, and async recursion requires an
/// explicitly boxed return type.
type Res<'a> = std::pin::Pin<
  Box<
    dyn std::future::Future<
        Output = anyhow::Result<(bool, DeploymentState, String)>,
      > + Send
      + 'a,
  >,
>;

/// Decides whether the named deployment should be (re)deployed by the
/// sync, returning `(to_deploy, current_state, human_readable_reason)`.
///
/// A deploy is triggered when: the deployment does not exist yet, a
/// docker-run-affecting config field changed, an attached build (on
/// latest version) has a newer build, or a parent 'after' dependency
/// is itself deploying. Results are memoized in `cache` (name ->
/// to_deploy) and `build_cache` (build id -> latest version) across
/// the recursion.
fn extract_to_deploy_and_state<'a>(
  all_resources: &'a AllResourcesById,
  map: &'a HashMap<String, Deployment>,
  resources: &'a [ResourceToml<PartialDeploymentConfig>],
  name: String,
  // name to 'to_deploy'
  cache: &'a mut HashMap<String, bool>,
  // build id to latest built version string
  build_cache: &'a mut HashMap<String, String>,
) -> Res<'a> {
  Box::pin(async move {
    let mut reason = String::new();
    let Some(deployment) = resources.iter().find(|r| r.name == name)
    else {
      // this case should be unreachable, the names come off of a loop over resources
      cache.insert(name, false);
      return Ok((false, DeploymentState::Unknown, reason));
    };
    if deployment.deploy {
      let Some(original) = map.get(&name) else {
        // not created, definitely deploy
        cache.insert(name, true);
        // Don't need reason here, will be populated automatically
        return Ok((true, DeploymentState::NotDeployed, reason));
      };
      // First merge toml resource config (partial) onto default resource config.
      // Makes sure things that aren't defined in toml (come through as None) actually get removed.
      let config: DeploymentConfig = deployment.config.clone().into();
      let mut config: PartialDeploymentConfig = config.into();
      Deployment::validate_partial_config(&mut config);
      let mut diff = Deployment::get_diff(
        original.config.clone(),
        config,
        all_resources,
      )?;
      Deployment::validate_diff(&mut diff);
      let status = &deployment_status_cache()
        .get_or_insert_default(&original.id)
        .await
        .curr;
      let state = status.state;
      let mut to_deploy = match state {
        DeploymentState::Unknown => false,
        DeploymentState::Running => {
          // Needs to only check config fields that affect docker run
          let changed = diff.server_id.is_some()
            || diff.image.is_some()
            || diff.image_registry.is_some()
            || diff.skip_secret_interp.is_some()
            || diff.network.is_some()
            || diff.restart.is_some()
            || diff.command.is_some()
            || diff.extra_args.is_some()
            || diff.ports.is_some()
            || diff.volumes.is_some()
            || diff.environment.is_some()
            || diff.labels.is_some();
          if changed {
            reason = String::from("deployment config has changed")
          }
          changed
        }
        // All other cases will require Deploy to enter Running state.
        // Don't need reason here as this case is handled outside, using returned state.
        _ => true,
      };
      // Check if build attached, version latest, and there is a new build.
      if !to_deploy {
        // only need to check original, if diff.image was Some, to_deploy would be true.
        if let DeploymentImage::Build { build_id, version } =
          &original.config.image
        {
          // check if version is none, ie use latest build
          if version.is_none() {
            // currently-running image tag, eg 'repo/img:1.2.3' -> '1.2.3'
            let deployed_version = status
              .container
              .as_ref()
              .and_then(|c| c.image.split(':').last())
              .unwrap_or("0.0.0");
            match build_cache.get(build_id) {
              Some(version) if deployed_version != version => {
                to_deploy = true;
                reason = format!(
                  "attached build has new version ({version})"
                );
              }
              Some(_) => {}
              None => {
                // cache miss: query latest build version once per build id
                let Some(version) = State
                  .resolve(
                    GetBuildVersions {
                      build: build_id.to_string(),
                      limit: Some(1),
                      ..Default::default()
                    },
                    sync_user().to_owned(),
                  )
                  .await
                  .context("failed to get build versions")?
                  .pop()
                else {
                  // this case shouldn't ever happen, how would deployment be deployed if build was never built?
                  return Ok((
                    false,
                    DeploymentState::NotDeployed,
                    reason,
                  ));
                };
                let version = version.version.to_string();
                build_cache
                  .insert(build_id.to_string(), version.clone());
                if deployed_version != version {
                  to_deploy = true;
                  reason = format!(
                    "attached build has new version ({version})"
                  );
                }
              }
            };
          }
        }
      }
      // Still need to check 'after' if they need deploy
      if !to_deploy {
        for name in &deployment.after {
          match cache.get(name) {
            Some(will_deploy) if *will_deploy => {
              to_deploy = true;
              reason = format!(
                "parent dependency '{}' is deploying",
                bold(name)
              );
              break;
            }
            Some(_) => {}
            None => {
              // recurse (memoized) into the dependency
              let (will_deploy, _, _) = extract_to_deploy_and_state(
                all_resources,
                map,
                resources,
                name.to_string(),
                cache,
                build_cache,
              )
              .await?;
              if will_deploy {
                to_deploy = true;
                reason = format!(
                  "parent dependency '{}' is deploying",
                  bold(name)
                );
                break;
              }
            }
          }
        }
      }
      cache.insert(name, to_deploy);
      Ok((to_deploy, state, reason))
    } else {
      // The state in this case doesn't matter and won't be read (as long as it isn't 'Unknown' which will log in all cases)
      cache.insert(name, false);
      Ok((false, DeploymentState::NotDeployed, reason))
    }
  })
}
/// Applies the computed create / update / delete lists to the db,
/// then runs deploys for anything flagged, in dependency-ordered
/// rounds ('after' dependencies deploy in an earlier round).
///
/// Returns `None` when there was nothing to do, otherwise the logs
/// for the sync update. Exits early (with an error log) if any
/// deploy in a round fails.
pub async fn run_updates(
  to_create: ToCreate,
  to_update: ToUpdate,
  to_delete: ToDelete,
) -> Option<Vec<Log>> {
  if to_create.is_empty()
    && to_update.is_empty()
    && to_delete.is_empty()
  {
    return None;
  }
  let mut has_error = false;
  let mut log = String::new();
  // Collect all the deployment names that need to be deployed
  // and their 'after' dependencies
  let mut to_deploy = Vec::<(String, Vec<String>)>::new();
  for resource in to_create {
    let name = resource.name.clone();
    let tags = resource.tags.clone();
    let description = resource.description.clone();
    let id = match crate::resource::create::<Deployment>(
      &resource.name,
      resource.config,
      sync_user(),
    )
    .await
    {
      Ok(resource) => resource.id,
      Err(e) => {
        // creation failed: log and move on to the next resource
        has_error = true;
        log.push_str(&format!(
          "\n{}: failed to create {} '{}' | {e:#}",
          colored("ERROR", Color::Red),
          Deployment::resource_type(),
          bold(&name)
        ));
        continue;
      }
    };
    run_update_tags::<Deployment>(
      id.clone(),
      &name,
      tags,
      &mut log,
      &mut has_error,
    )
    .await;
    run_update_description::<Deployment>(
      id,
      &name,
      description,
      &mut log,
      &mut has_error,
    )
    .await;
    log.push_str(&format!(
      "\n{}: {} {} '{}'",
      muted("INFO"),
      colored("created", Color::Green),
      Deployment::resource_type(),
      bold(&name)
    ));
    if resource.deploy {
      to_deploy.push((resource.name, resource.after));
    }
  }
  for ToUpdateItem {
    id,
    resource,
    update_description,
    update_tags,
    deploy,
  } in to_update
  {
    // Update resource
    let name = resource.name.clone();
    let tags = resource.tags.clone();
    let description = resource.description.clone();
    if update_description {
      run_update_description::<Deployment>(
        id.clone(),
        &name,
        description,
        &mut log,
        &mut has_error,
      )
      .await;
    }
    if update_tags {
      run_update_tags::<Deployment>(
        id.clone(),
        &name,
        tags,
        &mut log,
        &mut has_error,
      )
      .await;
    }
    // a failed config update suppresses the deploy for this resource
    let mut config_update_error = false;
    if !resource.config.is_none() {
      if let Err(e) = crate::resource::update::<Deployment>(
        &id,
        resource.config,
        sync_user(),
      )
      .await
      {
        has_error = true;
        config_update_error = true;
        log.push_str(&format!(
          "\n{}: failed to update config on {} '{}' | {e:#}",
          colored("ERROR", Color::Red),
          Deployment::resource_type(),
          bold(&name),
        ))
      } else {
        log.push_str(&format!(
          "\n{}: {} {} '{}' configuration",
          muted("INFO"),
          colored("updated", Color::Blue),
          Deployment::resource_type(),
          bold(&name)
        ));
      }
    }
    if !config_update_error && deploy {
      to_deploy.push((resource.name, resource.after));
    }
  }
  for resource in to_delete {
    if let Err(e) =
      crate::resource::delete::<Deployment>(&resource, sync_user())
        .await
    {
      has_error = true;
      log.push_str(&format!(
        "\n{}: failed to delete {} '{}' | {e:#}",
        colored("ERROR", Color::Red),
        Deployment::resource_type(),
        bold(&resource),
      ))
    } else {
      log.push_str(&format!(
        "\n{}: {} {} '{}'",
        muted("INFO"),
        colored("deleted", Color::Red),
        Deployment::resource_type(),
        bold(&resource)
      ));
    }
  }
  let mut logs = Vec::with_capacity(1);
  let stage = format!("Update {}s", Deployment::resource_type());
  if has_error {
    // db-phase errors: report and skip the deploy phase entirely
    let log = format!(
      "running updates on {}s{log}",
      Deployment::resource_type()
    );
    logs.push(Log::error(&stage, log));
    return Some(logs);
  } else if !log.is_empty() {
    let log = format!(
      "running updates on {}s{log}",
      Deployment::resource_type()
    );
    logs.push(Log::simple(&stage, log));
  }
  if to_deploy.is_empty() {
    return Some(logs);
  }
  let mut log = format!(
    "{}: running executions to sync deployment state",
    muted("INFO")
  );
  let mut round = 1;
  while !to_deploy.is_empty() {
    // Collect all waiting deployments without waiting dependencies.
    let good_to_deploy = to_deploy
      .iter()
      .filter(|(_, after)| {
        to_deploy.iter().all(|(name, _)| !after.contains(name))
      })
      .map(|(name, _)| name.clone())
      .collect::<Vec<_>>();
    // Deploy the ones ready for deployment
    let res =
      join_all(good_to_deploy.iter().map(|name| async move {
        let res = async {
          let req = ExecuteRequest::Deploy(Deploy {
            deployment: name.to_string(),
            stop_signal: None,
            stop_time: None,
          });
          let user = sync_user();
          let update = init_execution_update(&req, user).await?;
          let ExecuteRequest::Deploy(req) = req else {
            unreachable!()
          };
          State.resolve(req, (user.to_owned(), update)).await
        }
        .await;
        (name, res)
      }))
      .await;
    // Log results of deploy
    for (name, res) in res {
      if let Err(e) = res {
        has_error = true;
        log.push_str(&format!(
          "\n{}: failed to deploy '{}' in round {} | {e:#}",
          colored("ERROR", Color::Red),
          bold(name),
          bold(round)
        ));
      } else {
        log.push_str(&format!(
          "\n{}: deployed '{}' in round {}",
          muted("INFO"),
          bold(name),
          bold(round)
        ));
      }
    }
    // Early exit if any deploy has errors
    if has_error {
      log.push_str(&format!(
        "\n{}: exited in round {} {}",
        muted("INFO"),
        bold(round),
        colored("with errors", Color::Red)
      ));
      logs.push(Log::error("Sync Deployment State", log));
      return Some(logs);
    }
    // Remove the deployed ones from 'to_deploy'
    to_deploy.retain(|(name, _)| !good_to_deploy.contains(name));
    // If there must be another round, these are dependent on the first round.
    // Sleep for 1s to allow for first round to startup
    if !to_deploy.is_empty() {
      // Increment the round
      round += 1;
      tokio::time::sleep(Duration::from_secs(1)).await;
    }
  }
  log.push_str(&format!(
    "\n{}: finished after {} round{}",
    muted("INFO"),
    bold(round),
    (round > 1).then_some("s").unwrap_or_default()
  ));
  logs.push(Log::simple("Sync Deployment State", log));
  Some(logs)
}
impl ResourceSync for Deployment {
  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Deployment(id)
  }

  /// Diffs the stored config against the toml partial, after
  /// translating stored ids (server, build) into names so the diff
  /// compares like-for-like with the name-based toml.
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
    resources: &AllResourcesById,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace the server id with name
    original.server_id = match resources.servers.get(&original.server_id) {
      Some(server) => server.name.clone(),
      None => Default::default(),
    };
    // need to replace the build id with name
    if let DeploymentImage::Build { build_id, version } =
      &original.image
    {
      let build_id = match resources.builds.get(build_id) {
        Some(build) => build.name.clone(),
        None => Default::default(),
      };
      original.image = DeploymentImage::Build {
        build_id,
        version: *version,
      };
    }
    Ok(original.partial_diff(update))
  }
}

View File

@@ -1,17 +1,19 @@
use std::{fs, path::Path};
use anyhow::{anyhow, Context};
use formatting::{colored, muted, Color};
use monitor_client::entities::{toml::ResourcesToml, update::Log};
use serde::de::DeserializeOwned;
use super::{colored, muted};
/// Reads a resources toml file (or a directory of them) rooted at
/// `path`, aggregating everything into a single [ResourcesToml].
/// Also returns a [Log] describing what was read.
///
/// Note: this span contained diff residue (a duplicate `log`
/// initialization and a duplicate recursive call); only the final
/// version is kept.
pub fn read_resources(
  path: &Path,
) -> anyhow::Result<(ResourcesToml, Log)> {
  let mut res = ResourcesToml::default();
  let mut log =
    format!("{}: reading resources from {path:?}", muted("INFO"));
  read_resources_recursive(path, &mut res, &mut log).with_context(
    || format!("failed to read resources from {path:?}"),
  )?;
  Ok((res, Log::simple("read remote resources", log)))
}
@@ -30,20 +32,15 @@ fn read_resources_recursive(
{
return Ok(());
}
let more = match parse_toml_file::<ResourcesToml>(path) {
Ok(res) => res,
Err(e) => {
warn!("failed to parse {:?}. skipping file | {e:#}", path);
return Ok(());
}
};
let more = parse_toml_file::<ResourcesToml>(path)
.context("failed to parse resource file")?;
log.push('\n');
log.push_str(&format!(
"{}: {} from {}",
muted("INFO"),
colored("adding resources", "green"),
colored(&path.display().to_string(), "blue")
colored("adding resources", Color::Green),
colored(path.display(), Color::Blue)
));
resources.servers.extend(more.servers);
@@ -63,15 +60,9 @@ fn read_resources_recursive(
.context("failed to read directory contents")?;
for entry in directory.into_iter().flatten() {
let path = entry.path();
if let Err(e) = read_resources_recursive(&path, resources, log)
{
log.push('\n');
log.push_str(&format!(
"{}: failed to read additional resources from {} | {e:#}",
colored("ERROR", "red"),
colored(&path.display().to_string(), "blue")
));
}
read_resources_recursive(&path, resources, log).with_context(
|| format!("failed to read resources from {path:?}"),
)?;
}
Ok(())
} else {
@@ -84,5 +75,8 @@ fn parse_toml_file<T: DeserializeOwned>(
) -> anyhow::Result<T> {
let contents = std::fs::read_to_string(path)
.context("failed to read file contents")?;
toml::from_str(&contents).context("failed to parse toml contents")
toml::from_str(&contents)
// the error without this comes through with multiple lines (\n) and looks bad
.map_err(|e| anyhow!("{e:#}"))
.context("failed to parse toml contents")
}

View File

@@ -7,6 +7,7 @@ use resolver_api::Resolve;
use crate::state::{db_client, State};
pub mod deployment;
pub mod remote;
pub mod resource;
pub mod user_groups;
@@ -41,15 +42,3 @@ pub fn spawn_sync_refresh_loop() {
}
});
}
/// Wraps `content` in an html span carrying the muted-foreground text class.
fn muted(content: &str) -> String {
  let mut html = String::with_capacity(content.len() + 44);
  html.push_str("<span class=\"text-muted-foreground\">");
  html.push_str(content);
  html.push_str("</span>");
  html
}
/// Wraps `content` in an html span carrying the bold font class.
fn bold(content: &str) -> String {
  ["<span class=\"font-bold\">", content, "</span>"].concat()
}
/// Wraps `content` in an html span colored via the `text-{color}-500` class.
pub fn colored(content: &str, color: &str) -> String {
  let mut html = String::with_capacity(content.len() + color.len() + 28);
  html.push_str("<span class=\"text-");
  html.push_str(color);
  html.push_str("-500\">");
  html.push_str(content);
  html.push_str("</span>");
  html
}

View File

@@ -1,6 +1,7 @@
use std::collections::HashMap;
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
@@ -26,8 +27,6 @@ use resolver_api::Resolve;
use crate::{resource::MonitorResource, state::State};
use super::{bold, colored, muted};
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
pub type ToCreate<T> = Vec<ResourceToml<T>>;
/// Vec of resource names
@@ -93,7 +92,7 @@ pub trait ResourceSync: MonitorResource + Sized {
has_error = true;
log.push_str(&format!(
"\n{}: failed to create {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name)
));
@@ -119,7 +118,7 @@ pub trait ResourceSync: MonitorResource + Sized {
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("created", "green"),
colored("created", Color::Green),
Self::resource_type(),
bold(&name)
));
@@ -170,7 +169,7 @@ pub trait ResourceSync: MonitorResource + Sized {
has_error = true;
log.push_str(&format!(
"\n{}: failed to update config on {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name),
))
@@ -178,7 +177,7 @@ pub trait ResourceSync: MonitorResource + Sized {
log.push_str(&format!(
"\n{}: {} {} '{}' configuration",
muted("INFO"),
colored("updated", "blue"),
colored("updated", Color::Blue),
Self::resource_type(),
bold(&name)
));
@@ -193,7 +192,7 @@ pub trait ResourceSync: MonitorResource + Sized {
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&resource),
))
@@ -201,7 +200,7 @@ pub trait ResourceSync: MonitorResource + Sized {
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("deleted", "red"),
colored("deleted", Color::Red),
Self::resource_type(),
bold(&resource)
));
@@ -283,7 +282,7 @@ pub async fn get_updates_for_view<Resource: ResourceSync>(
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("UPDATE", "blue"),
colored("UPDATE", Color::Blue),
Resource::resource_type(),
bold(&resource.name)
));
@@ -294,14 +293,16 @@ pub async fn get_updates_for_view<Resource: ResourceSync>(
"{}: 'description'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.description, "red"),
colored(&original.description, Color::Red),
muted("to"),
colored(&resource.description, "green")
colored(&resource.description, Color::Green)
));
}
if resource.tags != original_tags {
let from = colored(&format!("{:?}", original_tags), "red");
let to = colored(&format!("{:?}", resource.tags), "green");
let from =
colored(&format!("{:?}", original_tags), Color::Red);
let to =
colored(&format!("{:?}", resource.tags), Color::Green);
lines.push(format!(
"{}: 'tags'\n{}: {from}\n{}: {to}",
muted("field"),
@@ -315,9 +316,9 @@ pub async fn get_updates_for_view<Resource: ResourceSync>(
"{}: '{field}'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&from, "red"),
colored(from, Color::Red),
muted("to"),
colored(&to, "green")
colored(to, Color::Green)
)
},
));
@@ -328,7 +329,7 @@ pub async fn get_updates_for_view<Resource: ResourceSync>(
update.to_create += 1;
update.log.push_str(&format!(
"\n\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
colored("CREATE", "green"),
colored("CREATE", Color::Green),
Resource::resource_type(),
bold(&resource.name),
muted("description"),
@@ -346,7 +347,7 @@ pub async fn get_updates_for_view<Resource: ResourceSync>(
for name in to_delete {
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("DELETE", "red"),
colored("DELETE", Color::Red),
Resource::resource_type(),
bold(&name)
));
@@ -459,7 +460,7 @@ pub async fn run_update_tags<Resource: ResourceSync>(
*has_error = true;
log.push_str(&format!(
"\n{}: failed to update tags on {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Resource::resource_type(),
bold(name),
))
@@ -467,7 +468,7 @@ pub async fn run_update_tags<Resource: ResourceSync>(
log.push_str(&format!(
"\n{}: {} {} '{}' tags",
muted("INFO"),
colored("updated", "blue"),
colored("updated", Color::Blue),
Resource::resource_type(),
bold(name)
));
@@ -494,7 +495,7 @@ pub async fn run_update_description<Resource: ResourceSync>(
*has_error = true;
log.push_str(&format!(
"\n{}: failed to update description on {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Resource::resource_type(),
bold(name),
))
@@ -502,7 +503,7 @@ pub async fn run_update_description<Resource: ResourceSync>(
log.push_str(&format!(
"\n{}: {} {} '{}' description",
muted("INFO"),
colored("updated", "blue"),
colored("updated", Color::Blue),
Resource::resource_type(),
bold(name)
));

View File

@@ -1,3 +1,4 @@
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::execute::Execution,
entities::{
@@ -5,7 +6,6 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::{Builder, BuilderConfig},
deployment::{Deployment, DeploymentImage},
procedure::Procedure,
repo::Repo,
server::Server,
@@ -17,12 +17,9 @@ use monitor_client::{
use partial_derive2::{MaybeNone, PartialDiff};
use crate::{
helpers::sync::{
bold, colored, muted,
resource::{
run_update_description, run_update_tags, ResourceSync,
ToUpdateItem,
},
helpers::sync::resource::{
run_update_description, run_update_tags, ResourceSync,
ToUpdateItem,
},
resource::MonitorResource,
};
@@ -73,41 +70,6 @@ impl ResourceSync for Build {
}
}
impl ResourceSync for Deployment {
  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Deployment(id)
  }

  /// Diffs the stored config against the incoming partial config,
  /// after replacing the server / build ids in the stored config with
  /// the corresponding resource names (the incoming toml refers to
  /// resources by name).
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
    resources: &AllResourcesById,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace the server id with name
    original.server_id = resources
      .servers
      .get(&original.server_id)
      .map(|s| s.name.clone())
      .unwrap_or_default();
    // need to replace the build id with name
    if let DeploymentImage::Build { build_id, version } =
      &original.image
    {
      original.image = DeploymentImage::Build {
        build_id: resources
          .builds
          .get(build_id)
          .map(|b| b.name.clone())
          .unwrap_or_default(),
        // version is Copy - dereference instead of cloning
        version: *version,
      };
    }
    Ok(original.partial_diff(update))
  }
}
impl ResourceSync for Repo {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Repo(id)
@@ -329,7 +291,7 @@ impl ResourceSync for Procedure {
has_error = true;
log.push_str(&format!(
"{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name),
))
@@ -337,7 +299,7 @@ impl ResourceSync for Procedure {
log.push_str(&format!(
"{}: {} {} '{}'",
muted("INFO"),
colored("deleted", "red"),
colored("deleted", Color::Red),
Self::resource_type(),
bold(&name)
));
@@ -398,7 +360,7 @@ impl ResourceSync for Procedure {
has_error = true;
log.push_str(&format!(
"\n{}: failed to update {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name)
));
@@ -437,7 +399,7 @@ impl ResourceSync for Procedure {
has_error = true;
log.push_str(&format!(
"\n{}: failed to create {} '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name)
));
@@ -464,7 +426,7 @@ impl ResourceSync for Procedure {
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("created", "green"),
colored("created", Color::Green),
Self::resource_type(),
bold(&name)
));

View File

@@ -1,33 +1,35 @@
use std::{cmp::Ordering, collections::HashMap};
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::{
read::ListUserTargetPermissions,
write::{
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
UpdatePermissionOnTarget,
UpdatePermissionOnResourceType, UpdatePermissionOnTarget,
},
},
entities::{
permission::UserTarget,
permission::{PermissionLevel, UserTarget},
sync::SyncUpdate,
toml::{PermissionToml, UserGroupToml},
update::{Log, ResourceTarget},
update::{Log, ResourceTarget, ResourceTargetVariant},
user::sync_user,
},
};
use mungos::find::find_collect;
use regex::Regex;
use resolver_api::Resolve;
use crate::state::{db_client, State};
use super::{bold, colored, muted, resource::AllResourcesById};
use super::resource::AllResourcesById;
pub struct UpdateItem {
user_group: UserGroupToml,
update_users: bool,
update_permissions: bool,
all_diff: HashMap<ResourceTargetVariant, PermissionLevel>,
}
pub struct DeleteItem {
@@ -71,19 +73,49 @@ pub async fn get_updates_for_view(
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
user_group
.permissions
.retain(|p| p.level > PermissionLevel::None);
user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
update.to_create += 1;
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
colored("CREATE", "green"),
colored(&user_group.name, "green"),
muted("users"),
user_group.users,
muted("permissions"),
user_group.permissions,
));
if user_group.all.is_empty() {
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("permissions"),
user_group.permissions,
));
} else {
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}\n{}: {:#?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("base permissions"),
user_group.all,
muted("permissions"),
user_group.permissions,
));
}
continue;
}
};
@@ -106,6 +138,7 @@ pub async fn get_updates_for_view(
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.filter(|p| p.level > PermissionLevel::None)
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
@@ -184,22 +217,27 @@ pub async fn get_updates_for_view(
original_users.sort();
user_group.users.sort();
let all_diff = diff_group_all(&original.all, &user_group.all);
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_all = !all_diff.is_empty();
let update_permissions =
user_group.permissions != original_permissions;
// only add log after diff detected
if update_users || update_permissions {
if update_users || update_all || update_permissions {
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: user group: '{}'\n-------------------",
colored("UPDATE", "blue"),
colored("UPDATE", Color::Blue),
bold(&user_group.name),
));
let mut lines = Vec::<String>::new();
if update_users {
let adding = user_group
.users
@@ -210,7 +248,7 @@ pub async fn get_updates_for_view(
let adding = if adding.is_empty() {
String::from("None")
} else {
colored(&adding.join(", "), "green")
colored(&adding.join(", "), Color::Green)
};
let removing = original_users
.iter()
@@ -220,7 +258,7 @@ pub async fn get_updates_for_view(
let removing = if removing.is_empty() {
String::from("None")
} else {
colored(&removing.join(", "), "red")
colored(&removing.join(", "), Color::Red)
};
lines.push(format!(
"{}: 'users'\n{}: {removing}\n{}: {adding}",
@@ -229,39 +267,90 @@ pub async fn get_updates_for_view(
muted("adding"),
))
}
if update_all {
let updates = all_diff
.into_iter()
.map(|(variant, (orig, incoming))| {
format!(
"{}: {} {} {}",
bold(variant),
colored(orig, Color::Red),
muted("->"),
colored(incoming, Color::Green)
)
})
.collect::<Vec<_>>()
.join("\n");
lines.push(format!(
"{}: 'base permission'\n{updates}",
muted("field"),
))
}
if update_permissions {
let adding = user_group
.permissions
.iter()
.filter(|permission| {
!original_permissions.contains(permission)
// add if original has no exising permission on the target
!original_permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None")
} else {
colored(&adding.join(", "), "green")
colored(&adding.join(", "), Color::Green)
};
let updating = user_group
.permissions
.iter()
.filter(|permission| {
// update if original has exising permission on the target with different level
let Some(level) = original_permissions
.iter()
.find(|p| p.target == permission.target)
.map(|p| p.level)
else {
return false;
};
permission.level != level
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let updating = if updating.is_empty() {
String::from("None")
} else {
colored(&updating.join(", "), Color::Blue)
};
let removing = original_permissions
.iter()
.filter(|permission| {
!user_group.permissions.contains(permission)
// remove if incoming has no permission on the target
!user_group
.permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None")
} else {
colored(&removing.join(", "), "red")
colored(&removing.join(", "), Color::Red)
};
lines.push(format!(
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
"{}: 'permissions'\n{}: {removing}\n{}: {updating}\n{}: {adding}",
muted("field"),
muted("removing"),
muted("updating"),
muted("adding"),
))
}
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
@@ -270,7 +359,7 @@ pub async fn get_updates_for_view(
for name in &to_delete {
update.log.push_str(&format!(
"\n\n{}: user group: '{}'\n-------------------",
colored("DELETE", "red"),
colored("DELETE", Color::Red),
bold(name),
));
}
@@ -325,6 +414,22 @@ pub async fn get_updates_for_execution(
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
user_group
.permissions
.retain(|p| p.level > PermissionLevel::None);
user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
@@ -351,6 +456,7 @@ pub async fn get_updates_for_execution(
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.filter(|p| p.level > PermissionLevel::None)
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
@@ -429,19 +535,55 @@ pub async fn get_updates_for_execution(
original_users.sort();
user_group.users.sort();
let all_diff = diff_group_all(&original.all, &user_group.all);
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_permissions =
user_group.permissions != original_permissions;
// only push update after failed diff
if update_users || update_permissions {
// Extend permissions with any existing that have no target in incoming
let to_remove = original_permissions
.iter()
.filter(|permission| {
!user_group
.permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| PermissionToml {
target: permission.target.clone(),
level: PermissionLevel::None,
})
.collect::<Vec<_>>();
user_group.permissions.extend(to_remove);
// remove any permissions that already exist on original
user_group.permissions.retain(|permission| {
let Some(level) = original_permissions
.iter()
.find(|p| p.target == permission.target)
.map(|p| p.level)
else {
// not in original, keep it
return true;
};
// keep it if level doesn't match
level != permission.level
});
// only push update after diff detected
if update_users
|| !all_diff.is_empty()
|| !user_group.permissions.is_empty()
{
to_update.push(UpdateItem {
user_group,
update_users,
update_permissions,
all_diff: all_diff
.into_iter()
.map(|(k, (_, v))| (k, v))
.collect(),
});
}
}
@@ -495,7 +637,7 @@ pub async fn run_updates(
has_error = true;
log.push_str(&format!(
"\n{}: failed to create user group '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
bold(&user_group.name)
));
continue;
@@ -503,7 +645,7 @@ pub async fn run_updates(
log.push_str(&format!(
"\n{}: {} user group '{}'",
muted("INFO"),
colored("created", "green"),
colored("created", Color::Green),
bold(&user_group.name)
))
};
@@ -515,6 +657,13 @@ pub async fn run_updates(
&mut has_error,
)
.await;
run_update_all(
user_group.name.clone(),
user_group.all,
&mut log,
&mut has_error,
)
.await;
run_update_permissions(
user_group.name,
user_group.permissions,
@@ -528,7 +677,7 @@ pub async fn run_updates(
for UpdateItem {
user_group,
update_users,
update_permissions,
all_diff,
} in to_update
{
if update_users {
@@ -540,7 +689,16 @@ pub async fn run_updates(
)
.await;
}
if update_permissions {
if !all_diff.is_empty() {
run_update_all(
user_group.name.clone(),
all_diff,
&mut log,
&mut has_error,
)
.await;
}
if !user_group.permissions.is_empty() {
run_update_permissions(
user_group.name,
user_group.permissions,
@@ -562,14 +720,14 @@ pub async fn run_updates(
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete user group '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
bold(&user_group.name)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}'",
muted("INFO"),
colored("deleted", "red"),
colored("deleted", Color::Red),
bold(&user_group.name)
))
}
@@ -602,19 +760,54 @@ async fn set_users(
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set users in group {} | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' users",
muted("INFO"),
colored("updated", "blue"),
colored("updated", Color::Blue),
bold(&user_group)
))
}
}
/// Applies base ("all") permission updates for a user group, one
/// resource type variant at a time, appending the outcome of each call
/// to `log` and flagging `has_error` on any failure.
async fn run_update_all(
  user_group: String,
  all_diff: HashMap<ResourceTargetVariant, PermissionLevel>,
  log: &mut String,
  has_error: &mut bool,
) {
  for (resource_type, permission) in all_diff {
    let res = State
      .resolve(
        UpdatePermissionOnResourceType {
          user_target: UserTarget::UserGroup(user_group.clone()),
          resource_type,
          permission,
        },
        sync_user().to_owned(),
      )
      .await;
    match res {
      Ok(_) => log.push_str(&format!(
        "\n{}: {} user group '{}' base permissions on {resource_type}",
        muted("INFO"),
        colored("updated", Color::Blue),
        bold(&user_group)
      )),
      Err(e) => {
        *has_error = true;
        log.push_str(&format!(
          "\n{}: failed to set base permissions on {resource_type} in group {} | {e:#}",
          colored("ERROR", Color::Red),
          bold(&user_group)
        ));
      }
    }
  }
}
async fn run_update_permissions(
user_group: String,
permissions: Vec<PermissionToml>,
@@ -635,17 +828,188 @@ async fn run_update_permissions(
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set permssion in group {} | target: {target:?} | {e:#}",
colored("ERROR", "red"),
"\n{}: failed to set permission in group {} | target: {target:?} | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' permissions",
"\n{}: {} user group '{}' permissions | {}: {target:?} | {}: {level}",
muted("INFO"),
colored("updated", "blue"),
bold(&user_group)
colored("updated", Color::Blue),
bold(&user_group),
muted("target"),
muted("level")
))
}
}
}
/// Expands any regex defined targets into the full list
async fn expand_user_group_permissions(
permissions: Vec<PermissionToml>,
all_resources: &AllResourcesById,
) -> anyhow::Result<Vec<PermissionToml>> {
let mut expanded =
Vec::<PermissionToml>::with_capacity(permissions.capacity());
for permission in permissions {
let (variant, id) = permission.target.extract_variant_id();
if id.is_empty() {
continue;
}
if id.starts_with('\\') && id.ends_with('\\') {
let inner = &id[1..(id.len() - 1)];
let regex = Regex::new(inner)
.with_context(|| format!("invalid regex. got: {inner}"))?;
match variant {
ResourceTargetVariant::Build => {
let permissions = all_resources
.builds
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Build(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Builder => {
let permissions = all_resources
.builders
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Builder(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Deployment => {
let permissions = all_resources
.deployments
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Deployment(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Server => {
let permissions = all_resources
.servers
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Server(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Repo => {
let permissions = all_resources
.repos
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Repo(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Alerter => {
let permissions = all_resources
.alerters
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Alerter(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Procedure => {
let permissions = all_resources
.procedures
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Procedure(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::ServerTemplate => {
let permissions = all_resources
.templates
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::ServerTemplate(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::ResourceSync => {
let permissions = all_resources
.syncs
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::ResourceSync(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::System => {}
}
} else {
// No regex
expanded.push(permission);
}
}
Ok(expanded)
}
type AllDiff =
HashMap<ResourceTargetVariant, (PermissionLevel, PermissionLevel)>;
/// diffs user_group.all
fn diff_group_all(
original: &HashMap<ResourceTargetVariant, PermissionLevel>,
incoming: &HashMap<ResourceTargetVariant, PermissionLevel>,
) -> AllDiff {
let mut to_update = HashMap::new();
// need to compare both forward and backward because either hashmap could be sparse.
// forward direction
for (variant, level) in incoming {
let original_level = original.get(variant).unwrap_or_default();
if level == original_level {
continue;
}
to_update.insert(*variant, (*original_level, *level));
}
// backward direction
for (variant, level) in original {
let incoming_level = incoming.get(variant).unwrap_or_default();
if level == incoming_level {
continue;
}
to_update.insert(*variant, (*level, *incoming_level));
}
to_update
}

View File

@@ -1,6 +1,7 @@
use std::collections::HashMap;
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::write::{
CreateVariable, DeleteVariable, UpdateVariableDescription,
@@ -16,8 +17,6 @@ use resolver_api::Resolve;
use crate::state::{db_client, State};
use super::{bold, colored, muted};
pub struct ToUpdateItem {
pub variable: Variable,
pub update_value: bool,
@@ -66,7 +65,7 @@ pub async fn get_updates_for_view(
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: variable: '{}'\n-------------------",
colored("UPDATE", "blue"),
colored("UPDATE", Color::Blue),
bold(&item.variable.name),
));
@@ -77,9 +76,9 @@ pub async fn get_updates_for_view(
"{}: 'value'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.value, "red"),
colored(&original.value, Color::Red),
muted("to"),
colored(&item.variable.value, "green")
colored(&item.variable.value, Color::Green)
))
}
@@ -88,9 +87,9 @@ pub async fn get_updates_for_view(
"{}: 'description'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.description, "red"),
colored(&original.description, Color::Red),
muted("to"),
colored(&item.variable.description, "green")
colored(&item.variable.description, Color::Green)
))
}
@@ -99,15 +98,25 @@ pub async fn get_updates_for_view(
}
None => {
update.to_create += 1;
update.log.push_str(&format!(
"\n\n{}: variable: {}\n{}: {}\n{}: {}",
colored("CREATE", "green"),
colored(&variable.name, "green"),
muted("description"),
variable.description,
muted("value"),
variable.value,
));
if variable.description.is_empty() {
update.log.push_str(&format!(
"\n\n{}: variable: {}\n{}: {}",
colored("CREATE", Color::Green),
colored(&variable.name, Color::Green),
muted("value"),
variable.value,
));
} else {
update.log.push_str(&format!(
"\n\n{}: variable: {}\n{}: {}\n{}: {}",
colored("CREATE", Color::Green),
colored(&variable.name, Color::Green),
muted("description"),
variable.description,
muted("value"),
variable.value,
));
}
}
}
}
@@ -115,7 +124,7 @@ pub async fn get_updates_for_view(
for name in &to_delete {
update.log.push_str(&format!(
"\n\n{}: variable: '{}'\n-------------------",
colored("DELETE", "red"),
colored("DELETE", Color::Red),
bold(name),
));
}
@@ -202,14 +211,14 @@ pub async fn run_updates(
has_error = true;
log.push_str(&format!(
"\n{}: failed to create variable '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
bold(&variable.name)
));
} else {
log.push_str(&format!(
"\n{}: {} variable '{}'",
muted("INFO"),
colored("created", "green"),
colored("created", Color::Green),
bold(&variable.name)
))
};
@@ -235,14 +244,14 @@ pub async fn run_updates(
has_error = true;
log.push_str(&format!(
"\n{}: failed to update variable value for '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
bold(&variable.name)
))
} else {
log.push_str(&format!(
"\n{}: {} variable '{}' value",
muted("INFO"),
colored("updated", "blue"),
colored("updated", Color::Blue),
bold(&variable.name)
))
};
@@ -261,14 +270,14 @@ pub async fn run_updates(
has_error = true;
log.push_str(&format!(
"\n{}: failed to update variable description for '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
bold(&variable.name)
))
} else {
log.push_str(&format!(
"\n{}: {} variable '{}' description",
muted("INFO"),
colored("updated", "blue"),
colored("updated", Color::Blue),
bold(&variable.name)
))
};
@@ -288,14 +297,14 @@ pub async fn run_updates(
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete variable '{}' | {e:#}",
colored("ERROR", "red"),
colored("ERROR", Color::Red),
bold(&variable)
))
} else {
log.push_str(&format!(
"\n{}: {} variable '{}'",
muted("INFO"),
colored("deleted", "red"),
colored("deleted", Color::Red),
bold(&variable)
))
}

View File

@@ -45,7 +45,7 @@ pub async fn add_update(
update.id = db_client()
.await
.updates
.insert_one(&update, None)
.insert_one(&update)
.await
.context("failed to insert update into db")?
.inserted_id

View File

@@ -1,425 +0,0 @@
use std::sync::{Arc, OnceLock};
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use hex::ToHex;
use hmac::{Hmac, Mac};
use monitor_client::{
api::{execute, write::RefreshResourceSyncPending},
entities::{
build::Build, procedure::Procedure, repo::Repo,
sync::ResourceSync, user::github_user,
},
};
use resolver_api::Resolve;
use serde::Deserialize;
use sha2::Sha256;
use tokio::sync::Mutex;
use tracing::Instrument;
use crate::{
config::core_config,
helpers::{
cache::Cache, random_duration, update::init_execution_update,
},
resource,
state::State,
};
type HmacSha256 = Hmac<Sha256>;
/// Path parameters for webhook routes keyed only by resource id
/// (eg `/build/:id`, `/repo/:id/clone`).
#[derive(Deserialize)]
struct Id {
  // The target resource's id, taken from the url path.
  id: String,
}
/// Path parameters for routes keyed by resource id and branch
/// (the `/procedure/:id/:branch` route).
#[derive(Deserialize)]
struct IdBranch {
  // The target resource's id, taken from the url path.
  id: String,
  // The branch segment of the url path.
  branch: String,
}
/// Builds the webhook router.
///
/// Every route spawns the actual handler onto a background task
/// (instrumented with a tracing span) and returns immediately, so the
/// webhook sender gets a fast response; handler failures are only logged.
pub fn router() -> Router {
  Router::new()
    .route(
      "/build/:id",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("build_webhook", id);
            async {
              let res = handle_build_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                // fix: log message previously said "webook"
                warn!("failed to run build webhook for build {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      ),
    )
    .route(
      "/repo/:id/clone",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("repo_clone_webhook", id);
            async {
              let res = handle_repo_clone_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run repo clone webhook for repo {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/repo/:id/pull",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("repo_pull_webhook", id);
            async {
              let res = handle_repo_pull_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run repo pull webhook for repo {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/procedure/:id/:branch",
      post(
        |Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("procedure_webhook", id, branch);
            async {
              let res = handle_procedure_webhook(
                id.clone(),
                branch,
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run procedure webhook for procedure {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/sync/:id/refresh",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("sync_refresh_webhook", id);
            async {
              let res = handle_sync_refresh_webhook(
                id.clone(),
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run sync webhook for sync {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/sync/:id/sync",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("sync_execute_webhook", id);
            async {
              let res = handle_sync_execute_webhook(
                id.clone(),
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run sync webhook for sync {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
}
/// Handles a github push webhook for a build: verifies the signature,
/// checks the branch and webhook flag, then triggers `RunBuild`.
async fn handle_build_webhook(
  build_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per build: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = build_locks().get_or_insert_default(&build_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let build = resource::get::<Build>(&build_id).await?;
  if !build.config.webhook_enabled {
    anyhow::bail!("build does not have webhook enabled");
  }
  if pushed_branch != build.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req = crate::api::execute::ExecuteRequest::RunBuild(
    execute::RunBuild { build: build_id },
  );
  // The wrapped request is needed to initialize the update, then
  // unwrapped again to resolve the concrete variant.
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::RunBuild(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Handles a github push webhook for a repo: verifies the signature,
/// checks the branch and webhook flag, then triggers `CloneRepo`.
async fn handle_repo_clone_webhook(
  repo_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per repo: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = repo_locks().get_or_insert_default(&repo_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let repo = resource::get::<Repo>(&repo_id).await?;
  if !repo.config.webhook_enabled {
    anyhow::bail!("repo does not have webhook enabled");
  }
  if pushed_branch != repo.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req = crate::api::execute::ExecuteRequest::CloneRepo(
    execute::CloneRepo { repo: repo_id },
  );
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
  else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Handles a github push webhook for a repo: verifies the signature,
/// checks the branch and webhook flag, then triggers `PullRepo`.
async fn handle_repo_pull_webhook(
  repo_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per repo: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = repo_locks().get_or_insert_default(&repo_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let repo = resource::get::<Repo>(&repo_id).await?;
  if !repo.config.webhook_enabled {
    anyhow::bail!("repo does not have webhook enabled");
  }
  if pushed_branch != repo.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req = crate::api::execute::ExecuteRequest::PullRepo(
    execute::PullRepo { repo: repo_id },
  );
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Handles a github push webhook for a procedure. The expected branch
/// comes from the route path (procedures have no branch config of
/// their own); on match, triggers `RunProcedure`.
async fn handle_procedure_webhook(
  procedure_id: String,
  target_branch: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per procedure: queueing on the per-resource
  // mutex avoids failing later on "action state busy".
  let queue =
    procedure_locks().get_or_insert_default(&procedure_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  if pushed_branch != target_branch {
    anyhow::bail!("request branch does not match expected");
  }
  let procedure = resource::get::<Procedure>(&procedure_id).await?;
  if !procedure.config.webhook_enabled {
    anyhow::bail!("procedure does not have webhook enabled");
  }
  let user = github_user().to_owned();
  let req = crate::api::execute::ExecuteRequest::RunProcedure(
    execute::RunProcedure {
      procedure: procedure_id,
    },
  );
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::RunProcedure(req) = req
  else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Handles a github push webhook for a resource sync: verifies the
/// signature, checks the branch and webhook flag, then refreshes the
/// sync's pending diff (a write op, so no execution update is made).
async fn handle_sync_refresh_webhook(
  sync_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per sync: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = sync_locks().get_or_insert_default(&sync_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let sync = resource::get::<ResourceSync>(&sync_id).await?;
  if !sync.config.webhook_enabled {
    anyhow::bail!("sync does not have webhook enabled");
  }
  if pushed_branch != sync.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  State
    .resolve(RefreshResourceSyncPending { sync: sync_id }, user)
    .await?;
  Ok(())
}
/// Handles a github push webhook for a resource sync: verifies the
/// signature, checks the branch and webhook flag, then triggers
/// `RunSync`.
async fn handle_sync_execute_webhook(
  sync_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per sync: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = sync_locks().get_or_insert_default(&sync_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let sync = resource::get::<ResourceSync>(&sync_id).await?;
  if !sync.config.webhook_enabled {
    anyhow::bail!("sync does not have webhook enabled");
  }
  if pushed_branch != sync.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req =
    crate::api::execute::ExecuteRequest::RunSync(execute::RunSync {
      sync: sync_id,
    });
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::RunSync(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Verifies the `x-hub-signature-256` header github attaches to
/// webhook deliveries against the HMAC-SHA256 of the body under the
/// configured webhook secret.
///
/// Improvements: `?`-based header extraction replaces the
/// `is_none()`/`unwrap()` chains, and the final comparison is
/// constant-time so it leaks no information about how much of the
/// signature matched.
#[instrument(skip_all)]
async fn verify_gh_signature(
  headers: HeaderMap,
  body: &str,
) -> anyhow::Result<()> {
  // Wait a random 0-500ms before answering to add noise against
  // timing-based probing of the endpoint.
  tokio::time::sleep(random_duration(0, 500)).await;
  let signature = headers
    .get("x-hub-signature-256")
    .ok_or_else(|| anyhow!("no signature in headers"))?
    .to_str()
    .map_err(|_| anyhow!("failed to unwrap signature"))?
    .replace("sha256=", "");
  let mut mac = HmacSha256::new_from_slice(
    core_config().github_webhook_secret.as_bytes(),
  )
  .expect("github webhook | failed to create hmac sha256");
  mac.update(body.as_bytes());
  let expected = mac.finalize().into_bytes().encode_hex::<String>();
  // Constant-time equality: always scans the full length.
  let (sig, exp) = (signature.as_bytes(), expected.as_bytes());
  let matches = sig.len() == exp.len()
    && sig.iter().zip(exp).fold(0u8, |acc, (a, b)| acc | (a ^ b))
      == 0;
  if matches {
    Ok(())
  } else {
    Err(anyhow!("signature does not equal expected"))
  }
}
/// Minimal view of a github push event payload: only the `ref` field
/// (e.g. "refs/heads/main") is needed to recover the branch.
#[derive(Deserialize)]
struct GithubWebhookBody {
  #[serde(rename = "ref")]
  branch: String,
}
/// Pulls the branch name out of a github push event body.
///
/// The payload's `ref` has the form `refs/heads/<branch>`. Only the
/// leading prefix is stripped — the previous `replace` would also have
/// removed the substring from anywhere else in the ref.
fn extract_branch(body: &str) -> anyhow::Result<String> {
  let branch = serde_json::from_str::<GithubWebhookBody>(body)
    .context("failed to parse github request body")?
    .branch;
  let branch = match branch.strip_prefix("refs/heads/") {
    Some(stripped) => stripped.to_string(),
    None => branch,
  };
  Ok(branch)
}
// Maps resource id -> per-resource mutex used to serialize webhook
// handling for that resource.
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;
// One lock per build id, created lazily on first webhook delivery.
fn build_locks() -> &'static ListenerLockCache {
  static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  BUILD_LOCKS.get_or_init(Default::default)
}
// One lock per repo id, created lazily on first webhook delivery.
fn repo_locks() -> &'static ListenerLockCache {
  static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  REPO_LOCKS.get_or_init(Default::default)
}
// One lock per procedure id, created lazily on first webhook delivery.
// (Renamed the static from the copy-pasted `BUILD_LOCKS` — it holds
// procedure locks, not build locks.)
fn procedure_locks() -> &'static ListenerLockCache {
  static PROCEDURE_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  PROCEDURE_LOCKS.get_or_init(Default::default)
}
// One lock per sync id, created lazily on first webhook delivery.
fn sync_locks() -> &'static ListenerLockCache {
  static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  SYNC_LOCKS.get_or_init(Default::default)
}

View File

@@ -0,0 +1,51 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::RunBuild,
entities::{build::Build, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
// One lock per build id, created lazily on first webhook delivery.
fn build_locks() -> &'static ListenerLockCache {
  static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  BUILD_LOCKS.get_or_init(Default::default)
}
/// Handles a github push webhook for a build: verifies the signature,
/// checks the branch and webhook flag, then triggers `RunBuild`.
pub async fn handle_build_webhook(
  build_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per build: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = build_locks().get_or_insert_default(&build_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let build = resource::get::<Build>(&build_id).await?;
  if !build.config.webhook_enabled {
    anyhow::bail!("build does not have webhook enabled");
  }
  if pushed_branch != build.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req = ExecuteRequest::RunBuild(RunBuild { build: build_id });
  // The wrapped request is needed to initialize the update, then
  // unwrapped again to resolve the concrete variant.
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunBuild(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -0,0 +1,204 @@
use std::sync::Arc;
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use hex::ToHex;
use hmac::{Hmac, Mac};
use serde::Deserialize;
use sha2::Sha256;
use tokio::sync::Mutex;
use tracing::Instrument;
use crate::{
config::core_config,
helpers::{cache::Cache, random_duration},
};
mod build;
mod procedure;
mod repo;
mod sync;
// HMAC-SHA256 keyed hash, used to verify GitHub's `x-hub-signature-256` header.
type HmacSha256 = Hmac<Sha256>;
/// Path parameters for webhook routes keyed by resource id only.
#[derive(Deserialize)]
struct Id {
  id: String,
}
/// Path parameters for webhook routes keyed by resource id and branch.
#[derive(Deserialize)]
struct IdBranch {
  id: String,
  branch: String,
}
/// Builds the axum router for all github webhook listener endpoints.
///
/// Each route spawns the actual handling onto a background task and
/// returns immediately, so github always receives a prompt response
/// regardless of how long the triggered execution takes. Handler
/// failures are only logged (typo fix: "webook" -> "webhook" in the
/// log messages).
pub fn router() -> Router {
  Router::new()
    .route(
      "/build/:id",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("build_webhook", id);
            async {
              let res = build::handle_build_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run build webhook for build {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      ),
    )
    .route(
      "/repo/:id/clone",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("repo_clone_webhook", id);
            async {
              let res = repo::handle_repo_clone_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run repo clone webhook for repo {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/repo/:id/pull",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("repo_pull_webhook", id);
            async {
              let res = repo::handle_repo_pull_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run repo pull webhook for repo {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/procedure/:id/:branch",
      post(
        |Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("procedure_webhook", id, branch);
            async {
              let res = procedure::handle_procedure_webhook(
                id.clone(),
                branch,
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run procedure webhook for procedure {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/sync/:id/refresh",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("sync_refresh_webhook", id);
            async {
              let res = sync::handle_sync_refresh_webhook(
                id.clone(),
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run sync webhook for sync {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    .route(
      "/sync/:id/sync",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("sync_execute_webhook", id);
            async {
              let res = sync::handle_sync_execute_webhook(
                id.clone(),
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run sync webhook for sync {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
}
/// Verifies the `x-hub-signature-256` header github attaches to
/// webhook deliveries against the HMAC-SHA256 of the body under the
/// configured webhook secret.
///
/// Improvements: `?`-based header extraction replaces the
/// `is_none()`/`unwrap()` chains, and the final comparison is
/// constant-time so it leaks no information about how much of the
/// signature matched.
#[instrument(skip_all)]
async fn verify_gh_signature(
  headers: HeaderMap,
  body: &str,
) -> anyhow::Result<()> {
  // Wait a random 0-500ms before answering to add noise against
  // timing-based probing of the endpoint.
  tokio::time::sleep(random_duration(0, 500)).await;
  let signature = headers
    .get("x-hub-signature-256")
    .ok_or_else(|| anyhow!("no signature in headers"))?
    .to_str()
    .map_err(|_| anyhow!("failed to unwrap signature"))?
    .replace("sha256=", "");
  let mut mac = HmacSha256::new_from_slice(
    core_config().github_webhook_secret.as_bytes(),
  )
  .expect("github webhook | failed to create hmac sha256");
  mac.update(body.as_bytes());
  let expected = mac.finalize().into_bytes().encode_hex::<String>();
  // Constant-time equality: always scans the full length.
  let (sig, exp) = (signature.as_bytes(), expected.as_bytes());
  let matches = sig.len() == exp.len()
    && sig.iter().zip(exp).fold(0u8, |acc, (a, b)| acc | (a ^ b))
      == 0;
  if matches {
    Ok(())
  } else {
    Err(anyhow!("signature does not equal expected"))
  }
}
/// Minimal view of a github push event payload: only the `ref` field
/// (e.g. "refs/heads/main") is needed to recover the branch.
#[derive(Deserialize)]
struct GithubWebhookBody {
  #[serde(rename = "ref")]
  branch: String,
}
fn extract_branch(body: &str) -> anyhow::Result<String> {
let branch = serde_json::from_str::<GithubWebhookBody>(body)
.context("failed to parse github request body")?
.branch
.replace("refs/heads/", "");
Ok(branch)
}
// Maps resource id -> per-resource mutex used to serialize webhook
// handling for that resource.
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;

View File

@@ -0,0 +1,55 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::RunProcedure,
entities::{procedure::Procedure, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
// One lock per procedure id, created lazily on first webhook delivery.
// (Renamed the static from the copy-pasted `BUILD_LOCKS` — it holds
// procedure locks, not build locks.)
fn procedure_locks() -> &'static ListenerLockCache {
  static PROCEDURE_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  PROCEDURE_LOCKS.get_or_init(Default::default)
}
/// Handles a github push webhook for a procedure. The expected branch
/// comes from the route path (procedures have no branch config of
/// their own); on match, triggers `RunProcedure`.
pub async fn handle_procedure_webhook(
  procedure_id: String,
  target_branch: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per procedure: queueing on the per-resource
  // mutex avoids failing later on "action state busy".
  let queue =
    procedure_locks().get_or_insert_default(&procedure_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  if pushed_branch != target_branch {
    anyhow::bail!("request branch does not match expected");
  }
  let procedure = resource::get::<Procedure>(&procedure_id).await?;
  if !procedure.config.webhook_enabled {
    anyhow::bail!("procedure does not have webhook enabled");
  }
  let user = github_user().to_owned();
  let req = ExecuteRequest::RunProcedure(RunProcedure {
    procedure: procedure_id,
  });
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunProcedure(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -0,0 +1,86 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::execute::{CloneRepo, PullRepo},
entities::{repo::Repo, user::github_user},
};
use resolver_api::Resolve;
use crate::{
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
// One lock per repo id, created lazily on first webhook delivery.
fn repo_locks() -> &'static ListenerLockCache {
  static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  REPO_LOCKS.get_or_init(Default::default)
}
/// Handles a github push webhook for a repo: verifies the signature,
/// checks the branch and webhook flag, then triggers `CloneRepo`.
pub async fn handle_repo_clone_webhook(
  repo_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per repo: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = repo_locks().get_or_insert_default(&repo_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let repo = resource::get::<Repo>(&repo_id).await?;
  if !repo.config.webhook_enabled {
    anyhow::bail!("repo does not have webhook enabled");
  }
  if pushed_branch != repo.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req =
    crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo {
      repo: repo_id,
    });
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
  else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Handles a github push webhook for a repo: verifies the signature,
/// checks the branch and webhook flag, then triggers `PullRepo`.
pub async fn handle_repo_pull_webhook(
  repo_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per repo: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = repo_locks().get_or_insert_default(&repo_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let repo = resource::get::<Repo>(&repo_id).await?;
  if !repo.config.webhook_enabled {
    anyhow::bail!("repo does not have webhook enabled");
  }
  if pushed_branch != repo.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req = crate::api::execute::ExecuteRequest::PullRepo(PullRepo {
    repo: repo_id,
  });
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -0,0 +1,78 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use monitor_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{sync::ResourceSync, user::github_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
// One lock per sync id, created lazily on first webhook delivery.
fn sync_locks() -> &'static ListenerLockCache {
  static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  SYNC_LOCKS.get_or_init(Default::default)
}
/// Handles a github push webhook for a resource sync: verifies the
/// signature, checks the branch and webhook flag, then refreshes the
/// sync's pending diff (a write op, so no execution update is made).
pub async fn handle_sync_refresh_webhook(
  sync_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per sync: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = sync_locks().get_or_insert_default(&sync_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let sync = resource::get::<ResourceSync>(&sync_id).await?;
  if !sync.config.webhook_enabled {
    anyhow::bail!("sync does not have webhook enabled");
  }
  if pushed_branch != sync.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  State
    .resolve(RefreshResourceSyncPending { sync: sync_id }, user)
    .await?;
  Ok(())
}
/// Handles a github push webhook for a resource sync: verifies the
/// signature, checks the branch and webhook flag, then triggers
/// `RunSync`.
pub async fn handle_sync_execute_webhook(
  sync_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Serialize deliveries per sync: queueing on the per-resource mutex
  // avoids failing later on "action state busy".
  let queue = sync_locks().get_or_insert_default(&sync_id).await;
  let _guard = queue.lock().await;
  verify_gh_signature(headers, &body).await?;
  let pushed_branch = extract_branch(&body)?;
  let sync = resource::get::<ResourceSync>(&sync_id).await?;
  if !sync.config.webhook_enabled {
    anyhow::bail!("sync does not have webhook enabled");
  }
  if pushed_branch != sync.config.branch {
    anyhow::bail!("request branch does not match expected");
  }
  let user = github_user().to_owned();
  let req = ExecuteRequest::RunSync(RunSync { sync: sync_id });
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunSync(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -5,7 +5,6 @@ use std::{net::SocketAddr, str::FromStr};
use anyhow::Context;
use axum::Router;
use termination_signal::tokio::immediate_term_handle;
use tower_http::{
cors::{Any, CorsLayer},
services::{ServeDir, ServeFile},
@@ -77,13 +76,15 @@ async fn app() -> anyhow::Result<()> {
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let term_signal = immediate_term_handle()?;
let mut term_signal = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::terminate(),
)?;
let app = tokio::spawn(app());
tokio::select! {
res = app => return res?,
_ = term_signal => {},
_ = term_signal.recv() => {},
}
Ok(())

View File

@@ -74,7 +74,7 @@ pub async fn alert_deployments(
return;
}
send_alerts(&alerts).await;
let res = db_client().await.alerts.insert_many(alerts, None).await;
let res = db_client().await.alerts.insert_many(alerts).await;
if let Err(e) = res {
error!("failed to record deployment status alerts to db | {e:#}");
}

View File

@@ -42,7 +42,7 @@ pub async fn alert_servers(
let mut alerts_to_open = Vec::<(Alert, SendAlerts)>::new();
let mut alerts_to_update = Vec::<(Alert, SendAlerts)>::new();
let mut alert_ids_to_close = Vec::<(String, SendAlerts)>::new();
let mut alert_ids_to_close = Vec::<(Alert, SendAlerts)>::new();
for server_status in server_statuses {
let Some(server) = servers.remove(&server_status.id) else {
@@ -101,13 +101,10 @@ pub async fn alert_servers(
}
// Close an open alert
(
ServerState::Ok | ServerState::Disabled,
Some(health_alert),
) => alert_ids_to_close.push((
health_alert.id.clone(),
server.info.send_unreachable_alerts,
)),
(ServerState::Ok | ServerState::Disabled, Some(alert)) => {
alert_ids_to_close
.push((alert.clone(), server.info.send_unreachable_alerts));
}
_ => {}
}
@@ -149,8 +146,8 @@ pub async fn alert_servers(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
) => {
// modify alert level
if alert.level != health.cpu {
// modify alert level only if it has increased
if alert.level < health.cpu {
alert.level = health.cpu;
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
@@ -165,8 +162,20 @@ pub async fn alert_servers(
alerts_to_update.push((alert, server.info.send_cpu_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => alert_ids_to_close
.push((alert.id.clone(), server.info.send_cpu_alerts)),
(SeverityLevel::Ok, Some(alert)) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
percentage: server_status
.stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_cpu_alerts))
}
_ => {}
}
@@ -185,7 +194,7 @@ pub async fn alert_servers(
ts,
resolved: false,
resolved_ts: None,
level: health.cpu,
level: health.mem,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerMem {
id: server_status.id.clone(),
@@ -209,7 +218,8 @@ pub async fn alert_servers(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
) => {
if alert.level != health.mem {
// modify alert level only if it has increased
if alert.level < health.mem {
alert.level = health.mem;
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
@@ -229,8 +239,25 @@ pub async fn alert_servers(
alerts_to_update.push((alert, server.info.send_mem_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => alert_ids_to_close
.push((alert.id.clone(), server.info.send_mem_alerts)),
(SeverityLevel::Ok, Some(alert)) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
total_gb: server_status
.stats
.as_ref()
.map(|s| s.mem_total_gb)
.unwrap_or(0.0),
used_gb: server_status
.stats
.as_ref()
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_mem_alerts))
}
_ => {}
}
@@ -273,6 +300,7 @@ pub async fn alert_servers(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
) => {
// Disk is persistent, update alert if health changes regardless of direction
if *health != alert.level {
let disk =
server_status.stats.as_ref().and_then(|stats| {
@@ -291,8 +319,23 @@ pub async fn alert_servers(
.push((alert, server.info.send_disk_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => alert_ids_to_close
.push((alert.id.clone(), server.info.send_disk_alerts)),
(SeverityLevel::Ok, Some(alert)) => {
let mut alert = alert.clone();
let disk = server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = *health;
alert.data = AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
};
alert_ids_to_close
.push((alert, server.info.send_disk_alerts))
}
_ => {}
}
}
@@ -316,7 +359,7 @@ async fn open_alerts(alerts: &[(Alert, SendAlerts)]) {
let open = || async {
let ids = db
.alerts
.insert_many(alerts.iter().map(|(alert, _)| alert), None)
.insert_many(alerts.iter().map(|(alert, _)| alert))
.await?
.inserted_ids
.into_iter()
@@ -402,22 +445,20 @@ async fn update_alerts(alerts: &[(Alert, SendAlerts)]) {
}
#[instrument(level = "debug")]
async fn resolve_alerts(alert_ids: &[(String, SendAlerts)]) {
if alert_ids.is_empty() {
async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) {
if alerts.is_empty() {
return;
}
let send_alerts_map =
alert_ids.iter().cloned().collect::<HashMap<_, _>>();
let close = || async {
let alert_ids = alert_ids
let close = || async move {
let alert_ids = alerts
.iter()
.map(|(id, _)| {
ObjectId::from_str(id)
.map(|(alert, _)| {
ObjectId::from_str(&alert.id)
.context("failed to convert alert id to ObjectId")
})
.collect::<anyhow::Result<Vec<_>>>()?;
db_client()
.await
.alerts
@@ -429,31 +470,25 @@ async fn resolve_alerts(alert_ids: &[(String, SendAlerts)]) {
"resolved_ts": monitor_timestamp()
}
},
None,
)
.await
.context("failed to resolve alerts on db")?;
let mut closed = find_collect(
&db_client().await.alerts,
doc! { "_id": { "$in": &alert_ids } },
None,
)
.await
.context("failed to get closed alerts from db")?;
.context("failed to resolve alerts on db")
.inspect_err(|e| warn!("{e:#}"))
.ok();
for closed in &mut closed {
closed.level = SeverityLevel::Ok;
}
let ts = monitor_timestamp();
let closed = closed
.into_iter()
.filter(|closed| {
if let ResourceTarget::Server(id) = &closed.target {
send_alerts_map.get(id).cloned().unwrap_or(true)
} else {
error!("got resource target other than server in resolve_server_alerts");
true
}
let closed = alerts
.iter()
.filter(|(_, send)| *send)
.map(|(alert, _)| {
let mut alert = alert.clone();
alert.resolved = true;
alert.resolved_ts = Some(ts);
alert.level = SeverityLevel::Ok;
alert
})
.collect::<Vec<_>>();

View File

@@ -52,7 +52,6 @@ pub async fn insert_repos_status_unknown(repos: Vec<Repo>) {
.insert(
repo.id.clone(),
CachedRepoStatus {
id: repo.id,
latest_hash: None,
latest_message: None,
}

View File

@@ -52,7 +52,6 @@ pub struct CachedDeploymentStatus {
#[derive(Default, Clone, Debug)]
pub struct CachedRepoStatus {
pub id: String,
pub latest_hash: Option<String>,
pub latest_message: Option<String>,
}
@@ -229,9 +228,8 @@ pub async fn update_cache_for_server(server: &Server) {
.unzip();
status_cache
.insert(
repo.id.clone(),
repo.id,
CachedRepoStatus {
id: repo.id,
latest_hash,
latest_message,
}

View File

@@ -30,8 +30,7 @@ pub async fn record_server_stats(ts: i64) {
})
.collect::<Vec<_>>();
if !records.is_empty() {
let res =
db_client().await.stats.insert_many(records, None).await;
let res = db_client().await.stats.insert_many(records).await;
if let Err(e) = res {
error!("failed to record server stats | {e:#}");
}

View File

@@ -137,7 +137,6 @@ impl super::MonitorResource for Builder {
mungos::update::Update::Set(
doc! { "config.builder.params.builder_id": "" },
),
None,
)
.await
.context("failed to update_many builds on database")?;

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::entities::{
build::Build,
deployment::{
@@ -16,7 +17,6 @@ use monitor_client::entities::{
};
use mungos::mongodb::Collection;
use periphery_client::api::container::RemoveContainer;
use serror::serialize_error_pretty;
use crate::{
helpers::{
@@ -187,10 +187,12 @@ impl super::MonitorResource for Deployment {
Err(e) => {
update.push_error_log(
"remove container",
format!(
"failed to retrieve server at {} from db.\n\nerror: {}",
deployment.config.server_id,
serialize_error_pretty(&e)
format_serror(
&e.context(format!(
"failed to retrieve server at {} from db.",
deployment.config.server_id
))
.into(),
),
);
return Ok(());
@@ -211,9 +213,8 @@ impl super::MonitorResource for Deployment {
// Leaving it for completeness sake
update.push_error_log(
"remove container",
format!(
"failed to remove container on periphery.\n\nerror: {}",
serialize_error_pretty(&e),
format_serror(
&e.context("failed to get periphery client").into(),
),
);
return Ok(());
@@ -230,9 +231,8 @@ impl super::MonitorResource for Deployment {
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"remove container",
format!(
"failed to remove container.\n\nerror: {}",
serialize_error_pretty(&e)
format_serror(
&e.context("failed to remove container").into(),
),
),
};
@@ -270,7 +270,7 @@ async fn validate_config(
.context("cannot create deployment with this build attached. user must have at least read permissions on the build to perform this action.")?;
config.image = Some(DeploymentImage::Build {
build_id: build.id,
version: version.clone(),
version: *version,
});
}
}

View File

@@ -1,6 +1,7 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::write::CreateTag,
@@ -26,14 +27,13 @@ use mungos::{
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use resolver_api::Resolve;
use serde::{de::DeserializeOwned, Serialize};
use serror::serialize_error_pretty;
use crate::{
config::core_config,
helpers::{
create_permission,
create_permission, flatten_document,
query::{
get_resource_ids_for_non_admin, get_tag,
get_resource_ids_for_user, get_tag,
get_user_permission_on_resource, id_or_name_filter,
},
update::{add_update, make_update},
@@ -187,7 +187,7 @@ pub async fn get<T: MonitorResource>(
) -> anyhow::Result<Resource<T::Config, T::Info>> {
T::coll()
.await
.find_one(id_or_name_filter(id_or_name), None)
.find_one(id_or_name_filter(id_or_name))
.await
.context("failed to query db for resource")?
.with_context(|| {
@@ -211,7 +211,7 @@ pub async fn get_check_permissions<T: MonitorResource>(
return Ok(resource);
}
let permissions = get_user_permission_on_resource(
&user.id,
user,
T::resource_type(),
&resource.id,
)
@@ -265,13 +265,9 @@ async fn list_full_for_user_using_document<T: MonitorResource>(
mut filters: Document,
user: &User,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
if !user.admin && !core_config().transparent_mode {
let ids =
get_resource_ids_for_non_admin(&user.id, T::resource_type())
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
if let Some(ids) =
get_resource_ids_for_user(user, T::resource_type()).await?
{
filters.insert("_id", doc! { "$in": ids });
}
find_collect(
@@ -336,7 +332,7 @@ pub async fn create<T: MonitorResource>(
let resource_id = T::coll()
.await
.insert_one(&resource, None)
.insert_one(&resource)
.await
.with_context(|| {
format!("failed to add {} to db", T::resource_type())
@@ -410,7 +406,7 @@ pub async fn update<T: MonitorResource>(
for FieldDiff { field, from, to } in diff.iter_field_diffs() {
diff_log.push_str(&format!(
"\n\n<span class=\"text-muted-foreground\">field</span>: '{field}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{from}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{to}</span>",
"\n\n<span class=\"text-muted-foreground\">field</span>: '{field}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-700 dark:text-red-400\">{from}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-700 dark:text-green-400\">{to}</span>",
));
}
@@ -422,10 +418,12 @@ pub async fn update<T: MonitorResource>(
let config_doc = T::update_document(resource, config)
.context("failed to serialize config to bson document")?;
let update_doc = flatten_document(doc! { "config": config_doc });
update_one_by_id(
T::coll().await,
&id,
mungos::update::Update::FlattenSet(doc! { "config": config_doc }),
doc! { "$set": update_doc },
None,
)
.await
@@ -487,7 +485,6 @@ pub async fn update_description<T: MonitorResource>(
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "description": description } },
None,
)
.await?;
Ok(())
@@ -522,7 +519,6 @@ pub async fn update_tags<T: MonitorResource>(
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "tags": tags } },
None,
)
.await?;
Ok(())
@@ -533,7 +529,7 @@ pub async fn remove_tag_from_all<T: MonitorResource>(
) -> anyhow::Result<()> {
T::coll()
.await
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } }, None)
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } })
.await
.context("failed to remove tag from resources")?;
Ok(())
@@ -580,7 +576,7 @@ pub async fn delete<T: MonitorResource>(
);
if let Err(e) = T::post_delete(&resource, &mut update).await {
update.push_error_log("post delete", serialize_error_pretty(&e));
update.push_error_log("post delete", format_serror(&e.into()));
}
update.finalize();
@@ -612,13 +608,10 @@ where
if let Err(e) = db_client()
.await
.permissions
.delete_many(
doc! {
"resource_target.type": variant.as_ref(),
"resource_target.id": &id
},
None,
)
.delete_many(doc! {
"resource_target.type": variant.as_ref(),
"resource_target.id": &id
})
.await
{
warn!("failed to delete_many permissions matching target {target:?} | {e:#}");
@@ -650,7 +643,6 @@ where
recent_field: id
}
},
None,
)
.await
.context("failed to remove resource from users recently viewed")

View File

@@ -337,12 +337,12 @@ async fn get_procedure_state_from_db(id: &str) -> ProcedureState {
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "Procedure",
"target.id": id,
"operation": "RunProcedure"
},
.find_one(doc! {
"target.type": "Procedure",
"target.id": id,
"operation": "RunProcedure"
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -1,6 +1,7 @@
use std::time::Duration;
use anyhow::Context;
use formatting::format_serror;
use monitor_client::entities::{
permission::PermissionLevel,
repo::{
@@ -18,7 +19,6 @@ use mungos::{
mongodb::{bson::doc, options::FindOneOptions, Collection},
};
use periphery_client::api::git::DeleteRepo;
use serror::serialize_error_pretty;
use crate::{
helpers::periphery_client,
@@ -158,7 +158,7 @@ impl super::MonitorResource for Repo {
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"delete repo on periphery",
serialize_error_pretty(&e),
format_serror(&e.context("failed to delete repo").into()),
),
}
@@ -244,15 +244,15 @@ async fn get_repo_state_from_db(id: &str) -> RepoState {
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "Repo",
"target.id": id,
"$or": [
{ "operation": "CloneRepo" },
{ "operation": "PullRepo" },
],
},
.find_one(doc! {
"target.type": "Repo",
"target.id": id,
"$or": [
{ "operation": "CloneRepo" },
{ "operation": "PullRepo" },
],
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -130,7 +130,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.params.server_id": &id },
doc! { "$set": { "config.params.server_id": "" } },
None,
)
.await
.context("failed to detach server from builders")?;
@@ -139,7 +138,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from deployments")?;
@@ -148,7 +146,6 @@ impl super::MonitorResource for Server {
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from repos")?;
@@ -160,7 +157,6 @@ impl super::MonitorResource for Server {
"resolved": true,
"resolved_ts": monitor_timestamp()
} },
None,
)
.await
.context("failed to detach server from repos")?;

View File

@@ -213,12 +213,12 @@ async fn get_resource_sync_state_from_db(
let state = db_client()
.await
.updates
.find_one(
doc! {
"target.type": "ResourceSync",
"target.id": id,
"operation": "RunSync"
},
.find_one(doc! {
"target.type": "ResourceSync",
"target.id": id,
"operation": "RunSync"
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -1,10 +1,20 @@
use std::sync::{Arc, OnceLock};
use std::{
collections::HashMap,
sync::{Arc, OnceLock},
};
use anyhow::{anyhow, Context};
use monitor_client::entities::{
build::BuildState, deployment::DeploymentState,
procedure::ProcedureState, repo::RepoState,
build::BuildState,
config::core::{CoreConfig, GithubWebhookAppConfig},
deployment::DeploymentState,
procedure::ProcedureState,
repo::RepoState,
sync::ResourceSyncState,
};
use octorust::auth::{
Credentials, InstallationTokenGenerator, JWTCredentials,
};
use tokio::sync::{Mutex, OnceCell};
use crate::{
@@ -36,6 +46,63 @@ pub fn jwt_client() -> &'static JwtClient {
JWT_CLIENT.get_or_init(|| JwtClient::new(core_config()))
}
/// Returns a map from github installation namespace (org/user account)
/// to an authenticated octorust client, or `None` when no github
/// webhook app is configured (`app_id == 0` or no installations).
///
/// Initialized exactly once on first call. Configuration errors (bad
/// private key path, undecodable PEM, bad credentials) panic
/// deliberately via `.unwrap()` so the service fails fast at startup
/// rather than running without webhook support.
pub fn github_client(
) -> Option<&'static HashMap<String, octorust::Client>> {
  static GITHUB_CLIENT: OnceLock<
    Option<HashMap<String, octorust::Client>>,
  > = OnceLock::new();
  GITHUB_CLIENT
    .get_or_init(|| {
      let CoreConfig {
        github_webhook_app:
          GithubWebhookAppConfig {
            app_id,
            installations,
            pk_path,
            ..
          },
        ..
      } = core_config();
      // The webhook app is optional - treat "unset" as disabled.
      if *app_id == 0 || installations.is_empty() {
        return None;
      }
      let private_key = std::fs::read(pk_path)
        .context("github webhook app | failed to load private key")
        .unwrap();
      let private_key = nom_pem::decode_block(&private_key)
        .map_err(|e| anyhow!("{e:?}"))
        .context("github webhook app | failed to decode private key")
        .unwrap();
      // A single JWT credential is cloned into every installation client.
      let jwt = JWTCredentials::new(*app_id, private_key.data)
        .context(
          "github webhook app | failed to make github JWTCredentials",
        )
        .unwrap();
      // Fix: size the map by len(), not capacity() - capacity may be
      // larger than the number of installations actually present.
      let mut clients =
        HashMap::with_capacity(installations.len());
      for installation in installations {
        let token_generator = InstallationTokenGenerator::new(
          installation.id,
          jwt.clone(),
        );
        let client = octorust::Client::new(
          "github-app",
          Credentials::InstallationToken(token_generator),
        )
        .context("failed to initialize github client")
        .unwrap();
        clients.insert(installation.namespace.to_string(), client);
      }
      Some(clients)
    })
    .as_ref()
}
pub fn action_states() -> &'static ActionStates {
static ACTION_STATES: OnceLock<ActionStates> = OnceLock::new();
ACTION_STATES.get_or_init(ActionStates::default)

View File

@@ -208,7 +208,7 @@ async fn user_can_see_update(
}
let (variant, id) = update_target.extract_variant_id();
let permissions =
get_user_permission_on_resource(&user.id, variant, id).await?;
get_user_permission_on_resource(user, variant, id).await?;
if permissions > PermissionLevel::None {
Ok(())
} else {

View File

@@ -13,7 +13,6 @@ repository.workspace = true
monitor_client.workspace = true
logger.workspace = true
#
termination_signal.workspace = true
mungos.workspace = true
#
tokio.workspace = true

12
bin/migrator/Dockerfile Normal file
View File

@@ -0,0 +1,12 @@
# Build Core
# Multi-stage build: compile the migrator in a full rust toolchain image,
# then copy only the binary into a minimal distroless runtime image.
# NOTE: `AS` capitalized to match `FROM` (BuildKit warns on case mismatch).
FROM rust:1.79.0-bookworm AS builder
WORKDIR /builder
COPY . .
RUN cargo build -p migrator --release

# Final Image
# distroless/cc provides glibc + libgcc only - no shell, minimal attack surface.
FROM gcr.io/distroless/cc-debian12
COPY --from=builder /builder/target/release/migrator /
CMD ["./migrator"]

View File

@@ -1,2 +1,3 @@
#[allow(unused)]
pub mod v0;
pub mod v1_6;

View File

@@ -198,11 +198,6 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
id: value.id,
name: value.name,
description: value.description,
// permissions: value
// .permissions
// .into_iter()
// .map(|(id, p)| (id, p.into()))
// .collect(),
updated_at: unix_from_monitor_ts(&value.updated_at)?,
tags: Vec::new(),
info: BuildInfo {
@@ -233,6 +228,7 @@ impl TryFrom<Build> for monitor_client::entities::build::Build {
build_path,
dockerfile_path,
build_args,
secret_args: Default::default(),
extra_args,
use_buildx,
labels: Default::default(),

View File

@@ -106,11 +106,8 @@ impl TryFrom<User> for monitor_client::entities::user::User {
create_server_permissions: value.create_server_permissions,
create_build_permissions: value.create_build_permissions,
last_update_view: Default::default(),
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
updated_at: unix_from_monitor_ts(&value.updated_at)?,
};
Ok(user)

View File

@@ -134,6 +134,7 @@ impl From<BuildConfig>
.into_iter()
.map(Into::into)
.collect(),
secret_args: Default::default(),
labels: value.labels.into_iter().map(Into::into).collect(),
extra_args: value.extra_args,
use_buildx: value.use_buildx,

View File

@@ -7,7 +7,9 @@ use monitor_client::entities::{
update::{ResourceTarget, Update},
user::User,
};
use mungos::find::find_collect;
use mungos::{
find::find_collect, mongodb::options::InsertManyOptions,
};
use crate::legacy::v0;
@@ -28,13 +30,21 @@ pub async fn migrate_users(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.users, None, None)
.await
.context("failed to get existing target users")?;
let users = find_collect(&legacy_db.users, None, None)
.await
.context("failed to get legacy users")?
.into_iter()
.filter_map(|s| {
let username = s.username.clone();
s.try_into()
.filter_map(|user| {
if existing.iter().any(|u| u.username == user.username) {
return None;
}
let username = user.username.clone();
user.try_into()
.inspect_err(|e| {
warn!("failed to convert user {username} | {e:#}")
})
@@ -46,7 +56,7 @@ pub async fn migrate_users(
target_db
.users
.insert_many(users, None)
.insert_many(users)
.await
.context("failed to insert users on target")?;
@@ -59,6 +69,10 @@ pub async fn migrate_servers(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.servers, None, None)
.await
.context("failed to get existing target servers")?;
let servers = find_collect(&legacy_db.servers, None, None)
.await
.context("failed to get legacy servers")?;
@@ -67,6 +81,10 @@ pub async fn migrate_servers(
let mut permissions = Vec::<Permission>::new();
for server in servers {
if existing.iter().any(|s| s.name == server.name) {
continue;
}
for (user_id, level) in &server.permissions {
let permission = Permission {
id: Default::default(),
@@ -91,7 +109,10 @@ pub async fn migrate_servers(
if !new_servers.is_empty() {
target_db
.servers
.insert_many(new_servers, None)
.insert_many(new_servers)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert servers on target")?;
}
@@ -99,7 +120,10 @@ pub async fn migrate_servers(
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert server permissions on target")?;
}
@@ -113,6 +137,10 @@ pub async fn migrate_deployments(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.deployments, None, None)
.await
.context("failed to get existing target deployments")?;
let deployments = find_collect(&legacy_db.deployments, None, None)
.await
.context("failed to get legacy deployments")?;
@@ -121,6 +149,10 @@ pub async fn migrate_deployments(
let mut permissions = Vec::<Permission>::new();
for deployment in deployments {
if existing.iter().any(|d| d.name == deployment.name) {
continue;
}
for (user_id, level) in &deployment.permissions {
let permission = Permission {
id: Default::default(),
@@ -147,7 +179,10 @@ pub async fn migrate_deployments(
if !new_deployments.is_empty() {
target_db
.deployments
.insert_many(new_deployments, None)
.insert_many(new_deployments)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert deployments on target")?;
}
@@ -155,7 +190,10 @@ pub async fn migrate_deployments(
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert deployment permissions on target")?;
}
@@ -169,6 +207,10 @@ pub async fn migrate_builds(
legacy_db: &v0::DbClient,
target_db: &crate::DbClient,
) -> anyhow::Result<()> {
let existing = find_collect(&target_db.builds, None, None)
.await
.context("failed to get existing target builds")?;
let builds = find_collect(&legacy_db.builds, None, None)
.await
.context("failed to get legacy builds")?;
@@ -177,6 +219,10 @@ pub async fn migrate_builds(
let mut permissions = Vec::<Permission>::new();
for build in builds {
if existing.iter().any(|b| b.name == build.name) {
continue;
}
for (user_id, level) in &build.permissions {
let permission = Permission {
id: Default::default(),
@@ -201,17 +247,29 @@ pub async fn migrate_builds(
if !new_builds.is_empty() {
target_db
.builds
.insert_many(new_builds, None)
.insert_many(new_builds)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert builds on target")?;
.inspect_err(|e| {
warn!("failed to insert builds on target | {e}")
})
.ok();
}
if !permissions.is_empty() {
target_db
.permissions
.insert_many(permissions, None)
.insert_many(permissions)
.with_options(
InsertManyOptions::builder().ordered(false).build(),
)
.await
.context("failed to insert build permissions on target")?;
.inspect_err(|e| {
warn!("failed to insert build permissions on target | {e}")
})
.ok();
}
info!("builds have been migrated\n");
@@ -239,7 +297,8 @@ pub async fn migrate_updates(
target_db
.updates
.insert_many(updates, None)
.insert_many(updates)
.with_options(InsertManyOptions::builder().ordered(false).build())
.await
.context("failed to insert updates on target")?;

View File

@@ -34,7 +34,6 @@ pub async fn migrate_deployments_in_place(
.update_one(
doc! { "name": &deployment.name },
doc! { "$set": to_document(&deployment)? },
None,
)
.await
.context("failed to insert deployments on target")?;
@@ -62,7 +61,6 @@ pub async fn migrate_builds_in_place(
.update_one(
doc! { "name": &build.name },
doc! { "$set": to_document(&build)? },
None,
)
.await
.context("failed to insert builds on target")?;

View File

@@ -17,29 +17,29 @@ path = "src/main.rs"
# local
monitor_client = { workspace = true, features = ["docker"] }
periphery_client.workspace = true
formatting.workspace = true
command.workspace = true
logger.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
async_timing_util.workspace = true
merge_config_files.workspace = true
parse_csl.workspace = true
termination_signal.workspace = true
run_command.workspace = true
svi.workspace = true
async_timing_util.workspace = true
resolver_api.workspace = true
run_command.workspace = true
parse_csl.workspace = true
svi.workspace = true
# external
tokio.workspace = true
axum.workspace = true
axum-extra.workspace = true
dotenv.workspace = true
envy.workspace = true
serde.workspace = true
serde_json.workspace = true
anyhow.workspace = true
bollard.workspace = true
clap.workspace = true
tracing.workspace = true
uuid.workspace = true
bollard.workspace = true
sysinfo.workspace = true
dotenv.workspace = true
anyhow.workspace = true
tokio.workspace = true
serde.workspace = true
axum.workspace = true
envy.workspace = true
clap.workspace = true
uuid.workspace = true

View File

@@ -1,31 +1,200 @@
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
server::docker_image::ImageSummary, update::Log,
};
use periphery_client::api::build::{
Build, GetImageList, PruneImages,
build::{Build, BuildConfig},
get_image_name, optional_string,
server::docker_image::ImageSummary,
to_monitor_name,
update::Log,
EnvironmentVar, Version,
};
use periphery_client::api::build::{self, GetImageList, PruneImages};
use resolver_api::Resolve;
use crate::{
docker::{self, client::docker_client},
config::periphery_config,
docker::docker_client,
helpers::{docker_login, parse_extra_args, parse_labels},
State,
};
impl Resolve<Build> for State {
#[instrument(name = "Build", skip(self, replacers))]
impl Resolve<build::Build> for State {
#[instrument(name = "Build", skip_all)]
async fn resolve(
&self,
Build {
build::Build {
build,
aws_ecr,
registry_token,
replacers,
}: Build,
replacers: core_replacers,
}: build::Build,
_: (),
) -> anyhow::Result<Vec<Log>> {
docker::build::build(&build, registry_token, replacers).await
let Build {
name,
config:
BuildConfig {
version,
skip_secret_interp,
build_path,
dockerfile_path,
build_args,
secret_args,
labels,
extra_args,
use_buildx,
image_registry,
..
},
..
} = &build;
let mut logs = Vec::new();
// Maybe docker login
let should_push = match docker_login(
image_registry,
registry_token.as_deref(),
aws_ecr.as_ref(),
)
.await
{
Ok(should_push) => should_push,
Err(e) => {
logs.push(Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
));
return Ok(logs);
}
};
let name = to_monitor_name(name);
// Get paths
let build_dir =
periphery_config().repo_dir.join(&name).join(build_path);
let dockerfile_path = match optional_string(dockerfile_path) {
Some(dockerfile_path) => dockerfile_path.to_owned(),
None => "Dockerfile".to_owned(),
};
// Get command parts
let image_name = get_image_name(&build, |_| aws_ecr)
.context("failed to make image name")?;
let build_args = parse_build_args(build_args);
let _secret_args =
parse_secret_args(secret_args, *skip_secret_interp)?;
let labels = parse_labels(labels);
let extra_args = parse_extra_args(extra_args);
let buildx = if *use_buildx { " buildx" } else { "" };
let image_tags = image_tags(&image_name, version);
let push_command = should_push
.then(|| {
format!(" && docker image push --all-tags {image_name}")
})
.unwrap_or_default();
// Construct command
let command = format!(
"cd {} && docker{buildx} build{build_args}{_secret_args}{extra_args}{labels}{image_tags} -f {dockerfile_path} .{push_command}",
build_dir.display()
);
if *skip_secret_interp {
let build_log =
run_monitor_command("docker build", command).await;
info!("finished building docker image");
logs.push(build_log);
} else {
// Interpolate any missing secrets
let (command, mut replacers) = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into docker build command",
)?;
replacers.extend(core_replacers);
let mut build_log =
run_monitor_command("docker build", command).await;
build_log.command =
svi::replace_in_string(&build_log.command, &replacers);
build_log.stdout =
svi::replace_in_string(&build_log.stdout, &replacers);
build_log.stderr =
svi::replace_in_string(&build_log.stderr, &replacers);
logs.push(build_log);
}
cleanup_secret_env_vars(secret_args);
Ok(logs)
}
}
/// Builds the `-t` tag arguments for a docker build: `latest`, the full
/// version, `major.minor`, and `major`.
fn image_tags(image_name: &str, version: &Version) -> String {
  let Version { major, minor, .. } = version;
  [
    format!(" -t {image_name}:latest"),
    format!(" -t {image_name}:{version}"),
    format!(" -t {image_name}:{major}.{minor}"),
    format!(" -t {image_name}:{major}"),
  ]
  .concat()
}
/// Renders ` --build-arg VAR="value"` for each build arg, concatenated
/// into a single command-line fragment.
fn parse_build_args(build_args: &[EnvironmentVar]) -> String {
  let mut out = String::new();
  for arg in build_args {
    out.push_str(&format!(
      " --build-arg {}=\"{}\"",
      arg.variable, arg.value
    ));
  }
  out
}
/// Renders ` --secret id=VAR` flags for the docker build command.
///
/// Side effect: exports each secret value as a process environment
/// variable via `std::env::set_var` (docker reads `--secret id=VAR`
/// values from the environment). Callers are expected to clear them
/// afterwards with `cleanup_secret_env_vars`.
///
/// Errors if a variable name is empty or contains '='. Unless
/// `skip_secret_interp` is set, periphery secrets are interpolated into
/// each value first (double-bracket syntax).
fn parse_secret_args(
  secret_args: &[EnvironmentVar],
  skip_secret_interp: bool,
) -> anyhow::Result<String> {
  let periphery_config = periphery_config();
  Ok(
    secret_args
      .iter()
      .map(|EnvironmentVar { variable, value }| {
        // Validate the name before doing any interpolation work.
        if variable.is_empty() {
          return Err(anyhow!("secret variable cannot be empty string"))
        } else if variable.contains('=') {
          return Err(anyhow!("invalid variable {variable}. variable cannot contain '='"))
        }
        let value = if skip_secret_interp {
          value.to_string()
        } else {
          svi::interpolate_variables(
            value,
            &periphery_config.secrets,
            svi::Interpolator::DoubleBrackets,
            true,
          )
          .context(
            "failed to interpolate periphery secrets into build secrets",
          )?.0
        };
        // NOTE(review): presumably consumed by docker buildkit, which
        // sources env-backed secrets by id - confirm the target docker
        // version supports this.
        std::env::set_var(variable, value);
        anyhow::Ok(format!(" --secret id={variable}"))
      })
      // Short-circuits on the first invalid secret.
      .collect::<anyhow::Result<Vec<_>>>()?
      .join(""),
  )
}
/// Removes the environment variables exported by `parse_secret_args`,
/// so secret values do not linger in the periphery process environment.
fn cleanup_secret_env_vars(secret_args: &[EnvironmentVar]) {
  for EnvironmentVar { variable, .. } in secret_args {
    std::env::remove_var(variable);
  }
}
//
impl Resolve<GetImageList> for State {
@@ -48,6 +217,7 @@ impl Resolve<PruneImages> for State {
_: PruneImages,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::build::prune_images().await)
let command = String::from("docker image prune -a -f");
Ok(run_monitor_command("prune images", command).await)
}
}

View File

@@ -1,13 +1,24 @@
use anyhow::anyhow;
use anyhow::{anyhow, Context};
use command::run_monitor_command;
use formatting::format_serror;
use monitor_client::entities::{
deployment::{ContainerSummary, DockerContainerStats},
deployment::{
ContainerSummary, Conversion, Deployment, DeploymentConfig,
DeploymentImage, DockerContainerStats, RestartMode,
TerminationSignal,
},
to_monitor_name,
update::Log,
EnvironmentVar, SearchCombinator,
};
use periphery_client::api::container::*;
use resolver_api::Resolve;
use run_command::async_run_command;
use crate::{
docker::{self, client::docker_client},
config::periphery_config,
docker::docker_client,
helpers::{docker_login, parse_extra_args, parse_labels},
State,
};
@@ -34,10 +45,11 @@ impl Resolve<GetContainerLog> for State {
#[instrument(name = "GetContainerLog", level = "debug", skip(self))]
async fn resolve(
&self,
req: GetContainerLog,
GetContainerLog { name, tail }: GetContainerLog,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::container_log(&req.name, req.tail).await)
let command = format!("docker logs {name} --tail {tail}");
Ok(run_monitor_command("get container log", command).await)
}
}
@@ -51,17 +63,29 @@ impl Resolve<GetContainerLogSearch> for State {
)]
async fn resolve(
&self,
req: GetContainerLogSearch,
GetContainerLogSearch {
name,
terms,
combinator,
invert,
}: GetContainerLogSearch,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::container_log_search(
&req.name,
&req.terms,
req.combinator,
)
.await,
)
let maybe_invert = invert.then_some(" -v").unwrap_or_default();
let grep = match combinator {
SearchCombinator::Or => {
format!("grep{maybe_invert} -E '{}'", terms.join("|"))
}
SearchCombinator::And => {
format!(
"grep{maybe_invert} -P '^(?=.*{})'",
terms.join(")(?=.*")
)
}
};
let command =
format!("docker logs {name} --tail 5000 2>&1 | {grep}");
Ok(run_monitor_command("get container log grep", command).await)
}
}
@@ -79,8 +103,7 @@ impl Resolve<GetContainerStats> for State {
_: (),
) -> anyhow::Result<DockerContainerStats> {
let error = anyhow!("no stats matching {}", req.name);
let mut stats =
docker::container::container_stats(Some(req.name)).await?;
let mut stats = container_stats(Some(req.name)).await?;
let stats = stats.pop().ok_or(error)?;
Ok(stats)
}
@@ -99,7 +122,7 @@ impl Resolve<GetContainerStatsList> for State {
_: GetContainerStatsList,
_: (),
) -> anyhow::Result<Vec<DockerContainerStats>> {
docker::container::container_stats(None).await
container_stats(None).await
}
}
@@ -109,10 +132,16 @@ impl Resolve<StartContainer> for State {
#[instrument(name = "StartContainer", skip(self))]
async fn resolve(
&self,
req: StartContainer,
StartContainer { name }: StartContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::start_container(&req.name).await)
Ok(
run_monitor_command(
"docker start",
format!("docker start {name}"),
)
.await,
)
}
}
@@ -122,15 +151,26 @@ impl Resolve<StopContainer> for State {
#[instrument(name = "StopContainer", skip(self))]
async fn resolve(
&self,
req: StopContainer,
StopContainer { name, signal, time }: StopContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::stop_container(
&req.name, req.signal, req.time,
)
.await,
)
let command = stop_container_command(&name, signal, time);
let log = run_monitor_command("docker stop", command).await;
if log.stderr.contains("unknown flag: --signal") {
let command = stop_container_command(&name, None, time);
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
Ok(log)
} else {
Ok(log)
}
}
}
@@ -140,15 +180,31 @@ impl Resolve<RemoveContainer> for State {
#[instrument(name = "RemoveContainer", skip(self))]
async fn resolve(
&self,
req: RemoveContainer,
RemoveContainer { name, signal, time }: RemoveContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::stop_and_remove_container(
&req.name, req.signal, req.time,
)
.await,
)
let stop_command = stop_container_command(&name, signal, time);
let command =
format!("{stop_command} && docker container rm {name}");
let log =
run_monitor_command("docker stop and remove", command).await;
if log.stderr.contains("unknown flag: --signal") {
let stop_command = stop_container_command(&name, None, time);
let command =
format!("{stop_command} && docker container rm {name}");
let mut log = run_monitor_command("docker stop", command).await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
format!("\n\n{}", log.stderr)
} else {
String::new()
}
);
Ok(log)
} else {
Ok(log)
}
}
}
@@ -158,16 +214,15 @@ impl Resolve<RenameContainer> for State {
#[instrument(name = "RenameContainer", skip(self))]
async fn resolve(
&self,
req: RenameContainer,
RenameContainer {
curr_name,
new_name,
}: RenameContainer,
_: (),
) -> anyhow::Result<Log> {
Ok(
docker::container::rename_container(
&req.curr_name,
&req.new_name,
)
.await,
)
let new = to_monitor_name(&new_name);
let command = format!("docker rename {curr_name} {new}");
Ok(run_monitor_command("docker rename", command).await)
}
}
@@ -180,14 +235,18 @@ impl Resolve<PruneContainers> for State {
_: PruneContainers,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::container::prune_containers().await)
let command = String::from("docker container prune -f");
Ok(run_monitor_command("prune containers", command).await)
}
}
//
impl Resolve<Deploy> for State {
#[instrument(name = "Deploy", skip(self, replacers))]
#[instrument(
name = "Deploy",
skip(self, core_replacers, aws_ecr, registry_token)
)]
async fn resolve(
&self,
Deploy {
@@ -195,22 +254,206 @@ impl Resolve<Deploy> for State {
stop_signal,
stop_time,
registry_token,
replacers,
replacers: core_replacers,
aws_ecr,
}: Deploy,
_: (),
) -> anyhow::Result<Log> {
let res = docker::container::deploy(
&deployment,
stop_signal
.unwrap_or(deployment.config.termination_signal)
.into(),
stop_time
.unwrap_or(deployment.config.termination_timeout)
.into(),
registry_token,
replacers,
if let Err(e) = docker_login(
&deployment.config.image_registry,
registry_token.as_deref(),
aws_ecr.as_ref(),
)
.await;
Ok(res)
.await
{
return Ok(Log::error(
"docker login",
format_serror(
&e.context("failed to login to docker registry").into(),
),
));
}
let image = if let DeploymentImage::Image { image } =
&deployment.config.image
{
if image.is_empty() {
return Ok(Log::error(
"get image",
String::from("deployment does not have image attached"),
));
}
image
} else {
return Ok(Log::error(
"get image",
String::from("deployment does not have image attached"),
));
};
let _ = pull_image(image).await;
debug!("image pulled");
let _ = State
.resolve(
RemoveContainer {
name: deployment.name.clone(),
signal: stop_signal,
time: stop_time,
},
(),
)
.await;
debug!("container stopped and removed");
let command = docker_run_command(&deployment, image);
debug!("docker run command: {command}");
if deployment.config.skip_secret_interp {
Ok(run_monitor_command("docker run", command).await)
} else {
let command = svi::interpolate_variables(
&command,
&periphery_config().secrets,
svi::Interpolator::DoubleBrackets,
true,
)
.context(
"failed to interpolate secrets into docker run command",
);
if let Err(e) = command {
return Ok(Log::error("docker run", format!("{e:?}")));
}
let (command, mut replacers) = command.unwrap();
replacers.extend(core_replacers);
let mut log = run_monitor_command("docker run", command).await;
log.command = svi::replace_in_string(&log.command, &replacers);
log.stdout = svi::replace_in_string(&log.stdout, &replacers);
log.stderr = svi::replace_in_string(&log.stderr, &replacers);
Ok(log)
}
}
}
//
/// Assembles the full `docker run` command line for a deployment:
/// name, port/volume mappings, network, restart policy, environment,
/// labels, extra args, then the image and optional container command.
fn docker_run_command(
  Deployment {
    name,
    config:
      DeploymentConfig {
        volumes,
        ports,
        network,
        command,
        restart,
        environment,
        labels,
        extra_args,
        ..
      },
    ..
  }: &Deployment,
  image: &str,
) -> String {
  let name = to_monitor_name(name);
  let ports = parse_conversions(ports, "-p");
  // Fix: borrow volumes directly - the previous `.to_owned()` cloned the
  // whole Vec only to immediately take a reference to it.
  let volumes = parse_conversions(volumes, "-v");
  let network = parse_network(network);
  let restart = parse_restart(restart);
  let environment = parse_environment(environment);
  let labels = parse_labels(labels);
  let command = parse_command(command);
  let extra_args = parse_extra_args(extra_args);
  format!("docker run -d --name {name}{ports}{volumes}{network}{restart}{environment}{labels}{extra_args} {image}{command}")
}
/// Renders ` FLAG local:container` for each conversion - used for both
/// port (`-p`) and volume (`-v`) mappings.
fn parse_conversions(
  conversions: &[Conversion],
  flag: &str,
) -> String {
  let mut out = String::new();
  for conversion in conversions {
    out.push_str(&format!(
      " {flag} {}:{}",
      conversion.local, conversion.container
    ));
  }
  out
}
/// Renders ` --env VAR="value"` for each environment variable.
fn parse_environment(environment: &[EnvironmentVar]) -> String {
  environment.iter().fold(String::new(), |mut acc, var| {
    acc.push_str(&format!(
      " --env {}=\"{}\"",
      var.variable, var.value
    ));
    acc
  })
}
/// Renders the ` --network NAME` flag (always present).
fn parse_network(network: &str) -> String {
  [" --network ", network].concat()
}
/// Renders the ` --restart MODE` flag. `OnFailure` is capped at 10
/// retries; all other modes use their display representation as-is.
fn parse_restart(restart: &RestartMode) -> String {
  let mode = if matches!(restart, RestartMode::OnFailure) {
    String::from("on-failure:10")
  } else {
    restart.to_string()
  };
  format!(" --restart {mode}")
}
/// Prefixes a non-empty container command with a separating space;
/// an empty command produces no fragment at all.
fn parse_command(command: &str) -> String {
  match command {
    "" => String::new(),
    cmd => format!(" {cmd}"),
  }
}
//
/// Runs `docker stats --no-stream` (optionally scoped to a single
/// container) and parses each json-formatted output line into stats.
async fn container_stats(
  container_name: Option<String>,
) -> anyhow::Result<Vec<DockerContainerStats>> {
  // Literal go-template braces - kept out of format! to avoid escaping.
  let format = "--format \"{{ json . }}\"";
  let target = match container_name {
    Some(name) => format!(" {name}"),
    None => String::new(),
  };
  let command = format!("docker stats{target} --no-stream {format}");
  let output = async_run_command(&command).await;
  if !output.success() {
    return Err(anyhow!("{}", output.stderr.replace('\n', "")));
  }
  output
    .stdout
    .split('\n')
    .filter(|entry| !entry.is_empty())
    .map(|entry| {
      serde_json::from_str(entry)
        .context(format!("failed at parsing entry {entry}"))
    })
    .collect()
}
/// Pulls the given image, returning the `docker pull` output as a log.
#[instrument]
async fn pull_image(image: &str) -> Log {
  run_monitor_command("docker pull", format!("docker pull {image}"))
    .await
}
/// Builds the `docker stop` shell command for a container.
///
/// The container name is normalized via `to_monitor_name`. Optional
/// `--signal` and `--time` flags are included only when provided.
fn stop_container_command(
  container_name: &str,
  signal: Option<TerminationSignal>,
  time: Option<i32>,
) -> String {
  let container_name = to_monitor_name(container_name);
  let signal_flag = match signal {
    Some(sig) => format!(" --signal {sig}"),
    None => String::new(),
  };
  let time_flag = match time {
    Some(secs) => format!(" --time {secs}"),
    None => String::new(),
  };
  format!("docker stop{signal_flag}{time_flag} {container_name}")
}

View File

@@ -11,7 +11,7 @@ use serde::{Deserialize, Serialize};
use crate::{
config::{accounts_response, secrets_response},
docker, State,
State,
};
mod build;
@@ -154,6 +154,7 @@ impl Resolve<PruneSystem> for State {
PruneSystem {}: PruneSystem,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::prune_system().await)
let command = String::from("docker system prune -a -f");
Ok(run_monitor_command("prune system", command).await)
}
}

View File

@@ -1,3 +1,4 @@
use command::run_monitor_command;
use monitor_client::entities::{
server::docker_network::DockerNetwork, update::Log,
};
@@ -6,10 +7,7 @@ use periphery_client::api::network::{
};
use resolver_api::Resolve;
use crate::{
docker::{self, client::docker_client},
State,
};
use crate::{docker::docker_client, State};
//
@@ -33,7 +31,12 @@ impl Resolve<CreateNetwork> for State {
CreateNetwork { name, driver }: CreateNetwork,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::network::create_network(&name, driver).await)
let driver = match driver {
Some(driver) => format!(" -d {driver}"),
None => String::new(),
};
let command = format!("docker network create{driver} {name}");
Ok(run_monitor_command("create network", command).await)
}
}
@@ -46,7 +49,8 @@ impl Resolve<DeleteNetwork> for State {
DeleteNetwork { name }: DeleteNetwork,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::network::delete_network(&name).await)
let command = format!("docker network rm {name}");
Ok(run_monitor_command("delete network", command).await)
}
}
@@ -59,6 +63,7 @@ impl Resolve<PruneNetworks> for State {
_: PruneNetworks,
_: (),
) -> anyhow::Result<Log> {
Ok(docker::network::prune_networks().await)
let command = String::from("docker network prune -f");
Ok(run_monitor_command("prune networks", command).await)
}
}

Some files were not shown because too many files have changed in this diff Show More