Compare commits

..

133 Commits

Author SHA1 Message Date
mbecker20
392e691f92 add repo build webhook 2024-08-11 18:26:51 -07:00
mbecker20
495e208ccd add building in repo busy check 2024-08-11 17:47:10 -07:00
mbecker20
14474adb90 add building state to repo 2024-08-11 17:46:26 -07:00
mbecker20
896784e2e3 fix repo action UI responsiveness 2024-08-11 17:38:15 -07:00
mbecker20
2e690bce24 repo table just show repo and branch 2024-08-11 17:36:00 -07:00
mbecker20
7172d24512 add message if fail to remove_dir_all after compose deploy 2024-08-11 17:21:19 -07:00
mbecker20
b754c89118 validate config makes sure ids not empty 2024-08-11 17:09:53 -07:00
Maxwell Becker
31a23dfe2d v1.13.1 improve stack edge cases, and UI action responsiveness (#26)
* get stack state from project

* move custom image name / tag below image setting for build config

* services also trigger stack action state

* add status to stack page

* 1.13.1 patch
2024-08-11 17:01:09 -07:00
mbecker20
b0f80cafc3 improve action responsiveness by improving when update is sent out rel to action state set 2024-08-11 14:59:34 -07:00
mbecker20
85a16f6c6f ensure run directory is normalized before create dir all 2024-08-11 14:14:17 -07:00
mbecker20
29a7e4c27b add link to builder in build info 2024-08-11 13:24:29 -07:00
mbecker20
a73b572725 improve dashboard responsiveness 2024-08-11 12:07:14 -07:00
mbecker20
aa44bf04e8 validate repo builder id in diff (new field) 2024-08-11 05:06:17 -07:00
mbecker20
93348621c5 replace repo builder_id with name for toml export 2024-08-11 05:00:34 -07:00
mbecker20
4b2139ede2 docker compose ls --all 2024-08-11 04:56:28 -07:00
mbecker20
3251216be7 update server address config placeholder 2024-08-11 03:35:34 -07:00
mbecker20
1f980a45e8 fix compose example file to reference monitor-mongo 2024-08-11 03:34:19 -07:00
mbecker20
94da1dce99 fill out Procedure execute types 2024-08-11 02:38:15 -07:00
mbecker20
d4fc015494 cli don't panic if no HOME env var 2024-08-11 02:26:18 -07:00
mbecker20
5800fc91d2 repo don't show build button if builder not attached 2024-08-11 02:12:39 -07:00
mbecker20
91785e1e8f monitor_cli install instructions 2024-08-11 01:56:56 -07:00
mbecker20
41fccdb16e demo link in readme 2024-08-10 23:58:42 -07:00
mbecker20
78cf93da8a improve dashboard recents responsiveness 2024-08-10 23:52:24 -07:00
mbecker20
ea36549dbe fix stack service routing page - works with updates 2024-08-10 23:01:31 -07:00
mbecker20
a319095869 improve readme 2024-08-10 16:02:29 -07:00
mbecker20
a6d7a80cbc capitalize Monitor Docs 2024-08-10 15:33:17 -07:00
mbecker20
20f051c890 readme 2024-08-10 15:32:22 -07:00
mbecker20
2fef954ad5 add link to demo docs 2024-08-10 15:30:54 -07:00
mbecker20
e1b9367ee3 remove attempt at parsing json out of config 2024-08-10 14:27:58 -07:00
mbecker20
c7717fbfdf disable create variable for non admin 2024-08-10 13:59:42 -07:00
mbecker20
bf918042c3 point to right mongo 2024-08-10 13:33:09 -07:00
mbecker20
46ac16100d fix example compose "depends on" 2024-08-10 12:55:31 -07:00
mbecker20
eca0378c56 apply spellcheck={false} 2024-08-10 12:50:21 -07:00
mbecker20
bfd5c5390d fix use before init loop 2024-08-10 12:18:07 -07:00
mbecker20
db41878278 guard against running on_clone / on_pull on core 2024-08-10 11:56:18 -07:00
mbecker20
26468ed8ea add infos for stack repo branch commit 2024-08-10 11:49:23 -07:00
mbecker20
707751708d update to latest rust docker base 2024-08-10 11:31:52 -07:00
mbecker20
d28d3422a3 disable server builder cancel in UI 2024-08-10 11:04:51 -07:00
mbecker20
9e2b1ede93 tweak layout: items-center 2024-08-10 10:22:03 -07:00
mbecker20
37e37deb04 clarify what "sync resources" means 2024-08-10 09:42:42 -07:00
mbecker20
e73a6ca72c fix docsite link error 2024-08-10 09:39:54 -07:00
mbecker20
6082b7b1bd update client version in toml 2024-08-10 09:35:20 -07:00
Maxwell Becker
678767c24b [v1.13 - Komodo] Docker compose support with the Stack resource (#24) Co-authored with @karamvirsingh98
* add some network stuff to container summary

* improve settings tables UI

* periphery build supports additional tags

* fix variable container sizing

* alert types newline wrap

* plumbing for Stack resource

* plumbing for Stack resource

* mount stack api

* stack resource sync

* get remote compose file

* support image_name and image_tag

* add server config placeholders. default server config address

* configure image name and image tag

* deployment work with build image_name and image_tag

* stack UI

* fe builds

* configure registry provider and account

* implement periphery stack api

* stack poll interval

* add UI provider management

* deploy stacks

* build push commit hash tag.

* Destroy stack

* update default core port to 9120

* remove git_account alias

* finish stack (and container) api

* frontend builds

* cant cancel server based builds

* fix

* use git pull -f

* 9120

* start UI updates (#15)

* fix  From<Stack> for CloneArgs

* remove unused imports

* UI Updates (#16)

* cleanup dashboard charts for resources

* bring back solid scrollbars

* enable sidebar scrolling

* remove alerts from all resources

* pass jwt secret

* stacks dont delete the target

* parse services from yaml

* stacks deploy

* close

* looking good

* closer

* destroy stack when file missing. onboard stacks

* figure out stack container name matching

* get stack state correct

* work with service views

* UI Updates - Sidebar, Topbar Alerts, and All Resources page (#17)

* move sidebar to use fixed positioning instead of sticky

* add alert details dialog to topbar alerts

* cleanup all resources page layout

* ensure resource links don't propagate clicks

* periphery support passing env with --env-file

* StackServicePage

* default run_directory to ./ for clarity

* add stack webhook listeners

* add default compose name of stack name

* stacks controlled with project name

* migrate to dotenvy

* add stack to dashboard

* remove deploying / destroying stack services

* update config files

* fix getting service logs

* git / docker provider management api

* implement passing git / registry token from db

* rename system user Github to Git Webhook

* separate deployed and latest services on stack info

* add stack service level operations

* UI Updates - Update Shadcn/UI components, prevent navbar menu layout shift (#20)

* add dashboard pie for resource syncs

* dashboard items same height

* update shadcn components

* ensure centered following sheet update

* cleanup layout, prevent navbar menu layout shifts

* add manual filter, fix toast call

* guard webhooks

* remove deployed_message, latest_message from StackListItemInfo

* stop all containers on server correctly

* support multiple compose files

* cache all containers networks images projects

* remove project missing from db cache

* work on sync deploy stuff

* rework deployment sync deploy to support stacks. they can depend on each other.

* UI Updates - Remove topbar transparency, pretty status badges, tidy resource page layout with a 'back' button (#21)

* remove topbar transparency

* cleanup unused

* responsive dashboard

* better mobile header

* dont need to calc 64px less since header is using position fixed

* add status badge component

* update status badges

* further simplify layout

* allow undefined status as prop

* use new status badges for alerts

* update status badges for all resources

* undo layout change

* tidy up resource page layout, add back button

* no need for button wrapper

* remove unused

* build cancel log

* update ts types

* fix fe type changes

* fe tweaks

* remove on build logs

* core refresh cache immediately on startup

* jwt_ttl

* canonicalize run directory on host

* update canonicalize error message

* core use docker-compose

* fix incorrect project missing, add status string to stack info

* remove entries in "after" that aren't deploying

* fix dockerfile

* build custom tag postfix

* sync fixes

* ensure UpdateGitProviderAccount doesn't change id

* ensure UpdateDockerRegistryAccount doesn't change id

* configure providers in the UI

* add // comment support to env, conversions

* add updates for provider deletes

* improve sync pending deploy log

* add more deployment actions

* add backward compat with v1.12 for clone repo

* stack deploy format

* fe

* alert menus clone when click resource link

* rename stacks

* don't close on click

* snake case stack state, in line with deployment state

* sync redeploy stack if newer hash (optional behind resource field 'latest_hash')

* remove nav to tree

* RefreshStack/Sync debug instruments

* improve inline UI docs

* implement resource base_permission backend

* plumbing for Repo build

* build repos

* write env file repos

* add latest hash / message to build info

* add optional hash to update

* keep built_hash updated

* add backend for build / repo latest hash management

* remove unused resources

* clean up repo dirs after cache update

* fix repo info deser error

* add build / repo git status

* fix page layouts

* improve layout responsive

* most config inline docs

* add descriptions for all resource types

* default local auth false

* fix omnibar arrow keys issue

* add compose file to example config

* image registry

* dashboard display no resources message

* update deps.

* show when no config

* resource sync use config git_provider

* fix networks

* fix deploy error due to after

* update lots of docs

* fix server stat charts not working

* update screenshots

* update changelog

* add a disclaimer

* remove file paths docs stuff

* build repo

* v1.13 - Komodo

* update docs for cli

* fill out the compose example more

---------

Co-authored-by: Karamvir Singh <67458484+karamvirsingh98@users.noreply.github.com>
2024-08-10 09:33:14 -07:00
mbecker20
59cb86d599 serde default on token re Issue 10 2024-08-02 11:13:31 -07:00
mbecker20
5f0a9ad652 remove env vars / conversions / labels # comment support 2024-07-31 13:04:57 -07:00
mbecker20
fc758121da note on login if no auth methods configured 2024-07-31 12:56:59 -07:00
mbecker20
95ccf1af0b reset version on copy 2024-07-31 05:03:33 -07:00
mbecker20
627f7ab585 detect aarch64 periphery install 2024-07-31 04:58:12 -07:00
mbecker20
4238abf61a fix resource sync delete operation 2024-07-31 02:19:24 -07:00
mbecker20
66bfe69983 add note about user periphery install 2024-07-31 00:14:19 -07:00
mbecker20
42b493ae10 host network in example 2024-07-30 15:56:01 -07:00
mbecker20
f4d6c50b67 ensure core config startup log redacted 2024-07-30 14:43:15 -07:00
mbecker20
17176a7d56 add note about upgrading periphery 2024-07-30 14:20:15 -07:00
mbecker20
140b95b70c skip secret interp respected for core secrets 2024-07-30 00:18:28 -07:00
mbecker20
3a2cb73088 improve git https config look 2024-07-29 20:51:55 -07:00
mbecker20
4585533bc5 migration optional env vars 2024-07-29 20:28:57 -07:00
mbecker20
83099f03a1 changelog 2024-07-29 19:36:48 -07:00
mbecker20
9e619c0250 add sync screenshots 2024-07-29 19:34:10 -07:00
mbecker20
edf49dc685 update resource syncs 2024-07-29 19:30:28 -07:00
mbecker20
beffc8c159 consistent dockerfile 2024-07-29 19:18:14 -07:00
mbecker20
d99cf87da0 update client to 1.12 2024-07-29 18:36:42 -07:00
mbecker20
8e19eb7b0f versions 2024-07-29 18:33:14 -07:00
mbecker20
78a0b56c73 migrator readme 2024-07-29 18:32:11 -07:00
mbecker20
bf5dc52237 fix upgrades docs 2024-07-29 18:29:49 -07:00
mbecker20
482ea59d4c add docsite upgrades 2024-07-29 18:28:12 -07:00
Maxwell Becker
7740d36f49 v1.12 Custom Git Providers / Docker Registries (#8)
* update deps

* remove patch when 0 for deployments using specific build version

* implement custom git provider and image registry support

* common providers api

* toml array alias

* username alias account

* get fe to build

* http or https

* fix frontend build

* improve registry / provider config

* frontend build

* rework deployment / builds image registry

* frontend builds

* update build config fe

* configure builder additional accounts / secrets

* guard against managing non-github repo webhooks

* fmt

* md size dashboard

* lowercase organization in image name

* update config docs

* update example env

* provider configuration

* distribute migrator

* fix casing mismatch

* docs
2024-07-29 18:23:58 -07:00
mbecker20
820754deda roadmap 2024-07-24 00:11:58 -07:00
mbecker20
4219884198 roadmapx 2024-07-24 00:11:08 -07:00
mbecker20
d9e24cc35a add roadmap 2024-07-24 00:10:32 -07:00
mbecker20
8d2ce884d9 1.11.1 updated hetzner instances 2024-07-20 02:49:38 -07:00
mbecker20
313b000e64 update hetzner server types 2024-07-20 01:16:52 -07:00
mbecker20
c2f9e29605 close failed procedure execution updates 2024-07-19 23:21:21 -07:00
Maxwell Becker
8c6f38cafb v1.11 Improve permission management (#6)
* add "all permissions" feature on user and user group schema

* prepare support for group all

* implement user.all and user_group.all for broad base permissioning

* clean up unused deps

* sync support user group permissions regex

* 1.11

* fix fe ? issue

* this doesn't work

* sync handle user group all set

* retain above non earlier

* remove permissions that already exist

* update docs

* add user group docs

* minimize user group permissions for execute

* sync toml

* add sync name to slack alert title

* add syncs to alerter white/blacklist

* use \\ instead of $reg

* share resource type base permissions api users and user groups

* manage user / group base permissions ui

* manage user / group base resource type permissions

* update api permission handling

* manage all resource permissions in table

* user show group membership

* update client to 1.11
2024-07-19 02:11:36 -07:00
mbecker20
4a03eba99a granular invalidations 2024-07-17 14:51:51 -07:00
mbecker20
79fe078e3b 1.10.5 cpu/mem only update alert if severity increases (or resolved) 2024-07-17 14:36:22 -07:00
mbecker20
6be032fcd4 update client to 1.10.4 2024-07-16 16:06:38 -07:00
mbecker20
d0c94278ec 1.10.4 fix EnvVar parsing when value contains '=' 2024-07-16 16:05:11 -07:00
mbecker20
03ae7268fd fix server table search when sorting by deployments 2024-07-10 12:09:42 -07:00
mbecker20
f443294818 add clear link to api docs 2024-07-10 02:33:14 -07:00
mbecker20
2202835d86 improve core setup docs 2024-07-10 02:26:58 -07:00
mbecker20
98fbc7a506 improve migrator and add Dockerfile 2024-07-10 02:25:44 -07:00
mbecker20
8ee89296e1 frontend only invalidate on update Complete 2024-07-09 13:50:03 -07:00
mbecker20
989c3d2d01 more compact webhook button labels 2024-07-09 02:26:50 -07:00
mbecker20
dc72883b90 update config example 2024-07-09 02:09:17 -07:00
mbecker20
e99364430f update local client version 2024-07-09 02:06:30 -07:00
mbecker20
e106e38cd9 1.10.3 support multiple github webhook app installations 2024-07-09 02:05:38 -07:00
mbecker20
e4d0c56e49 debug git logs 2024-07-09 00:50:24 -07:00
mbecker20
7427a158f4 full err too large for alert 2024-07-09 00:40:11 -07:00
mbecker20
b926f89954 log on build unsuccessful and alerting 2024-07-09 00:20:03 -07:00
mbecker20
e666a22f08 debug instrument git calls 2024-07-09 00:09:06 -07:00
mbecker20
4107f779a5 fix build increment major version 2024-07-08 13:15:52 -07:00
mbecker20
828d6cdfed improve responsive 2024-07-05 20:19:20 -07:00
mbecker20
fe82400a99 1.10.2 ResourceSync manage repo webhooks 2024-07-05 20:02:20 -07:00
mbecker20
e37fc6adde publish 1.10.1 2024-07-05 03:32:24 -07:00
mbecker20
c21c8f99ae manage webhooks working 2024-07-05 03:29:23 -07:00
mbecker20
78a63f92bb build repo webhook management 2024-07-05 03:17:29 -07:00
mbecker20
ce67655021 core info provide owners 2024-07-05 02:26:18 -07:00
mbecker20
2ccecf38f2 default pk path /github/private-key.pem 2024-07-05 02:15:35 -07:00
mbecker20
1ddae31aad update config example 2024-07-05 02:06:27 -07:00
mbecker20
097fbefa63 1.10.1 2024-07-05 02:02:59 -07:00
mbecker20
b51442a661 ts types 2024-07-05 02:02:25 -07:00
mbecker20
a21d49d224 build / repo webhook write api 2024-07-05 02:02:03 -07:00
mbecker20
c99a33880e Create / Delete webhook api 2024-07-05 01:31:15 -07:00
mbecker20
6ee55262ba webhook management api aware if repo can be managed 2024-07-05 01:18:21 -07:00
mbecker20
878b9b55bb see whether webhooks enabled 2024-07-05 01:05:27 -07:00
mbecker20
af6193f83a update async_timing_util 2024-07-04 21:15:38 -07:00
mbecker20
b8fefddd8b EC2 2024-07-04 19:13:49 -07:00
mbecker20
7f490f5bf2 tweak 2024-07-04 19:12:02 -07:00
mbecker20
efa7c13286 docs 2024-07-04 19:08:48 -07:00
mbecker20
f913be7a0b builder setup guide 2024-07-04 19:03:43 -07:00
mbecker20
35901ef7ea actions can wrap 2024-07-04 17:53:24 -07:00
mbecker20
5b938490fc response 2024-07-04 17:29:45 -07:00
mbecker20
a7326a0116 user group toml export replace target ids with names 2024-07-04 17:10:36 -07:00
mbecker20
877bda91d7 improve log responsiveness 2024-07-04 16:49:08 -07:00
mbecker20
439a091e50 improve resource responsive 2024-07-04 16:29:13 -07:00
mbecker20
b0e89f4963 fix dashboard 2024-07-04 15:46:43 -07:00
mbecker20
b1e4b55ba1 more responsive 2024-07-04 14:41:40 -07:00
mbecker20
d4a1891c70 delete user group 2024-07-04 14:17:03 -07:00
mbecker20
9db7592d7e all_resources tables use right search 2024-07-04 01:25:40 -07:00
mbecker20
84fb603951 1.10 2024-07-01 03:18:26 -07:00
mbecker20
55bac0dd13 check right thing for empty 2024-07-01 03:12:22 -07:00
mbecker20
b143f42363 update mungos 2024-07-01 02:47:06 -07:00
mbecker20
007efd136a 1.10.0 pre 2024-07-01 02:38:24 -07:00
mbecker20
b329767f9e 1.10.0-pre-0 2024-07-01 02:33:01 -07:00
mbecker20
b4231957d5 config for secret args 2024-07-01 02:31:53 -07:00
mbecker20
b4dc446f95 interpolate core variables / secrets into build secret_args 2024-07-01 02:27:03 -07:00
mbecker20
c92515cecc combine into router 2024-07-01 01:44:07 -07:00
mbecker20
f3712feea2 finish periphery clean 2024-07-01 01:39:03 -07:00
mbecker20
0e81d17860 shrink periphery implementation 2024-07-01 01:19:25 -07:00
mbecker20
c3f1557b83 fix mem alert 2024-06-30 00:27:37 -07:00
mbecker20
5f88e4b436 separate webhook actions 2024-06-25 01:22:38 -07:00
mbecker20
473c6b3867 dont send failed build alert on build cancel 2024-06-24 16:59:34 -07:00
mbecker20
c10edaa5d1 fix builder toml export 2024-06-23 03:00:31 -07:00
370 changed files with 27360 additions and 11705 deletions

4
.gitignore vendored
View File

@@ -6,4 +6,6 @@ dist
.env
.env.development
creds.toml
core.config.toml
core.config.toml
.syncs
.stacks

915
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -3,19 +3,19 @@ resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.9.0"
version = "1.13.1"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/mbecker20/monitor"
homepage = "https://docs.monitor.mogh.tech"
homepage = "https://docs.monitor.dev"
[patch.crates-io]
monitor_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_client = "1.9.0"
monitor_client = "1.13.1"
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
@@ -24,24 +24,24 @@ git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.4.3", default-features = false }
serror = { version = "0.4.6", default-features = false }
slack = { version = "0.1.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
async_timing_util = "0.1.14"
async_timing_util = "1.0.0"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "0.3.0"
resolver_api = "1.1.0"
mongo_indexed = "1.0.0"
resolver_api = "1.1.1"
toml_pretty = "1.1.2"
parse_csl = "0.1.0"
mungos = "0.5.6"
mungos = "1.0.0"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.38.0", features = ["full"] }
reqwest = { version = "0.12.4", features = ["json"] }
tokio = { version = "1.39.2", features = ["full"] }
reqwest = { version = "0.12.5", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
futures-util = "0.3.30"
@@ -55,14 +55,15 @@ tokio-tungstenite = "0.23.1"
# SER/DE
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.203", features = ["derive"] }
strum = { version = "0.26.2", features = ["derive"] }
serde_json = "1.0.117"
toml = "0.8.14"
serde = { version = "1.0.204", features = ["derive"] }
strum = { version = "0.26.3", features = ["derive"] }
serde_json = "1.0.122"
serde_yaml = "0.9.34"
toml = "0.8.19"
# ERROR
anyhow = "1.0.86"
thiserror = "1.0.61"
thiserror = "1.0.63"
# LOGGING
opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] }
@@ -73,13 +74,14 @@ opentelemetry = "0.23.0"
tracing = "0.1.40"
# CONFIG
clap = { version = "4.5.7", features = ["derive"] }
dotenv = "0.15.0"
clap = { version = "4.5.13", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
uuid = { version = "1.10.0", features = ["v4", "fast-rng", "serde"] }
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.15.1"
base64 = "0.22.1"
hmac = "0.12.1"
@@ -89,16 +91,19 @@ jwt = "0.16.0"
hex = "0.4.3"
# SYSTEM
bollard = "0.16.1"
sysinfo = "0.30.12"
bollard = "0.17.0"
sysinfo = "0.31.2"
# CLOUD
aws-config = "1.5.1"
aws-sdk-ec2 = "1.51.1"
aws-sdk-ecr = "1.33.0"
aws-config = "1.5.4"
aws-sdk-ec2 = "1.62.0"
aws-sdk-ecr = "1.37.0"
# MISC
derive_builder = "0.20.0"
typeshare = "1.0.3"
octorust = "0.7.0"
colored = "2.1.0"
regex = "1.10.6"
bson = "2.11.0"

View File

@@ -19,5 +19,5 @@ tracing.workspace = true
axum.workspace = true
anyhow.workspace = true
serde.workspace = true
dotenv.workspace = true
dotenvy.workspace = true
envy.workspace = true

View File

@@ -21,7 +21,7 @@ fn default_port() -> u16 {
}
async fn app() -> anyhow::Result<()> {
dotenv::dotenv().ok();
dotenvy::dotenv().ok();
logger::init(&Default::default())?;
let Env { port } =

View File

@@ -8,6 +8,8 @@ Monitor CLI is a tool to sync monitor resources and execute operations.
cargo install monitor_cli
```
Note: On Ubuntu, also requires `apt install build-essential pkg-config libssl-dev`.
## Usage
### Credentials
@@ -26,33 +28,6 @@ You can also bypass using any file and pass the information using `--url`, `--ke
monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
```
### Run Syncs
```sh
## Sync resources in a single file
monitor sync ./resources/deployments.toml
## Sync resources gathered across multiple files in a directory
monitor sync ./resources
## Path defaults to './resources', in this case you can just use:
monitor sync
```
#### Manual
```md
Runs syncs on resource files
Usage: monitor sync [OPTIONS] [PATH]
Arguments:
[PATH] The path of the resource folder / file Folder paths will recursively incorporate all the resources it finds under the folder [default: ./resources]
Options:
--delete Will delete any resources that aren't included in the resource files
-h, --help Print help
```
### Run Executions
```sh
@@ -70,16 +45,31 @@ Commands:
none The "null" execution. Does nothing
run-procedure Runs the target procedure. Response: [Update]
run-build Runs the target build. Response: [Update]
cancel-build Cancels the target build. Only does anything if the build is `building` when called. Response: [Update]
deploy Deploys the container for the target deployment. Response: [Update]
start-container Starts the container for the target deployment. Response: [Update]
restart-container Restarts the container for the target deployment. Response: [Update]
pause-container Pauses the container for the target deployment. Response: [Update]
unpause-container Unpauses the container for the target deployment. Response: [Update]
stop-container Stops the container for the target deployment. Response: [Update]
stop-all-containers Stops all deployments on the target server. Response: [Update]
remove-container Stops and removes the container for the target deployment. Response: [Update]
clone-repo Clones the target repo. Response: [Update]
pull-repo Pulls the target repo. Response: [Update]
build-repo Builds the target repo, using the attached builder. Response: [Update]
cancel-repo-build Cancels the target repo build. Only does anything if the repo build is `building` when called. Response: [Update]
stop-all-containers Stops all containers on the target server. Response: [Update]
prune-networks Prunes the docker networks on the target server. Response: [Update]
prune-images Prunes the docker images on the target server. Response: [Update]
prune-containers Prunes the docker containers on the target server. Response: [Update]
run-sync Runs the target resource sync. Response: [Update]
deploy-stack Deploys the target stack. `docker compose up`. Response: [Update]
start-stack Starts the target stack. `docker compose start`. Response: [Update]
restart-stack Restarts the target stack. `docker compose restart`. Response: [Update]
pause-stack Pauses the target stack. `docker compose pause`. Response: [Update]
unpause-stack Unpauses the target stack. `docker compose unpause`. Response: [Update]
stop-stack Stops the target stack. `docker compose stop`. Response: [Update]
destroy-stack Destroys the target stack. `docker compose down`. Response: [Update]
sleep
help Print this message or the help of the given subcommand(s)
Options:

View File

@@ -32,30 +32,19 @@ pub struct CliArgs {
}
fn default_creds() -> String {
let home = std::env::var("HOME")
.expect("no HOME env var. cannot get default config path.");
let home =
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
format!("{home}/.config/monitor/creds.toml")
}
#[derive(Debug, Clone, Subcommand)]
pub enum Command {
/// Runs syncs on resource files
Sync {
/// The path of the resource folder / file
/// Folder paths will recursively incorporate all the resources it finds under the folder
#[arg(default_value_t = String::from("./resources"))]
path: String,
/// Will delete any resources that aren't included in the resource files.
#[arg(long, default_value_t = false)]
delete: bool,
},
/// Runs an execution
Execute {
#[command(subcommand)]
execution: Execution,
},
// Room for more
}
#[derive(Debug, Deserialize)]

View File

@@ -27,12 +27,24 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Deploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -48,6 +60,12 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::PullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelRepoBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneNetworks(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -60,6 +78,27 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunSync(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DestroyStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Sleep(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -78,12 +117,24 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunBuild(request) => {
monitor_client().execute(request).await
}
Execution::CancelBuild(request) => {
monitor_client().execute(request).await
}
Execution::Deploy(request) => {
monitor_client().execute(request).await
}
Execution::StartContainer(request) => {
monitor_client().execute(request).await
}
Execution::RestartContainer(request) => {
monitor_client().execute(request).await
}
Execution::PauseContainer(request) => {
monitor_client().execute(request).await
}
Execution::UnpauseContainer(request) => {
monitor_client().execute(request).await
}
Execution::StopContainer(request) => {
monitor_client().execute(request).await
}
@@ -99,6 +150,12 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::PullRepo(request) => {
monitor_client().execute(request).await
}
Execution::BuildRepo(request) => {
monitor_client().execute(request).await
}
Execution::CancelRepoBuild(request) => {
monitor_client().execute(request).await
}
Execution::PruneNetworks(request) => {
monitor_client().execute(request).await
}
@@ -111,6 +168,27 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunSync(request) => {
monitor_client().execute(request).await
}
Execution::DeployStack(request) => {
monitor_client().execute(request).await
}
Execution::StartStack(request) => {
monitor_client().execute(request).await
}
Execution::RestartStack(request) => {
monitor_client().execute(request).await
}
Execution::PauseStack(request) => {
monitor_client().execute(request).await
}
Execution::UnpauseStack(request) => {
monitor_client().execute(request).await
}
Execution::StopStack(request) => {
monitor_client().execute(request).await
}
Execution::DestroyStack(request) => {
monitor_client().execute(request).await
}
Execution::Sleep(request) => {
let duration =
Duration::from_millis(request.duration_ms as u64);

View File

@@ -7,9 +7,7 @@ use monitor_client::api::read::GetVersion;
mod args;
mod exec;
mod helpers;
mod maps;
mod state;
mod sync;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
@@ -20,9 +18,6 @@ async fn main() -> anyhow::Result<()> {
info!("monitor version: {}", version.to_string().blue().bold());
match &state::cli_args().command {
args::Command::Sync { path, delete } => {
sync::run(path, *delete).await?
}
args::Command::Execute { execution } => {
exec::run(execution.to_owned()).await?
}

View File

@@ -1,328 +0,0 @@
use std::{collections::HashMap, sync::OnceLock};
use monitor_client::{
api::read,
entities::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
sync::ResourceSync, tag::Tag, user::User, user_group::UserGroup,
variable::Variable,
},
};
use crate::state::monitor_client;
pub fn name_to_build() -> &'static HashMap<String, Build> {
static NAME_TO_BUILD: OnceLock<HashMap<String, Build>> =
OnceLock::new();
NAME_TO_BUILD.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilds::default()),
)
.expect("failed to get builds from monitor")
.into_iter()
.map(|build| (build.name.clone(), build))
.collect()
})
}
pub fn id_to_build() -> &'static HashMap<String, Build> {
static ID_TO_BUILD: OnceLock<HashMap<String, Build>> =
OnceLock::new();
ID_TO_BUILD.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilds::default()),
)
.expect("failed to get builds from monitor")
.into_iter()
.map(|build| (build.id.clone(), build))
.collect()
})
}
pub fn name_to_deployment() -> &'static HashMap<String, Deployment> {
static NAME_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
OnceLock::new();
NAME_TO_DEPLOYMENT.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullDeployments::default()),
)
.expect("failed to get deployments from monitor")
.into_iter()
.map(|deployment| (deployment.name.clone(), deployment))
.collect()
})
}
pub fn id_to_deployment() -> &'static HashMap<String, Deployment> {
static ID_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
OnceLock::new();
ID_TO_DEPLOYMENT.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullDeployments::default()),
)
.expect("failed to get deployments from monitor")
.into_iter()
.map(|deployment| (deployment.id.clone(), deployment))
.collect()
})
}
pub fn name_to_server() -> &'static HashMap<String, Server> {
static NAME_TO_SERVER: OnceLock<HashMap<String, Server>> =
OnceLock::new();
NAME_TO_SERVER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullServers::default()),
)
.expect("failed to get servers from monitor")
.into_iter()
.map(|server| (server.name.clone(), server))
.collect()
})
}
pub fn id_to_server() -> &'static HashMap<String, Server> {
static ID_TO_SERVER: OnceLock<HashMap<String, Server>> =
OnceLock::new();
ID_TO_SERVER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullServers::default()),
)
.expect("failed to get servers from monitor")
.into_iter()
.map(|server| (server.id.clone(), server))
.collect()
})
}
pub fn name_to_builder() -> &'static HashMap<String, Builder> {
static NAME_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
OnceLock::new();
NAME_TO_BUILDER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilders::default()),
)
.expect("failed to get builders from monitor")
.into_iter()
.map(|builder| (builder.name.clone(), builder))
.collect()
})
}
pub fn id_to_builder() -> &'static HashMap<String, Builder> {
static ID_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
OnceLock::new();
ID_TO_BUILDER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullBuilders::default()),
)
.expect("failed to get builders from monitor")
.into_iter()
.map(|builder| (builder.id.clone(), builder))
.collect()
})
}
pub fn name_to_alerter() -> &'static HashMap<String, Alerter> {
static NAME_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
OnceLock::new();
NAME_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullAlerters::default()),
)
.expect("failed to get alerters from monitor")
.into_iter()
.map(|alerter| (alerter.name.clone(), alerter))
.collect()
})
}
pub fn id_to_alerter() -> &'static HashMap<String, Alerter> {
static ID_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
OnceLock::new();
ID_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullAlerters::default()),
)
.expect("failed to get alerters from monitor")
.into_iter()
.map(|alerter| (alerter.id.clone(), alerter))
.collect()
})
}
pub fn name_to_repo() -> &'static HashMap<String, Repo> {
static NAME_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
OnceLock::new();
NAME_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullRepos::default()),
)
.expect("failed to get repos from monitor")
.into_iter()
.map(|repo| (repo.name.clone(), repo))
.collect()
})
}
pub fn id_to_repo() -> &'static HashMap<String, Repo> {
static ID_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
OnceLock::new();
ID_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullRepos::default()),
)
.expect("failed to get repos from monitor")
.into_iter()
.map(|repo| (repo.id.clone(), repo))
.collect()
})
}
pub fn name_to_procedure() -> &'static HashMap<String, Procedure> {
static NAME_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
OnceLock::new();
NAME_TO_PROCEDURE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullProcedures::default()),
)
.expect("failed to get procedures from monitor")
.into_iter()
.map(|procedure| (procedure.name.clone(), procedure))
.collect()
})
}
pub fn id_to_procedure() -> &'static HashMap<String, Procedure> {
static ID_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
OnceLock::new();
ID_TO_PROCEDURE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullProcedures::default()),
)
.expect("failed to get procedures from monitor")
.into_iter()
.map(|procedure| (procedure.id.clone(), procedure))
.collect()
})
}
pub fn name_to_server_template(
) -> &'static HashMap<String, ServerTemplate> {
static NAME_TO_SERVER_TEMPLATE: OnceLock<
HashMap<String, ServerTemplate>,
> = OnceLock::new();
NAME_TO_SERVER_TEMPLATE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullServerTemplates::default()),
)
.expect("failed to get server templates from monitor")
.into_iter()
.map(|procedure| (procedure.name.clone(), procedure))
.collect()
})
}
pub fn id_to_server_template(
) -> &'static HashMap<String, ServerTemplate> {
static ID_TO_SERVER_TEMPLATE: OnceLock<
HashMap<String, ServerTemplate>,
> = OnceLock::new();
ID_TO_SERVER_TEMPLATE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullServerTemplates::default()),
)
.expect("failed to get server templates from monitor")
.into_iter()
.map(|procedure| (procedure.id.clone(), procedure))
.collect()
})
}
pub fn name_to_resource_sync(
) -> &'static HashMap<String, ResourceSync> {
static NAME_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
OnceLock::new();
NAME_TO_SYNC.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullResourceSyncs::default()),
)
.expect("failed to get syncs from monitor")
.into_iter()
.map(|sync| (sync.name.clone(), sync))
.collect()
})
}
pub fn id_to_resource_sync() -> &'static HashMap<String, ResourceSync>
{
static ID_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
OnceLock::new();
ID_TO_SYNC.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullResourceSyncs::default()),
)
.expect("failed to get syncs from monitor")
.into_iter()
.map(|sync| (sync.id.clone(), sync))
.collect()
})
}
pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
static NAME_TO_USER_GROUP: OnceLock<HashMap<String, UserGroup>> =
OnceLock::new();
NAME_TO_USER_GROUP.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListUserGroups::default()),
)
.expect("failed to get user groups from monitor")
.into_iter()
.map(|user_group| (user_group.name.clone(), user_group))
.collect()
})
}
pub fn name_to_variable() -> &'static HashMap<String, Variable> {
static NAME_TO_VARIABLE: OnceLock<HashMap<String, Variable>> =
OnceLock::new();
NAME_TO_VARIABLE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListVariables::default()),
)
.expect("failed to get user groups from monitor")
.variables
.into_iter()
.map(|variable| (variable.name.clone(), variable))
.collect()
})
}
pub fn id_to_user() -> &'static HashMap<String, User> {
static ID_TO_USER: OnceLock<HashMap<String, User>> =
OnceLock::new();
ID_TO_USER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListUsers::default()),
)
.expect("failed to get users from monitor")
.into_iter()
.map(|user| (user.id.clone(), user))
.collect()
})
}
pub fn id_to_tag() -> &'static HashMap<String, Tag> {
static ID_TO_TAG: OnceLock<HashMap<String, Tag>> = OnceLock::new();
ID_TO_TAG.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListTags::default()),
)
.expect("failed to get tags from monitor")
.into_iter()
.map(|tag| (tag.id.clone(), tag))
.collect()
})
}

View File

@@ -1,80 +0,0 @@
use std::{
fs,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{anyhow, Context};
use colored::Colorize;
use monitor_client::entities::toml::ResourcesToml;
use serde::de::DeserializeOwned;
pub fn read_resources(path: &str) -> anyhow::Result<ResourcesToml> {
let mut res = ResourcesToml::default();
let path =
PathBuf::from_str(path).context("invalid resources path")?;
read_resources_recursive(&path, &mut res)?;
Ok(res)
}
fn read_resources_recursive(
path: &Path,
resources: &mut ResourcesToml,
) -> anyhow::Result<()> {
let res =
fs::metadata(path).context("failed to get path metadata")?;
if res.is_file() {
if !path
.extension()
.map(|ext| ext == "toml")
.unwrap_or_default()
{
return Ok(());
}
let more = match parse_toml_file::<ResourcesToml>(path) {
Ok(res) => res,
Err(e) => {
warn!("failed to parse {:?}. skipping file | {e:#}", path);
return Ok(());
}
};
info!(
"{} from {}",
"adding resources".green().bold(),
path.display().to_string().blue().bold()
);
resources.servers.extend(more.servers);
resources.deployments.extend(more.deployments);
resources.builds.extend(more.builds);
resources.repos.extend(more.repos);
resources.procedures.extend(more.procedures);
resources.builders.extend(more.builders);
resources.alerters.extend(more.alerters);
resources.server_templates.extend(more.server_templates);
resources.resource_syncs.extend(more.resource_syncs);
resources.user_groups.extend(more.user_groups);
resources.variables.extend(more.variables);
Ok(())
} else if res.is_dir() {
let directory = fs::read_dir(path)
.context("failed to read directory contents")?;
for entry in directory.into_iter().flatten() {
if let Err(e) =
read_resources_recursive(&entry.path(), resources)
{
warn!("failed to read additional resources at path | {e:#}");
}
}
Ok(())
} else {
Err(anyhow!("resources path is neither file nor directory"))
}
}
fn parse_toml_file<T: DeserializeOwned>(
path: impl AsRef<std::path::Path>,
) -> anyhow::Result<T> {
let contents = std::fs::read_to_string(path)
.context("failed to read file contents")?;
toml::from_str(&contents).context("failed to parse toml contents")
}

View File

@@ -1,174 +0,0 @@
use colored::Colorize;
use monitor_client::entities::{
self, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
};
use crate::{helpers::wait_for_enter, state::cli_args};
mod file;
mod resource;
mod resources;
mod user_group;
mod variables;
use resource::ResourceSync;
pub async fn run(path: &str, delete: bool) -> anyhow::Result<()> {
info!("resources path: {}", path.blue().bold());
if delete {
warn!("Delete mode {}", "enabled".bold());
}
let resources = file::read_resources(path)?;
info!("computing sync actions...");
let (server_creates, server_updates, server_deletes) =
resource::get_updates::<Server>(resources.servers, delete)?;
let (deployment_creates, deployment_updates, deployment_deletes) =
resource::get_updates::<Deployment>(
resources.deployments,
delete,
)?;
let (build_creates, build_updates, build_deletes) =
resource::get_updates::<Build>(resources.builds, delete)?;
let (repo_creates, repo_updates, repo_deletes) =
resource::get_updates::<Repo>(resources.repos, delete)?;
let (procedure_creates, procedure_updates, procedure_deletes) =
resource::get_updates::<Procedure>(resources.procedures, delete)?;
let (builder_creates, builder_updates, builder_deletes) =
resource::get_updates::<Builder>(resources.builders, delete)?;
let (alerter_creates, alerter_updates, alerter_deletes) =
resource::get_updates::<Alerter>(resources.alerters, delete)?;
let (
server_template_creates,
server_template_updates,
server_template_deletes,
) = resource::get_updates::<ServerTemplate>(
resources.server_templates,
delete,
)?;
let (
resource_sync_creates,
resource_sync_updates,
resource_sync_deletes,
) = resource::get_updates::<entities::sync::ResourceSync>(
resources.resource_syncs,
delete,
)?;
let (variable_creates, variable_updates, variable_deletes) =
variables::get_updates(resources.variables, delete)?;
let (user_group_creates, user_group_updates, user_group_deletes) =
user_group::get_updates(resources.user_groups, delete).await?;
if resource_sync_creates.is_empty()
&& resource_sync_updates.is_empty()
&& resource_sync_deletes.is_empty()
&& server_template_creates.is_empty()
&& server_template_updates.is_empty()
&& server_template_deletes.is_empty()
&& server_creates.is_empty()
&& server_updates.is_empty()
&& server_deletes.is_empty()
&& deployment_creates.is_empty()
&& deployment_updates.is_empty()
&& deployment_deletes.is_empty()
&& build_creates.is_empty()
&& build_updates.is_empty()
&& build_deletes.is_empty()
&& builder_creates.is_empty()
&& builder_updates.is_empty()
&& builder_deletes.is_empty()
&& alerter_creates.is_empty()
&& alerter_updates.is_empty()
&& alerter_deletes.is_empty()
&& repo_creates.is_empty()
&& repo_updates.is_empty()
&& repo_deletes.is_empty()
&& procedure_creates.is_empty()
&& procedure_updates.is_empty()
&& procedure_deletes.is_empty()
&& user_group_creates.is_empty()
&& user_group_updates.is_empty()
&& user_group_deletes.is_empty()
&& variable_creates.is_empty()
&& variable_updates.is_empty()
&& variable_deletes.is_empty()
{
info!("{}. exiting.", "nothing to do".green().bold());
return Ok(());
}
if !cli_args().yes {
wait_for_enter("run sync")?;
}
// No deps
entities::sync::ResourceSync::run_updates(
resource_sync_creates,
resource_sync_updates,
resource_sync_deletes,
)
.await;
ServerTemplate::run_updates(
server_template_creates,
server_template_updates,
server_template_deletes,
)
.await;
Server::run_updates(server_creates, server_updates, server_deletes)
.await;
Alerter::run_updates(
alerter_creates,
alerter_updates,
alerter_deletes,
)
.await;
// Dependant on server
Builder::run_updates(
builder_creates,
builder_updates,
builder_deletes,
)
.await;
Repo::run_updates(repo_creates, repo_updates, repo_deletes).await;
// Dependant on builder
Build::run_updates(build_creates, build_updates, build_deletes)
.await;
// Dependant on server / build
Deployment::run_updates(
deployment_creates,
deployment_updates,
deployment_deletes,
)
.await;
// Dependant on everything
Procedure::run_updates(
procedure_creates,
procedure_updates,
procedure_deletes,
)
.await;
variables::run_updates(
variable_creates,
variable_updates,
variable_deletes,
)
.await;
user_group::run_updates(
user_group_creates,
user_group_updates,
user_group_deletes,
)
.await;
Ok(())
}

View File

@@ -1,358 +0,0 @@
use std::collections::HashMap;
use colored::Colorize;
use monitor_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
resource::Resource, toml::ResourceToml, update::ResourceTarget,
},
};
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use serde::Serialize;
use crate::maps::id_to_tag;
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
pub type ToCreate<T> = Vec<ResourceToml<T>>;
/// Vec of resource names
pub type ToDelete = Vec<String>;
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>, ToDelete);
pub struct ToUpdateItem<T: Default> {
pub id: String,
pub resource: ResourceToml<T>,
pub update_description: bool,
pub update_tags: bool,
}
pub trait ResourceSync: Sized {
type Config: Clone
+ Default
+ Send
+ From<Self::PartialConfig>
+ PartialDiff<Self::PartialConfig, Self::ConfigDiff>
+ 'static;
type Info: Default + 'static;
type PartialConfig: std::fmt::Debug
+ Clone
+ Send
+ Default
+ From<Self::Config>
+ From<Self::ConfigDiff>
+ Serialize
+ MaybeNone
+ 'static;
type ConfigDiff: Diff + MaybeNone;
fn display() -> &'static str;
fn resource_target(id: String) -> ResourceTarget;
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>;
/// Creates the resource and returns created id.
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String>;
/// Updates the resource at id with the partial config.
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()>;
/// Apply any changes to incoming toml partial config
/// before it is diffed against existing config
fn validate_partial_config(_config: &mut Self::PartialConfig) {}
/// Diffs the declared toml (partial) against the full existing config.
/// Removes all fields from toml (partial) that haven't changed.
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff>;
/// Apply any changes to computed config diff
/// before logging
fn validate_diff(_diff: &mut Self::ConfigDiff) {}
/// Deletes the target resource
async fn delete(id_or_name: String) -> anyhow::Result<()>;
async fn run_updates(
to_create: ToCreate<Self::PartialConfig>,
to_update: ToUpdate<Self::PartialConfig>,
to_delete: ToDelete,
) {
for resource in to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match Self::create(resource).await {
Ok(id) => id,
Err(e) => {
warn!(
"failed to create {} {name} | {e:#}",
Self::display(),
);
continue;
}
};
run_update_tags::<Self>(id.clone(), &name, tags).await;
run_update_description::<Self>(id, &name, description).await;
info!(
"{} {} '{}'",
"created".green().bold(),
Self::display(),
name.bold(),
);
}
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
} in to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if update_description {
run_update_description::<Self>(
id.clone(),
&name,
description,
)
.await;
}
if update_tags {
run_update_tags::<Self>(id.clone(), &name, tags).await;
}
if !resource.config.is_none() {
if let Err(e) = Self::update(id, resource).await {
warn!(
"failed to update config on {} {name} | {e:#}",
Self::display()
);
} else {
info!(
"{} {} '{}' configuration",
"updated".blue().bold(),
Self::display(),
name.bold(),
);
}
}
}
for resource in to_delete {
if let Err(e) = Self::delete(resource.clone()).await {
warn!(
"failed to delete {} {resource} | {e:#}",
Self::display()
);
} else {
info!(
"{} {} '{}'",
"deleted".red().bold(),
Self::display(),
resource.bold(),
);
}
}
}
}
/// Gets all the resources to update, logging along the way.
pub fn get_updates<Resource: ResourceSync>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
let map = Resource::name_to_resource();
let mut to_create = ToCreate::<Resource::PartialConfig>::new();
let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
let mut to_delete = ToDelete::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
to_delete.push(resource.name.clone());
}
}
}
for mut resource in resources {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: Resource::Config = resource.config.into();
resource.config = config.into();
Resource::validate_partial_config(&mut resource.config);
let mut diff = Resource::get_diff(
original.config.clone(),
resource.config,
)?;
Resource::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| {
id_to_tag().get(id).map(|t| t.name.clone())
})
.collect::<Vec<_>>();
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& resource.description == original.description
&& resource.tags == original_tags
{
continue;
}
println!(
"\n{}: {}: '{}'\n-------------------",
"UPDATE".blue(),
Resource::display(),
resource.name.bold(),
);
let mut lines = Vec::<String>::new();
if resource.description != original.description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
original.description.red(),
"to".dimmed(),
resource.description.green()
))
}
if resource.tags != original_tags {
let from = format!("{:?}", original_tags).red();
let to = format!("{:?}", resource.tags).green();
lines.push(format!(
"{}: 'tags'\n{}: {from}\n{}: {to}",
"field".dimmed(),
"from".dimmed(),
"to".dimmed(),
));
}
lines.extend(diff.iter_field_diffs().map(
|FieldDiff { field, from, to }| {
format!(
"{}: '{field}'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
from.red(),
"to".dimmed(),
to.green()
)
},
));
println!("{}", lines.join("\n-------------------\n"));
// Minimizes updates through diffing.
resource.config = diff.into();
let update = ToUpdateItem {
id: original.id.clone(),
update_description: resource.description
!= original.description,
update_tags: resource.tags != original_tags,
resource,
};
to_update.push(update);
}
None => {
println!(
"\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
"CREATE".green(),
Resource::display(),
resource.name.bold().green(),
"description".dimmed(),
resource.description,
"tags".dimmed(),
resource.tags,
"config".dimmed(),
serde_json::to_string_pretty(&resource.config)?
);
to_create.push(resource);
}
}
}
for name in &to_delete {
println!(
"\n{}: {}: '{}'\n-------------------",
"DELETE".red(),
Resource::display(),
name.bold(),
);
}
Ok((to_create, to_update, to_delete))
}
pub async fn run_update_tags<Resource: ResourceSync>(
id: String,
name: &str,
tags: Vec<String>,
) {
// Update tags
if let Err(e) = crate::state::monitor_client()
.write(UpdateTagsOnResource {
target: Resource::resource_target(id),
tags,
})
.await
{
warn!(
"failed to update tags on {} {name} | {e:#}",
Resource::display(),
);
} else {
info!(
"{} {} '{}' tags",
"updated".blue().bold(),
Resource::display(),
name.bold(),
);
}
}
pub async fn run_update_description<Resource: ResourceSync>(
id: String,
name: &str,
description: String,
) {
if let Err(e) = crate::state::monitor_client()
.write(UpdateDescription {
target: Resource::resource_target(id.clone()),
description,
})
.await
{
warn!("failed to update resource {id} description | {e:#}");
} else {
info!(
"{} {} '{}' description",
"updated".blue().bold(),
Resource::display(),
name.bold(),
);
}
}

View File

@@ -1,77 +0,0 @@
use partial_derive2::PartialDiff;
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateAlerter, DeleteAlerter, UpdateAlerter},
entities::{
alerter::{
Alerter, AlerterConfig, AlerterConfigDiff, PartialAlerterConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use crate::{
maps::name_to_alerter, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Alerter {
type Config = AlerterConfig;
type Info = ();
type PartialConfig = PartialAlerterConfig;
type ConfigDiff = AlerterConfigDiff;
fn display() -> &'static str {
"alerter"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Alerter(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_alerter()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateAlerter {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateAlerter {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteAlerter { id }).await?;
Ok(())
}
}

View File

@@ -1,93 +0,0 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateBuild, DeleteBuild, UpdateBuild},
entities::{
build::{
Build, BuildConfig, BuildConfigDiff, BuildInfo,
PartialBuildConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_builder, name_to_build},
state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Build {
type Config = BuildConfig;
type Info = BuildInfo;
type PartialConfig = PartialBuildConfig;
type ConfigDiff = BuildConfigDiff;
fn display() -> &'static str {
"build"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Build(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_build()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateBuild {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateBuild {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace the builder id with name
original.builder_id = id_to_builder()
.get(&original.builder_id)
.map(|b| b.name.clone())
.unwrap_or_default();
Ok(original.partial_diff(update))
}
fn validate_diff(diff: &mut Self::ConfigDiff) {
if let Some((_, to)) = &diff.version {
if to.is_none() {
diff.version = None;
}
}
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteBuild { id }).await?;
Ok(())
}
}

View File

@@ -1,86 +0,0 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateBuilder, DeleteBuilder, UpdateBuilder},
entities::{
builder::{
Builder, BuilderConfig, BuilderConfigDiff, PartialBuilderConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_server, name_to_builder},
state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Builder {
type Config = BuilderConfig;
type Info = ();
type PartialConfig = PartialBuilderConfig;
type ConfigDiff = BuilderConfigDiff;
fn display() -> &'static str {
"builder"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Builder(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_builder()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateBuilder {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateBuilder {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace the server id with name for server-type builders
if let BuilderConfig::Server(config) = &mut original {
config.server_id = id_to_server()
.get(&config.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteBuilder { id }).await?;
Ok(())
}
}

View File

@@ -1,98 +0,0 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{self, DeleteDeployment},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentConfigDiff,
DeploymentImage, PartialDeploymentConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_build, id_to_server, name_to_deployment},
state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Deployment {
type Config = DeploymentConfig;
type Info = ();
type PartialConfig = PartialDeploymentConfig;
type ConfigDiff = DeploymentConfigDiff;
fn display() -> &'static str {
"deployment"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Deployment(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_deployment()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(write::CreateDeployment {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(write::UpdateDeployment {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace the server id with name
original.server_id = id_to_server()
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
// need to replace the build id with name
if let DeploymentImage::Build { build_id, version } =
&original.image
{
original.image = DeploymentImage::Build {
build_id: id_to_build()
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: *version,
};
}
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteDeployment { id }).await?;
Ok(())
}
}
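
Both the server id and the build id above are stored as ids in the database but exported by name in TOML, so the stored config is normalized to names before diffing. A small stand-in sketch of that substitution (map contents are illustrative, not real ids):

use std::collections::HashMap;

// Illustrative only: the real lookups are id_to_server() / id_to_build().
fn id_to_name(map: &HashMap<String, String>, id: &str) -> String {
    map.get(id).cloned().unwrap_or_default()
}

fn main() {
    let id_to_server: HashMap<String, String> =
        HashMap::from([("abc123".to_string(), "prod-server-1".to_string())]);
    // A config holding a server id is rewritten to the server name before the
    // partial diff against the TOML file is computed.
    assert_eq!(id_to_name(&id_to_server, "abc123"), "prod-server-1");
    // Unknown ids fall back to the empty string, same as unwrap_or_default().
    assert_eq!(id_to_name(&id_to_server, "missing"), "");
}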

View File

@@ -1,9 +0,0 @@
mod alerter;
mod build;
mod builder;
mod deployment;
mod procedure;
mod repo;
mod server;
mod server_template;
mod sync;

View File

@@ -1,275 +0,0 @@
use std::collections::HashMap;
use colored::Colorize;
use monitor_client::{
api::{
execute::Execution,
write::{CreateProcedure, DeleteProcedure, UpdateProcedure},
},
entities::{
procedure::{
PartialProcedureConfig, Procedure, ProcedureConfig,
ProcedureConfigDiff,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::{MaybeNone, PartialDiff};
use crate::{
maps::{
id_to_build, id_to_deployment, id_to_procedure, id_to_repo,
id_to_resource_sync, id_to_server, name_to_procedure,
},
state::monitor_client,
sync::resource::{
run_update_description, run_update_tags, ResourceSync, ToCreate,
ToDelete, ToUpdate, ToUpdateItem,
},
};
impl ResourceSync for Procedure {
type Config = ProcedureConfig;
type Info = ();
type PartialConfig = PartialProcedureConfig;
type ConfigDiff = ProcedureConfigDiff;
fn display() -> &'static str {
"procedure"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Procedure(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_procedure()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateProcedure {
name: resource.name,
config: resource.config,
})
.await
.map(|p| p.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateProcedure {
id,
config: resource.config,
})
.await?;
Ok(())
}
async fn run_updates(
mut to_create: ToCreate<Self::PartialConfig>,
mut to_update: ToUpdate<Self::PartialConfig>,
to_delete: ToDelete,
) {
for name in to_delete {
if let Err(e) = crate::state::monitor_client()
.write(DeleteProcedure { id: name.clone() })
.await
{
warn!("failed to delete procedure {name} | {e:#}",);
} else {
info!(
"{} procedure '{}'",
"deleted".red().bold(),
name.bold(),
);
}
}
if to_update.is_empty() && to_create.is_empty() {
return;
}
for i in 0..10 {
let mut to_pull = Vec::new();
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
} in &to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if *update_description {
run_update_description::<Procedure>(
id.clone(),
&name,
description,
)
.await;
}
if *update_tags {
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
}
if !resource.config.is_none() {
if let Err(e) =
Self::update(id.clone(), resource.clone()).await
{
if i == 9 {
warn!(
"failed to update {} {name} | {e:#}",
Self::display()
);
}
continue;
}
}
info!("{} {name} updated", Self::display());
// have to clone out so to_update is mutable
to_pull.push(id.clone());
}
//
to_update.retain(|resource| !to_pull.contains(&resource.id));
let mut to_pull = Vec::new();
for resource in &to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match Self::create(resource.clone()).await {
Ok(id) => id,
Err(e) => {
if i == 9 {
warn!(
"failed to create {} {name} | {e:#}",
Self::display(),
);
}
continue;
}
};
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
run_update_description::<Procedure>(id, &name, description)
.await;
info!("{} {name} created", Self::display());
to_pull.push(name);
}
to_create.retain(|resource| !to_pull.contains(&resource.name));
if to_update.is_empty() && to_create.is_empty() {
// info!("all procedures synced");
return;
}
}
warn!("procedure sync loop exited after max iterations");
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
for stage in &mut original.stages {
for execution in &mut stage.executions {
match &mut execution.execution {
Execution::None(_) | Execution::Sleep(_) => {}
Execution::RunProcedure(config) => {
config.procedure = id_to_procedure()
.get(&config.procedure)
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::RunBuild(config) => {
config.build = id_to_build()
.get(&config.build)
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::Deploy(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RemoveContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::CloneRepo(config) => {
config.repo = id_to_repo()
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PullRepo(config) => {
config.repo = id_to_repo()
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopAllContainers(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneNetworks(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneImages(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneContainers(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RunSync(config) => {
config.sync = id_to_resource_sync()
.get(&config.sync)
.map(|s| s.name.clone())
.unwrap_or_default();
}
}
}
}
Ok(original.partial_diff(update))
}
async fn delete(_: String) -> anyhow::Result<()> {
unreachable!()
}
}
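
Procedures can reference other procedures (and builds, deployments, repos) by name, so the create/update pass above is retried for up to 10 iterations, dropping whatever succeeded each round and only logging warnings on the final pass. A simplified sketch of that bounded retry, with the apply step as a stand-in closure:

// Keep only the items that still fail; stop early once everything succeeds.
fn sync_with_retry<T>(
    mut pending: Vec<T>,
    mut try_apply: impl FnMut(&T) -> bool,
    max_passes: usize,
) -> Vec<T> {
    for _ in 0..max_passes {
        pending.retain(|item| !try_apply(item));
        if pending.is_empty() {
            break;
        }
    }
    pending // anything left still failed after max_passes
}

fn main() {
    // Stand-in: an item "succeeds" once its dependency count reaches zero.
    let mut remaining_deps = vec![0_u32, 2, 1];
    let pending = vec![0_usize, 1, 2];
    let leftover = sync_with_retry(
        pending,
        |&i| {
            if remaining_deps[i] == 0 {
                true
            } else {
                remaining_deps[i] -= 1;
                false
            }
        },
        10,
    );
    assert!(leftover.is_empty());
}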

View File

@@ -1,84 +0,0 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateRepo, DeleteRepo, UpdateRepo},
entities::{
repo::{
PartialRepoConfig, Repo, RepoConfig, RepoConfigDiff, RepoInfo,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_server, name_to_repo},
state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Repo {
type Config = RepoConfig;
type Info = RepoInfo;
type PartialConfig = PartialRepoConfig;
type ConfigDiff = RepoConfigDiff;
fn display() -> &'static str {
"repo"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Repo(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_repo()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateRepo {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateRepo {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
// Need to replace server id with name
original.server_id = id_to_server()
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteRepo { id }).await?;
Ok(())
}
}

View File

@@ -1,77 +0,0 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateServer, DeleteServer, UpdateServer},
entities::{
resource::Resource,
server::{
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::name_to_server, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Server {
type Config = ServerConfig;
type Info = ();
type PartialConfig = PartialServerConfig;
type ConfigDiff = ServerConfigDiff;
fn display() -> &'static str {
"server"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Server(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_server()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateServer {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateServer {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteServer { id }).await?;
Ok(())
}
}

View File

@@ -1,80 +0,0 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{
CreateServerTemplate, DeleteServerTemplate, UpdateServerTemplate,
},
entities::{
resource::Resource,
server_template::{
PartialServerTemplateConfig, ServerTemplate,
ServerTemplateConfig, ServerTemplateConfigDiff,
},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::name_to_server_template, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for ServerTemplate {
type Config = ServerTemplateConfig;
type Info = ();
type PartialConfig = PartialServerTemplateConfig;
type ConfigDiff = ServerTemplateConfigDiff;
fn display() -> &'static str {
"server template"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::ServerTemplate(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_server_template()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateServerTemplate {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateServerTemplate {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteServerTemplate { id }).await?;
Ok(())
}
}

View File

@@ -1,81 +0,0 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{
CreateResourceSync, DeleteResourceSync, UpdateResourceSync,
},
entities::{
self,
resource::Resource,
sync::{
PartialResourceSyncConfig, ResourceSyncConfig,
ResourceSyncConfigDiff, ResourceSyncInfo,
},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::name_to_resource_sync, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for entities::sync::ResourceSync {
type Config = ResourceSyncConfig;
type Info = ResourceSyncInfo;
type PartialConfig = PartialResourceSyncConfig;
type ConfigDiff = ResourceSyncConfigDiff;
fn display() -> &'static str {
"resource sync"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::ResourceSync(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_resource_sync()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateResourceSync {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateResourceSync {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteResourceSync { id }).await?;
Ok(())
}
}

View File

@@ -1,388 +0,0 @@
use std::cmp::Ordering;
use anyhow::Context;
use colored::Colorize;
use monitor_client::{
api::{
read::ListUserTargetPermissions,
write::{
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
UpdatePermissionOnTarget,
},
},
entities::{
permission::UserTarget,
toml::{PermissionToml, UserGroupToml},
update::ResourceTarget,
},
};
use crate::maps::{
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
id_to_procedure, id_to_repo, id_to_resource_sync, id_to_server,
id_to_server_template, id_to_user, name_to_user_group,
};
pub struct UpdateItem {
user_group: UserGroupToml,
update_users: bool,
update_permissions: bool,
}
pub struct DeleteItem {
id: String,
name: String,
}
pub async fn get_updates(
user_groups: Vec<UserGroupToml>,
delete: bool,
) -> anyhow::Result<(
Vec<UserGroupToml>,
Vec<UpdateItem>,
Vec<DeleteItem>,
)> {
let map = name_to_user_group();
let mut to_create = Vec::<UserGroupToml>::new();
let mut to_update = Vec::<UpdateItem>::new();
let mut to_delete = Vec::<DeleteItem>::new();
if delete {
for user_group in map.values() {
if !user_groups.iter().any(|ug| ug.name == user_group.name) {
to_delete.push(DeleteItem {
id: user_group.id.clone(),
name: user_group.name.clone(),
});
}
}
}
let id_to_user = id_to_user();
for mut user_group in user_groups {
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
println!(
"\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
"CREATE".green(),
user_group.name.bold().green(),
"users".dimmed(),
user_group.users,
"permissions".dimmed(),
user_group.permissions,
);
to_create.push(user_group);
continue;
}
};
let mut original_users = original
.users
.into_iter()
.filter_map(|user_id| {
id_to_user.get(&user_id).map(|u| u.username.clone())
})
.collect::<Vec<_>>();
let mut original_permissions = crate::state::monitor_client()
.read(ListUserTargetPermissions {
user_target: UserTarget::UserGroup(original.id),
})
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
ResourceTarget::System(_) => {}
ResourceTarget::Build(id) => {
*id = id_to_build()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = id_to_builder()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id = id_to_deployment()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = id_to_server()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = id_to_repo()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = id_to_alerter()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id = id_to_procedure()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = id_to_server_template()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = id_to_resource_sync()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
level: p.level,
}
})
.collect::<Vec<_>>();
original_users.sort();
user_group.users.sort();
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_permissions =
user_group.permissions != original_permissions;
// only push update when a diff is detected
if update_users || update_permissions {
println!(
"\n{}: user group: '{}'\n-------------------",
"UPDATE".blue(),
user_group.name.bold(),
);
let mut lines = Vec::<String>::new();
if update_users {
let adding = user_group
.users
.iter()
.filter(|user| !original_users.contains(user))
.map(|user| user.as_str())
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None").into()
} else {
adding.join(", ").green()
};
let removing = original_users
.iter()
.filter(|user| !user_group.users.contains(user))
.map(|user| user.as_str())
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None").into()
} else {
removing.join(", ").red()
};
lines.push(format!(
"{}: 'users'\n{}: {removing}\n{}: {adding}",
"field".dimmed(),
"removing".dimmed(),
"adding".dimmed(),
))
}
if update_permissions {
let adding = user_group
.permissions
.iter()
.filter(|permission| {
!original_permissions.contains(permission)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None").into()
} else {
adding.join(", ").green()
};
let removing = original_permissions
.iter()
.filter(|permission| {
!user_group.permissions.contains(permission)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None").into()
} else {
removing.join(", ").red()
};
lines.push(format!(
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
"field".dimmed(),
"removing".dimmed(),
"adding".dimmed()
))
}
println!("{}", lines.join("\n-------------------\n"));
to_update.push(UpdateItem {
user_group,
update_users,
update_permissions,
});
}
}
for d in &to_delete {
println!(
"\n{}: user group: '{}'\n-------------------",
"DELETE".red(),
d.name.bold(),
);
}
Ok((to_create, to_update, to_delete))
}
/// Order permissions in a deterministic way
fn sort_permissions(
a: &PermissionToml,
b: &PermissionToml,
) -> Ordering {
let (a_t, a_id) = a.target.extract_variant_id();
let (b_t, b_id) = b.target.extract_variant_id();
match (a_t.cmp(&b_t), a_id.cmp(b_id)) {
(Ordering::Greater, _) => Ordering::Greater,
(Ordering::Less, _) => Ordering::Less,
(_, Ordering::Greater) => Ordering::Greater,
(_, Ordering::Less) => Ordering::Less,
_ => Ordering::Equal,
}
}
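
The comparator above amounts to comparing the target variant first and then the id, which is enough to make the two sorted permission lists comparable with a plain !=. A stand-in sketch of the same ordering (DemoPermission is illustrative, not the real PermissionToml):

use std::cmp::Ordering;

#[derive(Debug)]
struct DemoPermission {
    variant: &'static str,
    id: String,
}

fn sort_demo(a: &DemoPermission, b: &DemoPermission) -> Ordering {
    // Same effect as the match above: variant first, then id.
    a.variant.cmp(b.variant).then_with(|| a.id.cmp(&b.id))
}

fn main() {
    let mut perms = vec![
        DemoPermission { variant: "Server", id: "beta".into() },
        DemoPermission { variant: "Build", id: "beta".into() },
        DemoPermission { variant: "Build", id: "alpha".into() },
    ];
    perms.sort_by(sort_demo);
    // Build/alpha, Build/beta, Server/beta, regardless of input order.
    println!("{perms:?}");
}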
pub async fn run_updates(
to_create: Vec<UserGroupToml>,
to_update: Vec<UpdateItem>,
to_delete: Vec<DeleteItem>,
) {
// Create the non-existent user groups
for user_group in to_create {
// Create the user group
if let Err(e) = crate::state::monitor_client()
.write(CreateUserGroup {
name: user_group.name.clone(),
})
.await
{
warn!(
"failed to create user group {} | {e:#}",
user_group.name
);
continue;
} else {
info!(
"{} user group '{}'",
"created".green().bold(),
user_group.name.bold(),
);
};
set_users(user_group.name.clone(), user_group.users).await;
run_update_permissions(user_group.name, user_group.permissions)
.await;
}
// Update the existing user groups
for UpdateItem {
user_group,
update_users,
update_permissions,
} in to_update
{
if update_users {
set_users(user_group.name.clone(), user_group.users).await;
}
if update_permissions {
run_update_permissions(user_group.name, user_group.permissions)
.await;
}
}
for user_group in to_delete {
if let Err(e) = crate::state::monitor_client()
.write(DeleteUserGroup { id: user_group.id })
.await
{
warn!(
"failed to delete user group {} | {e:#}",
user_group.name
);
} else {
info!(
"{} user group '{}'",
"deleted".red().bold(),
user_group.name.bold(),
);
}
}
}
async fn set_users(user_group: String, users: Vec<String>) {
if let Err(e) = crate::state::monitor_client()
.write(SetUsersInUserGroup {
user_group: user_group.clone(),
users,
})
.await
{
warn!("failed to set users in group {user_group} | {e:#}");
} else {
info!(
"{} user group '{}' users",
"updated".blue().bold(),
user_group.bold(),
);
}
}
async fn run_update_permissions(
user_group: String,
permissions: Vec<PermissionToml>,
) {
for PermissionToml { target, level } in permissions {
if let Err(e) = crate::state::monitor_client()
.write(UpdatePermissionOnTarget {
user_target: UserTarget::UserGroup(user_group.clone()),
resource_target: target.clone(),
permission: level,
})
.await
{
warn!(
"failed to set permssion in group {user_group} | target: {target:?} | {e:#}",
);
} else {
info!(
"{} user group '{}' permissions",
"updated".blue().bold(),
user_group.bold(),
);
}
}
}
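
The adding/removing halves of the UPDATE output above are two set differences: names present in the TOML but not on the server are "adding", and names on the server but not in the TOML are "removing". A stand-in sketch of that computation:

fn additions_and_removals<'a>(
    desired: &'a [String],
    original: &'a [String],
) -> (Vec<&'a str>, Vec<&'a str>) {
    let adding = desired
        .iter()
        .filter(|u| !original.contains(u))
        .map(String::as_str)
        .collect();
    let removing = original
        .iter()
        .filter(|u| !desired.contains(u))
        .map(String::as_str)
        .collect();
    (adding, removing)
}

fn main() {
    let desired = vec!["alice".to_string(), "bob".to_string()];
    let original = vec!["bob".to_string(), "carol".to_string()];
    let (adding, removing) = additions_and_removals(&desired, &original);
    assert_eq!(adding, ["alice"]);
    assert_eq!(removing, ["carol"]);
}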

View File

@@ -1,206 +0,0 @@
use colored::Colorize;
use monitor_client::{
api::write::{
CreateVariable, DeleteVariable, UpdateVariableDescription,
UpdateVariableValue,
},
entities::variable::Variable,
};
use crate::{maps::name_to_variable, state::monitor_client};
pub struct ToUpdateItem {
pub variable: Variable,
pub update_value: bool,
pub update_description: bool,
}
pub fn get_updates(
variables: Vec<Variable>,
delete: bool,
) -> anyhow::Result<(Vec<Variable>, Vec<ToUpdateItem>, Vec<String>)> {
let map = name_to_variable();
let mut to_create = Vec::<Variable>::new();
let mut to_update = Vec::<ToUpdateItem>::new();
let mut to_delete = Vec::<String>::new();
if delete {
for variable in map.values() {
if !variables.iter().any(|v| v.name == variable.name) {
to_delete.push(variable.name.clone());
}
}
}
for variable in variables {
match map.get(&variable.name) {
Some(original) => {
let item = ToUpdateItem {
update_value: original.value != variable.value,
update_description: original.description
!= variable.description,
variable,
};
if !item.update_value && !item.update_description {
continue;
}
println!(
"\n{}: variable: '{}'\n-------------------",
"UPDATE".blue(),
item.variable.name.bold(),
);
let mut lines = Vec::<String>::new();
if item.update_value {
lines.push(format!(
"{}: 'value'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
original.value.red(),
"to".dimmed(),
item.variable.value.green()
))
}
if item.update_description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
original.description.red(),
"to".dimmed(),
item.variable.description.green()
))
}
println!("{}", lines.join("\n-------------------\n"));
to_update.push(item);
}
None => {
if variable.description.is_empty() {
println!(
"\n{}: variable: {}\n{}: {}",
"CREATE".green(),
variable.name.bold().green(),
"value".dimmed(),
variable.value,
);
} else {
println!(
"\n{}: variable: {}\n{}: {}\n{}: {}",
"CREATE".green(),
variable.name.bold().green(),
"description".dimmed(),
variable.description,
"value".dimmed(),
variable.value,
);
}
to_create.push(variable)
}
}
}
for name in &to_delete {
println!(
"\n{}: variable: '{}'\n-------------------",
"DELETE".red(),
name.bold(),
);
}
Ok((to_create, to_update, to_delete))
}
pub async fn run_updates(
to_create: Vec<Variable>,
to_update: Vec<ToUpdateItem>,
to_delete: Vec<String>,
) {
for variable in to_create {
if let Err(e) = monitor_client()
.write(CreateVariable {
name: variable.name.clone(),
value: variable.value,
description: variable.description,
})
.await
{
warn!("failed to create variable {} | {e:#}", variable.name);
} else {
info!(
"{} variable '{}'",
"created".green().bold(),
variable.name.bold(),
);
};
}
for ToUpdateItem {
variable,
update_value,
update_description,
} in to_update
{
if update_value {
if let Err(e) = monitor_client()
.write(UpdateVariableValue {
name: variable.name.clone(),
value: variable.value,
})
.await
{
warn!(
"failed to update variable value for {} | {e:#}",
variable.name
);
} else {
info!(
"{} variable '{}' value",
"updated".blue().bold(),
variable.name.bold(),
);
};
}
if update_description {
if let Err(e) = monitor_client()
.write(UpdateVariableDescription {
name: variable.name.clone(),
description: variable.description,
})
.await
{
warn!(
"failed to update variable description for {} | {e:#}",
variable.name
);
} else {
info!(
"{} variable '{}' description",
"updated".blue().bold(),
variable.name.bold(),
);
};
}
}
for variable in to_delete {
if let Err(e) = crate::state::monitor_client()
.write(DeleteVariable {
name: variable.clone(),
})
.await
{
warn!("failed to delete variable {variable} | {e:#}",);
} else {
info!(
"{} variable '{}'",
"deleted".red().bold(),
variable.bold(),
);
}
}
}
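
get_updates above is a three-way partition: variables missing on the server are created, variables whose value or description changed are updated, and (when delete is enabled) server variables absent from the file are deleted. A simplified, self-contained sketch of that partition, without the colored diff printing:

use std::collections::HashMap;

// Stand-in with only the fields the diff logic needs.
#[derive(Clone, Debug, PartialEq)]
struct DemoVariable {
    name: String,
    value: String,
    description: String,
}

fn partition(
    desired: Vec<DemoVariable>,
    existing: &HashMap<String, DemoVariable>,
    delete: bool,
) -> (Vec<DemoVariable>, Vec<DemoVariable>, Vec<String>) {
    let mut to_create = Vec::new();
    let mut to_update = Vec::new();
    let mut to_delete = Vec::new();
    if delete {
        for name in existing.keys() {
            if !desired.iter().any(|v| &v.name == name) {
                to_delete.push(name.clone());
            }
        }
    }
    for variable in desired {
        match existing.get(&variable.name) {
            // unchanged -> nothing to do
            Some(original) if *original == variable => {}
            // value or description differs -> update
            Some(_) => to_update.push(variable),
            // not on the server yet -> create
            None => to_create.push(variable),
        }
    }
    (to_create, to_update, to_delete)
}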

View File

@@ -44,18 +44,22 @@ tokio-util.workspace = true
axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
typeshare.workspace = true
octorust.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
nom_pem.workspace = true
anyhow.workspace = true
dotenv.workspace = true
dotenvy.workspace = true
bcrypt.workspace = true
base64.workspace = true
tokio.workspace = true
tower.workspace = true
serde.workspace = true
strum.workspace = true
regex.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -1,11 +1,11 @@
# Build Core
FROM rust:1.79.0-bookworm as core-builder
FROM rust:1.80.1-bookworm AS core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_core --release
# Build Frontend
FROM node:20.12-alpine as frontend-builder
FROM node:20.12-alpine AS frontend-builder
WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
@@ -17,6 +17,8 @@ FROM debian:bookworm-slim
# Install Deps
RUN apt update && apt install -y git curl unzip ca-certificates && \
curl -SL https://github.com/docker/compose/releases/download/v2.29.1/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose && \
chmod +x /usr/local/bin/docker-compose && \
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install

View File

@@ -1,23 +1,20 @@
use std::{collections::HashSet, time::Duration};
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::{format_serror, muted};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::execute::{
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
},
api::execute::{CancelBuild, Deploy, RunBuild},
entities::{
alert::{Alert, AlertData},
all_logs_success,
build::{Build, CloudRegistryConfig, ImageRegistry},
builder::{AwsBuilderConfig, Builder, BuilderConfig},
build::{Build, ImageRegistry, StandardRegistryConfig},
builder::{Builder, BuilderConfig},
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
deployment::DeploymentState,
monitor_timestamp,
permission::PermissionLevel,
server::{stats::SeverityLevel, Server},
server_template::aws::AwsServerTemplateConfig,
server::stats::SeverityLevel,
to_monitor_name,
update::{Log, Update},
user::{auto_redeploy_user, User},
@@ -31,30 +28,20 @@ use mungos::{
options::FindOneOptions,
},
};
use periphery_client::{
api::{self, GetVersionResponse},
PeripheryClient,
};
use periphery_client::api::{self, git::RepoActionResponseV1_13};
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
use crate::{
cloud::{
aws::{
ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
ecr,
},
BuildCleanupData,
},
cloud::aws::ecr,
config::core_config,
helpers::{
alert::send_alerts,
builder::{cleanup_builder_instance, get_builder_periphery},
channel::build_cancel_channel,
periphery_client,
git_token,
query::{get_deployment_state, get_global_variables},
registry_token,
update::update_update,
},
resource::{self, refresh_build_state_cache},
@@ -79,8 +66,9 @@ impl Resolve<RunBuild, (User, Update)> for State {
)
.await?;
let (registry_token, aws_ecr) =
validate_account_extract_registry_token_aws_ecr(&build).await?;
if build.config.builder_id.is_empty() {
return Err(anyhow!("Must attach builder to RunBuild"));
}
// get the action state for the build (or insert default).
let action_state =
@@ -95,12 +83,31 @@ impl Resolve<RunBuild, (User, Update)> for State {
update.version = build.config.version;
update_update(update.clone()).await?;
let git_token = git_token(
&build.config.git_provider,
&build.config.git_account,
|https| build.config.git_https = https,
)
.await
.with_context(
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", build.config.git_provider, build.config.git_account),
)?;
let (registry_token, aws_ecr) =
validate_account_extract_registry_token_aws_ecr(&build).await?;
let cancel = CancellationToken::new();
let cancel_clone = cancel.clone();
let mut cancel_recv =
build_cancel_channel().receiver.resubscribe();
let build_id = build.id.clone();
let builder =
resource::get::<Builder>(&build.config.builder_id).await?;
let is_server_builder =
matches!(&builder.config, BuilderConfig::Server(_));
tokio::spawn(async move {
let poll = async {
loop {
@@ -109,16 +116,19 @@ impl Resolve<RunBuild, (User, Update)> for State {
id = cancel_recv.recv() => id?
};
if incoming_build_id == build_id {
update.push_simple_log(
"cancel acknowledged",
"the build cancellation has been queued, it may still take some time",
);
if is_server_builder {
update.push_error_log("Cancel acknowledged", "Build cancellation is not possible on server builders at this time. Use an AWS builder to enable this feature.");
} else {
update.push_simple_log("Cancel acknowledged", "The build cancellation has been queued, it may still take some time.");
}
update.finalize();
let id = update.id.clone();
if let Err(e) = update_update(update).await {
warn!("failed to update Update {id} | {e:#}");
warn!("failed to modify Update {id} on db | {e:#}");
}
if !is_server_builder {
cancel_clone.cancel();
}
cancel_clone.cancel();
return Ok(());
}
}
@@ -133,53 +143,60 @@ impl Resolve<RunBuild, (User, Update)> for State {
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) =
match get_build_builder(&build, &mut update).await {
Ok(builder) => {
info!("got builder for build");
builder
}
Err(e) => {
warn!("failed to get builder | {e:#}");
update.logs.push(Log::error(
"get builder",
format_serror(&e.context("failed to get builder").into()),
));
return handle_early_return(update, build.id, build.name)
.await;
}
};
let core_config = core_config();
let variables = get_global_variables().await?;
let (periphery, cleanup_data) = match get_builder_periphery(
build.name.clone(),
Some(build.config.version),
builder,
&mut update,
)
.await
{
Ok(builder) => builder,
Err(e) => {
warn!(
"failed to get builder for build {} | {e:#}",
build.name
);
update.logs.push(Log::error(
"get builder",
format_serror(&e.context("failed to get builder").into()),
));
return handle_early_return(
update, build.id, build.name, false,
)
.await;
}
};
// CLONE REPO
let github_token = core_config
.github_accounts
.get(&build.config.github_account)
.cloned();
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
args: (&build).into(),
github_token,
git_token,
environment: Default::default(),
env_file_path: Default::default(),
skip_secret_interp: Default::default(),
}) => res,
_ = cancel.cancelled() => {
info!("build cancelled during clone, cleaning up builder");
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update, build.id, build.name).await
return handle_early_return(update, build.id, build.name, true).await
},
};
match res {
Ok(clone_logs) => {
info!("finished repo clone");
update.logs.extend(clone_logs);
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
let res: RepoActionResponseV1_13 = res.into();
update.logs.extend(res.logs);
update.commit_hash =
res.commit_hash.unwrap_or_default().to_string();
res.commit_message.unwrap_or_default()
}
Err(e) => {
warn!("failed build at clone repo | {e:#}");
@@ -187,58 +204,103 @@ impl Resolve<RunBuild, (User, Update)> for State {
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
Default::default()
}
}
};
update_update(update.clone()).await?;
if all_logs_success(&update.logs) {
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
for arg in &mut build.config.build_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers.extend(more_replacers);
arg.value = res;
}
let secret_replacers = if !build.config.skip_secret_interp {
let core_config = core_config();
let variables = get_global_variables().await?;
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut secret_replacers_for_log = HashSet::new();
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
"interpolate global variables",
global_replacers
.into_iter()
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
if !secret_replacers.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
// Interpolate into build args
for arg in &mut build.config.build_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers
.iter()
.map(|(_, variable)| variable.clone()),
);
secret_replacers.extend(more_replacers);
arg.value = res;
}
// Interpolate into secret args
for arg in &mut build.config.secret_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.into_iter().map(|(_, variable)| variable),
);
// Secret args don't need to be in replacers sent to periphery.
// The secret args don't end up in the command like build args do.
arg.value = res;
}
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
"interpolate global variables",
global_replacers
.into_iter()
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
if !secret_replacers_for_log.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers_for_log
.into_iter()
.map(|variable| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
secret_replacers
} else {
Default::default()
};
let res = tokio::select! {
res = periphery
@@ -247,19 +309,25 @@ impl Resolve<RunBuild, (User, Update)> for State {
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
// Push a commit hash tagged image
additional_tags: if update.commit_hash.is_empty() {
Default::default()
} else {
vec![update.commit_hash.clone()]
},
}) => res.context("failed at call to periphery to build"),
_ = cancel.cancelled() => {
info!("build cancelled during build, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update, build.id, build.name).await
return handle_early_return(update, build.id, build.name, true).await
},
};
match res {
Ok(logs) => {
info!("finished build");
debug!("finished build");
update.logs.extend(logs);
}
Err(e) => {
@@ -281,14 +349,13 @@ impl Resolve<RunBuild, (User, Update)> for State {
.builds
.update_one(
doc! { "name": &build.name },
doc! {
"$set": {
"config.version": to_bson(&build.config.version)
.context("failed at converting version to bson")?,
"info.last_built_at": monitor_timestamp(),
}
},
None,
doc! { "$set": {
"config.version": to_bson(&build.config.version)
.context("failed at converting version to bson")?,
"info.last_built_at": monitor_timestamp(),
"info.built_hash": &update.commit_hash,
"info.built_message": commit_message
}},
)
.await;
}
@@ -320,12 +387,11 @@ impl Resolve<RunBuild, (User, Update)> for State {
// don't hold response up for user
tokio::spawn(async move {
handle_post_build_redeploy(&build.id).await;
info!("post build redeploy handled");
});
} else {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
let err = update.logs.iter().find(|l| !l.success).cloned();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
@@ -337,7 +403,6 @@ impl Resolve<RunBuild, (User, Update)> for State {
data: AlertData::BuildFailed {
id: build.id,
name: build.name,
err,
version,
},
};
@@ -354,6 +419,7 @@ async fn handle_early_return(
mut update: Update,
build_id: String,
build_name: String,
is_cancel: bool,
) -> anyhow::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
@@ -371,10 +437,10 @@ async fn handle_early_return(
refresh_build_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success {
if !update.success && !is_cancel {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
let err = update.logs.iter().find(|l| !l.success).cloned();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
@@ -387,7 +453,6 @@ async fn handle_early_return(
id: build_id,
name: build_name,
version,
err,
},
};
send_alerts(&[alert]).await
@@ -406,24 +471,28 @@ pub async fn validate_cancel_build(
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates.find_one(
doc! {
db.updates
.find_one(doc! {
"operation": "RunBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
),
db.updates.find_one(
doc! {
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future(),
db.updates
.find_one(doc! {
"operation": "CancelBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
)
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future()
)?;
match (latest_build, latest_cancel) {
@@ -445,7 +514,7 @@ impl Resolve<CancelBuild, (User, Update)> for State {
&self,
CancelBuild { build }: CancelBuild,
(user, mut update): (User, Update),
) -> anyhow::Result<CancelBuildResponse> {
) -> anyhow::Result<Update> {
let build = resource::get_check_permissions::<Build>(
&build,
&user,
@@ -470,16 +539,15 @@ impl Resolve<CancelBuild, (User, Update)> for State {
);
update_update(update.clone()).await?;
let update_id = update.id.clone();
build_cancel_channel()
.sender
.lock()
.await
.send((build.id, update))?;
.send((build.id, update.clone()))?;
// Make sure cancel is set to complete after some time in case
// no receiver is there to do it. Prevents update stuck in InProgress.
let update_id = update.id.clone();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(60)).await;
if let Err(e) = update_one_by_id(
@@ -490,160 +558,11 @@ impl Resolve<CancelBuild, (User, Update)> for State {
)
.await
{
warn!("failed to set BuildCancel Update status Complete after timeout | {e:#}")
warn!("failed to set CancelBuild Update status Complete after timeout | {e:#}")
}
});
Ok(CancelBuildResponse {})
}
}
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
async fn get_build_builder(
build: &Build,
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
if build.config.builder_id.is_empty() {
return Err(anyhow!("build has not configured a builder"));
}
let builder =
resource::get::<Builder>(&build.config.builder_id).await?;
match builder.config {
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
return Err(anyhow!("builder has not configured a server"));
}
let server = resource::get::<Server>(&config.server_id).await?;
let periphery = periphery_client(&server)?;
Ok((
periphery,
BuildCleanupData::Server {
repo_name: build.name.clone(),
},
))
}
BuilderConfig::Aws(config) => {
get_aws_builder(build, config, update).await
}
}
}
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
async fn get_aws_builder(
build: &Build,
config: AwsBuilderConfig,
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
let start_create_ts = monitor_timestamp();
let instance_name =
format!("BUILDER-{}-v{}", build.name, build.config.version);
let Ec2Instance { instance_id, ip } = launch_ec2_instance(
&instance_name,
AwsServerTemplateConfig::from_builder_config(&config),
)
.await?;
info!("ec2 instance launched");
let log = Log {
stage: "start build instance".to_string(),
success: true,
stdout: start_aws_builder_log(&instance_id, &ip, &config),
start_ts: start_create_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
update.logs.push(log);
update_update(update.clone()).await?;
let periphery_address = format!("http://{ip}:{}", config.port);
let periphery =
PeripheryClient::new(&periphery_address, &core_config().passkey);
let start_connect_ts = monitor_timestamp();
let mut res = Ok(GetVersionResponse {
version: String::new(),
});
for _ in 0..BUILDER_POLL_MAX_TRIES {
let version = periphery
.request(api::GetVersion {})
.await
.context("failed to reach periphery client on builder");
if let Ok(GetVersionResponse { version }) = &version {
let connect_log = Log {
stage: "build instance connected".to_string(),
success: true,
stdout: format!(
"established contact with periphery on builder\nperiphery version: v{}",
version
),
start_ts: start_connect_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
update.logs.push(connect_log);
update_update(update.clone()).await?;
return Ok((
periphery,
BuildCleanupData::Aws {
instance_id,
region: config.region,
},
));
}
res = version;
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
.await;
}
// Spawn terminate task in failure case (if loop is passed without return)
tokio::spawn(async move {
let _ =
terminate_ec2_instance_with_retry(config.region, &instance_id)
.await;
});
// Unwrap is safe: the only way to get here is past the Ok check / early return, so it must be an Err
Err(
res.err().unwrap().context(
"failed to start usable builder. terminating instance.",
),
)
}
#[instrument(skip(periphery, update))]
async fn cleanup_builder_instance(
periphery: PeripheryClient,
cleanup_data: BuildCleanupData,
update: &mut Update,
) {
match cleanup_data {
BuildCleanupData::Server { repo_name } => {
let _ = periphery
.request(api::git::DeleteRepo { name: repo_name })
.await;
}
BuildCleanupData::Aws {
instance_id,
region,
} => {
let _instance_id = instance_id.clone();
tokio::spawn(async move {
let _ =
terminate_ec2_instance_with_retry(region, &_instance_id)
.await;
});
update.push_simple_log(
"terminate instance",
format!("termination queued for instance id {instance_id}"),
);
}
Ok(update)
}
}
@@ -705,66 +624,22 @@ async fn handle_post_build_redeploy(build_id: &str) {
}
}
fn start_aws_builder_log(
instance_id: &str,
ip: &str,
config: &AwsBuilderConfig,
) -> String {
let AwsBuilderConfig {
ami_id,
instance_type,
volume_gb,
subnet_id,
assign_public_ip,
security_group_ids,
use_public_ip,
..
} = config;
let readable_sec_group_ids = security_group_ids.join(", ");
[
format!("{}: {instance_id}", muted("instance id")),
format!("{}: {ip}", muted("ip")),
format!("{}: {ami_id}", muted("ami id")),
format!("{}: {instance_type}", muted("instance type")),
format!("{}: {volume_gb} GB", muted("volume size")),
format!("{}: {subnet_id}", muted("subnet id")),
format!("{}: {readable_sec_group_ids}", muted("security groups")),
format!("{}: {assign_public_ip}", muted("assign public ip")),
format!("{}: {use_public_ip}", muted("use public ip")),
]
.join("\n")
}
/// This will make sure that a build with non-none image registry has an account attached,
/// and will check the core config for a token / aws ecr config matching requirements.
/// Otherwise it is left to periphery.
async fn validate_account_extract_registry_token_aws_ecr(
build: &Build,
) -> anyhow::Result<(Option<String>, Option<AwsEcrConfig>)> {
match &build.config.image_registry {
ImageRegistry::None(_) => Ok((None, None)),
ImageRegistry::DockerHub(CloudRegistryConfig {
account, ..
}) => {
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use DockerHub image registry"
));
}
Ok((core_config().docker_accounts.get(account).cloned(), None))
}
ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use GithubContainerRegistry"
));
}
Ok((core_config().github_accounts.get(account).cloned(), None))
}
let (domain, account) = match &build.config.image_registry {
// Early return for None
ImageRegistry::None(_) => return Ok((None, None)),
// Early return for AwsEcr
ImageRegistry::AwsEcr(label) => {
let config = core_config().aws_ecr_registries.get(label);
// Note that aws ecr config still only lives in config file
let config = core_config()
.aws_ecr_registries
.iter()
.find(|reg| &reg.label == label);
let token = match config {
Some(AwsEcrConfigWithCredentials {
region,
@@ -791,10 +666,24 @@ async fn validate_account_extract_registry_token_aws_ecr(
}
None => None,
};
Ok((token, config.map(AwsEcrConfig::from)))
}
ImageRegistry::Custom(_) => {
Err(anyhow!("Custom image registry is not implemented"))
return Ok((token, config.map(AwsEcrConfig::from)));
}
ImageRegistry::Standard(StandardRegistryConfig {
domain,
account,
..
}) => (domain.as_str(), account),
};
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use registry provider {domain}"
));
}
let registry_token = registry_token(domain, account).await.with_context(
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
)?;
Ok((registry_token, None))
}
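
The build-arg and secret-arg handling above makes two interpolation passes per value, first against global variables and then against core secrets, and records which secret names were used so they can be surfaced in the update log without leaking values. This sketch mirrors that flow with plain string replacement as a stand-in for the real svi crate and its DoubleBrackets interpolator:

use std::collections::{HashMap, HashSet};

fn interpolate(
    value: &str,
    variables: &HashMap<String, String>,
    secrets: &HashMap<String, String>,
) -> (String, HashSet<String>) {
    let mut out = value.to_string();
    // first pass - global variables
    for (name, val) in variables {
        out = out.replace(&format!("[[{name}]]"), val);
    }
    // second pass - core secrets, recording which ones were replaced so the
    // names (never the values) can be shown in the log.
    let mut replaced_secrets = HashSet::new();
    for (name, val) in secrets {
        let token = format!("[[{name}]]");
        if out.contains(&token) {
            replaced_secrets.insert(name.clone());
            out = out.replace(&token, val);
        }
    }
    (out, replaced_secrets)
}

fn main() {
    let variables = HashMap::from([("REGION".to_string(), "us-east-1".to_string())]);
    let secrets = HashMap::from([("NPM_TOKEN".to_string(), "abc123".to_string())]);
    let (value, used) =
        interpolate("--region [[REGION]] --token [[NPM_TOKEN]]", &variables, &secrets);
    assert_eq!(value, "--region us-east-1 --token abc123");
    assert!(used.contains("NPM_TOKEN"));
}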

View File

@@ -1,23 +1,21 @@
use std::collections::HashSet;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::execute::*,
entities::{
build::{Build, ImageRegistry},
config::core::AwsEcrConfig,
deployment::{Deployment, DeploymentImage},
deployment::{
extract_registry_domain, Deployment, DeploymentImage,
},
get_image_name,
permission::PermissionLevel,
server::ServerState,
server::{Server, ServerState},
update::{Log, Update},
user::User,
Version,
},
};
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
@@ -25,16 +23,40 @@ use crate::{
cloud::aws::ecr,
config::core_config,
helpers::{
periphery_client,
query::{get_global_variables, get_server_with_status},
interpolate_variables_secrets_into_environment, periphery_client,
query::get_server_with_status, registry_token,
update::update_update,
},
monitor::update_cache_for_server,
resource,
state::{action_states, db_client, State},
state::{action_states, State},
};
use crate::helpers::update::init_execution_update;
async fn setup_deployment_execution(
deployment: &str,
user: &User,
) -> anyhow::Result<(Deployment, Server)> {
let deployment = resource::get_check_permissions::<Deployment>(
deployment,
user,
PermissionLevel::Execute,
)
.await?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
Ok((deployment, server))
}
impl Resolve<Deploy, (User, Update)> for State {
#[instrument(name = "Deploy", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
@@ -47,17 +69,8 @@ impl Resolve<Deploy, (User, Update)> for State {
}: Deploy,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let mut deployment =
resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (mut deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -70,145 +83,117 @@ impl Resolve<Deploy, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.deploying = true)?;
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
// This block gets the version of the image to deploy in the Build case.
// It also gets the name of the image from the build and attaches it directly.
let version = match deployment.config.image {
let (version, registry_token, aws_ecr) = match &deployment
.config
.image
{
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(&build_id).await?;
let build = resource::get::<Build>(build_id).await?;
let image_name = get_image_name(&build, |label| {
core_config()
.aws_ecr_registries
.get(label)
.iter()
.find(|reg| &reg.label == label)
.map(AwsEcrConfig::from)
})
.context("failed to create image name")?;
let version = if version.is_none() {
build.config.version
} else {
version
*version
};
// Remove ending patch if it is 0, this means use latest patch.
let version_str = if version.patch == 0 {
format!("{}.{}", version.major, version.minor)
} else {
version.to_string()
};
// Potentially add the build image_tag postfix
let version_str = if build.config.image_tag.is_empty() {
version_str
} else {
format!("{version_str}-{}", build.config.image_tag)
};
// replace image with corresponding build image.
deployment.config.image = DeploymentImage::Image {
image: format!("{image_name}:{version}"),
image: format!("{image_name}:{version_str}"),
};
// set image registry to match build docker account if it's not overridden by deployment
if matches!(
&deployment.config.image_registry,
ImageRegistry::None(_)
) {
deployment.config.image_registry =
build.config.image_registry;
}
version
}
DeploymentImage::Image { .. } => Version::default(),
};
let variables = get_global_variables().await?;
let core_config = core_config();
// Interpolate variables into environment
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
for env in &mut deployment.config.environment {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&env.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers.extend(more_replacers);
// set env value with the result
env.value = res;
}
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
"interpolate global variables",
global_replacers
.into_iter()
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
if !secret_replacers.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
update.version = version;
update_update(update.clone()).await?;
let (registry_token, aws_ecr) = match &deployment
.config
.image_registry
{
ImageRegistry::None(_) => (None, None),
ImageRegistry::DockerHub(params) => (
core_config.docker_accounts.get(&params.account).cloned(),
None,
),
ImageRegistry::Ghcr(params) => (
core_config.github_accounts.get(&params.account).cloned(),
None,
),
ImageRegistry::AwsEcr(label) => {
let config = core_config
.aws_ecr_registries
.get(label)
.with_context(|| {
format!(
"did not find config for aws ecr registry {label}"
)
})?;
(
Some(
ecr::get_ecr_token(
match build.config.image_registry {
ImageRegistry::None(_) => (version, None, None),
ImageRegistry::AwsEcr(label) => {
let config = core_config()
.aws_ecr_registries
.iter()
.find(|reg| reg.label == label)
.with_context(|| {
format!(
"did not find config for aws ecr registry {label}"
)
})?;
let token = ecr::get_ecr_token(
&config.region,
&config.access_key_id,
&config.secret_access_key,
)
.await
.context("failed to create aws ecr login token")?,
),
Some(AwsEcrConfig::from(config)),
)
.context("failed to create aws ecr login token")?;
(version, Some(token), Some(AwsEcrConfig::from(config)))
}
ImageRegistry::Standard(params) => {
if deployment.config.image_registry_account.is_empty() {
deployment.config.image_registry_account =
params.account
}
let token = if !deployment
.config
.image_registry_account
.is_empty()
{
registry_token(&params.domain, &deployment.config.image_registry_account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {}", params.domain, deployment.config.image_registry_account),
)?
} else {
None
};
(version, token, None)
}
}
}
ImageRegistry::Custom(_) => {
return Err(anyhow!("Custom ImageRegistry not yet supported"))
DeploymentImage::Image { image } => {
let domain = extract_registry_domain(image)?;
let token = if !deployment
.config
.image_registry_account
.is_empty()
{
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
)?
} else {
None
};
(Version::default(), token, None)
}
};
let secret_replacers = if !deployment.config.skip_secret_interp {
interpolate_variables_secrets_into_environment(
&mut deployment.config.environment,
&mut update,
)
.await?
} else {
Default::default()
};
update.version = version;
update_update(update.clone()).await?;
match periphery
.request(api::container::Deploy {
deployment,
@@ -247,12 +232,8 @@ impl Resolve<StartContainer, (User, Update)> for State {
StartContainer { deployment }: StartContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -265,17 +246,8 @@ impl Resolve<StartContainer, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.starting = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
    // Send update after setting action state; this way the frontend gets the correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
@@ -301,6 +273,154 @@ impl Resolve<StartContainer, (User, Update)> for State {
}
}
impl Resolve<RestartContainer, (User, Update)> for State {
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RestartContainer { deployment }: RestartContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.restarting = true)?;
    // Send update after setting action state; this way the frontend gets the correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::RestartContainer {
name: deployment.name.clone(),
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"restart container",
format_serror(
&e.context("failed to restart container").into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PauseContainer, (User, Update)> for State {
#[instrument(name = "PauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PauseContainer { deployment }: PauseContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pausing = true)?;
    // Send update after setting action state; this way the frontend gets the correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::PauseContainer {
name: deployment.name.clone(),
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"pause container",
format_serror(&e.context("failed to pause container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<UnpauseContainer, (User, Update)> for State {
#[instrument(name = "UnpauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
UnpauseContainer { deployment }: UnpauseContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.unpausing = true)?;
    // Send update after setting action state; this way the frontend gets the correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::UnpauseContainer {
name: deployment.name.clone(),
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"unpause container",
format_serror(
&e.context("failed to unpause container").into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
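// The Start / Restart / Pause / Unpause handlers above all follow the same shape
// (summary comment, not part of this diff):
//   1. setup_deployment_execution resolves the deployment + healthy server with an Execute permission check
//   2. the deployment action-state guard is taken (errors if the deployment is already busy)
//   3. update_update is sent so the frontend immediately shows the in-progress state
//   4. the matching periphery api::container request runs; an Err becomes an error Log instead of failing the Update
//   5. update_cache_for_server refreshes state, then the Update is finalized, persisted, and returned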
impl Resolve<StopContainer, (User, Update)> for State {
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -312,12 +432,8 @@ impl Resolve<StopContainer, (User, Update)> for State {
}: StopContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -330,17 +446,8 @@ impl Resolve<StopContainer, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.stopping = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
    // Send update after setting action state; this way the frontend gets the correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
@@ -372,94 +479,6 @@ impl Resolve<StopContainer, (User, Update)> for State {
}
}
impl Resolve<StopAllContainers, (User, Update)> for State {
#[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopAllContainers { server }: StopAllContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (server, status) = get_server_with_status(&server).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.stopping_containers = true)?;
let deployments = find_collect(
&db_client().await.deployments,
doc! {
"config.server_id": &server.id
},
None,
)
.await
.context("failed to find deployments on server")?;
let futures = deployments.iter().map(|deployment| async {
let req = super::ExecuteRequest::StopContainer(StopContainer {
deployment: deployment.id.clone(),
signal: None,
time: None,
});
(
async {
let update = init_execution_update(&req, &user).await?;
State
.resolve(
StopContainer {
deployment: deployment.id.clone(),
signal: None,
time: None,
},
(user.clone(), update),
)
.await
}
.await,
deployment.name.clone(),
deployment.id.clone(),
)
});
let results = join_all(futures).await;
let deployment_names = deployments
.iter()
.map(|d| format!("{} ({})", d.name, d.id))
.collect::<Vec<_>>()
.join("\n");
update.push_simple_log("stopping containers", deployment_names);
for (res, name, id) in results {
if let Err(e) = res {
update.push_error_log(
"stop container failure",
format_serror(
&e.context(format!(
"failed to stop container {name} ({id})"
))
.into(),
),
);
}
}
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<RemoveContainer, (User, Update)> for State {
#[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -471,12 +490,8 @@ impl Resolve<RemoveContainer, (User, Update)> for State {
}: RemoveContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -489,17 +504,8 @@ impl Resolve<RemoveContainer, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.removing = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
    // Send update after setting action state; this way the frontend gets the correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;

View File

@@ -29,6 +29,7 @@ mod procedure;
mod repo;
mod server;
mod server_template;
mod stack;
mod sync;
#[typeshare]
@@ -38,6 +39,7 @@ mod sync;
#[serde(tag = "type", content = "params")]
pub enum ExecuteRequest {
// ==== SERVER ====
StopAllContainers(StopAllContainers),
PruneContainers(PruneContainers),
PruneImages(PruneImages),
PruneNetworks(PruneNetworks),
@@ -45,10 +47,21 @@ pub enum ExecuteRequest {
// ==== DEPLOYMENT ====
Deploy(Deploy),
StartContainer(StartContainer),
RestartContainer(RestartContainer),
PauseContainer(PauseContainer),
UnpauseContainer(UnpauseContainer),
StopContainer(StopContainer),
StopAllContainers(StopAllContainers),
RemoveContainer(RemoveContainer),
// ==== STACK ====
DeployStack(DeployStack),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
DestroyStack(DestroyStack),
// ==== BUILD ====
RunBuild(RunBuild),
CancelBuild(CancelBuild),
@@ -56,6 +69,8 @@ pub enum ExecuteRequest {
// ==== REPO ====
CloneRepo(CloneRepo),
PullRepo(PullRepo),
BuildRepo(BuildRepo),
CancelRepoBuild(CancelRepoBuild),
// ==== PROCEDURE ====
RunProcedure(RunProcedure),
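  // Illustrative note (not part of this diff): with the serde attributes above
  // (tag = "type", content = "params"), each variant serializes as a tagged
  // object, e.g. a repo build request:
  //   { "type": "BuildRepo", "params": { "repo": "<repo id or name>" } }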

View File

@@ -69,6 +69,8 @@ fn resolve_inner(
let _action_guard =
action_state.update(|state| state.running = true)?;
update_update(update.clone()).await?;
let update = Mutex::new(update);
let res = execute_procedure(&procedure, &update).await;

View File

@@ -1,30 +1,45 @@
use anyhow::anyhow;
use std::{future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
alert::{Alert, AlertData},
builder::{Builder, BuilderConfig},
monitor_timestamp, optional_string,
permission::PermissionLevel,
repo::Repo,
server::Server,
server::{stats::SeverityLevel, Server},
update::{Log, Update},
user::User,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
mongodb::{
bson::{doc, to_document},
options::FindOneOptions,
},
};
use periphery_client::api;
use periphery_client::api::{self, git::RepoActionResponseV1_13};
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
use crate::{
config::core_config,
helpers::{periphery_client, update::update_update},
helpers::{
alert::send_alerts,
builder::{cleanup_builder_instance, get_builder_periphery},
channel::repo_cancel_channel,
git_token, periphery_client,
update::update_update,
},
resource::{self, refresh_repo_state_cache},
state::{action_states, db_client, State},
};
use super::ExecuteRequest;
impl Resolve<CloneRepo, (User, Update)> for State {
#[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -32,7 +47,7 @@ impl Resolve<CloneRepo, (User, Update)> for State {
CloneRepo { repo }: CloneRepo,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
let mut repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
@@ -48,6 +63,18 @@ impl Resolve<CloneRepo, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.cloning = true)?;
update_update(update.clone()).await?;
let git_token = git_token(
&repo.config.git_provider,
&repo.config.git_account,
|https| repo.config.git_https = https,
)
.await
.with_context(
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account),
)?;
if repo.config.server_id.is_empty() {
return Err(anyhow!("repo has no server attached"));
}
@@ -57,19 +84,20 @@ impl Resolve<CloneRepo, (User, Update)> for State {
let periphery = periphery_client(&server)?;
let github_token = core_config()
.github_accounts
.get(&repo.config.github_account)
.cloned();
let logs = match periphery
.request(api::git::CloneRepo {
args: (&repo).into(),
github_token,
git_token,
environment: repo.config.environment,
env_file_path: repo.config.env_file_path,
skip_secret_interp: repo.config.skip_secret_interp,
})
.await
{
Ok(logs) => logs,
Ok(res) => {
let res: RepoActionResponseV1_13 = res.into();
res.logs
}
Err(e) => {
vec![Log::error(
"clone repo",
@@ -85,7 +113,7 @@ impl Resolve<CloneRepo, (User, Update)> for State {
update_last_pulled_time(&repo.name).await;
}
handle_update_return(update).await
handle_server_update_return(update).await
}
}
@@ -112,6 +140,8 @@ impl Resolve<PullRepo, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.pulling = true)?;
update_update(update.clone()).await?;
if repo.config.server_id.is_empty() {
return Err(anyhow!("repo has no server attached"));
}
@@ -127,10 +157,17 @@ impl Resolve<PullRepo, (User, Update)> for State {
branch: optional_string(&repo.config.branch),
commit: optional_string(&repo.config.commit),
on_pull: repo.config.on_pull.into_option(),
environment: repo.config.environment,
env_file_path: repo.config.env_file_path,
skip_secret_interp: repo.config.skip_secret_interp,
})
.await
{
Ok(logs) => logs,
Ok(res) => {
let res: RepoActionResponseV1_13 = res.into();
update.commit_hash = res.commit_hash.unwrap_or_default();
res.logs
}
Err(e) => {
vec![Log::error(
"pull repo",
@@ -147,12 +184,12 @@ impl Resolve<PullRepo, (User, Update)> for State {
update_last_pulled_time(&repo.name).await;
}
handle_update_return(update).await
handle_server_update_return(update).await
}
}
#[instrument(skip_all, fields(update_id = update.id))]
async fn handle_update_return(
async fn handle_server_update_return(
update: Update,
) -> anyhow::Result<Update> {
// Need to manually update the update before cache refresh,
@@ -181,7 +218,6 @@ async fn update_last_pulled_time(repo_name: &str) {
.update_one(
doc! { "name": repo_name },
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
None,
)
.await;
if let Err(e) = res {
@@ -190,3 +226,366 @@ async fn update_last_pulled_time(repo_name: &str) {
);
}
}
impl Resolve<BuildRepo, (User, Update)> for State {
#[instrument(name = "BuildRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
BuildRepo { repo }: BuildRepo,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let mut repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
)
.await?;
if repo.config.builder_id.is_empty() {
return Err(anyhow!("Must attach builder to BuildRepo"));
}
// get the action state for the repo (or insert default).
let action_state =
action_states().repo.get_or_insert_default(&repo.id).await;
// This will set action state back to default when dropped.
// Will also check to ensure repo not already busy before updating.
let _action_guard =
action_state.update(|state| state.building = true)?;
update_update(update.clone()).await?;
let git_token = git_token(
&repo.config.git_provider,
&repo.config.git_account,
|https| repo.config.git_https = https,
)
.await
.with_context(
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account),
)?;
let cancel = CancellationToken::new();
let cancel_clone = cancel.clone();
let mut cancel_recv =
repo_cancel_channel().receiver.resubscribe();
let repo_id = repo.id.clone();
let builder =
resource::get::<Builder>(&repo.config.builder_id).await?;
let is_server_builder =
matches!(&builder.config, BuilderConfig::Server(_));
tokio::spawn(async move {
let poll = async {
loop {
let (incoming_repo_id, mut update) = tokio::select! {
_ = cancel_clone.cancelled() => return Ok(()),
id = cancel_recv.recv() => id?
};
if incoming_repo_id == repo_id {
if is_server_builder {
update.push_error_log("Cancel acknowledged", "Repo Build cancellation is not possible on server builders at this time. Use an AWS builder to enable this feature.");
} else {
update.push_simple_log("Cancel acknowledged", "The repo build cancellation has been queued, it may still take some time.");
}
update.finalize();
let id = update.id.clone();
if let Err(e) = update_update(update).await {
warn!("failed to modify Update {id} on db | {e:#}");
}
if !is_server_builder {
cancel_clone.cancel();
}
return Ok(());
}
}
#[allow(unreachable_code)]
anyhow::Ok(())
};
tokio::select! {
_ = cancel_clone.cancelled() => {}
_ = poll => {}
}
});
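    // Illustrative sketch (not part of this diff) of the cancellation wiring
    // above: a broadcast receiver is polled for a matching repo id, and the
    // CancellationToken both stops this listener and aborts the in-flight
    // builder work. A minimal standalone shape, assuming a broadcast channel
    // that only carries the repo id:
    async fn example_cancel_listener(
      cancel: tokio_util::sync::CancellationToken,
      mut recv: tokio::sync::broadcast::Receiver<String>,
      repo_id: String,
    ) {
      loop {
        tokio::select! {
          // the surrounding work was cancelled elsewhere, stop listening
          _ = cancel.cancelled() => return,
          id = recv.recv() => {
            // a cancel message for this repo triggers the shared token
            if matches!(id, Ok(id) if id == repo_id) {
              cancel.cancel();
              return;
            }
          }
        }
      }
    }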
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) = match get_builder_periphery(
repo.name.clone(),
None,
builder,
&mut update,
)
.await
{
Ok(builder) => builder,
Err(e) => {
warn!("failed to get builder for repo {} | {e:#}", repo.name);
update.logs.push(Log::error(
"get builder",
format_serror(&e.context("failed to get builder").into()),
));
return handle_builder_early_return(
update, repo.id, repo.name, false,
)
.await;
}
};
// CLONE REPO
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
args: (&repo).into(),
git_token,
environment: Default::default(),
env_file_path: Default::default(),
skip_secret_interp: Default::default(),
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_builder_early_return(update, repo.id, repo.name, true).await
},
};
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
let res: RepoActionResponseV1_13 = res.into();
update.logs.extend(res.logs);
update.commit_hash = res.commit_hash.unwrap_or_default();
res.commit_message.unwrap_or_default()
}
Err(e) => {
update.push_error_log(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
Default::default()
}
};
update.finalize();
let db = db_client().await;
if update.success {
let _ = db
.repos
.update_one(
doc! { "name": &repo.name },
doc! { "$set": {
"info.last_built_at": monitor_timestamp(),
"info.built_hash": &update.commit_hash,
"info.built_message": commit_message
}},
)
.await;
}
    // stop the cancel-listening task from running forever
cancel.cancel();
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_repo_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success {
warn!("repo build unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::RepoBuildFailed {
id: repo.id,
name: repo.name,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
}
#[instrument(skip(update))]
async fn handle_builder_early_return(
mut update: Update,
repo_id: String,
repo_name: String,
is_cancel: bool,
) -> anyhow::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db_client().await.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_repo_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("repo build unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::RepoBuildFailed {
id: repo_id,
name: repo_name,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_repo_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
if let ExecuteRequest::CancelRepoBuild(req) = request {
let repo = resource::get::<Repo>(&req.repo).await?;
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates
.find_one(doc! {
"operation": "BuildRepo",
"target.id": &repo.id,
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future(),
db.updates
.find_one(doc! {
"operation": "CancelRepoBuild",
"target.id": &repo.id,
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future()
)?;
match (latest_build, latest_cancel) {
(Some(build), Some(cancel)) => {
if cancel.start_ts > build.start_ts {
return Err(anyhow!(
"Repo build has already been cancelled"
));
}
}
(None, _) => return Err(anyhow!("No repo build in progress")),
_ => {}
};
}
Ok(())
}
impl Resolve<CancelRepoBuild, (User, Update)> for State {
#[instrument(name = "CancelRepoBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
CancelRepoBuild { repo }: CancelRepoBuild,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
)
.await?;
    // make sure the repo is currently building
if !action_states()
.repo
.get(&repo.id)
.await
.and_then(|s| s.get().ok().map(|s| s.building))
.unwrap_or_default()
{
return Err(anyhow!("Repo is not building."));
}
update.push_simple_log(
"cancel triggered",
"the repo build cancel has been triggered",
);
update_update(update.clone()).await?;
repo_cancel_channel()
.sender
.lock()
.await
.send((repo.id, update.clone()))?;
// Make sure cancel is set to complete after some time in case
    // no receiver is there to do it. Prevents the update getting stuck in InProgress.
let update_id = update.id.clone();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(60)).await;
if let Err(e) = update_one_by_id(
&db_client().await.updates,
&update_id,
doc! { "$set": { "status": "Complete" } },
None,
)
.await
{
warn!("failed to set CancelRepoBuild Update status Complete after timeout | {e:#}")
}
});
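    // Note (illustrative, not part of this diff): the spawned task above is a
    // safety net. If no BuildRepo task is currently listening on the cancel
    // channel, this CancelRepoBuild update would otherwise stay InProgress
    // forever; forcing "status": "Complete" after 60 seconds bounds that window.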
Ok(update)
}
}

View File

@@ -1,11 +1,11 @@
use anyhow::Context;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
monitor_timestamp,
all_logs_success, monitor_timestamp,
permission::PermissionLevel,
server::Server,
server::{Server, ServerState},
update::{Log, Update, UpdateStatus},
user::User,
},
@@ -14,11 +14,59 @@ use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{periphery_client, update::update_update},
helpers::{
periphery_client, query::get_server_with_status,
update::update_update,
},
resource,
state::{action_states, State},
};
impl Resolve<StopAllContainers, (User, Update)> for State {
#[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopAllContainers { server }: StopAllContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (server, status) = get_server_with_status(&server).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.stopping_containers = true)?;
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::StopAllContainers {})
.await
.context("failed to stop all container on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log("stop all containers", String::from("All containers have successfully been stopped on the host."));
}
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PruneContainers, (User, Update)> for State {
#[instrument(name = "PruneContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -44,6 +92,8 @@ impl Resolve<PruneContainers, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.pruning_containers = true)?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
@@ -98,6 +148,8 @@ impl Resolve<PruneNetworks, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.pruning_networks = true)?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
@@ -150,6 +202,8 @@ impl Resolve<PruneImages, (User, Update)> for State {
let _action_guard =
action_state.update(|state| state.pruning_images = true)?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log =

View File

@@ -36,12 +36,9 @@ impl Resolve<LaunchServer, (User, Update)> for State {
if db_client()
.await
.servers
.find_one(
doc! {
"name": &name
},
None,
)
.find_one(doc! {
"name": &name
})
.await
.context("failed to query db for servers")?
.is_some()

View File

@@ -0,0 +1,318 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
permission::PermissionLevel, stack::StackInfo, update::Update,
user::User,
},
};
use mungos::mongodb::bson::{doc, to_document};
use periphery_client::api::compose::*;
use resolver_api::Resolve;
use crate::{
helpers::{
interpolate_variables_secrets_into_environment, periphery_client,
stack::{
execute::execute_compose, get_stack_and_server,
services::extract_services_into_res,
},
update::update_update,
},
monitor::update_cache_for_server,
state::{action_states, db_client, State},
};
impl Resolve<DeployStack, (User, Update)> for State {
#[instrument(name = "DeployStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DeployStack { stack, stop_time }: DeployStack,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (mut stack, server) = get_stack_and_server(
&stack,
&user,
PermissionLevel::Execute,
true,
)
.await?;
// get the action state for the stack (or insert default).
let action_state =
action_states().stack.get_or_insert_default(&stack.id).await;
// Will check to ensure stack not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.deploying = true)?;
update_update(update.clone()).await?;
let git_token = crate::helpers::git_token(
&stack.config.git_provider,
&stack.config.git_account,
|https| stack.config.git_https = https,
).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {}", stack.config.git_provider, stack.config.git_account),
)?;
let registry_token = crate::helpers::registry_token(
&stack.config.registry_provider,
&stack.config.registry_account,
).await.with_context(
|| format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account),
)?;
if !stack.config.skip_secret_interp {
interpolate_variables_secrets_into_environment(
&mut stack.config.environment,
&mut update,
)
.await?;
}
let ComposeUpResponse {
logs,
deployed,
file_contents,
missing_files,
remote_errors,
commit_hash,
commit_message,
} = periphery_client(&server)?
.request(ComposeUp {
stack: stack.clone(),
service: None,
git_token,
registry_token,
})
.await?;
update.logs.extend(logs);
let update_info = async {
let latest_services = if !file_contents.is_empty() {
let mut services = Vec::new();
for contents in &file_contents {
if let Err(e) = extract_services_into_res(
&stack.project_name(true),
&contents.contents,
&mut services,
) {
update.push_error_log(
"extract services",
format_serror(&e.context(format!("Failed to extract stack services for compose file path {}. Things probably won't work correctly", contents.path)).into())
);
}
}
services
} else {
// maybe better to do something else here for services.
stack.info.latest_services.clone()
};
let project_name = stack.project_name(true);
let (
deployed_services,
deployed_contents,
deployed_hash,
deployed_message,
) = if deployed {
(
Some(latest_services.clone()),
Some(file_contents.clone()),
commit_hash.clone(),
commit_message.clone(),
)
} else {
(
stack.info.deployed_services,
stack.info.deployed_contents,
stack.info.deployed_hash,
stack.info.deployed_message,
)
};
let info = StackInfo {
missing_files,
deployed_project_name: project_name.into(),
deployed_services,
deployed_contents,
deployed_hash,
deployed_message,
latest_services,
remote_contents: stack
.config
.file_contents
.is_empty()
.then_some(file_contents),
remote_errors: stack
.config
.file_contents
.is_empty()
.then_some(remote_errors),
latest_hash: commit_hash,
latest_message: commit_message,
};
let info = to_document(&info)
.context("failed to serialize stack info to bson")?;
db_client()
.await
.stacks
.update_one(
doc! { "name": &stack.name },
doc! { "$set": { "info": info } },
)
.await
.context("failed to update stack info on db")?;
anyhow::Ok(())
};
// This will be weird with single service deploys. Come back to it.
if let Err(e) = update_info.await {
update.push_error_log(
"refresh stack info",
format_serror(
&e.context("failed to refresh stack info on db").into(),
),
)
}
    // Ensure the cached stack state is up to date by updating the server cache
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
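// Note (illustrative, not part of this diff): in the stack info refresh above,
// the deployed_* fields (services, contents, hash, message) are only replaced
// with the freshly computed values when `deployed` is true; otherwise the
// previously stored deployed_* values carry over, so a failed or skipped deploy
// does not overwrite the record of what is actually running.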
impl Resolve<StartStack, (User, Update)> for State {
#[instrument(name = "StartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartStack { stack, service }: StartStack,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
execute_compose::<StartStack>(
&stack,
service,
&user,
|state| state.starting = true,
update,
(),
)
.await
}
}
impl Resolve<RestartStack, (User, Update)> for State {
#[instrument(name = "RestartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RestartStack { stack, service }: RestartStack,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
execute_compose::<RestartStack>(
&stack,
service,
&user,
|state| {
state.restarting = true;
},
update,
(),
)
.await
}
}
impl Resolve<PauseStack, (User, Update)> for State {
#[instrument(name = "PauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PauseStack { stack, service }: PauseStack,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
execute_compose::<PauseStack>(
&stack,
service,
&user,
|state| state.pausing = true,
update,
(),
)
.await
}
}
impl Resolve<UnpauseStack, (User, Update)> for State {
#[instrument(name = "UnpauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
UnpauseStack { stack, service }: UnpauseStack,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
execute_compose::<UnpauseStack>(
&stack,
service,
&user,
|state| state.unpausing = true,
update,
(),
)
.await
}
}
impl Resolve<StopStack, (User, Update)> for State {
#[instrument(name = "StopStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopStack {
stack,
stop_time,
service,
}: StopStack,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
execute_compose::<StopStack>(
&stack,
service,
&user,
|state| state.stopping = true,
update,
stop_time,
)
.await
}
}
impl Resolve<DestroyStack, (User, Update)> for State {
#[instrument(name = "DestroyStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DestroyStack {
stack,
remove_orphans,
stop_time,
}: DestroyStack,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
execute_compose::<DestroyStack>(
&stack,
None,
&user,
|state| state.destroying = true,
update,
(stop_time, remove_orphans),
)
.await
}
}

View File

@@ -1,3 +1,5 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use mongo_indexed::doc;
@@ -8,12 +10,14 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
update::{Log, Update},
user::{sync_user, User},
},
@@ -25,7 +29,9 @@ use crate::{
helpers::{
query::get_id_to_tags,
sync::{
deployment,
deploy::{
build_deploy_cache, deploy_from_cache, SyncDeployParams,
},
resource::{
get_updates_for_execution, AllResourcesById, ResourceSync,
},
@@ -52,6 +58,9 @@ impl Resolve<RunSync, (User, Update)> for State {
return Err(anyhow!("resource sync repo not configured"));
}
// Send update here for FE to recheck action state
update_update(update.clone()).await?;
let (res, logs, hash, message) =
crate::helpers::sync::remote::get_remote_resources(&sync)
.await
@@ -62,8 +71,28 @@ impl Resolve<RunSync, (User, Update)> for State {
let resources = res?;
let all_resources = AllResourcesById::load().await?;
let id_to_tags = get_id_to_tags(None).await?;
let all_resources = AllResourcesById::load().await?;
let deployments_by_name = all_resources
.deployments
.values()
.map(|deployment| (deployment.name.clone(), deployment.clone()))
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
let deploy_cache = build_deploy_cache(SyncDeployParams {
deployments: &resources.deployments,
deployment_map: &deployments_by_name,
stacks: &resources.stacks,
stack_map: &stacks_by_name,
all_resources: &all_resources,
})
.await?;
let (servers_to_create, servers_to_update, servers_to_delete) =
get_updates_for_execution::<Server>(
@@ -77,13 +106,21 @@ impl Resolve<RunSync, (User, Update)> for State {
deployments_to_create,
deployments_to_update,
deployments_to_delete,
) = deployment::get_updates_for_execution(
) = get_updates_for_execution::<Deployment>(
resources.deployments,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (stacks_to_create, stacks_to_update, stacks_to_delete) =
get_updates_for_execution::<Stack>(
resources.stacks,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (builds_to_create, builds_to_update, builds_to_delete) =
get_updates_for_execution::<Build>(
resources.builds,
@@ -169,7 +206,8 @@ impl Resolve<RunSync, (User, Update)> for State {
)
.await?;
if resource_syncs_to_create.is_empty()
if deploy_cache.is_empty()
&& resource_syncs_to_create.is_empty()
&& resource_syncs_to_update.is_empty()
&& resource_syncs_to_delete.is_empty()
&& server_templates_to_create.is_empty()
@@ -181,6 +219,9 @@ impl Resolve<RunSync, (User, Update)> for State {
&& deployments_to_create.is_empty()
&& deployments_to_update.is_empty()
&& deployments_to_delete.is_empty()
&& stacks_to_create.is_empty()
&& stacks_to_update.is_empty()
&& stacks_to_delete.is_empty()
&& builds_to_create.is_empty()
&& builds_to_update.is_empty()
&& builds_to_delete.is_empty()
@@ -305,15 +346,25 @@ impl Resolve<RunSync, (User, Update)> for State {
);
    // Dependent on server / build
if let Some(res) = deployment::run_updates(
deployments_to_create,
deployments_to_update,
deployments_to_delete,
)
.await
{
update.logs.extend(res);
}
maybe_extend(
&mut update.logs,
Deployment::run_updates(
deployments_to_create,
deployments_to_update,
deployments_to_delete,
)
.await,
);
// stack only depends on server, but maybe will depend on build later.
maybe_extend(
&mut update.logs,
Stack::run_updates(
stacks_to_create,
stacks_to_update,
stacks_to_delete,
)
.await,
);
    // Dependent on everything
maybe_extend(
@@ -326,6 +377,9 @@ impl Resolve<RunSync, (User, Update)> for State {
.await,
);
// Execute the deploy cache
deploy_from_cache(deploy_cache, &mut update.logs).await;
let db = db_client().await;
if let Err(e) = update_one_by_id(

View File

@@ -3,7 +3,7 @@ use monitor_client::{
api::read::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
entities::{update::ResourceTargetVariant, user::User},
entities::{deployment::Deployment, server::Server, user::User},
};
use mungos::{
by_id::find_one_by_id,
@@ -14,7 +14,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource::get_resource_ids_for_user,
state::{db_client, State},
};
@@ -28,16 +28,10 @@ impl Resolve<ListAlerts, User> for State {
) -> anyhow::Result<ListAlertsResponse> {
let mut query = query.unwrap_or_default();
if !user.admin && !core_config().transparent_mode {
let server_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Server,
)
.await?;
let deployment_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Deployment,
)
.await?;
let server_ids =
get_resource_ids_for_user::<Server>(&user).await?;
let deployment_ids =
get_resource_ids_for_user::<Deployment>(&user).await?;
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },

View File

@@ -1,21 +1,17 @@
use std::str::FromStr;
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::*,
entities::{
alerter::{Alerter, AlerterListItem},
permission::PermissionLevel,
update::ResourceTargetVariant,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
@@ -61,26 +57,19 @@ impl Resolve<GetAlertersSummary, User> for State {
GetAlertersSummary {}: GetAlertersSummary,
user: User,
) -> anyhow::Result<GetAlertersSummaryResponse> {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
let query =
match resource::get_resource_ids_for_user::<Alerter>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
Some(query)
};
let total = db_client()
.await
.alerters
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all alerter documents")?;
let res = GetAlertersSummaryResponse {

View File

@@ -1,7 +1,4 @@
use std::{
collections::{HashMap, HashSet},
sync::OnceLock,
};
use std::collections::{HashMap, HashSet};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
@@ -10,6 +7,7 @@ use monitor_client::{
api::read::*,
entities::{
build::{Build, BuildActionState, BuildListItem, BuildState},
config::core::CoreConfig,
permission::PermissionLevel,
update::UpdateStatus,
user::User,
@@ -20,12 +18,14 @@ use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::{Resolve, ResolveToString};
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, build_state_cache, db_client, State},
state::{
action_states, build_state_cache, db_client, github_client, State,
},
};
impl Resolve<GetBuild, User> for State {
@@ -147,16 +147,13 @@ impl Resolve<GetBuildMonthlyStats, User> for State {
let mut build_updates = db_client()
.await
.updates
.find(
doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
"operation": Operation::RunBuild.to_string(),
.find(doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
None,
)
"operation": Operation::RunBuild.to_string(),
})
.await
.context("failed to get updates cursor")?;
@@ -193,16 +190,16 @@ fn ms_to_hour(duration: i64) -> f64 {
duration as f64 / MS_TO_HOUR_DIVISOR
}
impl Resolve<GetBuildVersions, User> for State {
impl Resolve<ListBuildVersions, User> for State {
async fn resolve(
&self,
GetBuildVersions {
ListBuildVersions {
build,
major,
minor,
patch,
limit,
}: GetBuildVersions,
}: ListBuildVersions,
user: User,
) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
let build = resource::get_check_permissions::<Build>(
@@ -250,42 +247,6 @@ impl Resolve<GetBuildVersions, User> for State {
}
}
fn github_organizations() -> &'static String {
static GITHUB_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
GITHUB_ORGANIZATIONS.get_or_init(|| {
serde_json::to_string(&core_config().github_organizations)
.expect("failed to serialize github organizations")
})
}
impl ResolveToString<ListGithubOrganizations, User> for State {
async fn resolve_to_string(
&self,
ListGithubOrganizations {}: ListGithubOrganizations,
_: User,
) -> anyhow::Result<String> {
Ok(github_organizations().clone())
}
}
fn docker_organizations() -> &'static String {
static DOCKER_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
DOCKER_ORGANIZATIONS.get_or_init(|| {
serde_json::to_string(&core_config().docker_organizations)
.expect("failed to serialize docker organizations")
})
}
impl ResolveToString<ListDockerOrganizations, User> for State {
async fn resolve_to_string(
&self,
ListDockerOrganizations {}: ListDockerOrganizations,
_: User,
) -> anyhow::Result<String> {
Ok(docker_organizations().clone())
}
}
impl Resolve<ListCommonBuildExtraArgs, User> for State {
async fn resolve(
&self,
@@ -310,3 +271,78 @@ impl Resolve<ListCommonBuildExtraArgs, User> for State {
Ok(res)
}
}
impl Resolve<GetBuildWebhookEnabled, User> for State {
async fn resolve(
&self,
GetBuildWebhookEnabled { build }: GetBuildWebhookEnabled,
user: User,
) -> anyhow::Result<GetBuildWebhookEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Read,
)
.await?;
if build.config.git_provider != "github.com"
|| build.config.repo.is_empty()
{
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: true,
});
}
}
Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: false,
})
}
}
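// Illustrative note (not part of this diff): the handler above reports
// "managed: true, enabled: true" only when an active webhook's configured URL
// matches the expected listener URL exactly. With a hypothetical
// webhook_base_url of "https://monitor.example.com" and a build id of "abc123",
// that expected URL would be "https://monitor.example.com/listener/github/build/abc123".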

View File

@@ -1,21 +1,17 @@
use std::{collections::HashSet, str::FromStr};
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::{self, *},
api::read::*,
entities::{
builder::{Builder, BuilderConfig, BuilderListItem},
builder::{Builder, BuilderListItem},
permission::PermissionLevel,
update::ResourceTargetVariant,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
@@ -61,26 +57,19 @@ impl Resolve<GetBuildersSummary, User> for State {
GetBuildersSummary {}: GetBuildersSummary,
user: User,
) -> anyhow::Result<GetBuildersSummaryResponse> {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Builder,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
let query =
match resource::get_resource_ids_for_user::<Builder>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
Some(query)
};
let total = db_client()
.await
.builders
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all builder documents")?;
let res = GetBuildersSummaryResponse {
@@ -89,52 +78,3 @@ impl Resolve<GetBuildersSummary, User> for State {
Ok(res)
}
}
impl Resolve<GetBuilderAvailableAccounts, User> for State {
async fn resolve(
&self,
GetBuilderAvailableAccounts { builder }: GetBuilderAvailableAccounts,
user: User,
) -> anyhow::Result<GetBuilderAvailableAccountsResponse> {
let builder = resource::get_check_permissions::<Builder>(
&builder,
&user,
PermissionLevel::Read,
)
.await?;
let (github, docker) = match builder.config {
BuilderConfig::Aws(config) => {
(config.github_accounts, config.docker_accounts)
}
BuilderConfig::Server(config) => {
let res = self
.resolve(
read::GetAvailableAccounts {
server: Some(config.server_id),
},
user,
)
.await?;
(res.github, res.docker)
}
};
let mut github_set = HashSet::<String>::new();
github_set.extend(core_config().github_accounts.keys().cloned());
github_set.extend(github);
let mut github = github_set.into_iter().collect::<Vec<_>>();
github.sort();
let mut docker_set = HashSet::<String>::new();
docker_set.extend(core_config().docker_accounts.keys().cloned());
docker_set.extend(docker);
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
docker.sort();
Ok(GetBuilderAvailableAccountsResponse { github, docker })
}
}

View File

@@ -1,16 +1,33 @@
use std::time::Instant;
use std::{collections::HashSet, sync::OnceLock, time::Instant};
use anyhow::anyhow;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::read::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use monitor_client::{
api::read::*,
entities::{
build::Build,
builder::{Builder, BuilderConfig},
config::{DockerRegistry, GitProvider},
repo::Repo,
server::Server,
sync::ResourceSync,
update::ResourceTarget,
user::User,
},
};
use resolver_api::{
derive::Resolver, Resolve, ResolveToString, Resolver,
};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{auth::auth_request, config::core_config, state::State};
use crate::{
auth::auth_request, config::core_config, helpers::periphery_client,
resource, state::State,
};
mod alert;
mod alerter;
@@ -19,10 +36,12 @@ mod builder;
mod deployment;
mod permission;
mod procedure;
mod provider;
mod repo;
mod search;
mod server;
mod server_template;
mod stack;
mod sync;
mod tag;
mod toml;
@@ -37,17 +56,24 @@ mod variable;
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum ReadRequest {
#[to_string_resolver]
GetVersion(GetVersion),
#[to_string_resolver]
GetCoreInfo(GetCoreInfo),
GetAvailableAwsEcrLabels(GetAvailableAwsEcrLabels),
#[to_string_resolver]
ListAwsEcrLabels(ListAwsEcrLabels),
ListSecrets(ListSecrets),
ListGitProvidersFromConfig(ListGitProvidersFromConfig),
ListDockerRegistriesFromConfig(ListDockerRegistriesFromConfig),
// ==== USER ====
ListUsers(ListUsers),
GetUsername(GetUsername),
GetPermissionLevel(GetPermissionLevel),
FindUser(FindUser),
ListUsers(ListUsers),
ListApiKeys(ListApiKeys),
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
ListPermissions(ListPermissions),
GetPermissionLevel(GetPermissionLevel),
ListUserTargetPermissions(ListUserTargetPermissions),
// ==== USER GROUP ====
@@ -66,71 +92,84 @@ enum ReadRequest {
// ==== SERVER TEMPLATE ====
GetServerTemplate(GetServerTemplate),
GetServerTemplatesSummary(GetServerTemplatesSummary),
ListServerTemplates(ListServerTemplates),
ListFullServerTemplates(ListFullServerTemplates),
GetServerTemplatesSummary(GetServerTemplatesSummary),
// ==== SERVER ====
GetServersSummary(GetServersSummary),
GetServer(GetServer),
ListServers(ListServers),
ListFullServers(ListFullServers),
GetServerState(GetServerState),
GetPeripheryVersion(GetPeripheryVersion),
GetDockerContainers(GetDockerContainers),
GetDockerImages(GetDockerImages),
GetDockerNetworks(GetDockerNetworks),
GetServerActionState(GetServerActionState),
GetHistoricalServerStats(GetHistoricalServerStats),
GetAvailableAccounts(GetAvailableAccounts),
GetAvailableSecrets(GetAvailableSecrets),
ListServers(ListServers),
ListFullServers(ListFullServers),
#[to_string_resolver]
ListDockerContainers(ListDockerContainers),
#[to_string_resolver]
ListDockerNetworks(ListDockerNetworks),
#[to_string_resolver]
ListDockerImages(ListDockerImages),
#[to_string_resolver]
ListComposeProjects(ListComposeProjects),
// ==== DEPLOYMENT ====
GetDeploymentsSummary(GetDeploymentsSummary),
GetDeployment(GetDeployment),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
GetDeploymentContainer(GetDeploymentContainer),
GetDeploymentActionState(GetDeploymentActionState),
GetDeploymentStats(GetDeploymentStats),
GetLog(GetLog),
SearchLog(SearchLog),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
// ==== BUILD ====
GetBuildsSummary(GetBuildsSummary),
GetBuild(GetBuild),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
GetBuildActionState(GetBuildActionState),
GetBuildMonthlyStats(GetBuildMonthlyStats),
GetBuildVersions(GetBuildVersions),
ListBuildVersions(ListBuildVersions),
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
#[to_string_resolver]
ListGithubOrganizations(ListGithubOrganizations),
#[to_string_resolver]
ListDockerOrganizations(ListDockerOrganizations),
// ==== REPO ====
GetReposSummary(GetReposSummary),
GetRepo(GetRepo),
GetRepoActionState(GetRepoActionState),
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
ListRepos(ListRepos),
ListFullRepos(ListFullRepos),
GetRepoActionState(GetRepoActionState),
// ==== SYNC ====
GetResourceSyncsSummary(GetResourceSyncsSummary),
GetResourceSync(GetResourceSync),
GetResourceSyncActionState(GetResourceSyncActionState),
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
ListResourceSyncs(ListResourceSyncs),
ListFullResourceSyncs(ListFullResourceSyncs),
GetResourceSyncActionState(GetResourceSyncActionState),
// ==== STACK ====
GetStacksSummary(GetStacksSummary),
GetStack(GetStack),
GetStackActionState(GetStackActionState),
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
GetStackServiceLog(GetStackServiceLog),
SearchStackServiceLog(SearchStackServiceLog),
ListStacks(ListStacks),
ListFullStacks(ListFullStacks),
ListStackServices(ListStackServices),
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
// ==== BUILDER ====
GetBuildersSummary(GetBuildersSummary),
GetBuilder(GetBuilder),
ListBuilders(ListBuilders),
ListFullBuilders(ListFullBuilders),
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
// ==== ALERTER ====
GetAlertersSummary(GetAlertersSummary),
@@ -160,11 +199,17 @@ enum ReadRequest {
#[to_string_resolver]
GetSystemStats(GetSystemStats),
#[to_string_resolver]
GetSystemProcesses(GetSystemProcesses),
ListSystemProcesses(ListSystemProcesses),
// ==== VARIABLE ====
GetVariable(GetVariable),
ListVariables(ListVariables),
// ==== PROVIDER ====
GetGitProviderAccount(GetGitProviderAccount),
ListGitProviderAccounts(ListGitProviderAccounts),
GetDockerRegistryAccount(GetDockerRegistryAccount),
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
}
pub fn router() -> Router {
@@ -199,46 +244,344 @@ async fn handler(
Ok((TypedHeader(ContentType::json()), res?))
}
impl Resolve<GetVersion, User> for State {
#[instrument(name = "GetVersion", level = "debug", skip(self))]
async fn resolve(
fn version() -> &'static String {
static VERSION: OnceLock<String> = OnceLock::new();
VERSION.get_or_init(|| {
serde_json::to_string(&GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
.context("failed to serialize GetVersionResponse")
.unwrap()
})
}
impl ResolveToString<GetVersion, User> for State {
async fn resolve_to_string(
&self,
GetVersion {}: GetVersion,
_: User,
) -> anyhow::Result<GetVersionResponse> {
Ok(GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
) -> anyhow::Result<String> {
Ok(version().to_string())
}
}
impl Resolve<GetCoreInfo, User> for State {
#[instrument(name = "GetCoreInfo", level = "debug", skip(self))]
async fn resolve(
&self,
GetCoreInfo {}: GetCoreInfo,
_: User,
) -> anyhow::Result<GetCoreInfoResponse> {
fn core_info() -> &'static String {
static CORE_INFO: OnceLock<String> = OnceLock::new();
CORE_INFO.get_or_init(|| {
let config = core_config();
Ok(GetCoreInfoResponse {
let info = GetCoreInfoResponse {
title: config.title.clone(),
monitoring_interval: config.monitoring_interval,
github_webhook_base_url: config
.github_webhook_base_url
webhook_base_url: config
.webhook_base_url
.clone()
.unwrap_or_else(|| config.host.clone()),
transparent_mode: config.transparent_mode,
ui_write_disabled: config.ui_write_disabled,
})
github_webhook_owners: config
.github_webhook_app
.installations
.iter()
.map(|i| i.namespace.to_string())
.collect(),
};
serde_json::to_string(&info)
.context("failed to serialize GetCoreInfoResponse")
.unwrap()
})
}
impl ResolveToString<GetCoreInfo, User> for State {
async fn resolve_to_string(
&self,
GetCoreInfo {}: GetCoreInfo,
_: User,
) -> anyhow::Result<String> {
Ok(core_info().to_string())
}
}
impl Resolve<GetAvailableAwsEcrLabels, User> for State {
async fn resolve(
fn ecr_labels() -> &'static String {
static ECR_LABELS: OnceLock<String> = OnceLock::new();
ECR_LABELS.get_or_init(|| {
serde_json::to_string(
&core_config()
.aws_ecr_registries
.iter()
.map(|reg| reg.label.clone())
.collect::<Vec<_>>(),
)
.context("failed to serialize ecr registries")
.unwrap()
})
}
impl ResolveToString<ListAwsEcrLabels, User> for State {
async fn resolve_to_string(
&self,
GetAvailableAwsEcrLabels {}: GetAvailableAwsEcrLabels,
ListAwsEcrLabels {}: ListAwsEcrLabels,
_: User,
) -> anyhow::Result<GetAvailableAwsEcrLabelsResponse> {
Ok(core_config().aws_ecr_registries.keys().cloned().collect())
) -> anyhow::Result<String> {
Ok(ecr_labels().to_string())
}
}
impl Resolve<ListSecrets, User> for State {
async fn resolve(
&self,
ListSecrets { target }: ListSecrets,
_: User,
) -> anyhow::Result<ListSecretsResponse> {
let mut secrets = core_config()
.secrets
.keys()
.cloned()
.collect::<HashSet<_>>();
if let Some(target) = target {
let server_id = match target {
ResourceTarget::Server(id) => Some(id),
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => Some(config.server_id),
BuilderConfig::Aws(config) => {
secrets.extend(config.secrets);
None
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
};
if let Some(id) = server_id {
let server = resource::get::<Server>(&id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListSecrets {})
.await
.with_context(|| {
format!(
"failed to get secrets from server {}",
server.name
)
})?;
secrets.extend(more);
}
}
let mut secrets = secrets.into_iter().collect::<Vec<_>>();
secrets.sort();
Ok(secrets)
}
}
impl Resolve<ListGitProvidersFromConfig, User> for State {
async fn resolve(
&self,
ListGitProvidersFromConfig { target }: ListGitProvidersFromConfig,
user: User,
) -> anyhow::Result<ListGitProvidersFromConfigResponse> {
let mut providers = core_config().git_providers.clone();
if let Some(target) = target {
match target {
ResourceTarget::Server(id) => {
merge_git_providers_for_server(&mut providers, &id).await?;
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => {
merge_git_providers_for_server(
&mut providers,
&config.server_id,
)
.await?;
}
BuilderConfig::Aws(config) => {
merge_git_providers(
&mut providers,
config.git_providers,
);
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
}
}
let (builds, repos, syncs) = tokio::try_join!(
resource::list_full_for_user::<Build>(
Default::default(),
&user
),
resource::list_full_for_user::<Repo>(Default::default(), &user),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user
),
)?;
for build in builds {
if !providers
.iter()
.any(|provider| provider.domain == build.config.git_provider)
{
providers.push(GitProvider {
domain: build.config.git_provider,
https: build.config.git_https,
accounts: Default::default(),
});
}
}
for repo in repos {
if !providers
.iter()
.any(|provider| provider.domain == repo.config.git_provider)
{
providers.push(GitProvider {
domain: repo.config.git_provider,
https: repo.config.git_https,
accounts: Default::default(),
});
}
}
for sync in syncs {
if !providers
.iter()
.any(|provider| provider.domain == sync.config.git_provider)
{
providers.push(GitProvider {
domain: sync.config.git_provider,
https: sync.config.git_https,
accounts: Default::default(),
});
}
}
providers.sort();
Ok(providers)
}
}
impl Resolve<ListDockerRegistriesFromConfig, User> for State {
async fn resolve(
&self,
ListDockerRegistriesFromConfig { target }: ListDockerRegistriesFromConfig,
_: User,
) -> anyhow::Result<ListDockerRegistriesFromConfigResponse> {
let mut registries = core_config().docker_registries.clone();
if let Some(target) = target {
match target {
ResourceTarget::Server(id) => {
merge_docker_registries_for_server(&mut registries, &id)
.await?;
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => {
merge_docker_registries_for_server(
&mut registries,
&config.server_id,
)
.await?;
}
BuilderConfig::Aws(config) => {
merge_docker_registries(
&mut registries,
config.docker_registries,
);
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
}
}
registries.sort();
Ok(registries)
}
}
async fn merge_git_providers_for_server(
providers: &mut Vec<GitProvider>,
server_id: &str,
) -> anyhow::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListGitProviders {})
.await
.with_context(|| {
format!(
"failed to get git providers from server {}",
server.name
)
})?;
merge_git_providers(providers, more);
Ok(())
}
fn merge_git_providers(
providers: &mut Vec<GitProvider>,
more: Vec<GitProvider>,
) {
for incoming_provider in more {
if let Some(provider) = providers
.iter_mut()
.find(|provider| provider.domain == incoming_provider.domain)
{
for account in incoming_provider.accounts {
if !provider.accounts.contains(&account) {
provider.accounts.push(account);
}
}
} else {
providers.push(incoming_provider);
}
}
}
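// Worked example (a sketch, not part of this diff) of the merge semantics above:
// providers are keyed by `domain`; for an already-known domain only unseen
// accounts are appended, otherwise the whole incoming provider is pushed.
//
//     existing: [{ domain: "github.com", accounts: [a] }]
//     incoming: [{ domain: "github.com", accounts: [a, b] },
//                { domain: "git.example.com", accounts: [c] }]
//     result:   [{ domain: "github.com", accounts: [a, b] },
//                { domain: "git.example.com", accounts: [c] }]
//
// The same logic applies to `merge_docker_registries` below.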
async fn merge_docker_registries_for_server(
registries: &mut Vec<DockerRegistry>,
server_id: &str,
) -> anyhow::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListDockerRegistries {})
.await
.with_context(|| {
format!(
"failed to get docker registries from server {}",
server.name
)
})?;
merge_docker_registries(registries, more);
Ok(())
}
fn merge_docker_registries(
registries: &mut Vec<DockerRegistry>,
more: Vec<DockerRegistry>,
) {
for incoming_registry in more {
if let Some(registry) = registries
.iter_mut()
.find(|registry| registry.domain == incoming_registry.domain)
{
for account in incoming_registry.accounts {
if !registry.accounts.contains(&account) {
registry.accounts.push(account);
}
}
} else {
registries.push(incoming_registry);
}
}
}


@@ -11,7 +11,7 @@ use mungos::{find::find_collect, mongodb::bson::doc};
use resolver_api::Resolve;
use crate::{
helpers::query::get_user_permission_on_resource,
helpers::query::get_user_permission_on_target,
state::{db_client, State},
};
@@ -43,8 +43,7 @@ impl Resolve<GetPermissionLevel, User> for State {
if user.admin {
return Ok(PermissionLevel::Write);
}
let (variant, id) = target.extract_variant_id();
get_user_permission_on_resource(&user.id, variant, id).await
get_user_permission_on_target(&user, &target).await
}
}


@@ -0,0 +1,116 @@
use anyhow::{anyhow, Context};
use mongo_indexed::{doc, Document};
use monitor_client::{
api::read::{
GetDockerRegistryAccount, GetDockerRegistryAccountResponse,
GetGitProviderAccount, GetGitProviderAccountResponse,
ListDockerRegistryAccounts, ListDockerRegistryAccountsResponse,
ListGitProviderAccounts, ListGitProviderAccountsResponse,
},
entities::user::User,
};
use mungos::{
by_id::find_one_by_id, find::find_collect,
mongodb::options::FindOptions,
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
impl Resolve<GetGitProviderAccount, User> for State {
async fn resolve(
&self,
GetGitProviderAccount { id }: GetGitProviderAccount,
user: User,
) -> anyhow::Result<GetGitProviderAccountResponse> {
if !user.admin {
return Err(anyhow!(
"Only admins can read git provider accounts"
));
}
find_one_by_id(&db_client().await.git_accounts, &id)
.await
.context("failed to query db for git provider accounts")?
.context("did not find git provider account with the given id")
}
}
impl Resolve<ListGitProviderAccounts, User> for State {
async fn resolve(
&self,
ListGitProviderAccounts { domain, username }: ListGitProviderAccounts,
user: User,
) -> anyhow::Result<ListGitProviderAccountsResponse> {
if !user.admin {
return Err(anyhow!(
"Only admins can read git provider accounts"
));
}
let mut filter = Document::new();
if let Some(domain) = domain {
filter.insert("domain", domain);
}
if let Some(username) = username {
filter.insert("username", username);
}
find_collect(
&db_client().await.git_accounts,
filter,
FindOptions::builder()
.sort(doc! { "domain": 1, "username": 1 })
.build(),
)
.await
.context("failed to query db for git provider accounts")
}
}
impl Resolve<GetDockerRegistryAccount, User> for State {
async fn resolve(
&self,
GetDockerRegistryAccount { id }: GetDockerRegistryAccount,
user: User,
) -> anyhow::Result<GetDockerRegistryAccountResponse> {
if !user.admin {
return Err(anyhow!(
"Only admins can read docker registry accounts"
));
}
find_one_by_id(&db_client().await.registry_accounts, &id)
.await
.context("failed to query db for docker registry accounts")?
.context(
"did not find docker registry account with the given id",
)
}
}
impl Resolve<ListDockerRegistryAccounts, User> for State {
async fn resolve(
&self,
ListDockerRegistryAccounts { domain, username }: ListDockerRegistryAccounts,
user: User,
) -> anyhow::Result<ListDockerRegistryAccountsResponse> {
if !user.admin {
return Err(anyhow!(
"Only admins can read docker registry accounts"
));
}
let mut filter = Document::new();
if let Some(domain) = domain {
filter.insert("domain", domain);
}
if let Some(username) = username {
filter.insert("username", username);
}
find_collect(
&db_client().await.registry_accounts,
filter,
FindOptions::builder()
.sort(doc! { "domain": 1, "username": 1 })
.build(),
)
.await
.context("failed to query db for docker registry accounts")
}
}


@@ -2,6 +2,7 @@ use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{Repo, RepoActionState, RepoListItem, RepoState},
user::User,
@@ -10,8 +11,9 @@ use monitor_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, repo_state_cache, State},
state::{action_states, github_client, repo_state_cache, State},
};
impl Resolve<GetRepo, User> for State {
@@ -105,11 +107,16 @@ impl Resolve<GetReposSummary, User> for State {
(_, action_states) if action_states.pulling => {
res.pulling += 1;
}
(_, action_states) if action_states.building => {
res.building += 1;
}
(RepoState::Ok, _) => res.ok += 1,
(RepoState::Failed, _) => res.failed += 1,
(RepoState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the building state, since that comes from action states
(RepoState::Cloning, _) | (RepoState::Pulling, _) => {
(RepoState::Cloning, _)
| (RepoState::Pulling, _)
| (RepoState::Building, _) => {
unreachable!()
}
}
@@ -118,3 +125,101 @@ impl Resolve<GetReposSummary, User> for State {
Ok(res)
}
}
impl Resolve<GetRepoWebhooksEnabled, User> for State {
async fn resolve(
&self,
GetRepoWebhooksEnabled { repo }: GetRepoWebhooksEnabled,
user: User,
) -> anyhow::Result<GetRepoWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Read,
)
.await?;
if repo.config.git_provider != "github.com"
|| repo.config.repo.is_empty()
{
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let clone_url =
format!("{host}/listener/github/repo/{}/clone", repo.id);
let pull_url =
format!("{host}/listener/github/repo/{}/pull", repo.id);
let build_url =
format!("{host}/listener/github/repo/{}/build", repo.id);
let mut clone_enabled = false;
let mut pull_enabled = false;
let mut build_enabled = false;
for webhook in webhooks {
if !webhook.active {
continue;
}
if webhook.config.url == clone_url {
clone_enabled = true
}
if webhook.config.url == pull_url {
pull_enabled = true
}
if webhook.config.url == build_url {
build_enabled = true
}
}
Ok(GetRepoWebhooksEnabledResponse {
managed: true,
clone_enabled,
pull_enabled,
build_enabled,
})
}
}
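// For illustration (a sketch, not part of this diff): with host
// "https://monitor.example.com" and a repo id of "<id>", the URLs matched above are
//     https://monitor.example.com/listener/github/repo/<id>/clone
//     https://monitor.example.com/listener/github/repo/<id>/pull
//     https://monitor.example.com/listener/github/repo/<id>/build
// and a webhook only counts as enabled when it is active and its configured url
// matches one of these exactly.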


@@ -1,5 +1,5 @@
use std::{
collections::{HashMap, HashSet},
collections::HashMap,
sync::{Arc, OnceLock},
};
@@ -10,10 +10,8 @@ use async_timing_util::{
use monitor_client::{
api::read::*,
entities::{
deployment::ContainerSummary,
permission::PermissionLevel,
server::{
docker_image::ImageSummary, docker_network::DockerNetwork,
Server, ServerActionState, ServerListItem, ServerState,
},
user::User,
@@ -23,12 +21,11 @@ use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use periphery_client::api::{self, GetAccountsResponse};
use periphery_client::api as periphery;
use resolver_api::{Resolve, ResolveToString};
use tokio::sync::Mutex;
use crate::{
config::core_config,
helpers::periphery_client,
resource,
state::{action_states, db_client, server_status_cache, State},
@@ -192,7 +189,7 @@ impl ResolveToString<GetSystemInformation, User> for State {
}
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemInformation {})
.request(periphery::stats::GetSystemInformation {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
@@ -240,10 +237,10 @@ fn processes_cache() -> &'static ProcessesCache {
PROCESSES_CACHE.get_or_init(Default::default)
}
impl ResolveToString<GetSystemProcesses, User> for State {
impl ResolveToString<ListSystemProcesses, User> for State {
async fn resolve_to_string(
&self,
GetSystemProcesses { server }: GetSystemProcesses,
ListSystemProcesses { server }: ListSystemProcesses,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
@@ -259,7 +256,7 @@ impl ResolveToString<GetSystemProcesses, User> for State {
}
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemProcesses {})
.request(periphery::stats::GetSystemProcesses {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
@@ -329,123 +326,98 @@ impl Resolve<GetHistoricalServerStats, User> for State {
}
}
impl Resolve<GetDockerImages, User> for State {
async fn resolve(
impl ResolveToString<ListDockerImages, User> for State {
async fn resolve_to_string(
&self,
GetDockerImages { server }: GetDockerImages,
ListDockerImages { server }: ListDockerImages,
user: User,
) -> anyhow::Result<Vec<ImageSummary>> {
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
periphery_client(&server)?
.request(api::build::GetImageList {})
.await
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(images) = &cache.images {
serde_json::to_string(images)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
impl Resolve<GetDockerNetworks, User> for State {
async fn resolve(
impl ResolveToString<ListDockerNetworks, User> for State {
async fn resolve_to_string(
&self,
GetDockerNetworks { server }: GetDockerNetworks,
ListDockerNetworks { server }: ListDockerNetworks,
user: User,
) -> anyhow::Result<Vec<DockerNetwork>> {
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
periphery_client(&server)?
.request(api::network::GetNetworkList {})
.await
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(networks) = &cache.networks {
serde_json::to_string(networks)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
impl Resolve<GetDockerContainers, User> for State {
async fn resolve(
impl ResolveToString<ListDockerContainers, User> for State {
async fn resolve_to_string(
&self,
GetDockerContainers { server }: GetDockerContainers,
ListDockerContainers { server }: ListDockerContainers,
user: User,
) -> anyhow::Result<Vec<ContainerSummary>> {
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
periphery_client(&server)?
.request(api::container::GetContainerList {})
.await
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(containers) = &cache.containers {
serde_json::to_string(containers)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
impl Resolve<GetAvailableAccounts, User> for State {
async fn resolve(
impl ResolveToString<ListComposeProjects, User> for State {
async fn resolve_to_string(
&self,
GetAvailableAccounts { server }: GetAvailableAccounts,
ListComposeProjects { server }: ListComposeProjects,
user: User,
) -> anyhow::Result<GetAvailableAccountsResponse> {
let (github, docker) = match server {
Some(server) => {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let GetAccountsResponse { github, docker } =
periphery_client(&server)?
.request(api::GetAccounts {})
.await
.context("failed to get accounts from periphery")?;
(github, docker)
}
None => Default::default(),
};
let mut github_set = HashSet::<String>::new();
github_set.extend(core_config().github_accounts.keys().cloned());
github_set.extend(github);
let mut github = github_set.into_iter().collect::<Vec<_>>();
github.sort();
let mut docker_set = HashSet::<String>::new();
docker_set.extend(core_config().docker_accounts.keys().cloned());
docker_set.extend(docker);
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
docker.sort();
let res = GetAvailableAccountsResponse { github, docker };
Ok(res)
}
}
impl Resolve<GetAvailableSecrets, User> for State {
async fn resolve(
&self,
GetAvailableSecrets { server }: GetAvailableSecrets,
user: User,
) -> anyhow::Result<GetAvailableSecretsResponse> {
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let mut secrets = periphery_client(&server)?
.request(api::GetSecrets {})
.await
.context("failed to get accounts from periphery")?;
secrets.sort();
Ok(secrets)
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(projects) = &cache.projects {
serde_json::to_string(projects)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}


@@ -1,18 +1,16 @@
use std::str::FromStr;
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
api::read::*,
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
update::ResourceTargetVariant, user::User,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
@@ -58,26 +56,20 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
GetServerTemplatesSummary {}: GetServerTemplatesSummary,
user: User,
) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::ServerTemplate,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
let query = match resource::get_resource_ids_for_user::<
ServerTemplate,
>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
};
Some(query)
},
None => Document::new(),
};
let total = db_client()
.await
.server_templates
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all server template documents")?;
let res = GetServerTemplatesSummaryResponse {


@@ -0,0 +1,313 @@
use std::collections::HashSet;
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
stack::{Stack, StackActionState, StackListItem, StackState},
user::User,
},
};
use periphery_client::api::compose::{
GetComposeServiceLog, GetComposeServiceLogSearch,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{periphery_client, stack::get_stack_and_server},
resource,
state::{action_states, github_client, stack_status_cache, State},
};
impl Resolve<GetStack, User> for State {
async fn resolve(
&self,
GetStack { stack }: GetStack,
user: User,
) -> anyhow::Result<Stack> {
resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read,
)
.await
}
}
impl Resolve<ListStackServices, User> for State {
async fn resolve(
&self,
ListStackServices { stack }: ListStackServices,
user: User,
) -> anyhow::Result<ListStackServicesResponse> {
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read,
)
.await?;
let services = stack_status_cache()
.get(&stack.id)
.await
.unwrap_or_default()
.curr
.services
.clone();
Ok(services)
}
}
impl Resolve<GetStackServiceLog, User> for State {
async fn resolve(
&self,
GetStackServiceLog {
stack,
service,
tail,
}: GetStackServiceLog,
user: User,
) -> anyhow::Result<GetStackServiceLogResponse> {
let (stack, server) = get_stack_and_server(
&stack,
&user,
PermissionLevel::Read,
true,
)
.await?;
periphery_client(&server)?
.request(GetComposeServiceLog {
project: stack.project_name(false),
service,
tail,
})
.await
.context("failed to get stack service log from periphery")
}
}
impl Resolve<SearchStackServiceLog, User> for State {
async fn resolve(
&self,
SearchStackServiceLog {
stack,
service,
terms,
combinator,
invert,
}: SearchStackServiceLog,
user: User,
) -> anyhow::Result<SearchStackServiceLogResponse> {
let (stack, server) = get_stack_and_server(
&stack,
&user,
PermissionLevel::Read,
true,
)
.await?;
periphery_client(&server)?
.request(GetComposeServiceLogSearch {
project: stack.project_name(false),
service,
terms,
combinator,
invert,
})
.await
.context("failed to search stack service log from periphery")
}
}
impl Resolve<ListCommonStackExtraArgs, User> for State {
async fn resolve(
&self,
ListCommonStackExtraArgs { query }: ListCommonStackExtraArgs,
user: User,
) -> anyhow::Result<ListCommonStackExtraArgsResponse> {
let stacks = resource::list_full_for_user::<Stack>(query, &user)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
for stack in stacks {
for extra_arg in stack.config.extra_args {
res.insert(extra_arg);
}
}
let mut res = res.into_iter().collect::<Vec<_>>();
res.sort();
Ok(res)
}
}
impl Resolve<ListStacks, User> for State {
async fn resolve(
&self,
ListStacks { query }: ListStacks,
user: User,
) -> anyhow::Result<Vec<StackListItem>> {
resource::list_for_user::<Stack>(query, &user).await
}
}
impl Resolve<ListFullStacks, User> for State {
async fn resolve(
&self,
ListFullStacks { query }: ListFullStacks,
user: User,
) -> anyhow::Result<ListFullStacksResponse> {
resource::list_full_for_user::<Stack>(query, &user).await
}
}
impl Resolve<GetStackActionState, User> for State {
async fn resolve(
&self,
GetStackActionState { stack }: GetStackActionState,
user: User,
) -> anyhow::Result<StackActionState> {
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
.stack
.get(&stack.id)
.await
.unwrap_or_default()
.get()?;
Ok(action_state)
}
}
impl Resolve<GetStacksSummary, User> for State {
async fn resolve(
&self,
GetStacksSummary {}: GetStacksSummary,
user: User,
) -> anyhow::Result<GetStacksSummaryResponse> {
let stacks = resource::list_full_for_user::<Stack>(
Default::default(),
&user,
)
.await
.context("failed to get stacks from db")?;
let mut res = GetStacksSummaryResponse::default();
let cache = stack_status_cache();
for stack in stacks {
res.total += 1;
match cache.get(&stack.id).await.unwrap_or_default().curr.state
{
StackState::Running => res.running += 1,
StackState::Paused => res.paused += 1,
StackState::Stopped => res.stopped += 1,
StackState::Restarting => res.restarting += 1,
StackState::Created => res.created += 1,
StackState::Removing => res.removing += 1,
StackState::Dead => res.dead += 1,
StackState::Unhealthy => res.unhealthy += 1,
StackState::Down => res.down += 1,
StackState::Unknown => res.unknown += 1,
}
}
Ok(res)
}
}
impl Resolve<GetStackWebhooksEnabled, User> for State {
async fn resolve(
&self,
GetStackWebhooksEnabled { stack }: GetStackWebhooksEnabled,
user: User,
) -> anyhow::Result<GetStackWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetStackWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
deploy_enabled: false,
});
};
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read,
)
.await?;
if stack.config.git_provider != "github.com"
|| stack.config.repo.is_empty()
{
return Ok(GetStackWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
deploy_enabled: false,
});
}
let mut split = stack.config.repo.split('/');
let owner = split.next().context("Stack repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetStackWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
deploy_enabled: false,
});
};
let repo_name =
split.next().context("Stack repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let refresh_url =
format!("{host}/listener/github/stack/{}/refresh", stack.id);
let deploy_url =
format!("{host}/listener/github/stack/{}/deploy", stack.id);
let mut refresh_enabled = false;
let mut deploy_enabled = false;
for webhook in webhooks {
if webhook.active && webhook.config.url == refresh_url {
refresh_enabled = true
}
if webhook.active && webhook.config.url == deploy_url {
deploy_enabled = true
}
}
Ok(GetStackWebhooksEnabledResponse {
managed: true,
refresh_enabled,
deploy_enabled,
})
}
}


@@ -2,6 +2,7 @@ use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
sync::{
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
@@ -13,8 +14,11 @@ use monitor_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{action_states, resource_sync_state_cache, State},
state::{
action_states, github_client, resource_sync_state_cache, State,
},
};
impl Resolve<GetResourceSync, User> for State {
@@ -137,3 +141,88 @@ impl Resolve<GetResourceSyncsSummary, User> for State {
Ok(res)
}
}
impl Resolve<GetSyncWebhooksEnabled, User> for State {
async fn resolve(
&self,
GetSyncWebhooksEnabled { sync }: GetSyncWebhooksEnabled,
user: User,
) -> anyhow::Result<GetSyncWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
};
let sync = resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Read,
)
.await?;
if sync.config.git_provider != "github.com"
|| sync.config.repo.is_empty()
{
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
}
let mut split = sync.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
};
let repo_name =
split.next().context("Sync repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let refresh_url =
format!("{host}/listener/github/sync/{}/refresh", sync.id);
let sync_url =
format!("{host}/listener/github/sync/{}/sync", sync.id);
let mut refresh_enabled = false;
let mut sync_enabled = false;
for webhook in webhooks {
if webhook.active && webhook.config.url == refresh_url {
refresh_enabled = true
}
if webhook.active && webhook.config.url == sync_url {
sync_enabled = true
}
}
Ok(GetSyncWebhooksEnabledResponse {
managed: true,
refresh_enabled,
sync_enabled,
})
}
}


@@ -25,6 +25,7 @@ use monitor_client::{
resource::{Resource, ResourceQuery},
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
toml::{
PermissionToml, ResourceToml, ResourcesToml, UserGroupToml,
@@ -89,6 +90,15 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
.into_iter()
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Stack(resource.id)),
);
targets.extend(
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags.clone()).build(),
@@ -230,7 +240,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
// replace server id of builder
if let BuilderConfig::Server(config) = &mut builder.config {
config.server_id.clone_from(
names.servers.get(&id).unwrap_or(&String::new()),
names
.servers
.get(&config.server_id)
.unwrap_or(&String::new()),
)
}
res
@@ -296,8 +309,33 @@ impl Resolve<ExportResourcesToToml, User> for State {
.get(&repo.config.server_id)
.unwrap_or(&String::new()),
);
// replace repo builder with name
repo.config.builder_id.clone_from(
names
.builders
.get(&repo.config.builder_id)
.unwrap_or(&String::new()),
);
res.repos.push(convert_resource::<Repo>(repo, &names.tags))
}
ResourceTarget::Stack(id) => {
let mut stack = resource::get_check_permissions::<Stack>(
&id,
&user,
PermissionLevel::Read,
)
.await?;
// replace stack server with name
stack.config.server_id.clone_from(
names
.servers
.get(&stack.config.server_id)
.unwrap_or(&String::new()),
);
res
.stacks
.push(convert_resource::<Stack>(stack, &names.tags))
}
ResourceTarget::Procedure(id) => {
add_procedure(&id, &mut res, &user, &names)
.await
@@ -309,7 +347,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
};
}
add_user_groups(user_groups, &mut res, &user)
add_user_groups(user_groups, &mut res, &names, &user)
.await
.context("failed to add user groups")?;
@@ -352,6 +390,9 @@ async fn add_procedure(
Execution::RunBuild(exec) => exec.build.clone_from(
names.builds.get(&exec.build).unwrap_or(&String::new()),
),
Execution::CancelBuild(exec) => exec.build.clone_from(
names.builds.get(&exec.build).unwrap_or(&String::new()),
),
Execution::Deploy(exec) => exec.deployment.clone_from(
names
.deployments
@@ -366,6 +407,30 @@ async fn add_procedure(
.unwrap_or(&String::new()),
)
}
Execution::RestartContainer(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::PauseContainer(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::UnpauseContainer(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::StopContainer(exec) => exec.deployment.clone_from(
names
.deployments
@@ -386,6 +451,12 @@ async fn add_procedure(
Execution::PullRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::BuildRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::CancelRepoBuild(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::StopAllContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
@@ -401,6 +472,27 @@ async fn add_procedure(
Execution::RunSync(exec) => exec.sync.clone_from(
names.syncs.get(&exec.sync).unwrap_or(&String::new()),
),
Execution::DeployStack(exec) => exec.stack.clone_from(
names.stacks.get(&exec.stack).unwrap_or(&String::new()),
),
Execution::StartStack(exec) => exec.stack.clone_from(
names.stacks.get(&exec.stack).unwrap_or(&String::new()),
),
Execution::RestartStack(exec) => exec.stack.clone_from(
names.stacks.get(&exec.stack).unwrap_or(&String::new()),
),
Execution::PauseStack(exec) => exec.stack.clone_from(
names.stacks.get(&exec.stack).unwrap_or(&String::new()),
),
Execution::UnpauseStack(exec) => exec.stack.clone_from(
names.stacks.get(&exec.stack).unwrap_or(&String::new()),
),
Execution::StopStack(exec) => exec.stack.clone_from(
names.stacks.get(&exec.stack).unwrap_or(&String::new()),
),
Execution::DestroyStack(exec) => exec.stack.clone_from(
names.stacks.get(&exec.stack).unwrap_or(&String::new()),
),
Execution::Sleep(_) | Execution::None(_) => {}
}
}
@@ -421,6 +513,9 @@ struct ResourceNames {
deployments: HashMap<String, String>,
procedures: HashMap<String, String>,
syncs: HashMap<String, String>,
stacks: HashMap<String, String>,
alerters: HashMap<String, String>,
templates: HashMap<String, String>,
}
impl ResourceNames {
@@ -475,6 +570,24 @@ impl ResourceNames {
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
stacks: find_collect(&db.stacks, None, None)
.await
.context("failed to get all stacks")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
alerters: find_collect(&db.alerters, None, None)
.await
.context("failed to get all alerters")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
templates: find_collect(&db.server_templates, None, None)
.await
.context("failed to get all server templates")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
})
}
}
@@ -482,6 +595,7 @@ impl ResourceNames {
async fn add_user_groups(
user_groups: Vec<String>,
res: &mut ResourcesToml,
names: &ResourceNames,
user: &User,
) -> anyhow::Result<()> {
let db = db_client().await;
@@ -509,9 +623,46 @@ async fn add_user_groups(
)
.await?
.into_iter()
.map(|permission| PermissionToml {
target: permission.resource_target,
level: permission.level,
.map(|mut permission| {
match &mut permission.resource_target {
ResourceTarget::Build(id) => {
*id = names.builds.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = names.builders.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id =
names.deployments.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = names.servers.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = names.repos.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = names.alerters.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id =
names.procedures.get(id).cloned().unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = names.templates.get(id).cloned().unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = names.syncs.get(id).cloned().unwrap_or_default()
}
ResourceTarget::Stack(id) => {
*id = names.stacks.get(id).cloned().unwrap_or_default()
}
ResourceTarget::System(_) => {}
}
PermissionToml {
target: permission.resource_target,
level: permission.level,
}
})
.collect();
res.user_groups.push(UserGroupToml {
@@ -521,6 +672,7 @@ async fn add_user_groups(
.into_iter()
.filter_map(|user_id| usernames.get(&user_id).cloned())
.collect(),
all: ug.all,
permissions,
});
}
@@ -544,6 +696,7 @@ fn convert_resource<R: MonitorResource>(
description: resource.description,
deploy: false,
after: Default::default(),
latest_hash: false,
config,
}
}
@@ -640,6 +793,30 @@ fn serialize_resources_toml(
);
}
for stack in &resources.stacks {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[stack]]\n");
let mut parsed: OrderedHashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&stack)?)?;
let config = parsed
.get_mut("config")
.context("stack has no config?")?
.as_object_mut()
.context("config is not object?")?;
if let Some(environment) = &stack.config.environment {
config.insert(
"environment".to_string(),
Value::String(environment_vars_to_string(environment)),
);
}
res.push_str(
&toml_pretty::to_string(&parsed, options)
.context("failed to serialize stacks to toml")?,
);
}
for build in &resources.builds {
if !res.is_empty() {
res.push_str("\n\n##\n\n");


@@ -13,10 +13,9 @@ use monitor_client::{
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
update::{
ResourceTarget, ResourceTargetVariant, Update, UpdateListItem,
},
update::{ResourceTarget, Update, UpdateListItem},
user::User,
},
};
@@ -29,7 +28,6 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
@@ -45,58 +43,121 @@ impl Resolve<ListUpdates, User> for State {
let query = if user.admin || core_config().transparent_mode {
query
} else {
let server_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Server,
let server_query =
resource::get_resource_ids_for_user::<Server>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
resource::get_resource_ids_for_user::<Deployment>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query =
resource::get_resource_ids_for_user::<Stack>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query =
resource::get_resource_ids_for_user::<Build>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query =
resource::get_resource_ids_for_user::<Repo>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
resource::get_resource_ids_for_user::<Procedure>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let builder_query =
resource::get_resource_ids_for_user::<Builder>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query =
resource::get_resource_ids_for_user::<Alerter>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_query = resource::get_resource_ids_for_user::<ServerTemplate>(
&user,
)
.await?;
let deployment_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Deployment,
.await?
.map(|ids| {
doc! {
"target.type": "ServerTemplate", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
let resource_sync_query = resource::get_resource_ids_for_user::<ResourceSync>(
&user,
)
.await?;
let build_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Build,
)
.await?;
let repo_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Repo,
)
.await?;
let procedure_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Procedure,
)
.await?;
let builder_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Builder,
)
.await?;
let alerter_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?;
let server_template_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::ServerTemplate,
)
.await?;
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = query.unwrap_or_default();
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
{ "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
{ "target.type": "Build", "target.id": { "$in": &build_ids } },
{ "target.type": "Repo", "target.id": { "$in": &repo_ids } },
{ "target.type": "Procedure", "target.id": { "$in": &procedure_ids } },
{ "target.type": "Builder", "target.id": { "$in": &builder_ids } },
{ "target.type": "Alerter", "target.id": { "$in": &alerter_ids } },
{ "target.type": "ServerTemplate", "target.id": { "$in": &server_template_ids } },
server_query,
deployment_query,
stack_query,
build_query,
repo_query,
procedure_query,
alerter_query,
builder_query,
server_template_query,
resource_sync_query,
]
});
query.into()
@@ -247,6 +308,14 @@ impl Resolve<GetUpdate, User> for State {
)
.await?;
}
ResourceTarget::Stack(id) => {
resource::get_check_permissions::<Stack>(
id,
&user,
PermissionLevel::Read,
)
.await?;
}
}
Ok(update)
}


@@ -1,9 +1,10 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::read::{
GetUsername, GetUsernameResponse, ListApiKeys,
ListApiKeysForServiceUser, ListApiKeysForServiceUserResponse,
ListApiKeysResponse, ListUsers, ListUsersResponse,
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
ListApiKeys, ListApiKeysForServiceUser,
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
ListUsers, ListUsersResponse,
},
entities::user::{User, UserConfig},
};
@@ -14,7 +15,10 @@ use mungos::{
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
use crate::{
helpers::query::get_user,
state::{db_client, State},
};
impl Resolve<GetUsername, User> for State {
async fn resolve(
@@ -40,6 +44,19 @@ impl Resolve<GetUsername, User> for State {
}
}
impl Resolve<FindUser, User> for State {
async fn resolve(
&self,
FindUser { user }: FindUser,
admin: User,
) -> anyhow::Result<FindUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
get_user(&user).await
}
}
impl Resolve<ListUsers, User> for State {
async fn resolve(
&self,
@@ -87,22 +104,21 @@ impl Resolve<ListApiKeys, User> for State {
impl Resolve<ListApiKeysForServiceUser, User> for State {
async fn resolve(
&self,
ListApiKeysForServiceUser { user_id }: ListApiKeysForServiceUser,
ListApiKeysForServiceUser { user }: ListApiKeysForServiceUser,
admin: User,
) -> anyhow::Result<ListApiKeysForServiceUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query db for users")?
.context("user at id not found")?;
let user = get_user(&user).await?;
let UserConfig::Service { .. } = user.config else {
return Err(anyhow!("Given user is not service user"));
};
let api_keys = find_collect(
&db_client().await.api_keys,
doc! { "user_id": user_id },
doc! { "user_id": &user.id },
None,
)
.await


@@ -37,7 +37,7 @@ impl Resolve<GetUserGroup, User> for State {
db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user groups")?
.context("no UserGroup found with given name or id")


@@ -11,7 +11,6 @@ use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_variable,
state::{db_client, State},
};
@@ -32,16 +31,12 @@ impl Resolve<ListVariables, User> for State {
ListVariables {}: ListVariables,
_: User,
) -> anyhow::Result<ListVariablesResponse> {
let variables = find_collect(
find_collect(
&db_client().await.variables,
None,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for variables")?;
Ok(ListVariablesResponse {
variables,
secrets: core_config().secrets.keys().cloned().collect(),
})
.context("failed to query db for variables")
}
}


@@ -11,10 +11,7 @@ use monitor_client::{
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse,
},
entities::{
api_key::ApiKey, monitor_timestamp, update::ResourceTarget,
user::User,
},
entities::{api_key::ApiKey, monitor_timestamp, user::User},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::{derive::Resolver, Resolve, Resolver};
@@ -23,8 +20,8 @@ use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::{auth_request, random_string},
helpers::query::get_user,
auth::auth_request,
helpers::{query::get_user, random_string},
state::{db_client, State},
};
@@ -90,33 +87,21 @@ impl Resolve<PushRecentlyViewed, User> for State {
) -> anyhow::Result<PushRecentlyViewedResponse> {
let user = get_user(&user.id).await?;
let (recents, id, field) = match resource {
ResourceTarget::Server(id) => {
(user.recent_servers, id, "recent_servers")
let (resource_type, id) = resource.extract_variant_id();
let update = match user.recents.get(&resource_type) {
Some(recents) => {
let mut recents = recents
.iter()
.filter(|_id| !id.eq(*_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
doc! { format!("recents.{resource_type}"): to_bson(&recents)? }
}
ResourceTarget::Deployment(id) => {
(user.recent_deployments, id, "recent_deployments")
None => {
doc! { format!("recents.{resource_type}"): [id] }
}
ResourceTarget::Build(id) => {
(user.recent_builds, id, "recent_builds")
}
ResourceTarget::Repo(id) => {
(user.recent_repos, id, "recent_repos")
}
ResourceTarget::Procedure(id) => {
(user.recent_procedures, id, "recent_procedures")
}
_ => return Ok(PushRecentlyViewedResponse {}),
};
let mut recents = recents
.into_iter()
.filter(|_id| !id.eq(_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
let update = doc! { field: to_bson(&recents)? };
update_one_by_id(
&db_client().await.users,
&user.id,
@@ -124,7 +109,9 @@ impl Resolve<PushRecentlyViewed, User> for State {
None,
)
.await
.with_context(|| format!("failed to update {field}"))?;
.with_context(|| {
format!("failed to update recents.{resource_type}")
})?;
Ok(PushRecentlyViewedResponse {})
}
@@ -187,7 +174,7 @@ impl Resolve<CreateApiKey, User> for State {
db_client()
.await
.api_keys
.insert_one(api_key, None)
.insert_one(api_key)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
@@ -208,7 +195,7 @@ impl Resolve<DeleteApiKey, User> for State {
let client = db_client().await;
let key = client
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed at db query")?
.context("no api key with key found")?;
@@ -217,7 +204,7 @@ impl Resolve<DeleteApiKey, User> for State {
}
client
.api_keys
.delete_one(doc! { "key": key.key }, None)
.delete_one(doc! { "key": key.key })
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})


@@ -1,10 +1,27 @@
use anyhow::{anyhow, Context};
use mongo_indexed::doc;
use monitor_client::{
api::write::*,
entities::{build::Build, permission::PermissionLevel, user::User},
entities::{
build::{Build, BuildInfo, PartialBuildConfig},
config::core::CoreConfig,
permission::PermissionLevel,
user::User,
CloneArgs, NoData,
},
};
use mungos::mongodb::bson::to_document;
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{resource, state::State};
use crate::{
config::core_config,
helpers::{git_token, random_string},
resource,
state::{db_client, github_client, State},
};
impl Resolve<CreateBuild, User> for State {
#[instrument(name = "CreateBuild", skip(self, user))]
@@ -24,13 +41,15 @@ impl Resolve<CopyBuild, User> for State {
CopyBuild { name, id }: CopyBuild,
user: User,
) -> anyhow::Result<Build> {
let Build { config, .. } =
let Build { mut config, .. } =
resource::get_check_permissions::<Build>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
// reset version to 0.0.0
config.version = Default::default();
resource::create::<Build>(&name, config.into(), &user).await
}
}
@@ -56,3 +75,267 @@ impl Resolve<UpdateBuild, User> for State {
resource::update::<Build>(&id, config, &user).await
}
}
impl Resolve<RefreshBuildCache, User> for State {
#[instrument(name = "RefreshBuildCache", skip(self, user))]
async fn resolve(
&self,
RefreshBuildCache { build }: RefreshBuildCache,
user: User,
) -> anyhow::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// build should be able to do this.
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Execute,
)
.await?;
let config = core_config();
let repo_dir = config.repo_directory.join(random_string(10));
let mut clone_args: CloneArgs = (&build).into();
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
clone_args.destination = Some(repo_dir.display().to_string());
let access_token = match (&clone_args.account, &clone_args.provider)
{
(None, _) => None,
(Some(_), None) => {
return Err(anyhow!(
"Account is configured, but provider is empty"
))
}
(Some(username), Some(provider)) => {
git_token(provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"),
)?
}
};
let (_, latest_hash, latest_message, _) = git::clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
)
.await
.context("failed to clone build repo")?;
let info = BuildInfo {
last_built_at: build.info.last_built_at,
built_hash: build.info.built_hash,
built_message: build.info.built_message,
latest_hash,
latest_message,
};
let info = to_document(&info)
.context("failed to serialize build info to bson")?;
db_client()
.await
.builds
.update_one(
doc! { "name": &build.name },
doc! { "$set": { "info": info } },
)
.await
.context("failed to update build info on db")?;
if repo_dir.exists() {
if let Err(e) = std::fs::remove_dir_all(&repo_dir) {
warn!("failed to remove build cache update repo directory | {e:?}")
}
}
Ok(NoData {})
}
}
impl Resolve<CreateBuildWebhook, User> for State {
#[instrument(name = "CreateBuildWebhook", skip(self, user))]
async fn resolve(
&self,
CreateBuildWebhook { build }: CreateBuildWebhook,
user: User,
) -> anyhow::Result<CreateBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Write,
)
.await?;
if build.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo, &request)
.await
.context("failed to create webhook")?;
if !build.config.webhook_enabled {
self
.resolve(
UpdateBuild {
id: build.id,
config: PartialBuildConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update build to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteBuildWebhook, User> for State {
#[instrument(name = "DeleteBuildWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteBuildWebhook { build }: DeleteBuildWebhook,
user: User,
) -> anyhow::Result<DeleteBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Write,
)
.await?;
if build.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if build.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't delete webhook"
));
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}


@@ -4,7 +4,7 @@ use monitor_client::{
entities::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, update::ResourceTarget, user::User,
},
};
@@ -100,6 +100,14 @@ impl Resolve<UpdateDescription, User> for State {
)
.await?;
}
ResourceTarget::Stack(id) => {
resource::update_description::<Stack>(
&id,
&description,
&user,
)
.await?;
}
}
Ok(UpdateDescriptionResponse {})
}


@@ -19,10 +19,12 @@ mod deployment;
mod description;
mod permissions;
mod procedure;
mod provider;
mod repo;
mod server;
mod server_template;
mod service_user;
mod stack;
mod sync;
mod tag;
mod user_group;
@@ -50,6 +52,7 @@ pub enum WriteRequest {
// ==== PERMISSIONS ====
UpdateUserBasePermissions(UpdateUserBasePermissions),
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
// ==== DESCRIPTION ====
@@ -75,6 +78,9 @@ pub enum WriteRequest {
CopyBuild(CopyBuild),
DeleteBuild(DeleteBuild),
UpdateBuild(UpdateBuild),
RefreshBuildCache(RefreshBuildCache),
CreateBuildWebhook(CreateBuildWebhook),
DeleteBuildWebhook(DeleteBuildWebhook),
// ==== BUILDER ====
CreateBuilder(CreateBuilder),
@@ -93,6 +99,9 @@ pub enum WriteRequest {
CopyRepo(CopyRepo),
DeleteRepo(DeleteRepo),
UpdateRepo(UpdateRepo),
RefreshRepoCache(RefreshRepoCache),
CreateRepoWebhook(CreateRepoWebhook),
DeleteRepoWebhook(DeleteRepoWebhook),
// ==== ALERTER ====
CreateAlerter(CreateAlerter),
@@ -112,6 +121,18 @@ pub enum WriteRequest {
DeleteResourceSync(DeleteResourceSync),
UpdateResourceSync(UpdateResourceSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),
// ==== STACK ====
CreateStack(CreateStack),
CopyStack(CopyStack),
DeleteStack(DeleteStack),
UpdateStack(UpdateStack),
RenameStack(RenameStack),
RefreshStackCache(RefreshStackCache),
CreateStackWebhook(CreateStackWebhook),
DeleteStackWebhook(DeleteStackWebhook),
// ==== TAG ====
CreateTag(CreateTag),
@@ -124,6 +145,14 @@ pub enum WriteRequest {
UpdateVariableValue(UpdateVariableValue),
UpdateVariableDescription(UpdateVariableDescription),
DeleteVariable(DeleteVariable),
// ==== PROVIDERS ====
CreateGitProviderAccount(CreateGitProviderAccount),
UpdateGitProviderAccount(UpdateGitProviderAccount),
DeleteGitProviderAccount(DeleteGitProviderAccount),
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
}
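
The new webhook, provider-account, and Stack variants are dispatched through the same Resolve<Request, User> implementations shown throughout this change. A minimal usage sketch (illustrative only; the surrounding call site and error handling are assumptions, not part of the diff):

// Sketch: enabling the push-to-deploy webhook for a stack, called with an
// authenticated `user` obtained from the request layer.
async fn enable_stack_deploy_webhook(
  state: &State,
  user: User,
) -> anyhow::Result<()> {
  state
    .resolve(
      CreateStackWebhook {
        stack: String::from("my-stack"), // accepts stack name or id
        action: StackWebhookAction::Deploy,
      },
      user,
    )
    .await?;
  Ok(())
}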
pub fn router() -> Router {

View File

@@ -3,8 +3,10 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
UpdatePermissionOnTarget, UpdatePermissionOnTargetResponse,
UpdateUserBasePermissions, UpdateUserBasePermissionsResponse,
UpdatePermissionOnResourceType,
UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget,
UpdatePermissionOnTargetResponse, UpdateUserBasePermissions,
UpdateUserBasePermissionsResponse,
},
entities::{
permission::{UserTarget, UserTargetVariant},
@@ -41,6 +43,7 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
if !admin.admin {
return Err(anyhow!("this method is admin only"));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query mongo for user")?
@@ -73,6 +76,73 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
}
}
impl Resolve<UpdatePermissionOnResourceType, User> for State {
#[instrument(
name = "UpdatePermissionOnResourceType",
skip(self, admin)
)]
async fn resolve(
&self,
UpdatePermissionOnResourceType {
user_target,
resource_type,
permission,
}: UpdatePermissionOnResourceType,
admin: User,
) -> anyhow::Result<UpdatePermissionOnResourceTypeResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only"));
}
// Some extra checks if user target is an actual User
if let UserTarget::User(user_id) = &user_target {
let user = get_user(user_id).await?;
if user.admin {
return Err(anyhow!(
"cannot use this method to update other admins permissions"
));
}
if !user.enabled {
return Err(anyhow!("user not enabled"));
}
}
let (user_target_variant, user_target_id) =
extract_user_target_with_validation(&user_target).await?;
let id = ObjectId::from_str(&user_target_id)
.context("id is not ObjectId")?;
let field = format!("all.{resource_type}");
let filter = doc! { "_id": id };
let update = doc! { "$set": { &field: permission.as_ref() } };
match user_target_variant {
UserTargetVariant::User => {
db_client()
.await
.users
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {permission} on db")
})?;
}
UserTargetVariant::UserGroup => {
db_client()
.await
.user_groups
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {permission} on db")
})?;
}
}
Ok(UpdatePermissionOnResourceTypeResponse {})
}
}
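
For a concrete sense of what UpdatePermissionOnResourceType writes, the $set targets one entry of the user's (or user group's) `all` permission map, keyed by the resource type variant. A rough sketch with example values (the values themselves are illustrative, not from this change):

// With resource_type = Build and permission = Write, the update issued above is:
//   filter: { "_id": ObjectId("...") }
//   update: { "$set": { "all.Build": "Write" } }
let resource_type = "Build"; // example value of the resource type's as_ref()
let field = format!("all.{resource_type}");
let update = doc! { "$set": { &field: "Write" } };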
impl Resolve<UpdatePermissionOnTarget, User> for State {
#[instrument(name = "UpdatePermissionOnTarget", skip(self, admin))]
async fn resolve(
@@ -129,8 +199,8 @@ impl Resolve<UpdatePermissionOnTarget, User> for State {
"level": permission.as_ref(),
}
},
UpdateOptions::builder().upsert(true).build(),
)
.with_options(UpdateOptions::builder().upsert(true).build())
.await?;
Ok(UpdatePermissionOnTargetResponse {})
@@ -150,7 +220,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for users")?
.context("no matching user found")?
@@ -165,7 +235,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user_groups")?
.context("no matching user_group found")?
@@ -192,7 +262,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builds
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builds")?
.context("no matching build found")?
@@ -207,7 +277,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builders
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builders")?
.context("no matching builder found")?
@@ -222,7 +292,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.deployments
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for deployments")?
.context("no matching deployment found")?
@@ -237,7 +307,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.servers
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for servers")?
.context("no matching server found")?
@@ -252,7 +322,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.repos
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for repos")?
.context("no matching repo found")?
@@ -267,7 +337,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.alerters
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for alerters")?
.context("no matching alerter found")?
@@ -282,7 +352,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.procedures
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for procedures")?
.context("no matching procedure found")?
@@ -297,7 +367,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.server_templates
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for server templates")?
.context("no matching server template found")?
@@ -312,12 +382,27 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.resource_syncs
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?
.id;
Ok((ResourceTargetVariant::ResourceSync, id))
}
ResourceTarget::Stack(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.stacks
.find_one(filter)
.await
.context("failed to query db for stacks")?
.context("no matching stack found")?
.id;
Ok((ResourceTargetVariant::Stack, id))
}
}
}

View File

@@ -0,0 +1,402 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::*,
entities::{
provider::{DockerRegistryAccount, GitProviderAccount},
update::ResourceTarget,
user::User,
Operation,
},
};
use mungos::{
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
mongodb::bson::{doc, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::update::{add_update, make_update},
state::{db_client, State},
};
impl Resolve<CreateGitProviderAccount, User> for State {
async fn resolve(
&self,
CreateGitProviderAccount { account }: CreateGitProviderAccount,
user: User,
) -> anyhow::Result<CreateGitProviderAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can create git provider accounts"
));
}
let mut account: GitProviderAccount = account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string."));
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string."));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateGitProviderAccount,
&user,
);
account.id = db_client()
.await
.git_accounts
.insert_one(&account)
.await
.context("failed to create git provider account on db")?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create git provider account",
format!(
"Created git provider account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create git provider account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<UpdateGitProviderAccount, User> for State {
async fn resolve(
&self,
UpdateGitProviderAccount { id, mut account }: UpdateGitProviderAccount,
user: User,
) -> anyhow::Result<UpdateGitProviderAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can update git provider accounts"
));
}
if let Some(domain) = &account.domain {
if domain.is_empty() {
return Err(anyhow!(
"cannot update git provider with empty domain"
));
}
}
if let Some(username) = &account.username {
if username.is_empty() {
return Err(anyhow!(
"cannot update git provider with empty username"
));
}
}
// Ensure update does not change id
account.id = None;
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
&user,
);
let account = to_document(&account).context(
"failed to serialize partial git provider account to bson",
)?;
let db = db_client().await;
update_one_by_id(
&db.git_accounts,
&id,
doc! { "$set": account },
None,
)
.await
.context("failed to update git provider account on db")?;
let Some(account) =
find_one_by_id(&db.git_accounts, &id)
.await
.context("failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
update.push_simple_log(
"update git provider account",
format!(
"Updated git provider account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update git provider account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<DeleteGitProviderAccount, User> for State {
async fn resolve(
&self,
DeleteGitProviderAccount { id }: DeleteGitProviderAccount,
user: User,
) -> anyhow::Result<DeleteGitProviderAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can delete git provider accounts"
));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
// Note: this reuses the Update operation variant for the delete audit entry.
&user,
);
let db = db_client().await;
let Some(account) =
find_one_by_id(&db.git_accounts, &id)
.await
.context("failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
delete_one_by_id(&db.git_accounts, &id, None)
.await
.context("failed to delete git account on db")?;
update.push_simple_log(
"delete git provider account",
format!(
"Deleted git provider account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete git provider account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<CreateDockerRegistryAccount, User> for State {
async fn resolve(
&self,
CreateDockerRegistryAccount { account }: CreateDockerRegistryAccount,
user: User,
) -> anyhow::Result<CreateDockerRegistryAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can create docker registry account accounts"
));
}
let mut account: DockerRegistryAccount = account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string."));
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string."));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateDockerRegistryAccount,
&user,
);
account.id = db_client()
.await
.registry_accounts
.insert_one(&account)
.await
.context(
"failed to create docker registry account account on db",
)?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create docker registry account",
format!(
"Created docker registry account account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create docker registry account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<UpdateDockerRegistryAccount, User> for State {
async fn resolve(
&self,
UpdateDockerRegistryAccount { id, mut account }: UpdateDockerRegistryAccount,
user: User,
) -> anyhow::Result<UpdateDockerRegistryAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can update docker registry accounts"
));
}
if let Some(domain) = &account.domain {
if domain.is_empty() {
return Err(anyhow!(
"cannot update docker registry account with empty domain"
));
}
}
if let Some(username) = &account.username {
if username.is_empty() {
return Err(anyhow!(
"cannot update docker registry account with empty username"
));
}
}
account.id = None;
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
&user,
);
let account = to_document(&account).context(
"failed to serialize partial docker registry account account to bson",
)?;
let db = db_client().await;
update_one_by_id(
&db.registry_accounts,
&id,
doc! { "$set": account },
None,
)
.await
.context(
"failed to update docker registry account account on db",
)?;
let Some(account) = find_one_by_id(&db.registry_accounts, &id)
.await
.context("failed to query db for registry accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
update.push_simple_log(
"update docker registry account",
format!(
"Updated docker registry account account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update docker registry account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<DeleteDockerRegistryAccount, User> for State {
async fn resolve(
&self,
DeleteDockerRegistryAccount { id }: DeleteDockerRegistryAccount,
user: User,
) -> anyhow::Result<DeleteDockerRegistryAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can delete docker registry accounts"
));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
&user,
);
let db = db_client().await;
let Some(account) = find_one_by_id(&db.registry_accounts, &id)
.await
.context("failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
delete_one_by_id(&db.registry_accounts, &id, None)
.await
.context("failed to delete registry account on db")?;
update.push_simple_log(
"delete registry account",
format!(
"Deleted registry account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete docker registry account | {e:#}")
})
.ok();
Ok(account)
}
}
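
All six provider-account handlers above follow the same shape: admin gate, non-empty domain/username validation, the database write, then an audit Update recorded through add_update. A create-side usage sketch (illustrative; `partial_account` stands in for the partial account value the request wraps, whose exact type is not shown in this diff):

// Sketch only: non-admin callers are rejected, and domain/username must be non-empty.
let account = state
  .resolve(CreateGitProviderAccount { account: partial_account }, admin_user)
  .await?;
// The response is the stored GitProviderAccount, including its new db id.
println!("created {} account for {}", account.domain, account.username);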

View File

@@ -1,10 +1,27 @@
use anyhow::{anyhow, Context};
use mongo_indexed::doc;
use monitor_client::{
api::write::*,
entities::{permission::PermissionLevel, repo::Repo, user::User},
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{PartialRepoConfig, Repo, RepoInfo},
user::User,
CloneArgs, NoData,
},
};
use mungos::mongodb::bson::to_document;
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{resource, state::State};
use crate::{
config::core_config,
helpers::{git_token, random_string},
resource,
state::{db_client, github_client, State},
};
impl Resolve<CreateRepo, User> for State {
#[instrument(name = "CreateRepo", skip(self, user))]
@@ -56,3 +73,291 @@ impl Resolve<UpdateRepo, User> for State {
resource::update::<Repo>(&id, config, &user).await
}
}
impl Resolve<RefreshRepoCache, User> for State {
#[instrument(name = "RefreshRepoCache", skip(self, user))]
async fn resolve(
&self,
RefreshRepoCache { repo }: RefreshRepoCache,
user: User,
) -> anyhow::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// repo should be able to do this.
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
)
.await?;
let config = core_config();
let repo_dir = config.repo_directory.join(random_string(10));
let mut clone_args: CloneArgs = (&repo).into();
// No reason to run the on_clone / on_pull commands here.
clone_args.on_clone = None;
clone_args.on_pull = None;
clone_args.destination = Some(repo_dir.display().to_string());
let access_token = match (&clone_args.account, &clone_args.provider)
{
(None, _) => None,
(Some(_), None) => {
return Err(anyhow!(
"Account is configured, but provider is empty"
))
}
(Some(username), Some(provider)) => {
git_token(provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"),
)?
}
};
let (_, latest_hash, latest_message, _) = git::clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
)
.await
.context("failed to clone repo (the resource) repo")?;
let info = RepoInfo {
last_pulled_at: repo.info.last_pulled_at,
last_built_at: repo.info.last_built_at,
built_hash: repo.info.built_hash,
built_message: repo.info.built_message,
latest_hash,
latest_message,
};
let info = to_document(&info)
.context("failed to serialize repo info to bson")?;
db_client()
.await
.repos
.update_one(
doc! { "name": &repo.name },
doc! { "$set": { "info": info } },
)
.await
.context("failed to update repo info on db")?;
if repo_dir.exists() {
if let Err(e) = std::fs::remove_dir_all(&repo_dir) {
warn!(
"failed to remove repo (resource) cache update repo directory | {e:?}"
)
}
}
Ok(NoData {})
}
}
impl Resolve<CreateRepoWebhook, User> for State {
#[instrument(name = "CreateRepoWebhook", skip(self, user))]
async fn resolve(
&self,
CreateRepoWebhook { repo, action }: CreateRepoWebhook,
user: User,
) -> anyhow::Result<CreateRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Write,
)
.await?;
if repo.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
RepoWebhookAction::Build => {
format!("{host}/listener/github/repo/{}/build", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo_name, &request)
.await
.context("failed to create webhook")?;
if !repo.config.webhook_enabled {
self
.resolve(
UpdateRepo {
id: repo.id,
config: PartialRepoConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update repo to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteRepoWebhook, User> for State {
#[instrument(name = "DeleteRepoWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteRepoWebhook { repo, action }: DeleteRepoWebhook,
user: User,
) -> anyhow::Result<DeleteRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Write,
)
.await?;
if repo.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if repo.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// Find the webhook to delete by matching its listener URL (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
RepoWebhookAction::Build => {
format!("{host}/listener/github/repo/{}/build", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo_name, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}

View File

@@ -51,17 +51,14 @@ impl Resolve<CreateServiceUser, User> for State {
create_server_permissions: false,
create_build_permissions: false,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
};
user.id = db_client()
.await
.users
.insert_one(&user, None)
.insert_one(&user)
.await
.context("failed to create service user on db")?
.inserted_id
@@ -91,7 +88,7 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
let db = db_client().await;
let service_user = db
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("no user with given username")?;
@@ -102,12 +99,11 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
.update_one(
doc! { "username": &username },
doc! { "$set": { "config.data.description": description } },
None,
)
.await
.context("failed to update user on db")?;
db.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed to query db for user")?
.context("user with username not found")
@@ -155,7 +151,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
let db = db_client().await;
let api_key = db
.api_keys
.find_one(doc! { "key": &key }, None)
.find_one(doc! { "key": &key })
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
@@ -168,7 +164,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
return Err(anyhow!("user is not service user"));
};
db.api_keys
.delete_one(doc! { "key": key }, None)
.delete_one(doc! { "key": key })
.await
.context("failed to delete api key on db")?;
Ok(DeleteApiKeyForServiceUserResponse {})

View File

@@ -0,0 +1,488 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
config::core::CoreConfig,
monitor_timestamp,
permission::PermissionLevel,
stack::{PartialStackConfig, Stack, StackInfo},
update::Update,
user::User,
NoData, Operation,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{
stack::{
remote::get_remote_compose_contents,
services::extract_services_into_res,
},
update::{add_update, make_update},
},
monitor::update_cache_for_stack,
resource,
state::{db_client, github_client, State},
};
impl Resolve<CreateStack, User> for State {
#[instrument(name = "CreateStack", skip(self, user))]
async fn resolve(
&self,
CreateStack { name, config }: CreateStack,
user: User,
) -> anyhow::Result<Stack> {
let res = resource::create::<Stack>(&name, config, &user).await;
if let Ok(stack) = &res {
if let Err(e) = self
.resolve(RefreshStackCache { stack: name }, user.clone())
.await
{
let mut update =
make_update(stack, Operation::RefreshStackCache, &user);
update.push_error_log(
"refresh stack cache",
format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
);
add_update(update).await.ok();
};
update_cache_for_stack(stack).await;
}
res
}
}
impl Resolve<CopyStack, User> for State {
#[instrument(name = "CopyStack", skip(self, user))]
async fn resolve(
&self,
CopyStack { name, id }: CopyStack,
user: User,
) -> anyhow::Result<Stack> {
let Stack { config, .. } =
resource::get_check_permissions::<Stack>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let res =
resource::create::<Stack>(&name, config.into(), &user).await;
if let Ok(stack) = &res {
if let Err(e) = self
.resolve(RefreshStackCache { stack: name }, user.clone())
.await
{
let mut update =
make_update(stack, Operation::RefreshStackCache, &user);
update.push_error_log(
"refresh stack cache",
format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
);
add_update(update).await.ok();
};
update_cache_for_stack(stack).await;
}
res
}
}
impl Resolve<DeleteStack, User> for State {
#[instrument(name = "DeleteStack", skip(self, user))]
async fn resolve(
&self,
DeleteStack { id }: DeleteStack,
user: User,
) -> anyhow::Result<Stack> {
resource::delete::<Stack>(&id, &user).await
}
}
impl Resolve<UpdateStack, User> for State {
#[instrument(name = "UpdateStack", skip(self, user))]
async fn resolve(
&self,
UpdateStack { id, config }: UpdateStack,
user: User,
) -> anyhow::Result<Stack> {
let res = resource::update::<Stack>(&id, config, &user).await;
if let Ok(stack) = &res {
if let Err(e) = self
.resolve(RefreshStackCache { stack: id }, user.clone())
.await
{
let mut update =
make_update(stack, Operation::RefreshStackCache, &user);
update.push_error_log(
"refresh stack cache",
format_serror(&e.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
);
add_update(update).await.ok();
};
update_cache_for_stack(stack).await;
}
res
}
}
impl Resolve<RenameStack, User> for State {
#[instrument(name = "RenameStack", skip(self, user))]
async fn resolve(
&self,
RenameStack { id, name }: RenameStack,
user: User,
) -> anyhow::Result<Update> {
let stack = resource::get_check_permissions::<Stack>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&stack, Operation::RenameStack, &user);
update_one_by_id(
&db_client().await.stacks,
&stack.id,
mungos::update::Update::Set(
doc! { "name": &name, "updated_at": monitor_timestamp() },
),
None,
)
.await
.context("failed to update stack name on db")?;
update.push_simple_log(
"rename stack",
format!("renamed stack from {} to {}", stack.name, name),
);
update.finalize();
add_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<RefreshStackCache, User> for State {
#[instrument(
name = "RefreshStackCache",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
RefreshStackCache { stack }: RefreshStackCache,
user: User,
) -> anyhow::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// stack should be able to do this.
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Execute,
)
.await?;
let file_contents_empty = stack.config.file_contents.is_empty();
if file_contents_empty && stack.config.repo.is_empty() {
// Nothing to do without one of these
return Ok(NoData {});
}
let mut missing_files = Vec::new();
let (
latest_services,
remote_contents,
remote_errors,
latest_hash,
latest_message,
) = if file_contents_empty {
// REPO BASED STACK
let (
remote_contents,
remote_errors,
_,
latest_hash,
latest_message,
) =
get_remote_compose_contents(&stack, Some(&mut missing_files))
.await
.context("failed to clone remote compose file")?;
let project_name = stack.project_name(true);
let mut services = Vec::new();
for contents in &remote_contents {
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
&mut services,
) {
warn!(
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
stack.name
);
}
}
(
services,
Some(remote_contents),
Some(remote_errors),
latest_hash,
latest_message,
)
} else {
let mut services = Vec::new();
if let Err(e) = extract_services_into_res(
// this should be latest (not deployed), so make the project name fresh.
&stack.project_name(true),
&stack.config.file_contents,
&mut services,
) {
warn!(
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
stack.name
);
services.extend(stack.info.latest_services);
};
(services, None, None, None, None)
};
let info = StackInfo {
missing_files,
deployed_services: stack.info.deployed_services,
deployed_project_name: stack.info.deployed_project_name,
deployed_contents: stack.info.deployed_contents,
deployed_hash: stack.info.deployed_hash,
deployed_message: stack.info.deployed_message,
latest_services,
remote_contents,
remote_errors,
latest_hash,
latest_message,
};
let info = to_document(&info)
.context("failed to serialize stack info to bson")?;
db_client()
.await
.stacks
.update_one(
doc! { "name": &stack.name },
doc! { "$set": { "info": info } },
)
.await
.context("failed to update stack info on db")?;
Ok(NoData {})
}
}
impl Resolve<CreateStackWebhook, User> for State {
#[instrument(name = "CreateStackWebhook", skip(self, user))]
async fn resolve(
&self,
CreateStackWebhook { stack, action }: CreateStackWebhook,
user: User,
) -> anyhow::Result<CreateStackWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Write,
)
.await?;
if stack.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = stack.config.repo.split('/');
let owner = split.next().context("Stack repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Stack repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
StackWebhookAction::Refresh => {
format!("{host}/listener/github/stack/{}/refresh", stack.id)
}
StackWebhookAction::Deploy => {
format!("{host}/listener/github/stack/{}/deploy", stack.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo, &request)
.await
.context("failed to create webhook")?;
if !stack.config.webhook_enabled {
self
.resolve(
UpdateStack {
id: stack.id,
config: PartialStackConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update stack to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteStackWebhook, User> for State {
#[instrument(name = "DeleteStackWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteStackWebhook { stack, action }: DeleteStackWebhook,
user: User,
) -> anyhow::Result<DeleteStackWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Write,
)
.await?;
if stack.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if stack.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = stack.config.repo.split('/');
let owner = split.next().context("Stack repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Sync repo has no repo after the /")?;
let github_repos = github.repos();
// Find the webhook to delete by matching its listener URL (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
StackWebhookAction::Refresh => {
format!("{host}/listener/github/stack/{}/refresh", stack.id)
}
StackWebhookAction::Deploy => {
format!("{host}/listener/github/stack/{}/deploy", stack.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}

View File

@@ -1,3 +1,5 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
@@ -8,38 +10,46 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::Builder,
config::core::CoreConfig,
deployment::Deployment,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::{stats::SeverityLevel, Server},
server_template::ServerTemplate,
stack::Stack,
sync::{
PendingSyncUpdates, PendingSyncUpdatesData,
PendingSyncUpdatesDataErr, PendingSyncUpdatesDataOk,
ResourceSync,
PartialResourceSyncConfig, PendingSyncUpdates,
PendingSyncUpdatesData, PendingSyncUpdatesDataErr,
PendingSyncUpdatesDataOk, ResourceSync,
},
update::ResourceTarget,
user::User,
NoData,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{
alert::send_alerts,
query::get_id_to_tags,
sync::{
deployment,
deploy::SyncDeployParams,
resource::{get_updates_for_view, AllResourcesById},
},
},
resource,
state::{db_client, State},
state::{db_client, github_client, State},
};
impl Resolve<CreateResourceSync, User> for State {
@@ -95,6 +105,7 @@ impl Resolve<UpdateResourceSync, User> for State {
}
impl Resolve<RefreshResourceSyncPending, User> for State {
#[instrument(name = "RefreshResourceSyncPending", level = "debug", skip(self, user))]
async fn resolve(
&self,
RefreshResourceSyncPending { sync }: RefreshResourceSyncPending,
@@ -118,8 +129,33 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
.context("failed to get remote resources")?;
let resources = res?;
let all_resources = AllResourcesById::load().await?;
let id_to_tags = get_id_to_tags(None).await?;
let all_resources = AllResourcesById::load().await?;
let deployments_by_name = all_resources
.deployments
.values()
.map(|deployment| {
(deployment.name.clone(), deployment.clone())
})
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
let deploy_updates =
crate::helpers::sync::deploy::get_updates_for_view(
SyncDeployParams {
deployments: &resources.deployments,
deployment_map: &deployments_by_name,
stacks: &resources.stacks,
stack_map: &stacks_by_name,
all_resources: &all_resources,
},
)
.await;
let data = PendingSyncUpdatesDataOk {
server_updates: get_updates_for_view::<Server>(
@@ -130,7 +166,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
)
.await
.context("failed to get server updates")?,
deployment_updates: deployment::get_updates_for_view(
deployment_updates: get_updates_for_view::<Deployment>(
resources.deployments,
sync.config.delete,
&all_resources,
@@ -138,6 +174,14 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
)
.await
.context("failed to get deployment updates")?,
stack_updates: get_updates_for_view::<Stack>(
resources.stacks,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get stack updates")?,
build_updates: get_updates_for_view::<Build>(
resources.builds,
sync.config.delete,
@@ -212,6 +256,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
)
.await
.context("failed to get user group updates")?,
deploy_updates,
};
anyhow::Ok((hash, message, data))
}
@@ -262,14 +307,11 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
let Some(existing) = db_client()
.await
.alerts
.find_one(
doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
},
None,
)
.find_one(doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
})
.await
.context("failed to query db for alert")
.inspect_err(|e| warn!("{e:#}"))
@@ -290,7 +332,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resolved_ts: None,
};
db.alerts
.insert_one(&alert, None)
.insert_one(&alert)
.await
.context("failed to open existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
@@ -323,3 +365,196 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
crate::resource::get::<ResourceSync>(&sync.id).await
}
}
impl Resolve<CreateSyncWebhook, User> for State {
#[instrument(name = "CreateSyncWebhook", skip(self, user))]
async fn resolve(
&self,
CreateSyncWebhook { sync, action }: CreateSyncWebhook,
user: User,
) -> anyhow::Result<CreateSyncWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let sync = resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Write,
)
.await?;
if sync.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = sync.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
SyncWebhookAction::Refresh => {
format!("{host}/listener/github/sync/{}/refresh", sync.id)
}
SyncWebhookAction::Sync => {
format!("{host}/listener/github/sync/{}/sync", sync.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo, &request)
.await
.context("failed to create webhook")?;
if !sync.config.webhook_enabled {
self
.resolve(
UpdateResourceSync {
id: sync.id,
config: PartialResourceSyncConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update sync to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteSyncWebhook, User> for State {
#[instrument(name = "DeleteSyncWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteSyncWebhook { sync, action }: DeleteSyncWebhook,
user: User,
) -> anyhow::Result<DeleteSyncWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let sync = resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Write,
)
.await?;
if sync.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if sync.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = sync.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Sync repo has no repo after the /")?;
let github_repos = github.repos();
// Find the webhook to delete by matching its listener URL (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
SyncWebhookAction::Refresh => {
format!("{host}/listener/github/sync/{}/refresh", sync.id)
}
SyncWebhookAction::Sync => {
format!("{host}/listener/github/sync/{}/sync", sync.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}
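
Taken together, the webhook handlers in this change point GitHub at a small, fixed set of Core listener routes. A consolidated reference (the paths are copied from the format! calls above; the constant itself is illustrative and not part of the diff). Every hook is registered as a "push" event webhook with JSON content type and the configured webhook_secret.

// Illustrative route summary of the GitHub webhook listeners used above.
const GITHUB_LISTENER_ROUTES: &[&str] = &[
  "/listener/github/build/{build_id}",
  "/listener/github/repo/{repo_id}/clone",
  "/listener/github/repo/{repo_id}/pull",
  "/listener/github/repo/{repo_id}/build",
  "/listener/github/stack/{stack_id}/refresh",
  "/listener/github/stack/{stack_id}/deploy",
  "/listener/github/sync/{sync_id}/refresh",
  "/listener/github/sync/{sync_id}/sync",
];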

View File

@@ -10,8 +10,8 @@ use monitor_client::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, sync::ResourceSync, tag::Tag,
update::ResourceTarget, user::User,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, tag::Tag, update::ResourceTarget, user::User,
},
};
use mungos::{
@@ -46,7 +46,7 @@ impl Resolve<CreateTag, User> for State {
tag.id = db_client()
.await
.tags
.insert_one(&tag, None)
.insert_one(&tag)
.await
.context("failed to create tag on db")?
.inserted_id
@@ -200,6 +200,15 @@ impl Resolve<UpdateTagsOnResource, User> for State {
.await?;
resource::update_tags::<ResourceSync>(&id, tags, user).await?
}
ResourceTarget::Stack(id) => {
resource::get_check_permissions::<Stack>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Stack>(&id, tags, user).await?
}
};
Ok(UpdateTagsOnResourceResponse {})
}

View File

@@ -29,13 +29,14 @@ impl Resolve<CreateUserGroup, User> for State {
let user_group = UserGroup {
id: Default::default(),
users: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
name,
};
let db = db_client().await;
let id = db
.user_groups
.insert_one(user_group, None)
.insert_one(user_group)
.await
.context("failed to create UserGroup on db")?
.inserted_id
@@ -99,7 +100,7 @@ impl Resolve<DeleteUserGroup, User> for State {
.delete_many(doc! {
"user_target.type": "UserGroup",
"user_target.id": id,
}, None)
})
.await
.context("failed to clean up UserGroups permissions. User Group has been deleted")?;
@@ -125,7 +126,7 @@ impl Resolve<AddUserToUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -138,12 +139,11 @@ impl Resolve<AddUserToUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$addToSet": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -171,7 +171,7 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -184,12 +184,11 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$pull": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
@@ -229,15 +228,11 @@ impl Resolve<SetUsersInUserGroup, User> for State {
Err(_) => doc! { "name": &user_group },
};
db.user_groups
.update_one(
filter.clone(),
doc! { "$set": { "users": users } },
None,
)
.update_one(filter.clone(), doc! { "$set": { "users": users } })
.await
.context("failed to add user to group on db")?;
.context("failed to set users on user group")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")

View File

@@ -44,7 +44,7 @@ impl Resolve<CreateVariable, User> for State {
db_client()
.await
.variables
.insert_one(&variable, None)
.insert_one(&variable)
.await
.context("failed to create variable on db")?;
@@ -71,7 +71,7 @@ impl Resolve<UpdateVariableValue, User> for State {
user: User,
) -> anyhow::Result<UpdateVariableValueResponse> {
if !user.admin {
return Err(anyhow!("only admins can create variables"));
return Err(anyhow!("only admins can update variables"));
}
let variable = get_variable(&name).await?;
@@ -86,7 +86,6 @@ impl Resolve<UpdateVariableValue, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "value": &value } },
None,
)
.await
.context("failed to update variable value on db")?;
@@ -119,7 +118,7 @@ impl Resolve<UpdateVariableDescription, User> for State {
user: User,
) -> anyhow::Result<UpdateVariableDescriptionResponse> {
if !user.admin {
return Err(anyhow!("only admins can create variables"));
return Err(anyhow!("only admins can update variables"));
}
db_client()
.await
@@ -127,7 +126,6 @@ impl Resolve<UpdateVariableDescription, User> for State {
.update_one(
doc! { "name": &name },
doc! { "$set": { "description": &description } },
None,
)
.await
.context("failed to update variable description on db")?;
@@ -142,13 +140,13 @@ impl Resolve<DeleteVariable, User> for State {
user: User,
) -> anyhow::Result<DeleteVariableResponse> {
if !user.admin {
return Err(anyhow!("only admins can create variables"));
return Err(anyhow!("only admins can delete variables"));
}
let variable = get_variable(&name).await?;
db_client()
.await
.variables
.delete_one(doc! { "name": &name }, None)
.delete_one(doc! { "name": &name })
.await
.context("failed to delete variable on db")?;

View File

@@ -9,8 +9,8 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio::sync::Mutex;
use crate::{
auth::{random_string, STATE_PREFIX_LENGTH},
config::core_config,
auth::STATE_PREFIX_LENGTH, config::core_config,
helpers::random_string,
};
pub fn github_oauth_client() -> &'static Option<GithubOauthClient> {

View File

@@ -2,6 +2,7 @@ use anyhow::{anyhow, Context};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::{
monitor_timestamp,
user::{User, UserConfig},
@@ -66,7 +67,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.github_id": &github_id }, None)
.find_one(doc! { "config.data.github_id": &github_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -76,21 +77,18 @@ async fn callback(
None => {
let ts = monitor_timestamp();
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: github_user.login,
enabled: no_users_exist,
enabled: no_users_exist || core_config().enable_new_users,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Github {
github_id,
avatar: github_user.avatar_url,
@@ -98,7 +96,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -11,8 +11,8 @@ use serde_json::Value;
use tokio::sync::Mutex;
use crate::{
auth::{random_string, STATE_PREFIX_LENGTH},
config::core_config,
auth::STATE_PREFIX_LENGTH, config::core_config,
helpers::random_string,
};
pub fn google_oauth_client() -> &'static Option<GoogleOauthClient> {

View File

@@ -3,6 +3,7 @@ use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::user::{User, UserConfig};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
@@ -75,7 +76,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.google_id": &google_id }, None)
.find_one(doc! { "config.data.google_id": &google_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -85,7 +86,7 @@ async fn callback(
None => {
let ts = unix_timestamp_ms() as i64;
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: google_user
@@ -95,17 +96,14 @@ async fn callback(
.first()
.unwrap()
.to_string(),
enabled: no_users_exist,
enabled: no_users_exist || core_config().enable_new_users,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Google {
google_id,
avatar: google_user.picture,
@@ -113,7 +111,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::sync::Mutex;
use super::random_string;
use crate::helpers::random_string;
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
@@ -25,26 +25,31 @@ pub struct JwtClaims {
pub struct JwtClient {
pub key: Hmac<Sha256>,
valid_for_ms: u128,
ttl_ms: u128,
exchange_tokens: ExchangeTokenMap,
}
impl JwtClient {
pub fn new(config: &CoreConfig) -> JwtClient {
let key = Hmac::new_from_slice(random_string(40).as_bytes())
.expect("failed at taking HmacSha256 of jwt secret");
JwtClient {
pub fn new(config: &CoreConfig) -> anyhow::Result<JwtClient> {
let secret = if config.jwt_secret.is_empty() {
random_string(40)
} else {
config.jwt_secret.clone()
};
let key = Hmac::new_from_slice(secret.as_bytes())
.context("failed at taking HmacSha256 of jwt secret")?;
Ok(JwtClient {
key,
valid_for_ms: get_timelength_in_ms(
config.jwt_valid_for.to_string().parse().unwrap(),
ttl_ms: get_timelength_in_ms(
config.jwt_ttl.to_string().parse()?,
),
exchange_tokens: Default::default(),
}
})
}
pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
let iat = unix_timestamp_ms();
let exp = iat + self.valid_for_ms;
let exp = iat + self.ttl_ms;
let claims = JwtClaims {
id: user_id,
iat,

View File

@@ -3,6 +3,7 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::http::HeaderMap;
use mongo_indexed::Document;
use monitor_client::{
api::auth::{
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
@@ -28,7 +29,9 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
CreateLocalUser { username, password }: CreateLocalUser,
_: HeaderMap,
) -> anyhow::Result<CreateLocalUserResponse> {
if !core_config().local_auth {
let core_config = core_config();
if !core_config.local_auth {
return Err(anyhow!("local auth is not enabled"));
}
@@ -40,13 +43,17 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
return Err(anyhow!("username cannot be valid ObjectId"));
}
if password.is_empty() {
return Err(anyhow!("password cannot be empty string"));
}
let password = bcrypt::hash(password, BCRYPT_COST)
.context("failed to hash password")?;
let no_users_exist = db_client()
.await
.users
.find_one(None, None)
.find_one(Document::new())
.await?
.is_none();
@@ -55,24 +62,21 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
let user = User {
id: Default::default(),
username,
enabled: no_users_exist,
enabled: no_users_exist || core_config.enable_new_users,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recent_servers: Vec::new(),
recent_deployments: Vec::new(),
recent_builds: Vec::new(),
recent_repos: Vec::new(),
recent_procedures: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Local { password },
};
let user_id = db_client()
.await
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user")?
.inserted_id
@@ -102,7 +106,7 @@ impl Resolve<LoginLocalUser, HeaderMap> for State {
let user = db_client()
.await
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed at db query for users")?
.with_context(|| {

View File

@@ -7,7 +7,6 @@ use axum::{
};
use monitor_client::entities::{monitor_timestamp, user::User};
use mungos::mongodb::bson::doc;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use reqwest::StatusCode;
use serde::Deserialize;
use serror::AddStatusCode;
@@ -45,14 +44,6 @@ pub async fn auth_request(
Ok(next.run(req).await)
}
pub fn random_string(length: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(length)
.map(char::from)
.collect()
}
#[instrument(level = "debug")]
pub async fn get_user_id_from_headers(
headers: &HeaderMap,
@@ -127,7 +118,7 @@ pub async fn auth_api_key_get_user_id(
let key = db_client()
.await
.api_keys
.find_one(doc! { "key": key }, None)
.find_one(doc! { "key": key })
.await
.context("failed to query db")?
.context("no api key matching key")?;

View File

@@ -233,32 +233,30 @@ pub enum HetznerActionStatus {
#[allow(clippy::enum_variant_names)]
pub enum HetznerServerType {
// Shared
#[serde(rename = "cx11")]
SharedIntel1Core2Ram20Disk,
#[serde(rename = "cpx11")]
SharedAmd2Core2Ram40Disk,
#[serde(rename = "cax11")]
SharedArm2Core4Ram40Disk,
#[serde(rename = "cx21")]
#[serde(rename = "cx22")]
SharedIntel2Core4Ram40Disk,
#[serde(rename = "cpx21")]
SharedAmd3Core4Ram80Disk,
#[serde(rename = "cax21")]
SharedArm4Core8Ram80Disk,
#[serde(rename = "cx31")]
SharedIntel2Core8Ram80Disk,
#[serde(rename = "cx32")]
SharedIntel4Core8Ram80Disk,
#[serde(rename = "cpx31")]
SharedAmd4Core8Ram160Disk,
#[serde(rename = "cax31")]
SharedArm8Core16Ram160Disk,
#[serde(rename = "cx41")]
SharedIntel4Core16Ram160Disk,
#[serde(rename = "cx42")]
SharedIntel8Core16Ram160Disk,
#[serde(rename = "cpx41")]
SharedAmd8Core16Ram240Disk,
#[serde(rename = "cax41")]
SharedArm16Core32Ram320Disk,
#[serde(rename = "cx51")]
SharedIntel8Core32Ram240Disk,
#[serde(rename = "cx52")]
SharedIntel16Core32Ram320Disk,
#[serde(rename = "cpx51")]
SharedAmd16Core32Ram360Disk,
// Dedicated

View File

@@ -218,9 +218,6 @@ fn hetzner_server_type(
server_type: HetznerServerType,
) -> common::HetznerServerType {
match server_type {
HetznerServerType::SharedIntel1Core2Ram20Disk => {
common::HetznerServerType::SharedIntel1Core2Ram20Disk
}
HetznerServerType::SharedAmd2Core2Ram40Disk => {
common::HetznerServerType::SharedAmd2Core2Ram40Disk
}
@@ -236,8 +233,8 @@ fn hetzner_server_type(
HetznerServerType::SharedArm4Core8Ram80Disk => {
common::HetznerServerType::SharedArm4Core8Ram80Disk
}
HetznerServerType::SharedIntel2Core8Ram80Disk => {
common::HetznerServerType::SharedIntel2Core8Ram80Disk
HetznerServerType::SharedIntel4Core8Ram80Disk => {
common::HetznerServerType::SharedIntel4Core8Ram80Disk
}
HetznerServerType::SharedAmd4Core8Ram160Disk => {
common::HetznerServerType::SharedAmd4Core8Ram160Disk
@@ -245,8 +242,8 @@ fn hetzner_server_type(
HetznerServerType::SharedArm8Core16Ram160Disk => {
common::HetznerServerType::SharedArm8Core16Ram160Disk
}
HetznerServerType::SharedIntel4Core16Ram160Disk => {
common::HetznerServerType::SharedIntel4Core16Ram160Disk
HetznerServerType::SharedIntel8Core16Ram160Disk => {
common::HetznerServerType::SharedIntel8Core16Ram160Disk
}
HetznerServerType::SharedAmd8Core16Ram240Disk => {
common::HetznerServerType::SharedAmd8Core16Ram240Disk
@@ -254,8 +251,8 @@ fn hetzner_server_type(
HetznerServerType::SharedArm16Core32Ram320Disk => {
common::HetznerServerType::SharedArm16Core32Ram320Disk
}
HetznerServerType::SharedIntel8Core32Ram240Disk => {
common::HetznerServerType::SharedIntel8Core32Ram240Disk
HetznerServerType::SharedIntel16Core32Ram320Disk => {
common::HetznerServerType::SharedIntel16Core32Ram320Disk
}
HetznerServerType::SharedAmd16Core32Ram360Disk => {
common::HetznerServerType::SharedAmd16Core32Ram360Disk

View File

@@ -4,8 +4,9 @@ use anyhow::Context;
use merge_config_files::parse_config_file;
use monitor_client::entities::{
config::core::{
AwsCredentials, CoreConfig, Env, HetznerCredentials, MongoConfig,
OauthCredentials,
AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig,
GithubWebhookAppInstallationConfig, HetznerCredentials,
MongoConfig, OauthCredentials,
},
logger::LogConfig,
};
@@ -36,30 +37,68 @@ pub fn frontend_path() -> &'static String {
pub fn core_config() -> &'static CoreConfig {
static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
CORE_CONFIG.get_or_init(|| {
let env: Env = envy::from_env()
.context("failed to parse core Env")
.unwrap();
let env: Env = match envy::from_env()
.context("failed to parse core Env") {
Ok(env) => env,
Err(e) => {
panic!("{e:#?}");
}
};
let config_path = &env.monitor_config_path;
let config =
parse_config_file::<CoreConfig>(config_path.as_str())
.unwrap_or_else(|e| {
panic!("failed at parsing config at {config_path} | {e:#}")
});
let installations = match (env.monitor_github_webhook_app_installations_ids, env.monitor_github_webhook_app_installations_namespaces) {
(Some(ids), Some(namespaces)) => {
if ids.len() != namespaces.len() {
panic!("MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
}
ids
.into_iter()
.zip(namespaces)
.map(|(id, namespace)| GithubWebhookAppInstallationConfig {
id,
namespace
})
.collect()
},
(Some(_), None) | (None, Some(_)) => {
panic!("Got only one of MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided");
}
(None, None) => {
config.github_webhook_app.installations
}
};
// recreating CoreConfig here makes sure we apply all env overrides.
CoreConfig {
title: env.monitor_title.unwrap_or(config.title),
host: env.monitor_host.unwrap_or(config.host),
port: env.monitor_port.unwrap_or(config.port),
passkey: env.monitor_passkey.unwrap_or(config.passkey),
jwt_valid_for: env
.monitor_jwt_valid_for
.unwrap_or(config.jwt_valid_for),
sync_directory: env
.monitor_sync_directory
jwt_secret: env.monitor_jwt_secret.unwrap_or(config.jwt_secret),
jwt_ttl: env
.monitor_jwt_ttl
.unwrap_or(config.jwt_ttl),
repo_directory: env
.monitor_repo_directory
.map(|dir|
dir.parse()
.context("failed to parse env MONITOR_SYNC_DIRECTORY as valid path").unwrap())
.unwrap_or(config.sync_directory),
.context("failed to parse env MONITOR_REPO_DIRECTORY as valid path").unwrap())
.unwrap_or(config.repo_directory),
stack_poll_interval: env
.monitor_stack_poll_interval
.unwrap_or(config.stack_poll_interval),
sync_poll_interval: env
.monitor_sync_poll_interval
.unwrap_or(config.sync_poll_interval),
build_poll_interval: env
.monitor_build_poll_interval
.unwrap_or(config.build_poll_interval),
repo_poll_interval: env
.monitor_repo_poll_interval
.unwrap_or(config.repo_poll_interval),
monitoring_interval: env
.monitor_monitoring_interval
.unwrap_or(config.monitoring_interval),
@@ -69,23 +108,20 @@ pub fn core_config() -> &'static CoreConfig {
keep_alerts_for_days: env
.monitor_keep_alerts_for_days
.unwrap_or(config.keep_alerts_for_days),
github_webhook_secret: env
.monitor_github_webhook_secret
.unwrap_or(config.github_webhook_secret),
github_webhook_base_url: env
.monitor_github_webhook_base_url
.or(config.github_webhook_base_url),
github_organizations: env.monitor_github_organizations
.unwrap_or(config.github_organizations),
docker_organizations: env
.monitor_docker_organizations
.unwrap_or(config.docker_organizations),
webhook_secret: env
.monitor_webhook_secret
.unwrap_or(config.webhook_secret),
webhook_base_url: env
.monitor_webhook_base_url
.or(config.webhook_base_url),
transparent_mode: env
.monitor_transparent_mode
.unwrap_or(config.transparent_mode),
ui_write_disabled: env
.monitor_ui_write_disabled
.unwrap_or(config.ui_write_disabled),
enable_new_users: env.monitor_enable_new_users
.unwrap_or(config.enable_new_users),
local_auth: env.monitor_local_auth.unwrap_or(config.local_auth),
google_oauth: OauthCredentials {
enabled: env
@@ -109,6 +145,15 @@ pub fn core_config() -> &'static CoreConfig {
.monitor_github_oauth_secret
.unwrap_or(config.github_oauth.secret),
},
github_webhook_app: GithubWebhookAppConfig {
app_id: env
.monitor_github_webhook_app_app_id
.unwrap_or(config.github_webhook_app.app_id),
pk_path: env
.monitor_github_webhook_app_pk_path
.unwrap_or(config.github_webhook_app.pk_path),
installations,
},
aws: AwsCredentials {
access_key_id: env
.monitor_aws_access_key_id
@@ -155,8 +200,8 @@ pub fn core_config() -> &'static CoreConfig {
// These can't be overridden on env
secrets: config.secrets,
github_accounts: config.github_accounts,
docker_accounts: config.docker_accounts,
git_providers: config.git_providers,
docker_registries: config.docker_registries,
aws_ecr_registries: config.aws_ecr_registries,
}
})
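The block above rebuilds CoreConfig field by field so every env override is applied: each monitor_* field from the environment is an Option, and unwrap_or falls back to the file value. A minimal sketch of that pattern in isolation; the struct and field names here are placeholders, not the real config fields:

// Sketch of the env-over-file override pattern used above.
struct FileConfig {
  port: u16,
  title: String,
}
struct EnvOverrides {
  monitor_port: Option<u16>,
  monitor_title: Option<String>,
}

fn merged(env: EnvOverrides, config: FileConfig) -> FileConfig {
  FileConfig {
    // The env var wins when it is set; otherwise keep the file value.
    port: env.monitor_port.unwrap_or(config.port),
    title: env.monitor_title.unwrap_or(config.title),
  }
}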

View File

@@ -9,9 +9,11 @@ use monitor_client::entities::{
deployment::Deployment,
permission::Permission,
procedure::Procedure,
provider::{DockerRegistryAccount, GitProviderAccount},
repo::Repo,
server::{stats::SystemStatsRecord, Server},
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
tag::Tag,
update::Update,
@@ -31,6 +33,8 @@ pub struct DbClient {
pub api_keys: Collection<ApiKey>,
pub tags: Collection<Tag>,
pub variables: Collection<Variable>,
pub git_accounts: Collection<GitProviderAccount>,
pub registry_accounts: Collection<DockerRegistryAccount>,
pub updates: Collection<Update>,
pub alerts: Collection<Alert>,
pub stats: Collection<SystemStatsRecord>,
@@ -44,6 +48,7 @@ pub struct DbClient {
pub alerters: Collection<Alerter>,
pub server_templates: Collection<ServerTemplate>,
pub resource_syncs: Collection<ResourceSync>,
pub stacks: Collection<Stack>,
//
pub db: Database,
}
@@ -90,6 +95,8 @@ impl DbClient {
api_keys: mongo_indexed::collection(&db, true).await?,
tags: mongo_indexed::collection(&db, true).await?,
variables: mongo_indexed::collection(&db, true).await?,
git_accounts: mongo_indexed::collection(&db, true).await?,
registry_accounts: mongo_indexed::collection(&db, true).await?,
updates: mongo_indexed::collection(&db, true).await?,
alerts: mongo_indexed::collection(&db, true).await?,
stats: mongo_indexed::collection(&db, true).await?,
@@ -105,6 +112,7 @@ impl DbClient {
.await?,
resource_syncs: resource_collection(&db, "ResourceSync")
.await?,
stacks: resource_collection(&db, "Stack").await?,
//
db,
};
@@ -112,7 +120,7 @@ impl DbClient {
}
}
async fn resource_collection<T>(
async fn resource_collection<T: Send + Sync>(
db: &Database,
collection_name: &str,
) -> anyhow::Result<Collection<T>> {
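The added Send + Sync bound on resource_collection mirrors what the typed collection APIs appear to require after the driver upgrade (an assumption from the surrounding changes, not stated in the diff). A generic helper handing back a Collection<T> then needs the same bound:

use mungos::mongodb::{Collection, Database};

// Sketch only: the bound must be propagated by any generic wrapper around
// the typed collection constructor.
fn typed_collection<T: Send + Sync>(
  db: &Database,
  name: &str,
) -> Collection<T> {
  db.collection::<T>(name)
}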

View File

@@ -6,7 +6,8 @@ use monitor_client::{
entities::{
build::BuildActionState, deployment::DeploymentActionState,
procedure::ProcedureActionState, repo::RepoActionState,
server::ServerActionState, sync::ResourceSyncActionState,
server::ServerActionState, stack::StackActionState,
sync::ResourceSyncActionState,
},
};
@@ -23,6 +24,7 @@ pub struct ActionStates {
Cache<String, Arc<ActionState<ProcedureActionState>>>,
pub resource_sync:
Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,
}
/// Need to be able to check "busy" with write lock acquired.

View File

@@ -6,6 +6,7 @@ use monitor_client::entities::{
alerter::*,
deployment::DeploymentState,
server::stats::SeverityLevel,
stack::StackState,
update::ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
@@ -19,20 +20,18 @@ pub async fn send_alerts(alerts: &[Alert]) {
return;
}
let alerters = match find_collect(
let Ok(alerters) = find_collect(
&db_client().await.alerters,
doc! { "config.enabled": true },
None,
)
.await
{
Ok(alerters) => alerters,
Err(e) => {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
);
return;
}
.inspect_err(|e| {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
)
}) else {
return;
};
let handles =
@@ -307,7 +306,7 @@ async fn send_slack_alert(
..
} => {
let to = fmt_docker_container_state(to);
let text = format!("📦 container *{name}* is now {to}");
let text = format!("📦 Container *{name}* is now {to}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
@@ -320,6 +319,28 @@ async fn send_slack_alert(
];
(text, blocks.into())
}
AlertData::StackStateChange {
name,
server_name,
from,
to,
id,
..
} => {
let to = fmt_stack_state(to);
let text = format!("🥞 Stack *{name}* is now {to}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: {server_name}\nprevious: {from}",
)),
Block::section(resource_link(
ResourceTargetVariant::Stack,
id,
)),
];
(text, blocks.into())
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
@@ -337,7 +358,7 @@ async fn send_slack_alert(
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let text =
format!("{level} | There are pending resource sync updates");
format!("{level} | Pending resource sync updates on {name}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
@@ -350,34 +371,32 @@ async fn send_slack_alert(
];
(text, blocks.into())
}
AlertData::BuildFailed {
id,
name,
version,
err,
} => {
AlertData::BuildFailed { id, name, version } => {
let text = format!("{level} | Build {name} has failed");
let err = err
.as_ref()
.map(|log| {
let stdout = (!log.stdout.is_empty())
.then(|| format!("\nstdout: {}", log.stdout))
.unwrap_or_default();
let stderr = (!log.stderr.is_empty())
.then(|| format!("\nstderr: {}", log.stderr))
.unwrap_or_default();
format!("\nfailed at stage: {}{stdout}{stderr}", log.stage)
})
.unwrap_or_default();
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"build id: *{id}*\nbuild name: *{name}*\nversion: v{version}{err}",
"build id: *{id}*\nbuild name: *{name}*\nversion: v{version}",
)),
Block::section(resource_link(ResourceTargetVariant::Build, id))
];
(text, blocks.into())
}
AlertData::RepoBuildFailed { id, name } => {
let text =
format!("{level} | Repo build for {name} has failed");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"repo id: *{id}*\nrepo name: *{name}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Repo,
id,
)),
];
(text, blocks.into())
}
AlertData::None {} => Default::default(),
};
if !text.is_empty() {
@@ -404,6 +423,16 @@ fn fmt_docker_container_state(state: &DeploymentState) -> String {
}
}
fn fmt_stack_state(state: &StackState) -> String {
match state {
StackState::Running => String::from("Running ▶️"),
StackState::Stopped => String::from("Stopped 🛑"),
StackState::Restarting => String::from("Restarting 🔄"),
StackState::Down => String::from("Down ⬇️"),
_ => state.to_string(),
}
}
fn fmt_level(level: SeverityLevel) -> &'static str {
match level {
SeverityLevel::Critical => "CRITICAL 🚨",
@@ -425,6 +454,9 @@ fn resource_link(
ResourceTargetVariant::Deployment => {
format!("/deployments/{id}")
}
ResourceTargetVariant::Stack => {
format!("/stacks/{id}")
}
ResourceTargetVariant::Server => {
format!("/servers/{id}")
}

View File

@@ -0,0 +1,49 @@
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
api::write::RefreshBuildCache, entities::user::build_user,
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
config::core_config,
state::{db_client, State},
};
pub fn spawn_build_refresh_loop() {
let interval: Timelength = core_config()
.build_poll_interval
.try_into()
.expect("Invalid build poll interval");
tokio::spawn(async move {
refresh_builds().await;
loop {
wait_until_timelength(interval, 2000).await;
refresh_builds().await;
}
});
}
async fn refresh_builds() {
let Ok(builds) =
find_collect(&db_client().await.builds, None, None)
.await
.inspect_err(|e| {
warn!("failed to get builds from db in refresh task | {e:#}")
})
else {
return;
};
for build in builds {
State
.resolve(
RefreshBuildCache { build: build.id },
build_user().clone(),
)
.await
.inspect_err(|e| {
warn!("failed to refresh build cache in refresh task | build: {} | {e:#}", build.name)
})
.ok();
}
}

View File

@@ -0,0 +1,211 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use formatting::muted;
use monitor_client::entities::{
builder::{AwsBuilderConfig, Builder, BuilderConfig},
monitor_timestamp,
server::Server,
server_template::aws::AwsServerTemplateConfig,
update::{Log, Update},
Version,
};
use periphery_client::{
api::{self, GetVersionResponse},
PeripheryClient,
};
use crate::{
cloud::{
aws::ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
BuildCleanupData,
},
config::core_config,
helpers::update::update_update,
resource,
};
use super::periphery_client;
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
#[instrument(skip_all, fields(builder_id = builder.id, update_id = update.id))]
pub async fn get_builder_periphery(
// build: &Build,
resource_name: String,
version: Option<Version>,
builder: Builder,
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
match builder.config {
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
return Err(anyhow!("builder has not configured a server"));
}
let server = resource::get::<Server>(&config.server_id).await?;
let periphery = periphery_client(&server)?;
Ok((
periphery,
BuildCleanupData::Server {
repo_name: resource_name,
},
))
}
BuilderConfig::Aws(config) => {
get_aws_builder(&resource_name, version, config, update).await
}
}
}
#[instrument(skip_all, fields(resource_name, update_id = update.id))]
async fn get_aws_builder(
resource_name: &str,
version: Option<Version>,
config: AwsBuilderConfig,
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
let start_create_ts = monitor_timestamp();
let version = version.map(|v| format!("-v{v}")).unwrap_or_default();
let instance_name = format!("BUILDER-{resource_name}{version}");
let Ec2Instance { instance_id, ip } = launch_ec2_instance(
&instance_name,
AwsServerTemplateConfig::from_builder_config(&config),
)
.await?;
info!("ec2 instance launched");
let log = Log {
stage: "start build instance".to_string(),
success: true,
stdout: start_aws_builder_log(&instance_id, &ip, &config),
start_ts: start_create_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
update.logs.push(log);
update_update(update.clone()).await?;
let periphery_address = format!("http://{ip}:{}", config.port);
let periphery =
PeripheryClient::new(&periphery_address, &core_config().passkey);
let start_connect_ts = monitor_timestamp();
let mut res = Ok(GetVersionResponse {
version: String::new(),
});
for _ in 0..BUILDER_POLL_MAX_TRIES {
let version = periphery
.request(api::GetVersion {})
.await
.context("failed to reach periphery client on builder");
if let Ok(GetVersionResponse { version }) = &version {
let connect_log = Log {
stage: "build instance connected".to_string(),
success: true,
stdout: format!(
"established contact with periphery on builder\nperiphery version: v{}",
version
),
start_ts: start_connect_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
update.logs.push(connect_log);
update_update(update.clone()).await?;
return Ok((
periphery,
BuildCleanupData::Aws {
instance_id,
region: config.region,
},
));
}
res = version;
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
.await;
}
// Spawn the terminate task in the failure case (the loop completed without returning)
tokio::spawn(async move {
let _ =
terminate_ec2_instance_with_retry(config.region, &instance_id)
.await;
});
// Unwrap is safe: the only way to reach this point is if the Ok check above never returned early, so the result must be Err
Err(
res.err().unwrap().context(
"failed to start usable builder. terminating instance.",
),
)
}
#[instrument(skip(periphery, update))]
pub async fn cleanup_builder_instance(
periphery: PeripheryClient,
cleanup_data: BuildCleanupData,
update: &mut Update,
) {
match cleanup_data {
BuildCleanupData::Server { repo_name } => {
let _ = periphery
.request(api::git::DeleteRepo { name: repo_name })
.await;
}
BuildCleanupData::Aws {
instance_id,
region,
} => {
let _instance_id = instance_id.clone();
tokio::spawn(async move {
let _ =
terminate_ec2_instance_with_retry(region, &_instance_id)
.await;
});
update.push_simple_log(
"terminate instance",
format!("termination queued for instance id {instance_id}"),
);
}
}
}
pub fn start_aws_builder_log(
instance_id: &str,
ip: &str,
config: &AwsBuilderConfig,
) -> String {
let AwsBuilderConfig {
ami_id,
instance_type,
volume_gb,
subnet_id,
assign_public_ip,
security_group_ids,
use_public_ip,
..
} = config;
let readable_sec_group_ids = security_group_ids.join(", ");
[
format!("{}: {instance_id}", muted("instance id")),
format!("{}: {ip}", muted("ip")),
format!("{}: {ami_id}", muted("ami id")),
format!("{}: {instance_type}", muted("instance type")),
format!("{}: {volume_gb} GB", muted("volume size")),
format!("{}: {subnet_id}", muted("subnet id")),
format!("{}: {readable_sec_group_ids}", muted("security groups")),
format!("{}: {assign_public_ip}", muted("assign public ip")),
format!("{}: {use_public_ip}", muted("use public ip")),
]
.join("\n")
}

View File

@@ -12,6 +12,15 @@ pub fn build_cancel_channel(
BUILD_CANCEL_CHANNEL.get_or_init(|| BroadcastChannel::new(100))
}
/// A channel sending (repo_id, update)
pub fn repo_cancel_channel(
) -> &'static BroadcastChannel<(String, Update)> {
static REPO_CANCEL_CHANNEL: OnceLock<
BroadcastChannel<(String, Update)>,
> = OnceLock::new();
REPO_CANCEL_CHANNEL.get_or_init(|| BroadcastChannel::new(100))
}
pub fn update_channel() -> &'static BroadcastChannel<UpdateListItem> {
static UPDATE_CHANNEL: OnceLock<BroadcastChannel<UpdateListItem>> =
OnceLock::new();

View File

@@ -1,26 +1,32 @@
use std::time::Duration;
use std::{collections::HashSet, time::Duration};
use anyhow::{anyhow, Context};
use mongo_indexed::Document;
use monitor_client::entities::{
permission::{Permission, PermissionLevel, UserTarget},
server::Server,
update::ResourceTarget,
update::{Log, ResourceTarget, Update},
user::User,
EnvironmentVar,
};
use mungos::mongodb::bson::{doc, Bson};
use mungos::mongodb::bson::{doc, to_document, Bson};
use periphery_client::PeripheryClient;
use rand::{thread_rng, Rng};
use query::get_global_variables;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use crate::{config::core_config, state::db_client};
pub mod action_state;
pub mod alert;
pub mod build;
pub mod builder;
pub mod cache;
pub mod channel;
pub mod procedure;
pub mod prune;
pub mod query;
pub mod repo;
pub mod stack;
pub mod sync;
pub mod update;
@@ -42,6 +48,78 @@ pub fn random_duration(min_ms: u64, max_ms: u64) -> Duration {
Duration::from_millis(thread_rng().gen_range(min_ms..max_ms))
}
pub fn random_string(length: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(length)
.map(char::from)
.collect()
}
/// First checks db for token, then checks core config.
/// Only errors if db call errors.
/// Reports use_https through the on_https_found callback and returns the token if one is found.
pub async fn git_token(
provider_domain: &str,
account_username: &str,
mut on_https_found: impl FnMut(bool),
) -> anyhow::Result<Option<String>> {
let db_provider = db_client()
.await
.git_accounts
.find_one(doc! { "domain": provider_domain, "username": account_username })
.await
.context("failed to query db for git provider accounts")?;
if let Some(provider) = db_provider {
on_https_found(provider.https);
return Ok(Some(provider.token));
}
Ok(
core_config()
.git_providers
.iter()
.find(|provider| provider.domain == provider_domain)
.and_then(|provider| {
on_https_found(provider.https);
provider
.accounts
.iter()
.find(|account| account.username == account_username)
.map(|account| account.token.clone())
}),
)
}
/// First checks db for token, then checks core config.
/// Only errors if db call errors.
pub async fn registry_token(
provider_domain: &str,
account_username: &str,
) -> anyhow::Result<Option<String>> {
let provider = db_client()
.await
.registry_accounts
.find_one(doc! { "domain": provider_domain, "username": account_username })
.await
.context("failed to query db for docker registry accounts")?;
if let Some(provider) = provider {
return Ok(Some(provider.token));
}
Ok(
core_config()
.docker_registries
.iter()
.find(|provider| provider.domain == provider_domain)
.and_then(|provider| {
provider
.accounts
.iter()
.find(|account| account.username == account_username)
.map(|account| account.token.clone())
}),
)
}
#[instrument]
pub async fn remove_from_recently_viewed<T>(resource: T)
where
@@ -62,7 +140,6 @@ where
}
}
},
None,
)
.await
.context("failed to remove resource from users recently viewed")
@@ -104,15 +181,12 @@ pub async fn create_permission<T>(
if let Err(e) = db_client()
.await
.permissions
.insert_one(
Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
},
None,
)
.insert_one(Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
})
.await
{
error!("failed to create permission for {target:?} | {e:#}");
@@ -138,3 +212,99 @@ pub fn flatten_document(doc: Document) -> Document {
target
}
/// Returns the secret replacers
pub async fn interpolate_variables_secrets_into_environment(
environment: &mut Vec<EnvironmentVar>,
update: &mut Update,
) -> anyhow::Result<HashSet<(String, String)>> {
// Interpolate variables into environment
let variables = get_global_variables().await?;
let core_config = core_config();
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
for env in environment {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&env.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate global variables - {}",
env.variable
)
})?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers.extend(more_replacers);
// set env value with the result
env.value = res;
}
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
"interpolate global variables",
global_replacers
.into_iter()
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
if !secret_replacers.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
Ok(secret_replacers)
}
/// Run on startup, as no updates should be in progress on startup
pub async fn startup_in_progress_update_cleanup() {
let log = Log::error(
"monitor shutdown",
String::from("Monitor shutdown during execution. If this is a build, the builder may not have been terminated.")
);
// This static log won't fail to serialize, unwrap ok.
let log = to_document(&log).unwrap();
if let Err(e) = db_client()
.await
.updates
.update_many(
doc! { "status": "InProgress" },
doc! {
"$set": {
"status": "Complete",
"success": false,
},
"$push": {
"logs": log
}
},
)
.await
{
error!("failed to cleanup in progress updates on startup | {e:#}")
}
}
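To make the two-pass interpolation concrete, a small sketch of a single pass, assuming the DoubleBrackets interpolator expands [[NAME]] tokens; the variable name and value are invented for the example:

use std::collections::HashMap;

// Illustrative only. The replacer pairs are (value, variable), which the code
// above uses to log which variables were substituted (and to redact secrets).
fn interpolation_example() -> anyhow::Result<()> {
  let mut variables = HashMap::new();
  variables
    .insert("DB_HOST".to_string(), "db.internal".to_string());
  let input = String::from("HOST=[[DB_HOST]]");
  let (value, replacers) = svi::interpolate_variables(
    &input,
    &variables,
    svi::Interpolator::DoubleBrackets,
    false,
  )?;
  // value should now read "HOST=db.internal" (assuming [[..]] expansion),
  // and replacers records the substitution for the update log.
  let _ = (value, replacers);
  Ok(())
}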

View File

@@ -1,18 +1,24 @@
use std::time::{Duration, Instant};
use anyhow::{anyhow, Context, Ok};
use formatting::{bold, colored, muted, Color};
use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::execute::Execution,
entities::{
procedure::Procedure, update::Update, user::procedure_user,
procedure::Procedure,
update::{Log, Update},
user::procedure_user,
},
};
use mungos::by_id::find_one_by_id;
use resolver_api::Resolve;
use tokio::sync::Mutex;
use crate::{api::execute::ExecuteRequest, state::State};
use crate::{
api::execute::ExecuteRequest,
state::{db_client, State},
};
use super::update::{init_execution_update, update_update};
@@ -49,8 +55,7 @@ pub async fn execute_procedure(
.await
.with_context(|| {
format!(
"{}: failed stage '{}' execution after {:?}",
colored("ERROR", Color::Red),
"failed stage '{}' execution after {:?}",
bold(&stage.name),
timer.elapsed(),
)
@@ -130,10 +135,15 @@ async fn execute_execution(
let ExecuteRequest::RunProcedure(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunProcedure")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RunProcedure"),
&update_id,
)
.await?
}
Execution::RunBuild(req) => {
let req = ExecuteRequest::RunBuild(req);
@@ -141,10 +151,31 @@ async fn execute_execution(
let ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunBuild")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RunBuild"),
&update_id,
)
.await?
}
Execution::CancelBuild(req) => {
let req = ExecuteRequest::CancelBuild(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::CancelBuild(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at CancelBuild"),
&update_id,
)
.await?
}
Execution::Deploy(req) => {
let req = ExecuteRequest::Deploy(req);
@@ -152,10 +183,15 @@ async fn execute_execution(
let ExecuteRequest::Deploy(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at Deploy")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at Deploy"),
&update_id,
)
.await?
}
Execution::StartContainer(req) => {
let req = ExecuteRequest::StartContainer(req);
@@ -163,10 +199,63 @@ async fn execute_execution(
let ExecuteRequest::StartContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StartContainer")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StartContainer"),
&update_id,
)
.await?
}
Execution::RestartContainer(req) => {
let req = ExecuteRequest::RestartContainer(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RestartContainer(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RestartContainer"),
&update_id,
)
.await?
}
Execution::PauseContainer(req) => {
let req = ExecuteRequest::PauseContainer(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PauseContainer(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PauseContainer"),
&update_id,
)
.await?
}
Execution::UnpauseContainer(req) => {
let req = ExecuteRequest::UnpauseContainer(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::UnpauseContainer(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at UnpauseContainer"),
&update_id,
)
.await?
}
Execution::StopContainer(req) => {
let req = ExecuteRequest::StopContainer(req);
@@ -174,10 +263,15 @@ async fn execute_execution(
let ExecuteRequest::StopContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StopContainer")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StopContainer"),
&update_id,
)
.await?
}
Execution::StopAllContainers(req) => {
let req = ExecuteRequest::StopAllContainers(req);
@@ -185,10 +279,15 @@ async fn execute_execution(
let ExecuteRequest::StopAllContainers(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StopAllContainers")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StopAllContainers"),
&update_id,
)
.await?
}
Execution::RemoveContainer(req) => {
let req = ExecuteRequest::RemoveContainer(req);
@@ -196,10 +295,15 @@ async fn execute_execution(
let ExecuteRequest::RemoveContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RemoveContainer")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RemoveContainer"),
&update_id,
)
.await?
}
Execution::CloneRepo(req) => {
let req = ExecuteRequest::CloneRepo(req);
@@ -207,10 +311,15 @@ async fn execute_execution(
let ExecuteRequest::CloneRepo(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at CloneRepo")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at CloneRepo"),
&update_id,
)
.await?
}
Execution::PullRepo(req) => {
let req = ExecuteRequest::PullRepo(req);
@@ -218,10 +327,47 @@ async fn execute_execution(
let ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PullRepo")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PullRepo"),
&update_id,
)
.await?
}
Execution::BuildRepo(req) => {
let req = ExecuteRequest::BuildRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::BuildRepo(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at BuildRepo"),
&update_id,
)
.await?
}
Execution::CancelRepoBuild(req) => {
let req = ExecuteRequest::CancelRepoBuild(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::CancelRepoBuild(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at CancelRepoBuild"),
&update_id,
)
.await?
}
Execution::PruneNetworks(req) => {
let req = ExecuteRequest::PruneNetworks(req);
@@ -229,10 +375,15 @@ async fn execute_execution(
let ExecuteRequest::PruneNetworks(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneNetworks")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneNetworks"),
&update_id,
)
.await?
}
Execution::PruneImages(req) => {
let req = ExecuteRequest::PruneImages(req);
@@ -240,10 +391,15 @@ async fn execute_execution(
let ExecuteRequest::PruneImages(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneImages")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneImages"),
&update_id,
)
.await?
}
Execution::PruneContainers(req) => {
let req = ExecuteRequest::PruneContainers(req);
@@ -251,10 +407,15 @@ async fn execute_execution(
let ExecuteRequest::PruneContainers(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneContainers")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneContainers"),
&update_id,
)
.await?
}
Execution::RunSync(req) => {
let req = ExecuteRequest::RunSync(req);
@@ -262,10 +423,127 @@ async fn execute_execution(
let ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunSync")?
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RunSync"),
&update_id,
)
.await?
}
Execution::DeployStack(req) => {
let req = ExecuteRequest::DeployStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DeployStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at DeployStack"),
&update_id,
)
.await?
}
Execution::StartStack(req) => {
let req = ExecuteRequest::StartStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StartStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StartStack"),
&update_id,
)
.await?
}
Execution::RestartStack(req) => {
let req = ExecuteRequest::RestartStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RestartStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RestartStack"),
&update_id,
)
.await?
}
Execution::PauseStack(req) => {
let req = ExecuteRequest::PauseStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PauseStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PauseStack"),
&update_id,
)
.await?
}
Execution::UnpauseStack(req) => {
let req = ExecuteRequest::UnpauseStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::UnpauseStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at UnpauseStack"),
&update_id,
)
.await?
}
Execution::StopStack(req) => {
let req = ExecuteRequest::StopStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StopStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StopStack"),
&update_id,
)
.await?
}
Execution::DestroyStack(req) => {
let req = ExecuteRequest::DestroyStack(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DestroyStack(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at DestroyStack"),
&update_id,
)
.await?
}
Execution::Sleep(req) => {
tokio::time::sleep(Duration::from_millis(
@@ -289,6 +567,30 @@ async fn execute_execution(
}
}
/// If the call to .resolve returns Err, the update may not be closed.
/// This ensures it is closed with an error log attached.
async fn handle_resolve_result(
res: anyhow::Result<Update>,
update_id: &str,
) -> anyhow::Result<Update> {
match res {
Ok(res) => Ok(res),
Err(e) => {
let log =
Log::error("execution error", format_serror(&e.into()));
let mut update =
find_one_by_id(&db_client().await.updates, update_id)
.await
.context("failed to query to db")?
.context("no update exists with given id")?;
update.logs.push(log);
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
}
/// ASSUMES FIRST LOG IS ALREADY CREATED
#[instrument(level = "debug")]
async fn add_line_to_update(update: &Mutex<Update>, line: &str) {

View File

@@ -68,12 +68,9 @@ async fn prune_stats() -> anyhow::Result<()> {
let res = db_client()
.await
.stats
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} stats from db", res.deleted_count);
Ok(())
@@ -89,12 +86,9 @@ async fn prune_alerts() -> anyhow::Result<()> {
let res = db_client()
.await
.alerts
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} alerts from db", res.deleted_count);
Ok(())

View File

@@ -1,21 +1,26 @@
use std::{
collections::{HashMap, HashSet},
str::FromStr,
};
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use monitor_client::entities::{
alerter::Alerter,
build::Build,
builder::Builder,
deployment::{Deployment, DeploymentState},
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::{Server, ServerState},
server_template::ServerTemplate,
stack::{ComposeProject, Stack, StackState},
sync::ResourceSync,
tag::Tag,
update::{ResourceTargetVariant, Update},
update::{ResourceTarget, ResourceTargetVariant, Update},
user::{admin_service_user, User},
user_group::UserGroup,
variable::Variable,
Operation,
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId, Document},
@@ -23,17 +28,24 @@ use mungos::{
},
};
use crate::{config::core_config, resource, state::db_client};
use crate::{
resource::{self, get_user_permission_on_resource},
state::db_client,
};
#[instrument(level = "debug")]
pub async fn get_user(user_id: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user_id) {
// user: Id or username
pub async fn get_user(user: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user) {
return Ok(user);
}
find_one_by_id(&db_client().await.users, user_id)
db_client()
.await
.users
.find_one(id_or_username_filter(user))
.await
.context("failed to query mongo for user")?
.with_context(|| format!("no user found with id {user_id}"))
.with_context(|| format!("no user found with {user}"))
}
#[instrument(level = "debug")]
@@ -80,6 +92,71 @@ pub async fn get_deployment_state(
Ok(state)
}
/// All compose projects from the same server can be passed in.
pub fn get_stack_state_from_projects(
stack: &Stack,
projects: &[ComposeProject],
) -> StackState {
let project_name = stack.project_name(false);
let Some(status) = projects
.iter()
.find(|project| project.name == project_name)
.and_then(|project| project.status.as_deref())
else {
return StackState::Down;
};
let Ok(states) = status
.split(", ")
.filter_map(|state| state.split('(').next())
.map(|state| {
state.parse::<DeploymentState>().with_context(|| {
format!("failed to parse stack state entry: {state}")
})
})
.collect::<anyhow::Result<Vec<_>>>()
.inspect_err(|e| warn!("{e:#}"))
else {
return StackState::Unknown;
};
if states.is_empty() {
return StackState::Down;
}
if states.len() > 1 {
return StackState::Unhealthy;
}
match states[0] {
DeploymentState::Unknown => StackState::Unknown,
DeploymentState::NotDeployed => StackState::Down,
DeploymentState::Created => StackState::Created,
DeploymentState::Restarting => StackState::Restarting,
DeploymentState::Running => StackState::Running,
DeploymentState::Removing => StackState::Removing,
DeploymentState::Paused => StackState::Paused,
DeploymentState::Exited => StackState::Stopped,
DeploymentState::Dead => StackState::Dead,
}
}
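// Worked example (illustrative, not part of the changeset): a project whose
// status reads "running(2)" parses to a single Running state, so the stack is
// Running; "running(1), exited(1)" yields two distinct states and maps to
// Unhealthy; and if no project matches the stack's project name at all, the
// state is Down.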
/// Gets stack state fresh from periphery
#[instrument(level = "debug")]
pub async fn get_stack_state(
stack: &Stack,
) -> anyhow::Result<StackState> {
if stack.config.server_id.is_empty() {
return Ok(StackState::Down);
}
let (server, status) =
get_server_with_status(&stack.config.server_id).await?;
if status != ServerState::Ok {
return Ok(StackState::Unknown);
}
let projects = super::periphery_client(&server)?
.request(periphery_client::api::compose::ListComposeProjects {})
.await?;
Ok(get_stack_state_from_projects(stack, &projects))
}
#[instrument(level = "debug")]
pub async fn get_tag(id_or_name: &str) -> anyhow::Result<Tag> {
let query = match ObjectId::from_str(id_or_name) {
@@ -89,7 +166,7 @@ pub async fn get_tag(id_or_name: &str) -> anyhow::Result<Tag> {
db_client()
.await
.tags
.find_one(query, None)
.find_one(query)
.await
.context("failed to query mongo for tag")?
.with_context(|| format!("no tag found matching {id_or_name}"))
@@ -120,10 +197,10 @@ pub async fn get_id_to_tags(
}
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
pub async fn get_user_user_groups(
user_id: &str,
) -> anyhow::Result<Vec<String>> {
let res = find_collect(
) -> anyhow::Result<Vec<UserGroup>> {
find_collect(
&db_client().await.user_groups,
doc! {
"users": user_id
@@ -131,90 +208,75 @@ pub async fn get_user_user_group_ids(
None,
)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|ug| ug.id)
.collect();
.context("failed to query db for user groups")
}
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
user_id: &str,
) -> anyhow::Result<Vec<String>> {
let res = get_user_user_groups(user_id)
.await?
.into_iter()
.map(|ug| ug.id)
.collect();
Ok(res)
}
/// Returns Vec of all queries on permissions that match against the user
/// or any user groups that the user is a part of.
/// Result used with Mongodb '$or'.
#[instrument(level = "debug")]
pub async fn user_target_query(
pub fn user_target_query(
user_id: &str,
user_groups: &[UserGroup],
) -> anyhow::Result<Vec<Document>> {
let mut user_target_query = vec![
doc! { "user_target.type": "User", "user_target.id": user_id },
];
let user_groups = get_user_user_group_ids(user_id)
.await?
.into_iter()
.map(|ug_id| {
doc! {
"user_target.type": "UserGroup", "user_target.id": ug_id,
}
});
let user_groups = user_groups.iter().map(|ug| {
doc! {
"user_target.type": "UserGroup", "user_target.id": &ug.id,
}
});
user_target_query.extend(user_groups);
Ok(user_target_query)
}
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource(
user_id: &str,
resource_variant: ResourceTargetVariant,
resource_id: &str,
pub async fn get_user_permission_on_target(
user: &User,
target: &ResourceTarget,
) -> anyhow::Result<PermissionLevel> {
let lowest_permission = if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
};
let permission = find_collect(
&db_client().await.permissions,
doc! {
"$or": user_target_query(user_id).await?,
"resource_target.type": resource_variant.as_ref(),
"resource_target.id": resource_id
},
None,
)
.await
.context("failed to query db for permissions")?
.into_iter()
// get the max permission user has between personal / any user groups
.fold(lowest_permission, |level, permission| {
if permission.level > level {
permission.level
} else {
level
match target {
ResourceTarget::System(_) => Ok(PermissionLevel::None),
ResourceTarget::Build(id) => {
get_user_permission_on_resource::<Build>(user, id).await
}
});
Ok(permission)
}
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_non_admin(
user_id: &str,
resource_type: ResourceTargetVariant,
) -> anyhow::Result<Vec<String>> {
let permissions = find_collect(
&db_client().await.permissions,
doc! {
"$or": user_target_query(user_id).await?,
"resource_target.type": resource_type.as_ref(),
"level": { "$in": ["Read", "Execute", "Write"] }
},
None,
)
.await
.context("failed to query permissions on db")?
.into_iter()
.map(|p| p.resource_target.extract_variant_id().1.to_string())
// collect into hashset first to remove any duplicates
.collect::<HashSet<_>>();
Ok(permissions.into_iter().collect())
ResourceTarget::Builder(id) => {
get_user_permission_on_resource::<Builder>(user, id).await
}
ResourceTarget::Deployment(id) => {
get_user_permission_on_resource::<Deployment>(user, id).await
}
ResourceTarget::Server(id) => {
get_user_permission_on_resource::<Server>(user, id).await
}
ResourceTarget::Repo(id) => {
get_user_permission_on_resource::<Repo>(user, id).await
}
ResourceTarget::Alerter(id) => {
get_user_permission_on_resource::<Alerter>(user, id).await
}
ResourceTarget::Procedure(id) => {
get_user_permission_on_resource::<Procedure>(user, id).await
}
ResourceTarget::ServerTemplate(id) => {
get_user_permission_on_resource::<ServerTemplate>(user, id)
.await
}
ResourceTarget::ResourceSync(id) => {
get_user_permission_on_resource::<ResourceSync>(user, id).await
}
ResourceTarget::Stack(id) => {
get_user_permission_on_resource::<Stack>(user, id).await
}
}
}
pub fn id_or_name_filter(id_or_name: &str) -> Document {
@@ -224,6 +286,13 @@ pub fn id_or_name_filter(id_or_name: &str) -> Document {
}
}
pub fn id_or_username_filter(id_or_username: &str) -> Document {
match ObjectId::from_str(id_or_username) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "username": id_or_username },
}
}
pub async fn get_global_variables(
) -> anyhow::Result<HashMap<String, String>> {
Ok(
@@ -240,7 +309,7 @@ pub async fn get_variable(name: &str) -> anyhow::Result<Variable> {
db_client()
.await
.variables
.find_one(doc! { "name": &name }, None)
.find_one(doc! { "name": &name })
.await
.context("failed at call to db")?
.with_context(|| {
@@ -256,12 +325,12 @@ pub async fn get_latest_update(
db_client()
.await
.updates
.find_one(
doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
},
.find_one(doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
})
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),

View File

@@ -0,0 +1,48 @@
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
api::write::RefreshRepoCache, entities::user::repo_user,
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
config::core_config,
state::{db_client, State},
};
pub fn spawn_repo_refresh_loop() {
let interval: Timelength = core_config()
.repo_poll_interval
.try_into()
.expect("Invalid repo poll interval");
tokio::spawn(async move {
refresh_repos().await;
loop {
wait_until_timelength(interval, 1000).await;
refresh_repos().await;
}
});
}
async fn refresh_repos() {
let Ok(repos) = find_collect(&db_client().await.repos, None, None)
.await
.inspect_err(|e| {
warn!("failed to get repos from db in refresh task | {e:#}")
})
else {
return;
};
for repo in repos {
State
.resolve(
RefreshRepoCache { repo: repo.id },
repo_user().clone(),
)
.await
.inspect_err(|e| {
warn!("failed to refresh repo cache in refresh task | repo: {} | {e:#}", repo.name)
})
.ok();
}
}

View File

@@ -0,0 +1,211 @@
use monitor_client::{
api::execute::*,
entities::{
permission::PermissionLevel,
stack::{Stack, StackActionState},
update::{Log, Update},
user::User,
},
};
use periphery_client::{api::compose::*, PeripheryClient};
use crate::{
helpers::{periphery_client, update::update_update},
monitor::update_cache_for_server,
state::action_states,
};
use super::get_stack_and_server;
pub trait ExecuteCompose {
type Extras;
async fn execute(
periphery: PeripheryClient,
stack: Stack,
service: Option<String>,
extras: Self::Extras,
) -> anyhow::Result<Log>;
}
pub async fn execute_compose<T: ExecuteCompose>(
stack: &str,
service: Option<String>,
user: &User,
set_in_progress: impl Fn(&mut StackActionState),
mut update: Update,
extras: T::Extras,
) -> anyhow::Result<Update> {
let (stack, server) =
get_stack_and_server(stack, user, PermissionLevel::Execute, true)
.await?;
// get the action state for the stack (or insert default).
let action_state =
action_states().stack.get_or_insert_default(&stack.id).await;
// Will check to ensure stack not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state.update(set_in_progress)?;
// Send update here for frontend to recheck action state
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
if let Some(service) = &service {
update.logs.push(Log::simple(
&format!("Service: {service}"),
format!("Execution requested for service stack {service}"),
))
}
update
.logs
.push(T::execute(periphery, stack, service, extras).await?);
// Ensure cached stack state up to date by updating server cache
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
impl ExecuteCompose for StartStack {
type Extras = ();
async fn execute(
periphery: PeripheryClient,
stack: Stack,
service: Option<String>,
_: Self::Extras,
) -> anyhow::Result<Log> {
let service = service
.map(|service| format!(" {service}"))
.unwrap_or_default();
periphery
.request(ComposeExecution {
project: stack.project_name(false),
command: format!("start{service}"),
})
.await
}
}
impl ExecuteCompose for RestartStack {
type Extras = ();
async fn execute(
periphery: PeripheryClient,
stack: Stack,
service: Option<String>,
_: Self::Extras,
) -> anyhow::Result<Log> {
let service = service
.map(|service| format!(" {service}"))
.unwrap_or_default();
periphery
.request(ComposeExecution {
project: stack.project_name(false),
command: format!("restart{service}"),
})
.await
}
}
impl ExecuteCompose for PauseStack {
type Extras = ();
async fn execute(
periphery: PeripheryClient,
stack: Stack,
service: Option<String>,
_: Self::Extras,
) -> anyhow::Result<Log> {
let service = service
.map(|service| format!(" {service}"))
.unwrap_or_default();
periphery
.request(ComposeExecution {
project: stack.project_name(false),
command: format!("pause{service}"),
})
.await
}
}
impl ExecuteCompose for UnpauseStack {
type Extras = ();
async fn execute(
periphery: PeripheryClient,
stack: Stack,
service: Option<String>,
_: Self::Extras,
) -> anyhow::Result<Log> {
let service = service
.map(|service| format!(" {service}"))
.unwrap_or_default();
periphery
.request(ComposeExecution {
project: stack.project_name(false),
command: format!("unpause{service}"),
})
.await
}
}
impl ExecuteCompose for StopStack {
type Extras = Option<i32>;
async fn execute(
periphery: PeripheryClient,
stack: Stack,
service: Option<String>,
timeout: Self::Extras,
) -> anyhow::Result<Log> {
let service = service
.map(|service| format!(" {service}"))
.unwrap_or_default();
let maybe_timeout = maybe_timeout(timeout);
periphery
.request(ComposeExecution {
project: stack.project_name(false),
command: format!("stop{maybe_timeout}{service}"),
})
.await
}
}
impl ExecuteCompose for DestroyStack {
type Extras = (Option<i32>, bool);
async fn execute(
periphery: PeripheryClient,
stack: Stack,
service: Option<String>,
(timeout, remove_orphans): Self::Extras,
) -> anyhow::Result<Log> {
let service = service
.map(|service| format!(" {service}"))
.unwrap_or_default();
let maybe_timeout = maybe_timeout(timeout);
let maybe_remove_orphans = if remove_orphans {
" --remove-orphans"
} else {
""
};
periphery
.request(ComposeExecution {
project: stack.project_name(false),
command: format!(
"down{maybe_timeout}{maybe_remove_orphans}{service}"
),
})
.await
}
}
pub fn maybe_timeout(timeout: Option<i32>) -> String {
if let Some(timeout) = timeout {
format!(" --timeout {timeout}")
} else {
String::new()
}
}
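For orientation, a sketch of how a resolver might drive ExecuteCompose for one variant. The request field names and the starting flag on StackActionState are assumptions for illustration, not taken from this changeset; the sketch assumes the items defined above are in scope:

// Hypothetical caller of execute_compose for StartStack.
async fn start_stack_example(
  stack_id_or_name: String,
  service: Option<String>,
  user: monitor_client::entities::user::User,
  update: monitor_client::entities::update::Update,
) -> anyhow::Result<monitor_client::entities::update::Update> {
  execute_compose::<StartStack>(
    &stack_id_or_name,
    service,
    &user,
    // Mark the stack busy while the start runs (field name assumed).
    |state| state.starting = true,
    update,
    (), // StartStack takes no extras
  )
  .await
}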

View File

@@ -0,0 +1,97 @@
use anyhow::{anyhow, Context};
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
api::write::RefreshStackCache,
entities::{
permission::PermissionLevel,
server::{Server, ServerState},
stack::Stack,
user::{stack_user, User},
},
};
use mungos::find::find_collect;
use regex::Regex;
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{db_client, State},
};
use super::query::get_server_with_status;
pub mod execute;
pub mod remote;
pub mod services;
pub fn spawn_stack_refresh_loop() {
let interval: Timelength = core_config()
.stack_poll_interval
.try_into()
.expect("Invalid stack poll interval");
tokio::spawn(async move {
refresh_stacks().await;
loop {
wait_until_timelength(interval, 3000).await;
refresh_stacks().await;
}
});
}
async fn refresh_stacks() {
let Ok(stacks) =
find_collect(&db_client().await.stacks, None, None)
.await
.inspect_err(|e| {
warn!("failed to get stacks from db in refresh task | {e:#}")
})
else {
return;
};
for stack in stacks {
State
.resolve(
RefreshStackCache { stack: stack.id },
stack_user().clone(),
)
.await
.inspect_err(|e| {
warn!("failed to refresh stack cache in refresh task | stack: {} | {e:#}", stack.name)
})
.ok();
}
}
pub async fn get_stack_and_server(
stack: &str,
user: &User,
permission_level: PermissionLevel,
block_if_server_unreachable: bool,
) -> anyhow::Result<(Stack, Server)> {
let stack = resource::get_check_permissions::<Stack>(
stack,
user,
permission_level,
)
.await?;
if stack.config.server_id.is_empty() {
return Err(anyhow!("Stack has no server configured"));
}
let (server, status) =
get_server_with_status(&stack.config.server_id).await?;
if block_if_server_unreachable && status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
Ok((stack, server))
}
pub fn compose_container_match_regex(container_name: &str) -> anyhow::Result<Regex> {
let regex = format!("^{container_name}-?[0-9]*$");
Regex::new(&regex).with_context(|| format!("failed to construct valid regex from {regex}"))
}
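A quick illustration of what the pattern accepts: for a container named stack-db, the generated regex is ^stack-db-?[0-9]*$, so bare names and compose replica suffixes match while longer names do not.

// Sketch using the helper defined above.
fn regex_example() -> anyhow::Result<()> {
  let re = compose_container_match_regex("stack-db")?;
  assert!(re.is_match("stack-db"));
  assert!(re.is_match("stack-db-2"));
  assert!(!re.is_match("stack-db-init"));
  assert!(!re.is_match("other-stack-db"));
  Ok(())
}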

View File

@@ -0,0 +1,122 @@
use std::{fs, path::{Path, PathBuf}};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::entities::{
stack::{ComposeContents, Stack},
update::Log,
CloneArgs,
};
use crate::{
config::core_config,
helpers::{git_token, random_string},
};
/// Returns Result<(successfully read contents, error contents, logs, commit short hash, commit message)>
pub async fn get_remote_compose_contents(
stack: &Stack,
// Collect any files which are missing in the repo.
mut missing_files: Option<&mut Vec<String>>,
) -> anyhow::Result<(
// Successful contents
Vec<ComposeContents>,
// error contents
Vec<ComposeContents>,
// logs
Vec<Log>,
// commit short hash
Option<String>,
// commit message
Option<String>,
)> {
let repo_path =
core_config().repo_directory.join(random_string(10));
let (logs, hash, message) = clone_remote_repo(&repo_path, stack)
.await
.context("failed to clone stack repo")?;
let run_directory = repo_path.join(&stack.config.run_directory);
// This removes any intermediate '/./' components, which can be a problem on some operating systems.
let run_directory = run_directory.components().collect::<PathBuf>();
let mut oks = Vec::new();
let mut errs = Vec::new();
for path in stack.file_paths() {
let file_path = run_directory.join(path);
if !file_path.exists() {
if let Some(missing_files) = &mut missing_files {
missing_files.push(path.to_string());
}
}
// If the file does not exist, it will show up in the error case, so the log is still produced.
match fs::read_to_string(&file_path).with_context(|| {
format!("failed to read file contents from {file_path:?}")
}) {
Ok(contents) => oks.push(ComposeContents {
path: path.to_string(),
contents,
}),
Err(e) => errs.push(ComposeContents {
path: path.to_string(),
contents: format_serror(&e.into()),
}),
}
}
if repo_path.exists() {
if let Err(e) = std::fs::remove_dir_all(&repo_path) {
warn!("failed to remove stack repo directory | {e:?}")
}
}
Ok((oks, errs, logs, hash, message))
}
/// Returns (logs, hash, message)
pub async fn clone_remote_repo(
repo_path: &Path,
stack: &Stack,
) -> anyhow::Result<(Vec<Log>, Option<String>, Option<String>)> {
let mut clone_args: CloneArgs = stack.into();
let config = core_config();
let access_token = match (&clone_args.account, &clone_args.provider)
{
(None, _) => None,
(Some(_), None) => {
return Err(anyhow!(
"Account is configured, but provider is empty"
))
}
(Some(username), Some(provider)) => {
git_token(provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"),
)?
}
};
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
clone_args.destination = Some(repo_path.display().to_string());
git::clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
)
.await
.context("failed to clone stack repo")
.map(|(a, b, c, _)| (a, b, c))
}
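A hedged sketch of a caller collecting missing compose files alongside the read results; the stack is assumed to be repo-based.
// Illustrative sketch, not part of the diff above; `stack` is assumed to be
// configured with a repo and file_paths.
async fn sketch(stack: &Stack) -> anyhow::Result<()> {
  let mut missing = Vec::new();
  let (oks, errs, _logs, hash, _message) =
    get_remote_compose_contents(stack, Some(&mut missing)).await?;
  if !missing.is_empty() {
    warn!("compose files missing from repo | commit: {hash:?} | files: {missing:?}");
  }
  // `oks` holds readable file contents, `errs` holds formatted read errors.
  let _ = (oks, errs);
  Ok(())
}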

View File

@@ -0,0 +1,81 @@
use anyhow::Context;
use monitor_client::entities::stack::{
ComposeContents, ComposeFile, ComposeService, Stack,
StackServiceNames,
};
use crate::helpers::stack::remote::get_remote_compose_contents;
/// Passing `fresh = true` re-extracts services from the compose file, whether defined locally or in a remote repo.
pub async fn extract_services_from_stack(
stack: &Stack,
fresh: bool,
) -> anyhow::Result<Vec<StackServiceNames>> {
if !fresh {
if let Some(services) = &stack.info.deployed_services {
return Ok(services.clone());
} else {
return Ok(stack.info.latest_services.clone());
}
}
let compose_contents = if stack.config.file_contents.is_empty() {
let (contents, errors, _, _, _) =
get_remote_compose_contents(stack, None).await.context(
"failed to get remote compose files to extract services",
)?;
if !errors.is_empty() {
let mut e = anyhow::Error::msg("Trace root");
for err in errors {
e = e.context(format!("{}: {}", err.path, err.contents));
}
return Err(
e.context("Failed to read one or more remote compose files"),
);
}
contents
} else {
vec![ComposeContents {
path: String::from("compose.yaml"),
contents: stack.config.file_contents.clone(),
}]
};
let mut res = Vec::new();
for ComposeContents { path, contents } in &compose_contents {
extract_services_into_res(
&stack.project_name(true),
contents,
&mut res,
)
.with_context(|| {
format!("failed to extract services from file at path: {path}")
})?;
}
Ok(res)
}
pub fn extract_services_into_res(
project_name: &str,
compose_contents: &str,
res: &mut Vec<StackServiceNames>,
) -> anyhow::Result<()> {
let compose = serde_yaml::from_str::<ComposeFile>(compose_contents)
.context("failed to parse service names from compose contents")?;
let services = compose.services.into_iter().map(
|(service_name, ComposeService { container_name, .. })| {
StackServiceNames {
container_name: container_name.unwrap_or_else(|| {
format!("{project_name}-{service_name}")
}),
service_name,
}
},
);
res.extend(services);
Ok(())
}
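A hedged sketch of the extraction above: services without an explicit container_name fall back to "{project}-{service}". The compose contents and project name are assumed.
// Illustrative sketch, not part of the diff above; compose contents, project
// name, and image names are assumed.
fn sketch() -> anyhow::Result<()> {
  let compose = r#"
services:
  mongo:
    image: mongo
  core:
    image: example/core
    container_name: my-core
"#;
  let mut res = Vec::new();
  extract_services_into_res("monitor", compose, &mut res)?;
  // mongo has no container_name, so it becomes "{project}-{service}":
  //   StackServiceNames { service_name: "mongo", container_name: "monitor-mongo" }
  // core keeps its explicit container_name:
  //   StackServiceNames { service_name: "core", container_name: "my-core" }
  Ok(())
}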

View File

@@ -0,0 +1,814 @@
use std::{collections::HashMap, time::Duration};
use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::{
execute::{Deploy, DeployStack},
read::ListBuildVersions,
},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentImage, DeploymentState,
PartialDeploymentConfig,
},
stack::{PartialStackConfig, Stack, StackConfig, StackState},
sync::SyncDeployUpdate,
toml::ResourceToml,
update::{Log, ResourceTarget},
user::sync_user,
},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
config::core_config,
helpers::{
random_string, stack::remote::clone_remote_repo,
update::init_execution_update,
},
state::{deployment_status_cache, stack_status_cache, State},
};
use super::resource::{AllResourcesById, ResourceSync};
/// All entries in here are due to be deployed,
/// after the given dependencies,
/// with the given reason.
pub type ToDeployCache =
Vec<(ResourceTarget, String, Vec<ResourceTarget>)>;
#[derive(Clone, Copy)]
pub struct SyncDeployParams<'a> {
pub deployments: &'a [ResourceToml<PartialDeploymentConfig>],
// Names to deployments
pub deployment_map: &'a HashMap<String, Deployment>,
pub stacks: &'a [ResourceToml<PartialStackConfig>],
// Names to stacks
pub stack_map: &'a HashMap<String, Stack>,
pub all_resources: &'a AllResourcesById,
}
pub async fn deploy_from_cache(
mut to_deploy: ToDeployCache,
logs: &mut Vec<Log>,
) {
let mut log = format!(
"{}: running executions to sync deployment / stack state",
muted("INFO")
);
let mut round = 1;
let user = sync_user();
while !to_deploy.is_empty() {
// Collect all waiting deployments without waiting dependencies.
let good_to_deploy = to_deploy
.iter()
.filter(|(_, _, after)| {
to_deploy
.iter()
.all(|(target, _, _)| !after.contains(target))
})
// The target / reason need to be cloned out so that to_deploy is not borrowed here.
// to_deploy will be mutably accessed later.
.map(|(target, reason, _)| (target.clone(), reason.clone()))
.collect::<HashMap<_, _>>();
// Deploy the ones ready for deployment
let res = join_all(good_to_deploy.iter().map(
|(target, reason)| async move {
let res = async {
match &target {
ResourceTarget::Deployment(name) => {
let req = ExecuteRequest::Deploy(Deploy {
deployment: name.to_string(),
stop_signal: None,
stop_time: None,
});
let update = init_execution_update(&req, user).await?;
let ExecuteRequest::Deploy(req) = req else {
unreachable!()
};
State.resolve(req, (user.to_owned(), update)).await
}
ResourceTarget::Stack(name) => {
let req = ExecuteRequest::DeployStack(DeployStack {
stack: name.to_string(),
stop_time: None,
});
let update = init_execution_update(&req, user).await?;
let ExecuteRequest::DeployStack(req) = req else {
unreachable!()
};
State.resolve(req, (user.to_owned(), update)).await
}
_ => unreachable!(),
}
}
.await;
(target, reason, res)
},
))
.await;
let mut has_error = false;
// Log results of deploy
for (target, reason, res) in res {
let (resource, name) = target.extract_variant_id();
if let Err(e) = res {
has_error = true;
log.push_str(&format!(
"\n{}: failed to deploy {resource} '{}' in round {} | {e:#}",
colored("ERROR", Color::Red),
bold(name),
bold(round)
));
} else {
log.push_str(&format!(
"\n{}: deployed {resource} '{}' in round {} with reason: {reason}",
muted("INFO"),
bold(name),
bold(round)
));
}
}
// Early exit if any deploy has errors
if has_error {
log.push_str(&format!(
"\n{}: exited in round {} {}",
muted("INFO"),
bold(round),
colored("with errors", Color::Red)
));
logs.push(Log::error("Sync Deploy", log));
return;
}
// Remove the deployed ones from 'to_deploy'
to_deploy
.retain(|(target, _, _)| !good_to_deploy.contains_key(target));
// If another round is needed, the remaining entries depend on a previous round.
// Sleep for 1s to give the previous round time to start up.
if !to_deploy.is_empty() {
// Increment the round
round += 1;
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
log.push_str(&format!(
"\n{}: finished after {} round{}",
muted("INFO"),
bold(round),
(round > 1).then_some("s").unwrap_or_default()
));
logs.push(Log::simple("Sync Deploy", log));
}
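A minimal sketch of the round selection above, with assumed targets and reasons: an entry deploys only once no still-pending entry appears in its `after` list.
// Illustrative sketch, not part of the diff above; targets and reasons are assumed.
fn round_selection_sketch() {
  let to_deploy: ToDeployCache = vec![
    (
      ResourceTarget::Stack("mongo".to_string()),
      String::from("deploy on creation"),
      Vec::new(),
    ),
    (
      ResourceTarget::Deployment("core".to_string()),
      String::from("deployment config has changed"),
      vec![ResourceTarget::Stack("mongo".to_string())],
    ),
  ];
  // Round 1 keeps entries whose `after` contains no still-pending target,
  // so only the mongo stack goes out first; core follows in round 2.
  let round_one = to_deploy
    .iter()
    .filter(|(_, _, after)| {
      to_deploy.iter().all(|(target, _, _)| !after.contains(target))
    })
    .map(|(target, _, _)| target.clone())
    .collect::<Vec<_>>();
  assert_eq!(round_one, vec![ResourceTarget::Stack("mongo".to_string())]);
}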
pub async fn get_updates_for_view(
params: SyncDeployParams<'_>,
) -> Option<SyncDeployUpdate> {
let inner = async {
let mut update = SyncDeployUpdate {
to_deploy: 0,
log: String::from("Deploy Updates\n-------------------\n"),
};
let mut lines = Vec::<String>::new();
for (target, reason, after) in build_deploy_cache(params).await? {
update.to_deploy += 1;
let mut line = format!(
"{}: {}. reason: {reason}",
colored("Deploy", Color::Green),
bold(format!("{target:?}")),
);
if !after.is_empty() {
line.push_str(&format!(
"\n{}: {}",
colored("After", Color::Blue),
after
.iter()
.map(|target| format!("{target:?}"))
.collect::<Vec<_>>()
.join(", ")
))
}
lines.push(line);
}
update.log.push_str(&lines.join("\n-------------------\n"));
anyhow::Ok((update.to_deploy > 0).then_some(update))
};
match inner.await {
Ok(res) => res,
Err(e) => Some(SyncDeployUpdate {
to_deploy: 0,
log: format_serror(
&e.context("failed to get deploy updates for view").into(),
),
}),
}
}
/// Entries are keyed by ResourceTargets wrapping "name" instead of "id".
/// If entry is None, it is confirmed no-deploy.
/// If it is Some, it is confirmed deploy with provided reason and dependencies.
///
/// Used to build up resources to deploy earlier in the sync.
type ToDeployCacheInner =
HashMap<ResourceTarget, Option<(String, Vec<ResourceTarget>)>>;
/// Maps build ids to latest versions as string.
type BuildVersionCache = HashMap<String, String>;
pub async fn build_deploy_cache(
params: SyncDeployParams<'_>,
) -> anyhow::Result<ToDeployCache> {
let mut cache = ToDeployCacheInner::new();
let mut build_version_cache = BuildVersionCache::new();
// Just ensure they are all in the cache by looping through them all
for deployment in params.deployments {
build_cache_for_deployment(
deployment,
params,
&mut cache,
&mut build_version_cache,
)
.await?;
}
for stack in params.stacks {
build_cache_for_stack(
stack,
params,
&mut cache,
&mut build_version_cache,
)
.await?;
}
let cache = cache
.into_iter()
.filter_map(|(target, entry)| {
let (reason, after) = entry?;
Some((target, (reason, after)))
})
.collect::<HashMap<_, _>>();
// Have to clone here to use it after 'into_iter' below.
// All entries in cache at this point are deploying.
let clone = cache.clone();
Ok(
cache
.into_iter()
.map(|(target, (reason, mut after))| {
// Only keep targets which are deploying.
after.retain(|target| clone.contains_key(target));
(target, reason, after)
})
.collect(),
)
}
type BuildRes<'a> = std::pin::Pin<
Box<
dyn std::future::Future<Output = anyhow::Result<()>> + Send + 'a,
>,
>;
fn build_cache_for_deployment<'a>(
deployment: &'a ResourceToml<PartialDeploymentConfig>,
SyncDeployParams {
deployments,
deployment_map,
stacks,
stack_map,
all_resources,
}: SyncDeployParams<'a>,
cache: &'a mut ToDeployCacheInner,
build_version_cache: &'a mut BuildVersionCache,
) -> BuildRes<'a> {
Box::pin(async move {
let target = ResourceTarget::Deployment(deployment.name.clone());
// Check the cache first, and return early if this target was already handled.
if cache.contains_key(&target) {
return Ok(());
}
// Check if deployment doesn't have "deploy" enabled.
if !deployment.deploy {
cache.insert(target, None);
return Ok(());
}
let after = get_after_as_resource_targets(
&deployment.name,
&deployment.after,
deployment_map,
deployments,
stack_map,
stacks,
)?;
let Some(original) = deployment_map.get(&deployment.name) else {
// None case: the deployment hasn't been created yet, so it should definitely deploy.
cache.insert(
target,
Some((String::from("deploy on creation"), after)),
);
return Ok(());
};
let status = &deployment_status_cache()
.get_or_insert_default(&original.id)
.await
.curr;
let state = status.state;
match state {
DeploymentState::Unknown => {
// Can't do anything with unknown state
cache.insert(target, None);
return Ok(());
}
DeploymentState::Running => {
// Here can diff the changes, to see if they merit a redeploy.
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: DeploymentConfig =
deployment.config.clone().into();
let mut config: PartialDeploymentConfig = config.into();
Deployment::validate_partial_config(&mut config);
let mut diff = Deployment::get_diff(
original.config.clone(),
config,
all_resources,
)?;
Deployment::validate_diff(&mut diff);
// Needs to only check config fields that affect docker run
let changed = diff.server_id.is_some()
|| diff.image.is_some()
|| diff.image_registry_account.is_some()
|| diff.skip_secret_interp.is_some()
|| diff.network.is_some()
|| diff.restart.is_some()
|| diff.command.is_some()
|| diff.extra_args.is_some()
|| diff.ports.is_some()
|| diff.volumes.is_some()
|| diff.environment.is_some()
|| diff.labels.is_some();
if changed {
cache.insert(
target,
Some((
String::from("deployment config has changed"),
after,
)),
);
return Ok(());
}
}
// All other cases will require Deploy to enter Running state.
_ => {
cache.insert(
target,
Some((
format!(
"deployment has {} state",
colored(state, Color::Red)
),
after,
)),
);
return Ok(());
}
};
// We know the config hasn't changed at this point, but still need
// to check if attached build has updated. Can check original for this (know it hasn't changed)
if let DeploymentImage::Build { build_id, version } =
&original.config.image
{
// check if version is none, ie use latest build
if version.is_none() {
let deployed_version = status
.container
.as_ref()
.and_then(|c| c.image.split(':').last())
.unwrap_or("0.0.0");
match build_version_cache.get(build_id) {
Some(version) if deployed_version != version => {
cache.insert(
target,
Some((
format!("build has new version: {version}"),
after,
)),
);
return Ok(());
}
// Build version is the same, still need to check 'after'
Some(_) => {}
None => {
let Some(version) = State
.resolve(
ListBuildVersions {
build: build_id.to_string(),
limit: Some(1),
..Default::default()
},
sync_user().to_owned(),
)
.await
.context("failed to get build versions")?
.pop()
else {
// The build has never been built.
// Skip deploy regardless of 'after' (it can't be deployed)
// Not sure how this would be reached on Running deployment...
cache.insert(target, None);
return Ok(());
};
let version = version.version.to_string();
build_version_cache
.insert(build_id.to_string(), version.clone());
if deployed_version != version {
// Same as 'Some' case out of the cache
cache.insert(
target,
Some((
format!("build has new version: {version}"),
after,
)),
);
return Ok(());
}
}
}
}
};
// Check 'after' to see if they deploy.
insert_target_using_after_list(
target,
after,
SyncDeployParams {
deployments,
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,
)
.await
})
}
fn build_cache_for_stack<'a>(
stack: &'a ResourceToml<PartialStackConfig>,
SyncDeployParams {
deployments,
deployment_map,
stacks,
stack_map,
all_resources,
}: SyncDeployParams<'a>,
cache: &'a mut ToDeployCacheInner,
build_version_cache: &'a mut BuildVersionCache,
) -> BuildRes<'a> {
Box::pin(async move {
let target = ResourceTarget::Stack(stack.name.clone());
// Check the cache first, and return early if this target was already handled.
if cache.contains_key(&target) {
return Ok(());
}
// Check if stack doesn't have "deploy" enabled.
if !stack.deploy {
cache.insert(target, None);
return Ok(());
}
let after = get_after_as_resource_targets(
&stack.name,
&stack.after,
deployment_map,
deployments,
stack_map,
stacks,
)?;
let Some(original) = stack_map.get(&stack.name) else {
// None case: the stack hasn't been created yet, so it should definitely deploy.
cache.insert(
target,
Some((String::from("deploy on creation"), after)),
);
return Ok(());
};
let status = &stack_status_cache()
.get_or_insert_default(&original.id)
.await
.curr;
let state = status.state;
match state {
StackState::Unknown => {
// Can't do anything with unknown state
cache.insert(target, None);
return Ok(());
}
StackState::Running => {
// Here can diff the changes, to see if they merit a redeploy.
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: StackConfig = stack.config.clone().into();
let mut config: PartialStackConfig = config.into();
Stack::validate_partial_config(&mut config);
let mut diff = Stack::get_diff(
original.config.clone(),
config,
all_resources,
)?;
Stack::validate_diff(&mut diff);
// Needs to only check config fields that affect docker compose command
let changed = diff.server_id.is_some()
|| diff.project_name.is_some()
|| diff.run_directory.is_some()
|| diff.file_paths.is_some()
|| diff.file_contents.is_some()
|| diff.skip_secret_interp.is_some()
|| diff.extra_args.is_some()
|| diff.environment.is_some()
|| diff.env_file_path.is_some()
|| diff.repo.is_some()
|| diff.branch.is_some()
|| diff.commit.is_some();
if changed {
cache.insert(
target,
Some((String::from("stack config has changed"), after)),
);
return Ok(());
}
}
// All other cases will require Deploy to enter Running state.
_ => {
cache.insert(
target,
Some((
format!("stack has {} state", colored(state, Color::Red)),
after,
)),
);
return Ok(());
}
};
// We know the config hasn't changed at this point, but still need
// to check whether it's a repo-based stack and the hash has updated.
// Can use 'original' for this (config hasn't changed)
if stack.latest_hash {
if let Some(deployed_hash) = &original.info.deployed_hash {
let repo_path =
core_config().repo_directory.join(random_string(10));
let (_, hash, _) = clone_remote_repo(&repo_path, original)
.await
.context("failed to get latest hash for repo based stack")
.with_context(|| {
format!(
"Stack {} {}",
bold(&stack.name),
colored("has errors", Color::Red)
)
})?;
if let Some(hash) = hash {
if &hash != deployed_hash {
cache.insert(
target,
Some((
format!(
"outdated hash. deployed: {} -> latest: {}",
colored(deployed_hash, Color::Red),
colored(hash, Color::Green)
),
after,
)),
);
return Ok(());
}
}
}
}
// Check 'after' to see if they deploy.
insert_target_using_after_list(
target,
after,
SyncDeployParams {
deployments,
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,
)
.await
})
}
async fn insert_target_using_after_list<'a>(
target: ResourceTarget,
after: Vec<ResourceTarget>,
SyncDeployParams {
deployments,
deployment_map,
stacks,
stack_map,
all_resources,
}: SyncDeployParams<'a>,
cache: &'a mut ToDeployCacheInner,
build_version_cache: &'a mut BuildVersionCache,
) -> anyhow::Result<()> {
for parent in &after {
match cache.get(parent) {
Some(Some(_)) => {
// a parent will deploy
let (variant, name) = parent.extract_variant_id();
cache.insert(
target.to_owned(),
Some((
format!(
"{variant} parent dependency '{}' is deploying",
bold(name)
),
after,
)),
);
return Ok(());
}
// The parent will not deploy, do nothing here.
Some(None) => {}
None => {
match parent {
ResourceTarget::Deployment(name) => {
let Some(parent_deployment) =
deployments.iter().find(|d| &d.name == name)
else {
// The parent is not in the sync, so won't be deploying
// Note that cross-sync deploy dependencies are not currently supported.
continue;
};
// Recurse to add the parent to cache, then check again.
build_cache_for_deployment(
parent_deployment,
SyncDeployParams {
deployments,
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,
)
.await?;
match cache.get(parent) {
Some(Some(_)) => {
// Same as the 'Some' case above
let (variant, name) = parent.extract_variant_id();
cache.insert(
target.to_owned(),
Some((
format!(
"{variant} parent dependency '{}' is deploying",
bold(name)
),
after,
)),
);
return Ok(());
},
// The parent will not deploy, do nothing here.
Some(None) => {},
None => return Err(anyhow!("Did not find parent in cache after build recursion. This should not happen."))
}
}
ResourceTarget::Stack(name) => {
let Some(parent_stack) =
stacks.iter().find(|d| &d.name == name)
else {
// The parent is not in the sync, so won't be deploying
// Note that cross-sync deploy dependencies are not currently supported.
continue;
};
// Recurse to add the parent to cache, then check again.
build_cache_for_stack(
parent_stack,
SyncDeployParams {
deployments,
deployment_map,
stacks,
stack_map,
all_resources,
},
cache,
build_version_cache,
)
.await?;
match cache.get(parent) {
Some(Some(_)) => {
// Same as the 'Some' case above
let (variant, name) = parent.extract_variant_id();
cache.insert(
target.to_owned(),
Some((
format!(
"{variant} parent dependency '{}' is deploying",
bold(name)
),
after,
)),
);
return Ok(());
},
// The parent will not deploy, do nothing here.
Some(None) => {},
None => return Err(anyhow!("Did not find parent in cache after build recursion. This should not happen."))
}
}
_ => unreachable!(),
}
}
}
}
// If it has reached here, it's not deploying
cache.insert(target, None);
Ok(())
}
fn get_after_as_resource_targets(
resource_name: &str,
after: &[String],
// Names to deployments
deployment_map: &HashMap<String, Deployment>,
deployments: &[ResourceToml<PartialDeploymentConfig>],
// Names to stacks
stack_map: &HashMap<String, Stack>,
stacks: &[ResourceToml<PartialStackConfig>],
) -> anyhow::Result<Vec<ResourceTarget>> {
after
.iter()
.map(|name| match deployment_map.get(name) {
Some(_) => Ok(ResourceTarget::Deployment(name.clone())),
None => {
if deployments
.iter()
.any(|deployment| deployment.name.as_str() == resource_name)
{
Ok(ResourceTarget::Deployment(name.clone()))
} else {
match stack_map.get(name) {
Some(_) => Ok(ResourceTarget::Stack(name.clone())),
None => {
if stacks
.iter()
.any(|stack| stack.name.as_str() == resource_name)
{
Ok(ResourceTarget::Stack(name.clone()))
} else {
Err(anyhow!("failed to match deploy dependency in 'after' list | resource: {resource_name} | dependency: {name}"))
}
}
}
}
}
})
.collect()
}

View File

@@ -1,858 +0,0 @@
use std::{collections::HashMap, time::Duration};
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::{execute::Deploy, read::GetBuildVersions},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentImage, DeploymentState,
PartialDeploymentConfig,
},
sync::SyncUpdate,
tag::Tag,
toml::ResourceToml,
update::{Log, ResourceTarget},
user::sync_user,
},
};
use mungos::find::find_collect;
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update,
resource::MonitorResource,
state::{deployment_status_cache, State},
};
use super::resource::{
run_update_description, run_update_tags, AllResourcesById,
ResourceSync,
};
pub type ToUpdate = Vec<ToUpdateItem>;
pub type ToCreate = Vec<ResourceToml<PartialDeploymentConfig>>;
/// Vec of resource names
pub type ToDelete = Vec<String>;
type UpdatesResult = (ToCreate, ToUpdate, ToDelete);
pub struct ToUpdateItem {
pub id: String,
pub resource: ResourceToml<PartialDeploymentConfig>,
pub update_description: bool,
pub update_tags: bool,
pub deploy: bool,
}
/// Turns all the diffs into a readable string
pub async fn get_updates_for_view(
resources: Vec<ResourceToml<PartialDeploymentConfig>>,
delete: bool,
all_resources: &AllResourcesById,
id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<Option<SyncUpdate>> {
let map = find_collect(Deployment::coll().await, None, None)
.await
.context("failed to get deployments from db")?
.into_iter()
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let mut update = SyncUpdate {
log: format!("{} Updates", Deployment::resource_type()),
..Default::default()
};
let mut to_delete = Vec::<String>::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
update.to_delete += 1;
to_delete.push(resource.name.clone())
}
}
}
let mut to_deploy_cache = HashMap::<String, bool>::new();
let mut to_deploy_build_cache = HashMap::<String, String>::new();
for mut resource in resources.clone() {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: DeploymentConfig = resource.config.into();
resource.config = config.into();
Deployment::validate_partial_config(&mut resource.config);
let mut diff = Deployment::get_diff(
original.config.clone(),
resource.config,
all_resources,
)?;
Deployment::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
let (to_deploy, state, reason) = extract_to_deploy_and_state(
all_resources,
&map,
&resources,
resource.name.clone(),
&mut to_deploy_cache,
&mut to_deploy_build_cache,
)
.await?;
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& !to_deploy
&& resource.description == original.description
&& resource.tags == original_tags
{
if state == DeploymentState::Unknown {
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\nDeployment sync actions could not be computed due to Unknown deployment state\n-------------------",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&resource.name)
));
}
continue;
}
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("UPDATE", Color::Blue),
Deployment::resource_type(),
bold(&resource.name)
));
let mut lines = Vec::<String>::new();
if resource.description != original.description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.description, Color::Red),
muted("to"),
colored(&resource.description, Color::Green)
));
}
if resource.tags != original_tags {
let from =
colored(&format!("{:?}", original_tags), Color::Red);
let to =
colored(&format!("{:?}", resource.tags), Color::Green);
lines.push(format!(
"{}: 'tags'\n{}: {from}\n{}: {to}",
muted("field"),
muted("from"),
muted("to"),
));
}
lines.extend(diff.iter_field_diffs().map(
|FieldDiff { field, from, to }| {
format!(
"{}: '{field}'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(from, Color::Red),
muted("to"),
colored(to, Color::Green)
)
},
));
if state == DeploymentState::Unknown {
lines.push(format!(
"{}: Deployment sync actions {} due to Unknown deployment state",
colored("ERROR", Color::Red),
bold("could not be computed")
));
} else if to_deploy {
let mut line = if state == DeploymentState::Running {
format!(
"{}: {reason}, {}",
muted("deploy"),
bold("sync will trigger deploy")
)
} else {
format!(
"{}: deployment is currently in {} state, {}",
muted("deploy"),
colored(&state.to_string(), Color::Red),
bold("sync will trigger deploy")
)
};
if !resource.after.is_empty() {
line.push_str(&format!(
"\n{}: {:?}",
muted("deploy after"),
resource.after
));
}
lines.push(line);
}
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
None => {
update.to_create += 1;
let mut lines = vec![
format!(
"{}: {}",
muted("description"),
resource.description,
),
format!("{}: {:?}", muted("tags"), resource.tags,),
format!(
"{}: {}",
muted("config"),
serde_json::to_string_pretty(&resource.config)
.context("failed to serialize config to json")?
),
];
if resource.deploy {
lines.push(format!(
"{}: {}",
muted("will deploy"),
colored("true", Color::Green)
));
if !resource.after.is_empty() {
lines.push(format!(
"{}: {:?}",
muted("deploy after"),
resource.after
));
}
}
update.log.push_str(&format!(
"\n\n{}: {}: {}\n{}",
colored("CREATE", Color::Green),
Deployment::resource_type(),
bold(&resource.name),
lines.join("\n")
))
}
}
}
for name in to_delete {
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("DELETE", Color::Red),
Deployment::resource_type(),
bold(&name)
));
}
let any_change = update.to_create > 0
|| update.to_update > 0
|| update.to_delete > 0;
Ok(any_change.then_some(update))
}
/// Gets all the resources to update. For use in sync execution.
pub async fn get_updates_for_execution(
resources: Vec<ResourceToml<PartialDeploymentConfig>>,
delete: bool,
all_resources: &AllResourcesById,
id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<UpdatesResult> {
let map = find_collect(Deployment::coll().await, None, None)
.await
.context("failed to get deployments from db")?
.into_iter()
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let mut to_create = ToCreate::new();
let mut to_update = ToUpdate::new();
let mut to_delete = ToDelete::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
to_delete.push(resource.name.clone());
}
}
}
let mut to_deploy_cache = HashMap::<String, bool>::new();
let mut to_deploy_build_cache = HashMap::<String, String>::new();
for mut resource in resources.clone() {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: DeploymentConfig = resource.config.into();
resource.config = config.into();
Deployment::validate_partial_config(&mut resource.config);
let mut diff = Deployment::get_diff(
original.config.clone(),
resource.config,
all_resources,
)?;
Deployment::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
let (to_deploy, _state, _reason) =
extract_to_deploy_and_state(
all_resources,
&map,
&resources,
resource.name.clone(),
&mut to_deploy_cache,
&mut to_deploy_build_cache,
)
.await?;
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& !to_deploy
&& resource.description == original.description
&& resource.tags == original_tags
{
continue;
}
// Minimizes updates through diffing.
resource.config = diff.into();
let update = ToUpdateItem {
id: original.id.clone(),
update_description: resource.description
!= original.description,
update_tags: resource.tags != original_tags,
resource,
deploy: to_deploy,
};
to_update.push(update);
}
None => to_create.push(resource),
}
}
Ok((to_create, to_update, to_delete))
}
type Res<'a> = std::pin::Pin<
Box<
dyn std::future::Future<
Output = anyhow::Result<(bool, DeploymentState, String)>,
> + Send
+ 'a,
>,
>;
fn extract_to_deploy_and_state<'a>(
all_resources: &'a AllResourcesById,
map: &'a HashMap<String, Deployment>,
resources: &'a [ResourceToml<PartialDeploymentConfig>],
name: String,
// name to 'to_deploy'
cache: &'a mut HashMap<String, bool>,
// build id to latest built version string
build_cache: &'a mut HashMap<String, String>,
) -> Res<'a> {
Box::pin(async move {
let mut reason = String::new();
let Some(deployment) = resources.iter().find(|r| r.name == name)
else {
// this case should be unreachable, the names come off of a loop over resources
cache.insert(name, false);
return Ok((false, DeploymentState::Unknown, reason));
};
if deployment.deploy {
let Some(original) = map.get(&name) else {
// not created, definitely deploy
cache.insert(name, true);
// Don't need reason here, will be populated automatically
return Ok((true, DeploymentState::NotDeployed, reason));
};
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: DeploymentConfig = deployment.config.clone().into();
let mut config: PartialDeploymentConfig = config.into();
Deployment::validate_partial_config(&mut config);
let mut diff = Deployment::get_diff(
original.config.clone(),
config,
all_resources,
)?;
Deployment::validate_diff(&mut diff);
let status = &deployment_status_cache()
.get_or_insert_default(&original.id)
.await
.curr;
let state = status.state;
let mut to_deploy = match state {
DeploymentState::Unknown => false,
DeploymentState::Running => {
// Needs to only check config fields that affect docker run
let changed = diff.server_id.is_some()
|| diff.image.is_some()
|| diff.image_registry.is_some()
|| diff.skip_secret_interp.is_some()
|| diff.network.is_some()
|| diff.restart.is_some()
|| diff.command.is_some()
|| diff.extra_args.is_some()
|| diff.ports.is_some()
|| diff.volumes.is_some()
|| diff.environment.is_some()
|| diff.labels.is_some();
if changed {
reason = String::from("deployment config has changed")
}
changed
}
// All other cases will require Deploy to enter Running state.
// Don't need reason here as this case is handled outside, using returned state.
_ => true,
};
// Check if build attached, version latest, and there is a new build.
if !to_deploy {
// only need to check original, if diff.image was Some, to_deploy would be true.
if let DeploymentImage::Build { build_id, version } =
&original.config.image
{
// check if version is none, ie use latest build
if version.is_none() {
let deployed_version = status
.container
.as_ref()
.and_then(|c| c.image.split(':').last())
.unwrap_or("0.0.0");
match build_cache.get(build_id) {
Some(version) if deployed_version != version => {
to_deploy = true;
reason = format!(
"attached build has new version ({version})"
);
}
Some(_) => {}
None => {
let Some(version) = State
.resolve(
GetBuildVersions {
build: build_id.to_string(),
limit: Some(1),
..Default::default()
},
sync_user().to_owned(),
)
.await
.context("failed to get build versions")?
.pop()
else {
// this case shouldn't ever happen, how would deployment be deployed if build was never built?
return Ok((
false,
DeploymentState::NotDeployed,
reason,
));
};
let version = version.version.to_string();
build_cache
.insert(build_id.to_string(), version.clone());
if deployed_version != version {
to_deploy = true;
reason = format!(
"attached build has new version ({version})"
);
}
}
};
}
}
}
// Still need to check 'after' if they need deploy
if !to_deploy {
for name in &deployment.after {
match cache.get(name) {
Some(will_deploy) if *will_deploy => {
to_deploy = true;
reason = format!(
"parent dependency '{}' is deploying",
bold(name)
);
break;
}
Some(_) => {}
None => {
let (will_deploy, _, _) = extract_to_deploy_and_state(
all_resources,
map,
resources,
name.to_string(),
cache,
build_cache,
)
.await?;
if will_deploy {
to_deploy = true;
reason = format!(
"parent dependency '{}' is deploying",
bold(name)
);
break;
}
}
}
}
}
cache.insert(name, to_deploy);
Ok((to_deploy, state, reason))
} else {
// The state in this case doesn't matter and won't be read (as long as it isn't 'Unknown' which will log in all cases)
cache.insert(name, false);
Ok((false, DeploymentState::NotDeployed, reason))
}
})
}
pub async fn run_updates(
to_create: ToCreate,
to_update: ToUpdate,
to_delete: ToDelete,
) -> Option<Vec<Log>> {
if to_create.is_empty()
&& to_update.is_empty()
&& to_delete.is_empty()
{
return None;
}
let mut has_error = false;
let mut log = String::new();
// Collect all the deployment names that need to be deployed
// and their 'after' dependencies
let mut to_deploy = Vec::<(String, Vec<String>)>::new();
for resource in to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match crate::resource::create::<Deployment>(
&resource.name,
resource.config,
sync_user(),
)
.await
{
Ok(resource) => resource.id,
Err(e) => {
has_error = true;
log.push_str(&format!(
"\n{}: failed to create {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&name)
));
continue;
}
};
run_update_tags::<Deployment>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
run_update_description::<Deployment>(
id,
&name,
description,
&mut log,
&mut has_error,
)
.await;
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("created", Color::Green),
Deployment::resource_type(),
bold(&name)
));
if resource.deploy {
to_deploy.push((resource.name, resource.after));
}
}
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
deploy,
} in to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if update_description {
run_update_description::<Deployment>(
id.clone(),
&name,
description,
&mut log,
&mut has_error,
)
.await;
}
if update_tags {
run_update_tags::<Deployment>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
}
let mut config_update_error = false;
if !resource.config.is_none() {
if let Err(e) = crate::resource::update::<Deployment>(
&id,
resource.config,
sync_user(),
)
.await
{
has_error = true;
config_update_error = true;
log.push_str(&format!(
"\n{}: failed to update config on {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&name),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}' configuration",
muted("INFO"),
colored("updated", Color::Blue),
Deployment::resource_type(),
bold(&name)
));
}
}
if !config_update_error && deploy {
to_deploy.push((resource.name, resource.after));
}
}
for resource in to_delete {
if let Err(e) =
crate::resource::delete::<Deployment>(&resource, sync_user())
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&resource),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("deleted", Color::Red),
Deployment::resource_type(),
bold(&resource)
));
}
}
let mut logs = Vec::with_capacity(1);
let stage = format!("Update {}s", Deployment::resource_type());
if has_error {
let log = format!(
"running updates on {}s{log}",
Deployment::resource_type()
);
logs.push(Log::error(&stage, log));
return Some(logs);
} else if !log.is_empty() {
let log = format!(
"running updates on {}s{log}",
Deployment::resource_type()
);
logs.push(Log::simple(&stage, log));
}
if to_deploy.is_empty() {
return Some(logs);
}
let mut log = format!(
"{}: running executions to sync deployment state",
muted("INFO")
);
let mut round = 1;
while !to_deploy.is_empty() {
// Collect all waiting deployments without waiting dependencies.
let good_to_deploy = to_deploy
.iter()
.filter(|(_, after)| {
to_deploy.iter().all(|(name, _)| !after.contains(name))
})
.map(|(name, _)| name.clone())
.collect::<Vec<_>>();
// Deploy the ones ready for deployment
let res =
join_all(good_to_deploy.iter().map(|name| async move {
let res = async {
let req = ExecuteRequest::Deploy(Deploy {
deployment: name.to_string(),
stop_signal: None,
stop_time: None,
});
let user = sync_user();
let update = init_execution_update(&req, user).await?;
let ExecuteRequest::Deploy(req) = req else {
unreachable!()
};
State.resolve(req, (user.to_owned(), update)).await
}
.await;
(name, res)
}))
.await;
// Log results of deploy
for (name, res) in res {
if let Err(e) = res {
has_error = true;
log.push_str(&format!(
"\n{}: failed to deploy '{}' in round {} | {e:#}",
colored("ERROR", Color::Red),
bold(name),
bold(round)
));
} else {
log.push_str(&format!(
"\n{}: deployed '{}' in round {}",
muted("INFO"),
bold(name),
bold(round)
));
}
}
// Early exit if any deploy has errors
if has_error {
log.push_str(&format!(
"\n{}: exited in round {} {}",
muted("INFO"),
bold(round),
colored("with errors", Color::Red)
));
logs.push(Log::error("Sync Deployment State", log));
return Some(logs);
}
// Remove the deployed ones from 'to_deploy'
to_deploy.retain(|(name, _)| !good_to_deploy.contains(name));
// If there must be another round, these are dependent on the first round.
// Sleep for 1s to allow for first round to startup
if !to_deploy.is_empty() {
// Increment the round
round += 1;
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
log.push_str(&format!(
"\n{}: finished after {} round{}",
muted("INFO"),
bold(round),
(round > 1).then_some("s").unwrap_or_default()
));
logs.push(Log::simple("Sync Deployment State", log));
Some(logs)
}
impl ResourceSync for Deployment {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Deployment(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace the server id with name
original.server_id = resources
.servers
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
// need to replace the build id with name
if let DeploymentImage::Build { build_id, version } =
&original.image
{
original.image = DeploymentImage::Build {
build_id: resources
.builds
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: *version,
};
}
Ok(original.partial_diff(update))
}
}

View File

@@ -45,15 +45,17 @@ fn read_resources_recursive(
resources.servers.extend(more.servers);
resources.deployments.extend(more.deployments);
resources.stacks.extend(more.stacks);
resources.builds.extend(more.builds);
resources.repos.extend(more.repos);
resources.procedures.extend(more.procedures);
resources.builders.extend(more.builders);
resources.alerters.extend(more.alerters);
resources.builders.extend(more.builders);
resources.server_templates.extend(more.server_templates);
resources.resource_syncs.extend(more.resource_syncs);
resources.user_groups.extend(more.user_groups);
resources.variables.extend(more.variables);
Ok(())
} else if res.is_dir() {
let directory = fs::read_dir(path)

View File

@@ -5,40 +5,57 @@ use monitor_client::{
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::state::{db_client, State};
use crate::{
config::core_config,
state::{db_client, State},
};
pub mod deployment;
// pub mod deployment;
pub mod remote;
pub mod resource;
pub mod user_groups;
pub mod variables;
pub mod deploy;
mod file;
mod resources;
pub fn spawn_sync_refresh_loop() {
let interval: Timelength = core_config()
.sync_poll_interval
.try_into()
.expect("Invalid sync poll interval");
tokio::spawn(async move {
let db = db_client().await;
let user = sync_user();
refresh_syncs().await;
loop {
wait_until_timelength(Timelength::FiveMinutes, 0).await;
let Ok(syncs) = find_collect(&db.resource_syncs, None, None)
.await
.inspect_err(|e| warn!("failed to get resource syncs from db in refresh task | {e:#}")) else {
continue;
};
for sync in syncs {
State
.resolve(
RefreshResourceSyncPending { sync: sync.id },
user.clone(),
)
.await
.inspect_err(|e| {
warn!("failed to refresh resource sync in refresh task | sync: {} | {e:#}", sync.name)
})
.ok();
}
wait_until_timelength(interval, 0).await;
refresh_syncs().await;
}
});
}
async fn refresh_syncs() {
let Ok(syncs) =
find_collect(&db_client().await.resource_syncs, None, None)
.await
.inspect_err(|e| {
warn!(
"failed to get resource syncs from db in refresh task | {e:#}"
)
})
else {
return;
};
for sync in syncs {
State
.resolve(
RefreshResourceSyncPending { sync: sync.id },
sync_user().clone(),
)
.await
.inspect_err(|e| {
warn!("failed to refresh resource sync in refresh task | sync: {} | {e:#}", sync.name)
})
.ok();
}
}

View File

@@ -2,11 +2,14 @@ use std::fs;
use anyhow::{anyhow, Context};
use monitor_client::entities::{
sync::ResourceSync, to_monitor_name, toml::ResourcesToml,
update::Log, CloneArgs, LatestCommit,
sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs,
};
use crate::{config::core_config, state::resource_sync_lock_cache};
use crate::{
config::core_config,
helpers::{git_token, random_string},
state::resource_sync_lock_cache,
};
pub async fn get_remote_resources(
sync: &ResourceSync,
@@ -18,23 +21,28 @@ pub async fn get_remote_resources(
// commit message
String,
)> {
let name = to_monitor_name(&sync.name);
let clone_args: CloneArgs = sync.into();
let mut clone_args: CloneArgs = sync.into();
let config = core_config();
let github_token = clone_args
.github_account
.as_ref()
.map(|account| {
config.github_accounts.get(account).ok_or_else(|| {
anyhow!("did not find github token for account {account}")
})
})
.transpose()?
.cloned();
let access_token = match (&clone_args.account, &clone_args.provider)
{
(None, _) => None,
(Some(_), None) => {
return Err(anyhow!(
"Account is configured, but provider is empty"
))
}
(Some(username), Some(provider)) => {
git_token(provider, username, |https| clone_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"),
)?
}
};
fs::create_dir_all(&config.sync_directory)
fs::create_dir_all(&config.repo_directory)
.context("failed to create sync directory")?;
// lock simultaneous access to same directory
@@ -43,18 +51,29 @@ pub async fn get_remote_resources(
.await;
let _lock = lock.lock().await;
let mut logs =
git::clone(clone_args, &config.sync_directory, github_token)
.await
.context("failed to clone resource repo")?;
let repo_path = config.repo_directory.join(random_string(10));
// This overrides any other method of determining clone path.
clone_args.destination = Some(repo_path.display().to_string());
let repo_dir = config.sync_directory.join(&name);
let LatestCommit { hash, message } =
git::get_commit_hash_info(&repo_dir)
.await
.context("failed to get commit hash info")?;
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
let (mut logs, hash, message, _) = git::clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
)
.await
.context("failed to clone resource repo")?;
let hash = hash.context("failed to get commit hash")?;
let message =
message.context("failed to get commit hash message")?;
let repo_path = config.sync_directory.join(&sync.name);
let resource_path = repo_path.join(&sync.config.resource_path);
let res = super::file::read_resources(&resource_path).map(
@@ -64,8 +83,10 @@ pub async fn get_remote_resources(
},
);
if let Err(e) = std::fs::remove_dir_all(&repo_path) {
warn!("failed to remove sync repo directory | {e:?}")
if repo_path.exists() {
if let Err(e) = std::fs::remove_dir_all(&repo_path) {
warn!("failed to remove sync repo directory | {e:?}")
}
}
Ok((res, logs, hash, message))

View File

@@ -14,6 +14,7 @@ use monitor_client::{
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::SyncUpdate,
tag::Tag,
toml::ResourceToml,
@@ -513,6 +514,7 @@ pub async fn run_update_description<Resource: ResourceSync>(
pub struct AllResourcesById {
pub servers: HashMap<String, Server>,
pub deployments: HashMap<String, Deployment>,
pub stacks: HashMap<String, Stack>,
pub builds: HashMap<String, Build>,
pub repos: HashMap<String, Repo>,
pub procedures: HashMap<String, Procedure>,
@@ -549,6 +551,10 @@ impl AllResourcesById {
entities::sync::ResourceSync,
>()
.await?,
stacks: crate::resource::get_id_to_resource_map::<
entities::stack::Stack,
>()
.await?,
})
}
}

View File

@@ -6,10 +6,12 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::{Builder, BuilderConfig},
deployment::{Deployment, DeploymentImage},
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
update::{Log, ResourceTarget},
user::sync_user,
},
@@ -42,6 +44,62 @@ impl ResourceSync for Server {
}
}
impl ResourceSync for Deployment {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Deployment(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace the server id with name
original.server_id = resources
.servers
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
// need to replace the build id with name
if let DeploymentImage::Build { build_id, version } =
&original.image
{
original.image = DeploymentImage::Build {
build_id: resources
.builds
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: *version,
};
}
Ok(original.partial_diff(update))
}
}
impl ResourceSync for Stack {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Stack(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
// Need to replace server id with name
original.server_id = resources
.servers
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
Ok(original.partial_diff(update))
}
}
impl ResourceSync for Build {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Build(id)
@@ -63,6 +121,9 @@ impl ResourceSync for Build {
fn validate_diff(diff: &mut Self::ConfigDiff) {
if let Some((_, to)) = &diff.version {
// When setting a build back to "latest" version,
// Don't actually set version to None.
// You can do this on the db, or set it to 0.0.1
if to.is_none() {
diff.version = None;
}
@@ -87,6 +148,13 @@ impl ResourceSync for Repo {
.map(|s| s.name.clone())
.unwrap_or_default();
// Need to replace builder id with name
original.builder_id = resources
.builders
.get(&original.builder_id)
.map(|s| s.name.clone())
.unwrap_or_default();
Ok(original.partial_diff(update))
}
}
@@ -184,6 +252,13 @@ impl ResourceSync for Procedure {
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::CancelBuild(config) => {
config.build = resources
.builds
.get(&config.build)
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::Deploy(config) => {
config.deployment = resources
.deployments
@@ -198,6 +273,27 @@ impl ResourceSync for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RestartContainer(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PauseContainer(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::UnpauseContainer(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopContainer(config) => {
config.deployment = resources
.deployments
@@ -226,6 +322,20 @@ impl ResourceSync for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::BuildRepo(config) => {
config.repo = resources
.repos
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::CancelRepoBuild(config) => {
config.repo = resources
.repos
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopAllContainers(config) => {
config.server = resources
.servers
@@ -261,6 +371,55 @@ impl ResourceSync for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::DeployStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::StartStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::RestartStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::PauseStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::UnpauseStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::StopStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::DestroyStack(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::Sleep(_) => {}
}
}
@@ -445,6 +604,9 @@ impl ResourceSync for Procedure {
}
warn!("procedure sync loop exited after max iterations");
todo!()
Some(Log::error(
"run procedure",
String::from("procedure sync loop exited after max iterations"),
))
}
}

View File

@@ -7,18 +7,19 @@ use monitor_client::{
read::ListUserTargetPermissions,
write::{
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
UpdatePermissionOnTarget,
UpdatePermissionOnResourceType, UpdatePermissionOnTarget,
},
},
entities::{
permission::UserTarget,
permission::{PermissionLevel, UserTarget},
sync::SyncUpdate,
toml::{PermissionToml, UserGroupToml},
update::{Log, ResourceTarget},
update::{Log, ResourceTarget, ResourceTargetVariant},
user::sync_user,
},
};
use mungos::find::find_collect;
use regex::Regex;
use resolver_api::Resolve;
use crate::state::{db_client, State};
@@ -28,7 +29,7 @@ use super::resource::AllResourcesById;
pub struct UpdateItem {
user_group: UserGroupToml,
update_users: bool,
update_permissions: bool,
all_diff: HashMap<ResourceTargetVariant, PermissionLevel>,
}
pub struct DeleteItem {
@@ -72,19 +73,49 @@ pub async fn get_updates_for_view(
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
user_group
.permissions
.retain(|p| p.level > PermissionLevel::None);
user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
update.to_create += 1;
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("permissions"),
user_group.permissions,
));
if user_group.all.is_empty() {
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("permissions"),
user_group.permissions,
));
} else {
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:#?}\n{}: {:#?}\n{}: {:#?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("base permissions"),
user_group.all,
muted("permissions"),
user_group.permissions,
));
}
continue;
}
};
@@ -107,6 +138,7 @@ pub async fn get_updates_for_view(
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.filter(|p| p.level > PermissionLevel::None)
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
@@ -174,6 +206,13 @@ pub async fn get_updates_for_view(
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Stack(id) => {
*id = all_resources
.stacks
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
@@ -185,22 +224,27 @@ pub async fn get_updates_for_view(
original_users.sort();
user_group.users.sort();
let all_diff = diff_group_all(&original.all, &user_group.all);
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_all = !all_diff.is_empty();
let update_permissions =
user_group.permissions != original_permissions;
// only add log after diff detected
if update_users || update_permissions {
if update_users || update_all || update_permissions {
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: user group: '{}'\n-------------------",
colored("UPDATE", Color::Blue),
bold(&user_group.name),
));
let mut lines = Vec::<String>::new();
if update_users {
let adding = user_group
.users
@@ -211,7 +255,7 @@ pub async fn get_updates_for_view(
let adding = if adding.is_empty() {
String::from("None")
} else {
colored(&adding.join(", "), Color::Green)
colored(adding.join(", "), Color::Green)
};
let removing = original_users
.iter()
@@ -221,7 +265,7 @@ pub async fn get_updates_for_view(
let removing = if removing.is_empty() {
String::from("None")
} else {
colored(&removing.join(", "), Color::Red)
colored(removing.join(", "), Color::Red)
};
lines.push(format!(
"{}: 'users'\n{}: {removing}\n{}: {adding}",
@@ -230,39 +274,90 @@ pub async fn get_updates_for_view(
muted("adding"),
))
}
if update_all {
let updates = all_diff
.into_iter()
.map(|(variant, (orig, incoming))| {
format!(
"{}: {} {} {}",
bold(variant),
colored(orig, Color::Red),
muted("->"),
colored(incoming, Color::Green)
)
})
.collect::<Vec<_>>()
.join("\n");
lines.push(format!(
"{}: 'base permission'\n{updates}",
muted("field"),
))
}
if update_permissions {
let adding = user_group
.permissions
.iter()
.filter(|permission| {
!original_permissions.contains(permission)
// add if original has no existing permission on the target
!original_permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None")
} else {
colored(&adding.join(", "), Color::Green)
colored(adding.join(", "), Color::Green)
};
let updating = user_group
.permissions
.iter()
.filter(|permission| {
// update if original has an existing permission on the target with a different level
let Some(level) = original_permissions
.iter()
.find(|p| p.target == permission.target)
.map(|p| p.level)
else {
return false;
};
permission.level != level
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let updating = if updating.is_empty() {
String::from("None")
} else {
colored(updating.join(", "), Color::Blue)
};
let removing = original_permissions
.iter()
.filter(|permission| {
!user_group.permissions.contains(permission)
// remove if incoming has no permission on the target
!user_group
.permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None")
} else {
colored(&removing.join(", "), Color::Red)
colored(removing.join(", "), Color::Red)
};
lines.push(format!(
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
"{}: 'permissions'\n{}: {removing}\n{}: {updating}\n{}: {adding}",
muted("field"),
muted("removing"),
muted("updating"),
muted("adding"),
))
}
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
@@ -326,6 +421,22 @@ pub async fn get_updates_for_execution(
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
user_group
.permissions
.retain(|p| p.level > PermissionLevel::None);
user_group.permissions = expand_user_group_permissions(
user_group.permissions,
all_resources,
)
.await
.with_context(|| {
format!(
"failed to expand user group {} permissions",
user_group.name
)
})?;
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
@@ -352,6 +463,7 @@ pub async fn get_updates_for_execution(
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.filter(|p| p.level > PermissionLevel::None)
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
@@ -419,6 +531,13 @@ pub async fn get_updates_for_execution(
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Stack(id) => {
*id = all_resources
.stacks
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
@@ -430,19 +549,55 @@ pub async fn get_updates_for_execution(
original_users.sort();
user_group.users.sort();
let all_diff = diff_group_all(&original.all, &user_group.all);
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_permissions =
user_group.permissions != original_permissions;
// only push update after failed diff
if update_users || update_permissions {
// Extend permissions with any existing that have no target in incoming
let to_remove = original_permissions
.iter()
.filter(|permission| {
!user_group
.permissions
.iter()
.any(|p| p.target == permission.target)
})
.map(|permission| PermissionToml {
target: permission.target.clone(),
level: PermissionLevel::None,
})
.collect::<Vec<_>>();
user_group.permissions.extend(to_remove);
// remove any permissions that already exist on original
user_group.permissions.retain(|permission| {
let Some(level) = original_permissions
.iter()
.find(|p| p.target == permission.target)
.map(|p| p.level)
else {
// not in original, keep it
return true;
};
// keep it if level doesn't match
level != permission.level
});
// only push update after diff detected
if update_users
|| !all_diff.is_empty()
|| !user_group.permissions.is_empty()
{
to_update.push(UpdateItem {
user_group,
update_users,
update_permissions,
all_diff: all_diff
.into_iter()
.map(|(k, (_, v))| (k, v))
.collect(),
});
}
}
@@ -516,6 +671,13 @@ pub async fn run_updates(
&mut has_error,
)
.await;
run_update_all(
user_group.name.clone(),
user_group.all,
&mut log,
&mut has_error,
)
.await;
run_update_permissions(
user_group.name,
user_group.permissions,
@@ -529,7 +691,7 @@ pub async fn run_updates(
for UpdateItem {
user_group,
update_users,
update_permissions,
all_diff,
} in to_update
{
if update_users {
@@ -541,7 +703,16 @@ pub async fn run_updates(
)
.await;
}
if update_permissions {
if !all_diff.is_empty() {
run_update_all(
user_group.name.clone(),
all_diff,
&mut log,
&mut has_error,
)
.await;
}
if !user_group.permissions.is_empty() {
run_update_permissions(
user_group.name,
user_group.permissions,
@@ -616,6 +787,41 @@ async fn set_users(
}
}
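/// Applies each changed base ("all") permission level with its own
/// UpdatePermissionOnResourceType request; a failure on one variant is logged
/// and does not abort the remaining variants.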
async fn run_update_all(
user_group: String,
all_diff: HashMap<ResourceTargetVariant, PermissionLevel>,
log: &mut String,
has_error: &mut bool,
) {
for (resource_type, permission) in all_diff {
if let Err(e) = State
.resolve(
UpdatePermissionOnResourceType {
user_target: UserTarget::UserGroup(user_group.clone()),
resource_type,
permission,
},
sync_user().to_owned(),
)
.await
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set base permissions on {resource_type} in group {} | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' base permissions on {resource_type}",
muted("INFO"),
colored("updated", Color::Blue),
bold(&user_group)
))
}
}
}
async fn run_update_permissions(
user_group: String,
permissions: Vec<PermissionToml>,
@@ -636,17 +842,199 @@ async fn run_update_permissions(
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set permssion in group {} | target: {target:?} | {e:#}",
"\n{}: failed to set permission in group {} | target: {target:?} | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' permissions",
"\n{}: {} user group '{}' permissions | {}: {target:?} | {}: {level}",
muted("INFO"),
colored("updated", Color::Blue),
bold(&user_group)
bold(&user_group),
muted("target"),
muted("level")
))
}
}
}
/// Expands any regex defined targets into the full list
async fn expand_user_group_permissions(
permissions: Vec<PermissionToml>,
all_resources: &AllResourcesById,
) -> anyhow::Result<Vec<PermissionToml>> {
let mut expanded =
Vec::<PermissionToml>::with_capacity(permissions.capacity());
for permission in permissions {
let (variant, id) = permission.target.extract_variant_id();
if id.is_empty() {
continue;
}
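// Ids wrapped in backslashes, e.g. "\^api-.*\", are treated as regex patterns
// and expanded to every resource of this variant whose name matches; anything
// else passes through unchanged as a literal name.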
if id.starts_with('\\') && id.ends_with('\\') {
let inner = &id[1..(id.len() - 1)];
let regex = Regex::new(inner)
.with_context(|| format!("invalid regex. got: {inner}"))?;
match variant {
ResourceTargetVariant::Build => {
let permissions = all_resources
.builds
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Build(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Builder => {
let permissions = all_resources
.builders
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Builder(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Deployment => {
let permissions = all_resources
.deployments
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Deployment(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Server => {
let permissions = all_resources
.servers
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Server(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Repo => {
let permissions = all_resources
.repos
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Repo(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Alerter => {
let permissions = all_resources
.alerters
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Alerter(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Procedure => {
let permissions = all_resources
.procedures
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Procedure(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::ServerTemplate => {
let permissions = all_resources
.templates
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::ServerTemplate(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::ResourceSync => {
let permissions = all_resources
.syncs
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::ResourceSync(
resource.name.clone(),
),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::Stack => {
let permissions = all_resources
.stacks
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Stack(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::System => {}
}
} else {
// No regex
expanded.push(permission);
}
}
Ok(expanded)
}
type AllDiff =
HashMap<ResourceTargetVariant, (PermissionLevel, PermissionLevel)>;
/// Diffs the base ("all") permission maps of the original and incoming user group,
/// returning the (original, incoming) level for each variant that differs.
fn diff_group_all(
original: &HashMap<ResourceTargetVariant, PermissionLevel>,
incoming: &HashMap<ResourceTargetVariant, PermissionLevel>,
) -> AllDiff {
let mut to_update = HashMap::new();
// need to compare both forward and backward because either hashmap could be sparse.
// forward direction
for (variant, level) in incoming {
let original_level = original.get(variant).unwrap_or_default();
if level == original_level {
continue;
}
to_update.insert(*variant, (*original_level, *level));
}
// backward direction
for (variant, level) in original {
let incoming_level = incoming.get(variant).unwrap_or_default();
if level == incoming_level {
continue;
}
to_update.insert(*variant, (*level, *incoming_level));
}
to_update
}
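To make the sparse-map reasoning concrete, here is a minimal standalone sketch of the same two-pass diff, using plain strings and u8 levels instead of the real monitor types (all names here are illustrative, not part of the codebase):
use std::collections::HashMap;
fn diff_sparse(
  original: &HashMap<&str, u8>,
  incoming: &HashMap<&str, u8>,
) -> HashMap<String, (u8, u8)> {
  let mut out = HashMap::new();
  // forward pass: keys present (or changed) in `incoming`
  for (k, new) in incoming {
    let old = original.get(k).copied().unwrap_or_default();
    if *new != old {
      out.insert(k.to_string(), (old, *new));
    }
  }
  // backward pass: keys that only exist in `original` fall back to the default level
  for (k, old) in original {
    let new = incoming.get(k).copied().unwrap_or_default();
    if *old != new {
      out.insert(k.to_string(), (*old, new));
    }
  }
  out
}
fn main() {
  let original = HashMap::from([("Build", 2u8)]);
  let incoming = HashMap::from([("Server", 1u8)]);
  // Both entries are reported: Build drops back to 0, Server rises to 1.
  println!("{:?}", diff_sparse(&original, &incoming));
}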

View File

@@ -7,6 +7,7 @@ use monitor_client::entities::{
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
update::{ResourceTarget, Update, UpdateListItem},
user::User,
@@ -45,7 +46,7 @@ pub async fn add_update(
update.id = db_client()
.await
.updates
.insert_one(&update, None)
.insert_one(&update)
.await
.context("failed to insert update into db")?
.inserted_id
@@ -58,11 +59,28 @@ pub async fn add_update(
Ok(id)
}
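/// Like `add_update`, but does not send the update out after inserting it;
/// callers send it themselves once action state is set
/// (see the note in `init_execution_update` below).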
#[instrument(level = "debug")]
pub async fn add_update_without_send(
update: &Update,
) -> anyhow::Result<String> {
let id = db_client()
.await
.updates
.insert_one(update)
.await
.context("failed to insert update into db")?
.inserted_id
.as_object_id()
.context("inserted_id is not object id")?
.to_string();
Ok(id)
}
#[instrument(level = "debug")]
pub async fn update_update(update: Update) -> anyhow::Result<()> {
update_one_by_id(&db_client().await.updates, &update.id, mungos::update::Update::Set(to_document(&update)?), None)
.await
.context("failed to update the update on db. the update build process was deleted")?;
.await
.context("failed to update the update on db. the update build process was deleted")?;
let update = update_list_item(update).await?;
let _ = send_update(update).await;
Ok(())
@@ -110,6 +128,12 @@ pub async fn init_execution_update(
) -> anyhow::Result<Update> {
let (operation, target) = match &request {
// Server
ExecuteRequest::StopAllContainers(data) => (
Operation::StopAllContainers,
ResourceTarget::Server(
resource::get::<Server>(&data.server).await?.id,
),
),
ExecuteRequest::PruneContainers(data) => (
Operation::PruneImages,
ResourceTarget::Server(
@@ -128,12 +152,6 @@ pub async fn init_execution_update(
resource::get::<Server>(&data.server).await?.id,
),
),
ExecuteRequest::StopAllContainers(data) => (
Operation::StopAllContainers,
ResourceTarget::Server(
resource::get::<Server>(&data.server).await?.id,
),
),
// Deployment
ExecuteRequest::Deploy(data) => (
@@ -148,6 +166,24 @@ pub async fn init_execution_update(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::RestartContainer(data) => (
Operation::RestartContainer,
ResourceTarget::Deployment(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::PauseContainer(data) => (
Operation::PauseContainer,
ResourceTarget::Deployment(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::UnpauseContainer(data) => (
Operation::UnpauseContainer,
ResourceTarget::Deployment(
resource::get::<Deployment>(&data.deployment).await?.id,
),
),
ExecuteRequest::StopContainer(data) => (
Operation::StopContainer,
ResourceTarget::Deployment(
@@ -188,6 +224,18 @@ pub async fn init_execution_update(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::BuildRepo(data) => (
Operation::BuildRepo,
ResourceTarget::Repo(
resource::get::<Repo>(&data.repo).await?.id,
),
),
ExecuteRequest::CancelRepoBuild(data) => (
Operation::CancelRepoBuild,
ResourceTarget::Repo(
resource::get::<Repo>(&data.repo).await?.id,
),
),
// Procedure
ExecuteRequest::RunProcedure(data) => (
@@ -214,9 +262,74 @@ pub async fn init_execution_update(
resource::get::<ResourceSync>(&data.sync).await?.id,
),
),
// Stack
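// For the stack lifecycle requests below, a populated `service` field narrows
// the operation to a single service within the stack (e.g. StartStackService
// instead of StartStack).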
ExecuteRequest::DeployStack(data) => (
Operation::DeployStack,
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::StartStack(data) => (
if data.service.is_some() {
Operation::StartStackService
} else {
Operation::StartStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::RestartStack(data) => (
if data.service.is_some() {
Operation::RestartStackService
} else {
Operation::RestartStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::PauseStack(data) => (
if data.service.is_some() {
Operation::PauseStackService
} else {
Operation::PauseStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::UnpauseStack(data) => (
if data.service.is_some() {
Operation::UnpauseStackService
} else {
Operation::UnpauseStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::StopStack(data) => (
if data.service.is_some() {
Operation::StopStackService
} else {
Operation::StopStack
},
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::DestroyStack(data) => (
Operation::DestroyStack,
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
};
let mut update = make_update(target, operation, user);
update.in_progress();
update.id = add_update(update.clone()).await?;
// Don't actually send it here; let the handlers send it once they have set the action state.
update.id = add_update_without_send(&update).await?;
Ok(update)
}

Some files were not shown because too many files have changed in this diff.