Compare commits

..

663 Commits

Author SHA1 Message Date
Maxwell Becker
f349cdf50d 1.14.1 (#69)
* 1.14.1

* 1.14.1 version

* repo pull use configured repo path

* don't show UI defined file if using Stack files on host mode

* Stack "run build" option

* note on bind mounts

* improve bind mount doc

* add links to schema

* add new stacks configs UI

* interp into stack build_extra_args

* add links UI
2024-09-10 08:17:53 -07:00
mbecker20
796bcac952 no business edition docs 2024-09-07 18:50:18 +03:00
mbecker20
fed05684aa no business edition 2024-09-07 18:48:58 +03:00
mbecker20
80a91584a8 version number link to releases 2024-09-07 12:54:54 +03:00
mbecker20
12d05e9a25 1.14.0 stable 2024-09-07 12:51:56 +03:00
mbecker20
f4d06c91ff add 5s log polling option 2024-09-07 12:42:35 +03:00
mbecker20
5d7449529f fix Builder schema link 2024-09-05 01:40:54 +03:00
mbecker20
a0021d1785 obfuscate secret variable value with * 2024-09-04 17:15:59 +03:00
mbecker20
bbd23e3f5f improve login failure feedback 2024-09-04 17:15:47 +03:00
mbecker20
71841a8e41 update komodo CLI readme 2024-09-04 16:48:27 +03:00
mbecker20
5228ffd9b8 fix changelog 2024-09-02 12:54:56 +03:00
mbecker20
a06f506e54 installer log 2024-09-02 03:13:58 +03:00
mbecker20
71d6a55e50 modified installer 2024-09-02 03:12:17 +03:00
mbecker20
d16c03dd2a fix force service file install 2024-09-02 03:09:14 +03:00
mbecker20
6abd9a6554 publish rc1 crates 2024-09-02 02:06:02 +03:00
mbecker20
5f04e881a5 fix doc link 2024-09-02 01:45:39 +03:00
Maxwell Becker
5fc0a87dea 1.14 - Rename to Komodo - Docker Management (#56)
* setup network page

* add Network, Image, Container

* Docker ListItems and Inspects

* frontend build

* dev0

* network info working

* fix cargo lock

* dev1

* pages for the things

* implement Active in dashboard

* RunBuild update trigger list refresh

* rename deployment executions to StartDeployment etc

* add server level container control

* dev2

* add Config field to Image

* can get image labels from Config.Labels

* mount container page

* server show resource count

* add GetContainerLog api

* add _AllContainers api

* dev3

* move ResourceTarget to entities mod

* GetResourceMatchingContainer api

* connect container to resource

* dev4 add volume names to container list items

* ts types

* volume / image / network unused management

* add image history to image page

* fix PruneContainers incorrect Operation

* update cache for server for server after server actions

* dev5

* add singapore to Hetzner

* implement delete single network / image / volume api

* dev6

* include "in use" on Docker Lists

* add docker resource delete buttons

* is nice

* fix volume all in use

* remove google font dependency

* use host networking in test compose

* implement Secret Variables (hidden in logs)

* remove unneeded borrow

* interpolate variables / secrets into extra args / onclone / onpull / command etc

* validate empty strings before SelectItem

* rename everything to Komodo

* rename workspace to komodo

* rc1
2024-09-01 15:38:40 -07:00
mbecker20
2463ed3879 1.13.4 Fix periphery image registry login / pull ordering 2024-08-20 12:50:07 -04:00
mbecker20
a2758ce6f4 Stack: login to registry BEFORE pulling image 2024-08-20 12:49:19 -04:00
mbecker20
3f1788dbbb docsite: reindent ports in example 2024-08-19 11:36:17 -04:00
mbecker20
33a0560af6 frontend: Ignore Services correct content hidden logic 2024-08-19 00:44:48 -04:00
mbecker20
610a10c488 frontend: show state when using Stack files-on-host 2024-08-18 21:16:53 -04:00
mbecker20
39b217687d 1.13.3 filter Periphery disks 2024-08-18 18:12:47 -04:00
mbecker20
2f73461979 stack don't show Git Repo config if file contents defined in UI 2024-08-18 18:06:12 -04:00
mbecker20
aae9bb9e51 move image registry to the top of Build Image config 2024-08-18 18:06:12 -04:00
mbecker20
7d011d93fa Fix: Periphery should filter out "overlay" volumes, not include them 2024-08-18 18:05:56 -04:00
mbecker20
bffdea4357 Swarm roadmap 2024-08-18 16:05:58 -04:00
mbecker20
790566bf79 add note about podman support 2024-08-18 13:37:26 -04:00
mbecker20
b17db93f13 quick-fix: fix build version config 2024-08-18 04:57:25 -04:00
mbecker20
daa2ea9361 better build version management 2024-08-18 04:24:22 -04:00
mbecker20
176fb04707 add docs about running periphery in container 2024-08-18 04:04:03 -04:00
mbecker20
5ba1254cdb add docker compose docs 2024-08-18 03:30:53 -04:00
Maxwell Becker
43593162b0 1.13.2 local compose (#36)
* stack config files_on_host

* refresh stack cache not blocked when using files_on_host

* add remote errors status

* improve info tab

* store the full path in ComposeContents
2024-08-18 00:04:47 -07:00
Maxwell Becker
418f359492 1.13.2 (#35)
* 1.13.2 add periphery disk report whitelist / blacklist

* improve setup docs

* publish 1.13.2 client
2024-08-17 15:28:01 -07:00
mbecker20
3cded60166 close any open alerts on non-existent disk mounts 2024-08-17 18:04:11 -04:00
mbecker20
6f70f9acb0 no need to try to parse deploy stuff, only can cause failure 2024-08-17 17:38:14 -04:00
mbecker20
6e1064e58e auto enable ensured server 2024-08-17 14:28:38 -04:00
mbecker20
d96e5b4c46 comment out environment so parsing doesn't fail 2024-08-17 14:26:10 -04:00
mbecker20
5a8822c7d2 add note about arm periphery 2024-08-17 14:23:21 -04:00
Maxwell Becker
1f2d236228 Dockerized periphery (#34)
* Add webhooks page to docs

* supports

* supports

* periphery Dockerfile

* add comments. Remove unneeded default config

* add FILE SYSTEM log

* remove log

* filter disks included in periphery disk report, on periphery side

* dockerized periphery

* all in one compose file docs

* remove some unused deps
2024-08-17 00:25:42 -07:00
mbecker20
a89bd4a36d deleting ResourceSync cleans up open alerts
use .then in alert cleanup
2024-08-16 12:27:50 -04:00
mbecker20
0b40dff72b add images, volumes to 1.14 roadmap 2024-08-16 11:54:24 -04:00
mbecker20
59874f0a92 temp downgrade tower due to build issue 2024-08-16 02:35:23 -04:00
mbecker20
14e459b32e debug on RefreshCache tasks 2024-08-16 02:08:30 -04:00
mbecker20
f6c55b7be1 default log config include opentelemetry service name 2024-08-16 02:05:48 -04:00
mbecker20
460819a145 hetzner pass enable ipv4/v6 explicitly 2024-08-16 01:42:28 -04:00
mbecker20
91f4df8ac2 update logging deps 2024-08-16 01:15:53 -04:00
mbecker20
6a19e18539 pass disabled to stack TextInput area 2024-08-15 04:17:15 -04:00
mbecker20
30c5fa3569 add gap to commit hover 2024-08-15 03:56:03 -04:00
mbecker20
4b6aa1d73d stack services use new StateBadge 2024-08-15 03:32:30 -04:00
mbecker20
5dfd007580 fmt 2024-08-15 03:24:52 -04:00
mbecker20
955670d979 improve Stack ignore_services info tip 2024-08-15 03:24:49 -07:00
mbecker20
f70e359f14 avoid warn log on refresh repo task if no repo configured 2024-08-15 03:24:27 -04:00
mbecker20
a2b0981f76 filter out any disks with mount path at /var/lib/docker/volumes. Filter out user defined ones on server 2024-08-15 02:36:53 -04:00
mbecker20
49a8e581bf move Stack post_create stuff into trait for reusability 2024-08-15 02:32:38 -04:00
mbecker20
2d0c1724db fix too many recent cards 2024-08-12 18:38:44 -07:00
mbecker20
20ae1c22d7 config example links correctly 2024-08-12 15:03:02 -07:00
mbecker20
e8d75b2a3d improve docsite resource page 2024-08-12 14:59:10 -07:00
mbecker20
e23d68f86a update logs will convert ansi colors to html 2024-08-12 13:59:56 -07:00
mbecker20
2111976450 Join the Discord 2024-08-12 02:10:19 -07:00
mbecker20
8a0109522b fix remove recents on delete 2024-08-12 00:33:08 -07:00
mbecker20
8d75fa3f2f provide custom webhook secret to all resources which take webhooks 2024-08-11 22:05:26 -07:00
mbecker20
197e938346 stack ignore_services config hidden 2024-08-11 20:18:28 -07:00
mbecker20
6ba0184551 add ignore_services to stack 2024-08-11 20:00:20 -07:00
mbecker20
c456b67018 ListStackServices refetch interval 2024-08-11 19:28:05 -07:00
mbecker20
02e152af4d clean up config struct 2024-08-11 19:16:44 -07:00
mbecker20
392e691f92 add repo build webhook 2024-08-11 18:26:51 -07:00
mbecker20
495e208ccd add building in repo busy check 2024-08-11 17:47:10 -07:00
mbecker20
14474adb90 add building state to repo 2024-08-11 17:46:26 -07:00
mbecker20
896784e2e3 fix repo action UI responsiveness 2024-08-11 17:38:15 -07:00
mbecker20
2e690bce24 repo table just show repo and branch 2024-08-11 17:36:00 -07:00
mbecker20
7172d24512 add message if fail to remove_dir_all after compose deploy 2024-08-11 17:21:19 -07:00
mbecker20
b754c89118 validate config makes sure ids not empty 2024-08-11 17:09:53 -07:00
Maxwell Becker
31a23dfe2d v1.13.1 improve stack edge cases, and UI action responsiveness (#26)
* get stack state from project

* move custom image name / tag below image setting for build config

* services also trigger stack action state

* add status to stack page

* 1.13.1 patch
2024-08-11 17:01:09 -07:00
mbecker20
b0f80cafc3 improve action responsiveness by improving when update is sent out rel to action state set 2024-08-11 14:59:34 -07:00
mbecker20
85a16f6c6f ensure run directory is normalized before create dir all 2024-08-11 14:14:17 -07:00
mbecker20
29a7e4c27b add link to builder in build info 2024-08-11 13:24:29 -07:00
mbecker20
a73b572725 improve dashboard responsiveness 2024-08-11 12:07:14 -07:00
mbecker20
aa44bf04e8 validate repo builder id in diff (new field) 2024-08-11 05:06:17 -07:00
mbecker20
93348621c5 replace repo builder_id with name for toml export 2024-08-11 05:00:34 -07:00
mbecker20
4b2139ede2 docker compose ls --all 2024-08-11 04:56:28 -07:00
mbecker20
3251216be7 update server address config placeholder 2024-08-11 03:35:34 -07:00
mbecker20
1f980a45e8 fix compose example file to reference monitor-mongo 2024-08-11 03:34:19 -07:00
mbecker20
94da1dce99 fill out Procedure execute types 2024-08-11 02:38:15 -07:00
mbecker20
d4fc015494 cli don't panic if no HOME env var 2024-08-11 02:26:18 -07:00
mbecker20
5800fc91d2 repo don't show build button if builder not attached 2024-08-11 02:12:39 -07:00
mbecker20
91785e1e8f monitor_cli install instructions 2024-08-11 01:56:56 -07:00
mbecker20
41fccdb16e demo link in readme 2024-08-10 23:58:42 -07:00
mbecker20
78cf93da8a improve dashboard recents responsiveness 2024-08-10 23:52:24 -07:00
mbecker20
ea36549dbe fix stack service routing page - works with updates 2024-08-10 23:01:31 -07:00
mbecker20
a319095869 improve readme 2024-08-10 16:02:29 -07:00
mbecker20
a6d7a80cbc capitalize Monitor Docs 2024-08-10 15:33:17 -07:00
mbecker20
20f051c890 readme 2024-08-10 15:32:22 -07:00
mbecker20
2fef954ad5 add link to demo docs 2024-08-10 15:30:54 -07:00
mbecker20
e1b9367ee3 remove attempt at parsing json out of config 2024-08-10 14:27:58 -07:00
mbecker20
c7717fbfdf disable create variable for non admin 2024-08-10 13:59:42 -07:00
mbecker20
bf918042c3 point to right mongo 2024-08-10 13:33:09 -07:00
mbecker20
46ac16100d fix example compose "depends on" 2024-08-10 12:55:31 -07:00
mbecker20
eca0378c56 apply spellcheck={false} 2024-08-10 12:50:21 -07:00
mbecker20
bfd5c5390d fix use before init loop 2024-08-10 12:18:07 -07:00
mbecker20
db41878278 guard against running on_clone / on_pull on core 2024-08-10 11:56:18 -07:00
mbecker20
26468ed8ea add infos for stack repo branch commit 2024-08-10 11:49:23 -07:00
mbecker20
707751708d update to latest rust docker base 2024-08-10 11:31:52 -07:00
mbecker20
d28d3422a3 disable server builder cancel in UI 2024-08-10 11:04:51 -07:00
mbecker20
9e2b1ede93 tweak layout: items-center 2024-08-10 10:22:03 -07:00
mbecker20
37e37deb04 clarify what "sync resources" means 2024-08-10 09:42:42 -07:00
mbecker20
e73a6ca72c fix docsite link error 2024-08-10 09:39:54 -07:00
mbecker20
6082b7b1bd update client version in toml 2024-08-10 09:35:20 -07:00
Maxwell Becker
678767c24b [v1.13 - Komodo] Docker compose support with the Stack resource (#24) Co-authored with @karamvirsingh98
* add some network stuff to container summary

* improve settings tables UI

* periphery build supports additional tags

* fix variable container sizing

* alert types newline wrap

* plumbing for Stack resource

* plumbing for Stack resource

* mount stack api

* stack resource sync

* get remote compose file

* support image_name and image_tag

* add server config placeholders. default server config address

* configure image name and image tag

* deployment work with build image_name and image_tag

* stack UI

* fe builds

* configure registry provider and account

* implement periphery stack api

* stack poll interval

* add UI provider management

* deploy stacks

* build push commit hash tag.

* Destroy stack

* update default core port to 9120

* remove git_account alias

* finish stack (and container) api

* frontend builds

* cant cancel server based builds

* fix

* use git pull -f

* 9120

* start UI updates (#15)

* fix  From<Stack> for CloneArgs

* remove unused imports

* UI Updates (#16)

* cleanup dashboard charts for resources

* bring back solid scrollbars

* enable sidebar scrolling

* remove alerts from all resources

* pass jwt secret

* stacks dont delete the target

* parse services from yaml

* stacks deploy

* close

* looking good

* closer

* destroy stack when file missing. onboard stacks

* figure out stack container name matching

* get stack state correct

* work with service views

* UI Updates - Sidebar, Topbar Alerts, and All Resources page (#17)

* move sidebar to use fixed positioning instead of sticky

* add alert details dialog to topbar alerts

* cleanup all resources page layout

* ensure resource links don't propagate clicks

* periphery support passing env with --env-file

* StackServicePage

* default run_directory to ./ for clarity

* add stack webhook listeners

* add default compose name of stack name

* stacks controlled with project name

* migrate to dotenvy

* add stack to dashboard

* remove deploying / destroying stack services

* update config files

* fix getting service logs

* git / docker provider management api

* implement passing git / registry token from db

* rename system user Github to Git Webhook

* separate deployed and latest services on stack info

* add stack service level operations

* UI Updates - Update Shadcn/UI components, prevent navbar menu layout shift (#20)

* add dashboard pie for resource syncs

* dashboard items same height

* update shadcn components

* ensure centered following sheet update

* cleanup layout, prevent navbar menu layout shifts

* add manual filter, fix toast call

* guard webhooks

* remove deployed_message, latest_message from StackListItemInfo

* stop all containers on server correctly

* support multiple compose files

* cache all containers networks images projects

* remove project missing from db cache

* work on sync deploy stuff

* rework deployment sync deploy to support stacks. they can depend on each other.

* UI Updates - Remove topbar transparency, pretty status badges, tidy resource page layout with a 'back' button (#21)

* remove topbar transparency

* cleanup unused

* responsive dashboard

* better mobile header

* dont need to calc 64px less since header is using position fixed

* add status badge component

* update status badges

* further simplify layout

* allow undefined status as prop

* use new status badges for alerts

* update status badges for all resources

* undo layout change

* tidy up resource page layout, add back button

* no need for button wrapper

* remove unused

* build cancel log

* update ts types

* fix fe type changes

* fe tweaks

* remove on build logs

* core refresh cache immediately on startup

* jwt_ttl

* canonicalize run directory on host

* update canonicalize error message

* core use docker-compose

* fix incorrect project missing, add status string to stack info

* remove entries in "after" that aren't deploying

* fix dockerfile

* build custom tag postfix

* sync fixes

* ensure UpdateGitProviderAccount doesn't change id

* ensure UpdateDockerRegistryAccount doesn't change id

*  configure providers in the UI

* add // comment support to env, conversions

* add updates for provider deletes

* improve sync pending deploy log

* add more deployment actions

* add backward compat with v1.12 for clone repo

* stack deploy format

* fe

* alert menus clone when click resource link

* rename stacks

* don't close on click

* snake case stack state, in line with deployment state

* sync redeploy stack if newer hash (optional behind resource field 'latest_hash')

* remove nav to tree

* RefreshStack/Sync debug instruments

* improve inline UI docs

* implement resource base_permission backend

* plumbing for Repo build

* build repos

* write env file repos

* add latest hash / message to build info

* add optional hash to update

* keep built_hash updated

* add backend for build / repo latest hash management

* remove unused resources

* clean up repo dirs after cache update

* fix repo info deser error

* add build / repo git status

* fix page layouts

* improve layout responsive

* most config incline docs

* add descriptions for all resource types

* default local auth false

* fix omnibar arrow keys issue

* add compose file to example config

* image registry

* dashboard display no resources message

* update deps.

* show when no config

* resource sync use config git_provider

* fix networks

* fix deploy error due to after

* update lots of docs

* fix server stat charts not working

* update screenshots

* update changelog

* add a disclaimer

* remove file paths docs stuff

* build repo

* v1.13 - Komodo

* update docs for cli

* fill out the compose example more

---------

Co-authored-by: Karamvir Singh <67458484+karamvirsingh98@users.noreply.github.com>
2024-08-10 09:33:14 -07:00
mbecker20
59cb86d599 serde default on token re Issue 10 2024-08-02 11:13:31 -07:00
mbecker20
5f0a9ad652 remove env vars / conversions / labels # comment support 2024-07-31 13:04:57 -07:00
mbecker20
fc758121da note on login if no auth methods configured 2024-07-31 12:56:59 -07:00
mbecker20
95ccf1af0b reset version on copy 2024-07-31 05:03:33 -07:00
mbecker20
627f7ab585 detect aarch64 periphery install 2024-07-31 04:58:12 -07:00
mbecker20
4238abf61a fix resource sync delete operation 2024-07-31 02:19:24 -07:00
mbecker20
66bfe69983 add note about user periphery install 2024-07-31 00:14:19 -07:00
mbecker20
42b493ae10 host network in example 2024-07-30 15:56:01 -07:00
mbecker20
f4d6c50b67 ensure core config startup log redacted 2024-07-30 14:43:15 -07:00
mbecker20
17176a7d56 add note about upgrading periphery 2024-07-30 14:20:15 -07:00
mbecker20
140b95b70c skip secret interp respected for core secrets 2024-07-30 00:18:28 -07:00
mbecker20
3a2cb73088 improve git https config look 2024-07-29 20:51:55 -07:00
mbecker20
4585533bc5 migration optional env vars 2024-07-29 20:28:57 -07:00
mbecker20
83099f03a1 changelog 2024-07-29 19:36:48 -07:00
mbecker20
9e619c0250 add sync screenshots 2024-07-29 19:34:10 -07:00
mbecker20
edf49dc685 update resource syncs 2024-07-29 19:30:28 -07:00
mbecker20
beffc8c159 consistent dockerfile 2024-07-29 19:18:14 -07:00
mbecker20
d99cf87da0 update client to 1.12 2024-07-29 18:36:42 -07:00
mbecker20
8e19eb7b0f versions 2024-07-29 18:33:14 -07:00
mbecker20
78a0b56c73 migrator readme 2024-07-29 18:32:11 -07:00
mbecker20
bf5dc52237 fix upgrades docs 2024-07-29 18:29:49 -07:00
mbecker20
482ea59d4c add docsite upgrades 2024-07-29 18:28:12 -07:00
Maxwell Becker
7740d36f49 v1.12 Custom Git Providers / Docker Registries (#8)
* update deps

* remove patch when 0 for deployments using specific build version

* implement custom git provider and image registry support

* common providers api

* toml array alias

* username alias account

* get fe to build

* http or https

* fix frontend build

* improve registry / provider config

* frontend build

* rework deployment / builds image registry

* frontend builds

* update build config fe

* configure builder additional accounts / secrets

* guard against managing non-github repo webhooks

* fmt

* md size dashboard

* lowercase organization in image name

* update config docs

* update example env

* provider configuration

* distribute migrator

* fix casing mismatch

* docs
2024-07-29 18:23:58 -07:00
mbecker20
820754deda roadmap 2024-07-24 00:11:58 -07:00
mbecker20
4219884198 roadmapx 2024-07-24 00:11:08 -07:00
mbecker20
d9e24cc35a add roadmap 2024-07-24 00:10:32 -07:00
mbecker20
8d2ce884d9 1.11.1 updated hetzner instances 2024-07-20 02:49:38 -07:00
mbecker20
313b000e64 update hetzner server types 2024-07-20 01:16:52 -07:00
mbecker20
c2f9e29605 close failed procedure execution updates 2024-07-19 23:21:21 -07:00
Maxwell Becker
8c6f38cafb v1.11 Improve permission management (#6)
* add "all permissions" feature on user and user group schema

* prepare support for group all

* implement user.all and user_group.all for broad base permissioning

* clean up unused deps

* sync support user group permissions regex

* 1.11

* fix fe ? issue

* this doesn't work

* sync handle user group all set

* retain above non earlier

* remove permissions that already exist

* update docs

* add user group docs

* minimize user group permissions for execute

* sync toml

* add sync name to slack alert title

* add syncs to alerter white/blacklist

* use \\ instead of $reg

* share resource type base permissions api users and user groups

* manage user / group base permissions ui

* manage user / group base resource type permissions

* update api permission handling

* manage all resource permissions in table

* user show group membership

* update client to 1.11
2024-07-19 02:11:36 -07:00
mbecker20
4a03eba99a granular invalidations 2024-07-17 14:51:51 -07:00
mbecker20
79fe078e3b 1.10.5 cpu/mem only update alert if severity increases (or resolved) 2024-07-17 14:36:22 -07:00
mbecker20
6be032fcd4 update client to 1.10.4 2024-07-16 16:06:38 -07:00
mbecker20
d0c94278ec 1.10.4 fix EnvVar parsing when value contains '=' 2024-07-16 16:05:11 -07:00
mbecker20
03ae7268fd fix server table search when sorting by deployments 2024-07-10 12:09:42 -07:00
mbecker20
f443294818 add clear link to api docs 2024-07-10 02:33:14 -07:00
mbecker20
2202835d86 improve core setup docs 2024-07-10 02:26:58 -07:00
mbecker20
98fbc7a506 improve migrator and add Dockerfile 2024-07-10 02:25:44 -07:00
mbecker20
8ee89296e1 frontend only invalidate on update Complete 2024-07-09 13:50:03 -07:00
mbecker20
989c3d2d01 more compact webhook button labels 2024-07-09 02:26:50 -07:00
mbecker20
dc72883b90 update config example 2024-07-09 02:09:17 -07:00
mbecker20
e99364430f update local client version 2024-07-09 02:06:30 -07:00
mbecker20
e106e38cd9 1.10.3 support multiple github webhook app installations 2024-07-09 02:05:38 -07:00
mbecker20
e4d0c56e49 debug git logs 2024-07-09 00:50:24 -07:00
mbecker20
7427a158f4 full err too large for alert 2024-07-09 00:40:11 -07:00
mbecker20
b926f89954 log on build unsuccessful and alerting 2024-07-09 00:20:03 -07:00
mbecker20
e666a22f08 debug instrument git calls 2024-07-09 00:09:06 -07:00
mbecker20
4107f779a5 fix build increment major version 2024-07-08 13:15:52 -07:00
mbecker20
828d6cdfed improve responsive 2024-07-05 20:19:20 -07:00
mbecker20
fe82400a99 1.10.2 ResourceSync manage repo webhooks 2024-07-05 20:02:20 -07:00
mbecker20
e37fc6adde publish 1.10.1 2024-07-05 03:32:24 -07:00
mbecker20
c21c8f99ae manage webhooks working 2024-07-05 03:29:23 -07:00
mbecker20
78a63f92bb build repo webhook management 2024-07-05 03:17:29 -07:00
mbecker20
ce67655021 core info provide owners 2024-07-05 02:26:18 -07:00
mbecker20
2ccecf38f2 default pk path /github/private-key.pem 2024-07-05 02:15:35 -07:00
mbecker20
1ddae31aad update config example 2024-07-05 02:06:27 -07:00
mbecker20
097fbefa63 1.10.1 2024-07-05 02:02:59 -07:00
mbecker20
b51442a661 ts types 2024-07-05 02:02:25 -07:00
mbecker20
a21d49d224 build / repo webhook write api 2024-07-05 02:02:03 -07:00
mbecker20
c99a33880e Create / Delete webhook api 2024-07-05 01:31:15 -07:00
mbecker20
6ee55262ba webhook management api aware if repo can be managed 2024-07-05 01:18:21 -07:00
mbecker20
878b9b55bb see whether webhooks enabled 2024-07-05 01:05:27 -07:00
mbecker20
af6193f83a update async_timing_util 2024-07-04 21:15:38 -07:00
mbecker20
b8fefddd8b EC2 2024-07-04 19:13:49 -07:00
mbecker20
7f490f5bf2 tweak 2024-07-04 19:12:02 -07:00
mbecker20
efa7c13286 docs 2024-07-04 19:08:48 -07:00
mbecker20
f913be7a0b builder setup guide 2024-07-04 19:03:43 -07:00
mbecker20
35901ef7ea actions can wrap 2024-07-04 17:53:24 -07:00
mbecker20
5b938490fc response 2024-07-04 17:29:45 -07:00
mbecker20
a7326a0116 user group toml export replace target ids with names 2024-07-04 17:10:36 -07:00
mbecker20
877bda91d7 improve log responsiveness 2024-07-04 16:49:08 -07:00
mbecker20
439a091e50 improve resource responsive 2024-07-04 16:29:13 -07:00
mbecker20
b0e89f4963 fix dashboard 2024-07-04 15:46:43 -07:00
mbecker20
b1e4b55ba1 more responsive 2024-07-04 14:41:40 -07:00
mbecker20
d4a1891c70 delete user group 2024-07-04 14:17:03 -07:00
mbecker20
9db7592d7e all_resources tables use right search 2024-07-04 01:25:40 -07:00
mbecker20
84fb603951 1.10 2024-07-01 03:18:26 -07:00
mbecker20
55bac0dd13 check right thing for empty 2024-07-01 03:12:22 -07:00
mbecker20
b143f42363 update mungos 2024-07-01 02:47:06 -07:00
mbecker20
007efd136a 1.10.0 pre 2024-07-01 02:38:24 -07:00
mbecker20
b329767f9e 1.10.0-pre-0 2024-07-01 02:33:01 -07:00
mbecker20
b4231957d5 config for secret args 2024-07-01 02:31:53 -07:00
mbecker20
b4dc446f95 interpolate core variables / secrets into build secret_args 2024-07-01 02:27:03 -07:00
mbecker20
c92515cecc combine into router 2024-07-01 01:44:07 -07:00
mbecker20
f3712feea2 finish periphery clean 2024-07-01 01:39:03 -07:00
mbecker20
0e81d17860 shrink periphery implementation 2024-07-01 01:19:25 -07:00
mbecker20
c3f1557b83 fix mem alert 2024-06-30 00:27:37 -07:00
mbecker20
5f88e4b436 separate webhook actions 2024-06-25 01:22:38 -07:00
mbecker20
473c6b3867 dont send failed build alert on build cancel 2024-06-24 16:59:34 -07:00
mbecker20
c10edaa5d1 fix builder toml export 2024-06-23 03:00:31 -07:00
mbecker20
9418a6d963 update client to 1.9.0 2024-06-23 02:30:50 -07:00
mbecker20
57646b750f clean up 2024-06-23 02:29:47 -07:00
mbecker20
0d57f9411c can deploy ecr 2024-06-23 02:27:19 -07:00
mbecker20
7d396dd539 clean up ecr 2024-06-23 02:22:14 -07:00
mbecker20
bfe762b71a install unzip 2024-06-23 01:37:12 -07:00
mbecker20
16ede84bac install aws cli core 2024-06-23 01:31:15 -07:00
mbecker20
4524db94db get ecr token using cli 2024-06-23 01:23:56 -07:00
mbecker20
580dab4acd improve error log formatting 2024-06-23 01:02:52 -07:00
mbecker20
645382856a update only flattens one level deep 2024-06-22 23:56:01 -07:00
mbecker20
5c4e6a6dbb select aws config 2024-06-22 23:33:35 -07:00
mbecker20
66810e1efb add method to get available aws ecr labels 2024-06-22 23:29:02 -07:00
mbecker20
69a84882f0 1.9.0 2024-06-22 23:06:53 -07:00
mbecker20
41648436a5 default periphery method fields 2024-06-22 22:59:51 -07:00
mbecker20
083a88aa7b implement aws ecr image registry 2024-06-22 22:57:26 -07:00
mbecker20
750f95c90d improve shortcut menu 2024-06-22 18:24:38 -07:00
mbecker20
129f3ecd82 add more kb shortcuts and shortcut menu 2024-06-22 02:56:57 -07:00
mbecker20
1b754f80ab fix double emojis 2024-06-22 01:54:45 -07:00
mbecker20
968a882012 fix alerter table 2024-06-22 01:29:31 -07:00
mbecker20
696ebdb26f label blacklist correctly 2024-06-22 01:25:38 -07:00
mbecker20
8fee04607d improve slack alerting 2024-06-22 01:10:13 -07:00
mbecker20
6fe250244b add alerter blacklist 2024-06-22 00:30:43 -07:00
mbecker20
b530af0eec send_alerts for sync alert 2024-06-21 23:09:38 -07:00
mbecker20
21e9361079 remove unused 2024-06-21 02:28:35 -07:00
mbecker20
524d2d956b fix alerts usage 2024-06-21 02:23:42 -07:00
mbecker20
aca9633941 add links and errors to slack messages 2024-06-21 01:12:46 -07:00
mbecker20
32e1bd2dda add badges for tag filter shortcuts 2024-06-21 00:15:40 -07:00
mbecker20
cb363d1559 add shift + T and shift + C to manage tags 2024-06-20 23:51:12 -07:00
mbecker20
63eb74b9c8 Add and configure build alerts 2024-06-20 23:41:28 -07:00
mbecker20
bbcc27704f bump rust builder version 2024-06-16 16:00:57 -07:00
mbecker20
0aa9513dd0 1.8.0 2024-06-16 15:36:51 -07:00
mbecker20
26b216b478 add resources page 2024-06-16 15:33:31 -07:00
mbecker20
166299bb57 sync docs 2024-06-16 14:35:09 -07:00
mbecker20
03c47eb3dc remove cli sync 2024-06-16 01:41:54 -07:00
mbecker20
1fcb4ad085 move / update changelog 2024-06-16 01:41:15 -07:00
mbecker20
f51af8fbe1 docs 2024-06-16 01:34:08 -07:00
mbecker20
4a975e1b92 update resource sync docs 2024-06-16 01:33:05 -07:00
mbecker20
ba556e3284 fix doc link 2024-06-16 00:31:23 -07:00
mbecker20
299a326942 log build has new version 2024-06-16 00:20:22 -07:00
mbecker20
a5d4b9aefb add cached results reasons 2024-06-16 00:04:05 -07:00
mbecker20
40b820ae42 add reason to deploy logs 2024-06-15 22:01:14 -07:00
mbecker20
7028bf2996 remove termination_signal for tokio signal 2024-06-15 21:48:54 -07:00
mbecker20
75ebd0e6c0 fix fe cancel logic error 2024-06-15 21:36:26 -07:00
mbecker20
426153df66 try improve toml parse error message 2024-06-15 21:33:53 -07:00
mbecker20
5bd423a6a6 sync deploy new build 2024-06-15 21:15:17 -07:00
mbecker20
c24131d383 nested propagate read resources error 2024-06-15 20:37:29 -07:00
mbecker20
9f54b6c26a 1.8.0. improve env config UI, add sync deploy state management 2024-06-15 20:15:33 -07:00
mbecker20
ab8ae51ece slight more colors 2024-06-15 20:14:25 -07:00
mbecker20
ef2a83ff16 add colors to procedure logs 2024-06-15 20:06:34 -07:00
mbecker20
7872771aee clean up sync log 2024-06-15 19:45:53 -07:00
mbecker20
b12cf858d8 sync deploy logs need \n 2024-06-15 19:36:46 -07:00
mbecker20
38dba91c3a sync deploy accounts for any dependencies in 'after' need deploy 2024-06-15 19:20:45 -07:00
mbecker20
ea8136aa57 add sync deployment state log 2024-06-15 17:31:49 -07:00
mbecker20
f956e12e28 move formatting to shared lib 2024-06-15 17:15:05 -07:00
mbecker20
207ea52b95 add finished log 2024-06-15 17:12:02 -07:00
mbecker20
caf28d3a26 sync deploy 2024-06-15 17:03:16 -07:00
mbecker20
8fff45649d implement sync deployment get updates for view with deploy action 2024-06-15 15:50:10 -07:00
mbecker20
de5df70e11 invert search FE 2024-06-15 00:58:03 -07:00
mbecker20
3df010ac2a read req error debug 2024-06-15 00:54:11 -07:00
mbecker20
2d3beb708e invert logs 2024-06-15 00:28:04 -07:00
mbecker20
1dc22d01c4 improve execute instrumentation 2024-06-15 00:20:28 -07:00
mbecker20
eb029d0408 clone repo to specific directory on host 2024-06-14 23:43:47 -07:00
mbecker20
f926932181 build / deployment env variable / secret selectors 2024-06-14 23:28:08 -07:00
mbecker20
cc96d80c6a string deser filter empty lines 2024-06-14 22:20:39 -07:00
mbecker20
144b49495c string deser can handle empty string 2024-06-14 22:15:02 -07:00
mbecker20
de9354bdc7 frontend manage env with string 2024-06-14 22:10:07 -07:00
mbecker20
38bfee84d7 read resources propagate error 2024-06-14 21:53:13 -07:00
mbecker20
ec33d9fb9e trim incoming value env var string, conversion string, before deserialize 2024-06-14 21:42:59 -07:00
mbecker20
0a66937b1d fix unused linting 2024-06-14 21:30:10 -07:00
mbecker20
43cc0c3bc1 remove @ in format date 2024-06-14 14:48:22 -07:00
mbecker20
c14b395c70 quick copy variable value 2024-06-12 12:15:29 -07:00
mbecker20
7b8529a7c6 tweak colors 2024-06-12 11:55:06 -07:00
mbecker20
547c089581 update colors 2024-06-12 11:53:39 -07:00
mbecker20
4fe5e461b3 use stroke for icons 2024-06-12 03:48:47 -07:00
mbecker20
edfb873f7c improve error logs 2024-06-12 03:22:51 -07:00
mbecker20
5ef5294c44 remove onkeydown causing redundant create 2024-06-12 03:15:07 -07:00
mbecker20
5d3c50e04f reorder procedure config table 2024-06-12 02:47:41 -07:00
mbecker20
f10efbb5ba add bg to body 2024-06-12 02:39:26 -07:00
mbecker20
39ce98161b add the colors, always plz 2024-06-12 02:21:49 -07:00
mbecker20
cff6e79eee fix omnibar all resource types 2024-06-12 01:46:30 -07:00
mbecker20
dedf22ede8 continue on disabled stage 2024-06-12 01:25:10 -07:00
mbecker20
6955b92a99 add same colors in update 2024-06-12 01:15:39 -07:00
mbecker20
5c63eeab02 better sync coloring 2024-06-12 01:13:33 -07:00
mbecker20
4c14a4ae20 create variable log skip description line if it's empty 2024-06-12 00:39:23 -07:00
mbecker20
29fd856a2d deal with deployment build version 2024-06-11 03:07:56 -07:00
mbecker20
195bdbd94a fix " to \" 2024-06-11 02:14:57 -07:00
mbecker20
298ccd945c improve export dialog sizing 2024-06-11 01:42:06 -07:00
mbecker20
436e4e79e9 toml include ResourceSync 2024-06-11 01:09:37 -07:00
mbecker20
8b8c89d976 1.7.3 procedure stage alias 2024-06-11 00:51:16 -07:00
mbecker20
25c8d25636 1.7.2 default resource config parsing 2024-06-11 00:44:41 -07:00
mbecker20
ea242de2e4 default the config if not exists 2024-06-11 00:34:11 -07:00
mbecker20
be03547407 reorder struct fields for improved toml 2024-06-11 00:04:20 -07:00
mbecker20
9c0d28b311 allow inline arrow up to max length 2024-06-10 23:53:23 -07:00
mbecker20
f269deb99c update toml_pretty 2024-06-10 23:30:17 -07:00
mbecker20
3df8163131 improve procedure toml 2024-06-10 23:14:04 -07:00
mbecker20
33a16a9bd2 need 2 \n 2024-06-10 22:36:17 -07:00
mbecker20
215e7d1bdc update toml_pretty 2024-06-10 22:11:40 -07:00
mbecker20
25e0905c0c fix deserializers 2024-06-10 21:31:17 -07:00
mbecker20
1c07ccea85 bump toml for multiline string 2024-06-10 19:26:01 -07:00
mbecker20
405ec1b8cc bump toml_pretty for fix 2024-06-10 18:58:33 -07:00
mbecker20
4f212bd06f update toml_pretty with skip empty strings 2024-06-10 18:43:53 -07:00
mbecker20
074f4ea2db fix toml 2024-06-10 18:07:05 -07:00
mbecker20
c9abccaf02 build use string serialized version 2024-06-10 17:59:03 -07:00
mbecker20
6428fa6de2 1.7.1 2024-06-10 17:37:22 -07:00
mbecker20
883f54431d custom to toml serializer for api 2024-06-10 17:34:56 -07:00
mbecker20
28dc030e2b custom Vec<EnvVar>, Vec<Conversion> deserializers to support config them as string 2024-06-10 14:39:51 -07:00
mbecker20
145d933e63 pt-2 2024-06-10 01:47:46 -07:00
mbecker20
9772ca1a1c add Resource Sync system user 2024-06-10 01:46:26 -07:00
mbecker20
4059b69201 core auto refreshes all syncs every 5 min 2024-06-09 23:49:02 -07:00
mbecker20
8e175ea5a1 add pending sync alert variant 2024-06-09 23:23:40 -07:00
mbecker20
d931b8b4e7 fix deployment when image_type None 2024-06-09 23:15:52 -07:00
mbecker20
0982800ad2 update client to 1.7.0 2024-06-09 22:47:49 -07:00
mbecker20
4382ad0b3b migrate 1.6 to 1.7 2024-06-09 22:46:21 -07:00
mbecker20
e7891f7870 update docs for ghcr 2024-06-09 21:56:01 -07:00
mbecker20
6bada46841 add export variables / user groups 2024-06-09 21:32:53 -07:00
mbecker20
eae6cbd228 label the image 2024-06-09 20:55:09 -07:00
mbecker20
a0ee6180b2 finish 1.7.0 2024-06-09 19:45:46 -07:00
mbecker20
3ce3de8768 configure registry 2024-06-09 19:34:49 -07:00
mbecker20
6c46993b61 New Monitor logo cr. George Weston 2024-06-09 18:38:58 -07:00
mbecker20
fbd9d14aaa change handler loggin 2024-06-09 15:11:18 -07:00
mbecker20
1011ec60ab rename to ghcr 2024-06-09 14:55:26 -07:00
mbecker20
48e17a7c87 update config example 2024-06-09 03:43:26 -07:00
mbecker20
a94baded55 1.7.0 2024-06-09 03:06:17 -07:00
mbecker20
e97c0873cf get types 2024-06-09 03:05:07 -07:00
mbecker20
43a0b76811 small 2024-06-09 03:04:05 -07:00
mbecker20
2d2577e5ee ghcr 2024-06-09 02:46:57 -07:00
mbecker20
202ac77de3 from on the new types 2024-06-09 02:18:40 -07:00
mbecker20
568c963419 core / periphery support ghcr 2024-06-09 02:01:51 -07:00
mbecker20
5c3294241d add 1.6 build schema for 1.7 migration 2024-06-08 15:35:31 -07:00
mbecker20
648a04be88 add sleep execution for procedure 2024-06-08 14:51:19 -07:00
mbecker20
1b5822f649 custom version deserializer. support string versions 2024-06-08 14:23:26 -07:00
mbecker20
c41a008603 fix variable update 2024-06-08 05:22:32 -07:00
mbecker20
603243b0eb need partial default on alerter enabled 2024-06-08 04:56:52 -07:00
mbecker20
d09ab36696 any sync error shows up in log 2024-06-08 04:34:09 -07:00
mbecker20
ad168c87f7 use approp dialog menus 2024-06-08 04:12:55 -07:00
mbecker20
914f4c6197 seems to work 2024-06-08 03:35:22 -07:00
mbecker20
c73d918e18 no unnecessary user group sync 2024-06-08 02:56:53 -07:00
mbecker20
9d116f56cb sort lists by name 2024-06-08 02:21:32 -07:00
mbecker20
8a8dede5db resource sync state 2024-06-08 02:12:04 -07:00
mbecker20
d2cecf316c add pending update alert 2024-06-08 01:39:18 -07:00
mbecker20
cad1ee123e improv the sync 2024-06-08 00:50:30 -07:00
mbecker20
6aa801b705 lock sync dir access 2024-06-07 22:02:58 -07:00
mbecker20
078ba59002 ensure sync directory exist 2024-06-07 21:02:28 -07:00
mbecker20
5eacb7191b fix the fe errors with most boilerplate 2024-06-07 20:00:01 -07:00
mbecker20
45eafd10b9 finish sync backend? 2024-06-07 19:00:03 -07:00
mbecker20
42c486807c implement resource sync cli 2024-06-07 17:11:58 -07:00
mbecker20
8c31fcff02 backend for resource sync 2024-06-07 03:52:07 -07:00
mbecker20
49f1d40ce8 implement RunSync 2024-06-07 02:43:45 -07:00
mbecker20
bf85e886bd abit more 2024-06-06 03:02:25 -07:00
mbecker20
eda0b233ca implement sync 2024-06-06 02:38:47 -07:00
mbecker20
5efb227851 update ts client response 2024-06-05 23:46:05 -07:00
mbecker20
1a45fffe75 move some libraries out 2024-06-05 23:44:06 -07:00
mbecker20
fa72f2e5ef update execute task handling 2024-06-05 22:50:03 -07:00
mbecker20
c9152db300 unneeded import 2024-06-05 22:42:28 -07:00
mbecker20
25fcca7246 should fix procedure 2024-06-05 22:42:04 -07:00
mbecker20
ac449e38d5 init boilerplate 2024-06-05 17:29:59 -07:00
mbecker20
d6c66948ba skip update in execute task instrument 2024-06-05 16:16:21 -07:00
mbecker20
b6af790aef sort resources in selector 2024-06-05 15:39:47 -07:00
mbecker20
36a49210a0 fix filter by split 2024-06-05 15:16:30 -07:00
mbecker20
d2b2aa0550 its not really distributed 2024-06-05 02:41:08 -07:00
mbecker20
7f4c883416 hide / show to toggle alert area 2024-06-05 01:59:16 -07:00
mbecker20
676fb3c732 common filtering method 2024-06-04 05:50:36 -07:00
mbecker20
17da4bd2fa procedure setconfig on update 2024-06-04 04:46:40 -07:00
mbecker20
b44e57bbf6 improve update date format 2024-06-04 04:36:55 -07:00
mbecker20
6aa5b5faae use same search alg for all command inputs 2024-06-04 04:21:24 -07:00
mbecker20
9565855477 fix fe error 2024-06-04 02:23:53 -07:00
mbecker20
3504c083b4 export all resources toml filter resources by tag 2024-06-04 02:11:07 -07:00
mbecker20
5fdaa9a808 make overflowing tags wrap 2024-06-04 01:45:20 -07:00
mbecker20
ec35b14077 further improve BuildState if cancel. 2024-06-04 01:22:41 -07:00
mbecker20
158f3ad89b fmt update operation with regex everywhere 2024-06-03 16:36:53 -07:00
mbecker20
7257ecbaed version link to docs 2024-06-03 16:12:09 -07:00
mbecker20
a2a94f23ee publish client + cli 1.6.2 2024-06-03 15:03:43 -07:00
mbecker20
03cad5b23b partial config from files first merged onto full config default before diff with remote 2024-06-03 15:01:14 -07:00
mbecker20
2460b5edf7 update log internal scroll 2024-06-03 03:10:35 -07:00
mbecker20
83fdb180aa avoid deployment state change alert involving status Unknown 2024-06-03 03:01:23 -07:00
mbecker20
9b1d32ebdf base64 encode aws user data before send 2024-06-03 00:44:45 -07:00
mbecker20
ea4ae7651c readme 2024-06-02 21:19:46 -07:00
mbecker20
5f6fabd925 1.6.1 pass creds as args cli 2024-06-02 21:17:23 -07:00
mbecker20
38d9495ab1 fix cli readme 2024-06-02 21:03:37 -07:00
mbecker20
46ad5b3953 1.6.0 Improve procedure with multiple stages 2024-06-02 21:00:06 -07:00
mbecker20
e60b817208 improve saving 2024-06-02 20:57:44 -07:00
mbecker20
0ce5248292 improve changes made visibility 2024-06-02 20:54:41 -07:00
mbecker20
050c29f4a3 show when changes made 2024-06-02 20:30:06 -07:00
mbecker20
8580728933 alert config working 2024-06-02 20:15:49 -07:00
mbecker20
3c5868d111 alert refactor 2024-06-02 19:15:13 -07:00
mbecker20
40e1b1ff88 improve build cancel disabled logic to prevent redundant cancels 2024-06-02 17:50:33 -07:00
mbecker20
99641b2e39 improve update toast title 2024-06-02 17:42:15 -07:00
mbecker20
f0e7757eb4 improve validate CancelBuild 2024-06-02 17:39:13 -07:00
mbecker20
f7283b1fc1 update alerter to support type filtering. 2024-06-02 17:16:35 -07:00
mbecker20
771af21eae migrator support migrate permissions 2024-06-02 15:35:36 -07:00
mbecker20
0dda791ec7 fix build not try add_update 2024-06-02 04:56:37 -07:00
mbecker20
bc76b1c07e only push recently viewed if exists 2024-06-02 04:43:01 -07:00
mbecker20
8b537924fb correct execution target passed by name 2024-06-02 04:38:32 -07:00
mbecker20
f5ce3570e4 execute api returns update immediately 2024-06-02 04:14:51 -07:00
mbecker20
f1e51d275c move stages up / down 2024-06-02 02:39:48 -07:00
mbecker20
eaa10d96b5 finish new procedure config 2024-06-02 02:06:01 -07:00
mbecker20
037364068d refresh caches on create / update 2024-06-02 01:07:53 -07:00
mbecker20
2441bc8cbf fix lint 2024-06-02 00:44:47 -07:00
mbecker20
92ac003910 backend for updated procedure schema 2024-06-02 00:36:39 -07:00
mbecker20
693f24763f new deployment / repo from server page 2024-06-01 20:33:38 -07:00
mbecker20
d9d44ceee1 update readme with manual 2024-06-01 19:58:20 -07:00
mbecker20
30ab8ed17b update cli with execute features. 2024-06-01 19:47:46 -07:00
mbecker20
2bf2be54cc bookworm base 2024-05-29 13:09:52 -07:00
mbecker20
b7ea680958 alert table rename Target to Resource 2024-05-29 01:48:32 -07:00
mbecker20
2a56d09f89 improve periphery start command docs 2024-05-29 01:40:45 -07:00
mbecker20
2612f742b2 remove trailing whitespace in error log 2024-05-29 00:22:04 -07:00
mbecker20
29bdf5c71d pretty clone fail message 2024-05-29 00:20:58 -07:00
mbecker20
873d9ea433 builder instance failed reachability adds log that instance will be terminated 2024-05-29 00:16:55 -07:00
mbecker20
717f3afa89 fix build config when not builder 2024-05-28 14:33:52 -07:00
mbecker20
ec31d1af01 fix 2024-05-28 05:35:35 -07:00
mbecker20
9e5c52b9a4 update client version 2024-05-28 05:32:19 -07:00
mbecker20
762873d5be implement ui_write_disabled 2024-05-28 05:30:37 -07:00
mbecker20
67fa512975 core version in topbar 2024-05-28 05:06:39 -07:00
mbecker20
502dd3a4a8 update client version 2024-05-28 04:58:31 -07:00
mbecker20
8c22bdd473 1.5.4 add variable support to monitor cli 2024-05-28 04:57:41 -07:00
mbecker20
ba6801da11 cli much faster 2024-05-28 04:02:34 -07:00
mbecker20
309802093c 1.5.3 add ListFull methods 2024-05-28 03:42:35 -07:00
mbecker20
3d1e3009b3 add ListFull methods 2024-05-28 03:25:50 -07:00
mbecker20
fdc23c2650 improve docs 2024-05-28 03:06:42 -07:00
mbecker20
072ee6834e update dashboard screenshots 2024-05-28 01:44:11 -07:00
mbecker20
bedbf76349 red 2024-05-28 01:40:44 -07:00
mbecker20
e26d1211cc Cloud 2024-05-28 01:38:35 -07:00
mbecker20
0342ee4dd9 Hetzner 2024-05-28 01:38:08 -07:00
mbecker20
669d5c81b4 read. me. 2024-05-28 01:35:49 -07:00
mbecker20
defbab5955 monitor cli 2024-05-28 01:30:57 -07:00
mbecker20
9405295e4a update changelog 2024-05-28 01:23:44 -07:00
mbecker20
28c077ed4c remove hetzner automount 2024-05-26 02:30:56 -07:00
mbecker20
61406c1b00 add back wait for volume 2024-05-26 02:10:34 -07:00
mbecker20
64638730b9 waiting for volumes makes no difference. dont seem to automount 2024-05-26 01:34:14 -07:00
mbecker20
c0942c6d1d remove execute fail message 2024-05-26 00:42:33 -07:00
mbecker20
ff964cd0fe fix updates 2024-05-26 00:38:59 -07:00
mbecker20
d56f632a11 improve unknown server styling 2024-05-26 00:21:43 -07:00
mbecker20
a7f22b6cfb instrument ServerTemplate write api 2024-05-26 00:09:38 -07:00
mbecker20
6053fc1d99 hetzner poll volumes for ready before launch server 2024-05-26 00:04:31 -07:00
mbecker20
573ff1863c 1.5.2 2024-05-25 23:34:14 -07:00
mbecker20
dd4a9b0cb5 add defaults to Hetzner volume 2024-05-25 23:32:16 -07:00
mbecker20
d243cf2da7 all resources search case insensitive 2024-05-25 23:07:54 -07:00
mbecker20
4e06e788ae PushRecentlyViewed and SetLastSeenUpdate should be debug instrument 2024-05-25 21:13:25 -07:00
mbecker20
a0f71f8af5 table search not case sensitive 2024-05-25 21:07:53 -07:00
mbecker20
fcbb75d0c0 update some tracing stuff 2024-05-25 20:46:56 -07:00
mbecker20
0a8419bb13 update client to 1.5.1 2024-05-25 20:38:35 -07:00
mbecker20
40fe76cf27 1.5.1 move routes to /user 2024-05-25 20:36:52 -07:00
mbecker20
5594d3c1d9 add server to repo table / info 2024-05-25 19:45:23 -07:00
mbecker20
b12aeb259f clean up 2024-05-25 18:38:17 -07:00
mbecker20
b121b0ac07 fix remove from recently viewed 2024-05-25 18:34:05 -07:00
mbecker20
a9f1d91b1b update deps 2024-05-25 18:06:49 -07:00
mbecker20
abf48d0243 1.5.0 doc update and add other_data 2024-05-25 17:57:31 -07:00
mbecker20
447690d8bf remove TextUpdateMenu update on enter 2024-05-25 16:58:34 -07:00
mbecker20
a70c0a2697 increase hetzner polling time 2024-05-25 15:49:10 -07:00
mbecker20
0758e6ff81 get ip after instance is running 2024-05-25 15:29:11 -07:00
mbecker20
ea0e059ee1 hetzner response optional parsing 2024-05-25 14:26:26 -07:00
mbecker20
c9e0524794 add repos tab to server page 2024-05-25 14:05:02 -07:00
mbecker20
81ceaf1eae move automount 2024-05-25 13:50:59 -07:00
mbecker20
37c07ff748 only actually add automount if volumes nonempty 2024-05-25 13:43:06 -07:00
mbecker20
62e8943ebe improve client error message 2024-05-25 13:40:05 -07:00
mbecker20
99ccffbc38 configure hetzner template working 2024-05-25 03:09:31 -07:00
mbecker20
84dc29b77f update ts types 2024-05-25 01:37:05 -07:00
mbecker20
81bab4aa50 clean up some unused stuff 2024-05-25 01:35:28 -07:00
mbecker20
9fa2fd0f58 implement hetzner server launch 2024-05-25 01:16:54 -07:00
mbecker20
3745967690 ensure env overrides fully applied 2024-05-24 16:40:15 -07:00
mbecker20
e8cfc13342 implement transparent mode 2024-05-23 02:19:42 -07:00
mbecker20
ec47bb11ee start on hetzner 2024-05-23 01:47:02 -07:00
mbecker20
d008c95853 no destructive update toasts 2024-05-22 04:27:33 -07:00
mbecker20
4986d70506 remove unneded build toasts 2024-05-22 04:23:53 -07:00
mbecker20
1372a5fb39 3s 2024-05-22 04:22:06 -07:00
mbecker20
f54224650f fix update 2024-05-22 04:20:48 -07:00
mbecker20
2eee1459e7 fix ws 2024-05-22 04:02:37 -07:00
mbecker20
5a3fd891c4 huh 2024-05-22 04:00:43 -07:00
mbecker20
ba3f288c2d improve toasts 2024-05-22 03:55:26 -07:00
mbecker20
6d5fd7dc5d improve update table 2024-05-22 03:25:49 -07:00
mbecker20
df3fd7c4e9 update builder ami 2024-05-22 03:15:30 -07:00
mbecker20
395f032ee2 fix update details name 2024-05-22 03:15:23 -07:00
mbecker20
de2bd800c4 update client to 1.4.1 2024-05-22 02:01:40 -07:00
mbecker20
75352a91ff 1.4.1 fix cli - shouldn't send update if no change 2024-05-22 01:59:25 -07:00
mbecker20
9b12270d04 update client published version 2024-05-22 00:29:34 -07:00
mbecker20
7fc378798f fix cli toml patch 2024-05-22 00:28:21 -07:00
mbecker20
3db2c93303 expande resource table status column 2024-05-22 00:25:54 -07:00
mbecker20
150d6562bf improve table with better row sizing 2024-05-21 01:31:46 -07:00
mbecker20
c3b549b051 resource to_list_item should be infallible 2024-05-21 00:46:13 -07:00
mbecker20
931f2bd92d log auto bottom and increase height 2024-05-20 22:56:32 -07:00
mbecker20
6b6324d79c theme toggle indicator 2024-05-20 22:52:25 -07:00
mbecker20
2c65d924f9 dashboard recents 2 cols unless 2xl 2024-05-20 22:46:21 -07:00
mbecker20
dd1fecf190 a little smaller 2024-05-20 22:36:01 -07:00
mbecker20
aa96a37db4 decrease sidebar vertical size 2024-05-20 21:40:14 -07:00
mbecker20
ec9e9638f5 more gap between resources on dashboard 2024-05-20 03:55:58 -07:00
mbecker20
e33019cab8 add prune images to prune loop 2024-05-20 03:55:01 -07:00
mbecker20
951cb82e0c supports 2024-05-20 03:34:59 -07:00
mbecker20
0643f96053 log 2024-05-20 03:34:07 -07:00
mbecker20
56d835f2d2 not 2024-05-20 03:33:02 -07:00
mbecker20
d8fb8f8649 fix 2024-05-20 03:31:52 -07:00
mbecker20
7197d628e5 fit 2024-05-20 03:30:34 -07:00
mbecker20
96083178dd add changelog to readme 2024-05-20 03:29:45 -07:00
mbecker20
9d1b705ab1 improve login page 2024-05-20 01:36:14 -07:00
mbecker20
2582bc9ba3 simplify resource components, improve update details loading behavior 2024-05-19 15:27:02 -07:00
mbecker20
44f34b9b40 show core secrets on variables page 2024-05-19 02:56:32 -07:00
mbecker20
bbb18d8280 standard search bar location 2024-05-19 02:38:24 -07:00
mbecker20
da95b7d074 add lizard logo / favicon 2024-05-19 02:20:43 -07:00
mbecker20
6b25309aed variables working 2024-05-19 01:51:09 -07:00
mbecker20
f8e371af31 periphery needs axum feature 2024-05-18 16:00:20 -07:00
mbecker20
a0f5ae8c7f add core side interpolation to update 2024-05-18 15:58:23 -07:00
mbecker20
2f371af288 1.4.0 2024-05-18 15:37:30 -07:00
mbecker20
76840efddc implement core variables / secrets on the backend 2024-05-18 15:34:58 -07:00
mbecker20
8f01e441a4 increase per page limit of alerts and updates 2024-05-18 02:42:03 -07:00
mbecker20
41a6e0a65a improve dashboard 2024-05-18 02:34:47 -07:00
mbecker20
40027f7430 improve alert table 2024-05-18 01:07:23 -07:00
mbecker20
a2c69aba87 improve deployment build version selector 2024-05-18 00:49:57 -07:00
mbecker20
a5d3fbedc6 remove build versions pagination 2024-05-18 00:12:08 -07:00
mbecker20
b311b11785 label recents 2024-05-18 00:05:44 -07:00
mbecker20
7a0b29b387 move mongo / dockers deps to client features 2024-05-17 23:48:05 -07:00
mbecker20
d3a87fdb5f update snippet 2024-05-17 22:40:23 -07:00
mbecker20
9b7ab6d98a update resolver and remove async-trait 2024-05-17 22:38:06 -07:00
mbecker20
c302e28d86 improve omnibar search suggestion 2024-05-17 02:13:53 -07:00
mbecker20
33be989e3a dashboard2 2024-05-17 02:07:48 -07:00
mbecker20
c9d65300c9 work on dashboard 2 2024-05-17 01:35:34 -07:00
mbecker20
e96b676366 add build / repo / procedure state stuff to summary 2024-05-17 01:10:48 -07:00
mbecker20
0bff4a5e51 10 recents per resource type 2024-05-16 00:02:09 -07:00
mbecker20
9b12334922 update ts types and fix stuff 2024-05-15 19:17:54 -07:00
mbecker20
68659630fc split recently viewed by resource type 2024-05-15 19:15:03 -07:00
mbecker20
8b33647620 add prop 2024-05-15 02:32:21 -07:00
mbecker20
871aba62d5 move description to right 2024-05-15 02:21:06 -07:00
mbecker20
c649094a8a intro 2024-05-14 23:15:57 -07:00
mbecker20
e3c11db89e update deps 2024-05-14 02:10:10 -07:00
mbecker20
c43293109d add repo and homepage 2024-05-14 01:55:13 -07:00
mbecker20
d3e4f9f638 1.3.0 clean publish 2024-05-14 01:43:01 -07:00
mbecker20
eecf583b0e add dotenv for docsite convenience 2024-05-14 01:37:50 -07:00
mbecker20
a518806d8b docs 2024-05-14 01:33:38 -07:00
mbecker20
8a4611c380 fix link 2024-05-14 01:20:30 -07:00
mbecker20
d679fbe72f client readme 2024-05-14 01:17:55 -07:00
mbecker20
1cf02bc4b4 docs 2024-05-14 01:00:04 -07:00
mbecker20
bf703eef35 frontend support procedure state 2024-05-13 22:28:45 -07:00
mbecker20
985058afb0 ts types 2024-05-13 15:30:37 -07:00
mbecker20
eac1145958 add ProcedureState 2024-05-13 15:30:23 -07:00
mbecker20
1b408d92d9 theme 2024-05-13 01:32:24 -07:00
mbecker20
ee95c2c76b update screenshots 2024-05-13 01:31:49 -07:00
mbecker20
e83124ebff build ConfigOrDeployments 2024-05-13 01:11:12 -07:00
mbecker20
e912ae050a deploy working 2024-05-13 01:01:06 -07:00
mbecker20
99253d6182 build extra arg suggestions 2024-05-13 00:14:12 -07:00
mbecker20
ef91577ac5 check disabled 2024-05-12 23:48:53 -07:00
mbecker20
b97f9b30b3 aws config looking good 2024-05-12 23:44:06 -07:00
mbecker20
7cb11dbc5d improve builder config 2024-05-12 23:33:33 -07:00
mbecker20
6d815629fc everything looking good 2024-05-12 23:12:54 -07:00
mbecker20
f8021d8541 repo config good 2024-05-12 23:01:25 -07:00
mbecker20
1f444fdbc2 builds looking good 2024-05-12 22:58:03 -07:00
mbecker20
af76dd1be4 more config 2024-05-12 22:38:57 -07:00
mbecker20
5cb91c6f8d improve config 2024-05-12 21:35:47 -07:00
mbecker20
de5502aec7 dark less blue 2024-05-12 19:39:23 -07:00
mbecker20
ef4ae4c5f2 state aware repo actions 2024-05-12 16:03:56 -07:00
mbecker20
866eb6d81b looking good 2024-05-12 15:55:49 -07:00
mbecker20
58d6c16eea update sheet from top 2024-05-12 15:37:56 -07:00
mbecker20
ccbf13ae84 get username return avatar 2024-05-12 13:44:32 -07:00
mbecker20
21f6acd3d7 add other resources to omnisearch 2024-05-12 13:11:52 -07:00
mbecker20
dce59d1383 center search 2024-05-12 13:07:10 -07:00
mbecker20
2fb544c3b0 sidebar gap 2024-05-12 13:02:47 -07:00
mbecker20
1ba288be79 split sidebar home 2024-05-12 12:59:45 -07:00
mbecker20
1ff21d2986 more cli loggin 2024-05-12 12:21:38 -07:00
mbecker20
79cc2c1bb7 improve cli logging 2024-05-12 12:15:25 -07:00
mbecker20
17b2e6660c rename alert_logger -> alerter. cli colors 2024-05-12 11:59:49 -07:00
mbecker20
4ef095fe55 improve server stats tables 2024-05-12 04:23:38 -07:00
mbecker20
fb0a7352e3 filter out docker overlay disks 2024-05-12 03:49:29 -07:00
mbecker20
9a087e5975 clean up resource / tree view 2024-05-12 03:26:45 -07:00
mbecker20
814e47031d dope search 2024-05-12 02:55:13 -07:00
mbecker20
1304565e40 fix log to bottom 2024-05-12 01:54:31 -07:00
mbecker20
85616d0669 command loops 2024-05-12 01:46:22 -07:00
mbecker20
feff4647e7 add display for repo latest hash / message 2024-05-12 01:41:41 -07:00
mbecker20
549e15bfe2 add more image tags 2024-05-12 01:24:43 -07:00
mbecker20
a08baf8432 actually mount GetLatestCommit 2024-05-12 01:18:33 -07:00
mbecker20
99c47ce133 just mess with stuff 2024-05-12 01:09:58 -07:00
mbecker20
26a4691c0b server page - swap version / stats link 2024-05-11 23:55:14 -07:00
mbecker20
addb35aa69 give placeholders 2024-05-11 23:38:48 -07:00
mbecker20
16bf78f9ad enable commit config 2024-05-11 23:11:41 -07:00
mbecker20
3ed4f91d82 periphery cli args name to periphery 2024-05-11 23:06:04 -07:00
mbecker20
653fb894a2 delete file 2024-05-11 22:44:13 -07:00
mbecker20
0f9798a5f2 1.2.0 2024-05-11 22:34:35 -07:00
mbecker20
6776a20ec5 build repo state cache 2024-05-11 22:30:33 -07:00
mbecker20
fb21e8586f clone commit hash 2024-05-11 21:38:29 -07:00
mbecker20
8b2c4d604a periphery get repo status 2024-05-11 18:42:21 -07:00
mbecker20
c0b010d5ce rename DockerContainerState -> DeploymentState 2024-05-11 18:29:40 -07:00
mbecker20
97de34a088 align state / status distinction 2024-05-11 18:22:24 -07:00
mbecker20
6c0b76a270 add big icons 2024-05-11 18:04:30 -07:00
mbecker20
eebd44ab9b common resource filter 2024-05-11 17:42:38 -07:00
mbecker20
783250c5ce sort build / repo status update query by most recent 2024-05-11 16:12:11 -07:00
mbecker20
70ff93050f choose between config / log for deployment 2024-05-11 16:06:36 -07:00
mbecker20
1cc1813185 fix dev fe build 2024-05-11 14:50:05 -07:00
mbecker20
b4f9b87d06 update resources, cli 2024-05-11 14:48:26 -07:00
mbecker20
26b09a767e unlink 2024-05-11 13:44:51 -07:00
mbecker20
bba6c4d8b6 add status to build / repo 2024-05-10 22:59:20 -07:00
mbecker20
ea440235c4 configure enable / disable action on webhook recieve 2024-05-10 21:54:41 -07:00
mbecker20
f9949bf988 readOnly webhhok copy 2024-05-10 20:40:54 -07:00
mbecker20
b978db012e slight style 2024-05-10 20:37:56 -07:00
mbecker20
bc2fbdd657 remove rws and code faster reconnect 2024-05-10 18:04:35 -07:00
mbecker20
a5571bcf4d 2xl show 8 recents 2024-05-10 17:30:20 -07:00
mbecker20
683a528dd9 mx-8 2024-05-10 17:23:28 -07:00
mbecker20
4a283b6052 remove container on topbar 2024-05-10 17:21:11 -07:00
mbecker20
37224ee1ad dont req for username of built in users 2024-05-10 17:14:55 -07:00
mbecker20
5e7445b10d add a god damn sidebar 2024-05-10 17:07:51 -07:00
mbecker20
1829a7da34 update diff looking good 2024-05-10 03:59:51 -07:00
mbecker20
4a1a653bd9 rust 1.78.0 2024-05-10 03:40:30 -07:00
mbecker20
840c1a87d0 try putting the html in th diff directly 2024-05-10 03:19:18 -07:00
mbecker20
c90368e2af add periphery build repo 2024-05-10 02:29:47 -07:00
mbecker20
1f9d74fadb fix default creds path 2024-05-10 02:28:06 -07:00
mbecker20
5b261058fe add updates to dashboard 2024-05-10 02:04:23 -07:00
mbecker20
cf6632ba02 partial_derive2 0.4.2 better diffs 2024-05-10 01:50:08 -07:00
mbecker20
c7124bd63c colored diff 2024-05-10 01:03:23 -07:00
mbecker20
ba19e45607 imporve config styling 2024-05-10 00:50:29 -07:00
mbecker20
20282ffcbb update frontend deps 2024-05-10 00:10:40 -07:00
mbecker20
cb8ad90838 cli readme and default creds path 2024-05-09 22:29:40 -07:00
mbecker20
caac3fdcc4 v{version} 2024-05-09 15:51:54 -07:00
mbecker20
44da282060 improve server version 2024-05-09 15:47:58 -07:00
mbecker20
a2e27b09fc show version 2024-05-09 15:39:30 -07:00
mbecker20
c1b1f397fd comments 2024-05-09 15:36:15 -07:00
mbecker20
1d0f239594 delete binary before recurl 2024-05-09 15:34:03 -07:00
mbecker20
549bc78799 log version 2024-05-09 15:28:32 -07:00
mbecker20
9eb9b57e36 install load latest version automatically if its not passed 2024-05-09 15:25:57 -07:00
mbecker20
c38849961e add Repos to search 2024-05-09 14:08:01 -07:00
mbecker20
3acfa0c4b1 more client docs 2024-05-09 03:25:37 -07:00
mbecker20
62b083c3be docsite 2024-05-09 03:12:53 -07:00
mbecker20
ee6fc4c590 include description / tags in diff 2024-05-09 01:53:29 -07:00
mbecker20
4fa550b3d3 monrun diffing 2024-05-09 01:33:03 -07:00
mbecker20
1c44ae98fb monrun / api better diffs 2024-05-09 00:34:21 -07:00
mbecker20
148223f995 config diff update log 2024-05-08 00:34:41 -07:00
mbecker20
4678f83542 fix clippy lints 2024-05-08 00:21:37 -07:00
mbecker20
340bac078f unneeded in cargo toml 2024-05-07 23:31:31 -07:00
mbecker20
3f8e75bbd8 update to partial derive 0.4.0 2024-05-07 23:29:57 -07:00
mbecker20
c4278d14a9 only show actions if more than 0 Actions on resource 2024-05-07 04:08:12 -07:00
mbecker20
4909106c3c turn off build icon spinning. still turns green 2024-05-07 03:45:00 -07:00
mbecker20
0f04a2848a clean up action states 2024-05-07 03:43:20 -07:00
mbecker20
ba073bf8b2 minimize update diffs 2024-05-07 03:40:30 -07:00
mbecker20
640809aa6b use trait for resource crud 2024-05-07 02:54:01 -07:00
mbecker20
943bb4c61a handle update config / description / tags seperately 2024-05-06 03:10:57 -07:00
mbecker20
ef43cb7920 clean up deployments toml 2024-05-06 02:39:04 -07:00
mbecker20
1abe634679 export individual resources 2024-05-06 02:24:35 -07:00
mbecker20
18ab18b6f6 fix for server template sync 2024-05-06 01:55:14 -07:00
mbecker20
aad971a599 sync working 2024-05-06 01:44:20 -07:00
mbecker20
3d9da97d7b monrun better check for empty 2024-05-06 01:36:22 -07:00
mbecker20
2d0a09f760 monrun 2024-05-06 01:12:14 -07:00
mbecker20
568c317d6d create repo does not clone 2024-05-06 00:47:27 -07:00
mbecker20
f0e1f253f4 add queue to listeners using locks 2024-05-06 00:33:19 -07:00
mbecker20
7634b9fa1d example 2024-05-06 00:21:03 -07:00
mbecker20
2e1795eba6 webhook 2024-05-06 00:16:22 -07:00
mbecker20
0f6b3d6e9b add github webhook copiers 2024-05-06 00:00:47 -07:00
mbecker20
a70afcc461 fix build client 2024-05-05 23:20:45 -07:00
mbecker20
d78fc2b282 improve onpull 2024-05-05 23:17:27 -07:00
mbecker20
249610afce frontend onpull 2024-05-05 22:55:37 -07:00
mbecker20
b93b639b40 add extra args style 2024-05-05 22:40:27 -07:00
mbecker20
13ceb55fe8 fix primary dropdown templates 2024-05-05 20:36:56 -07:00
mbecker20
7ebd38d350 slight style 2024-05-05 20:32:19 -07:00
mbecker20
c12c74005e finish configure server templates 2024-05-05 19:08:47 -07:00
mbecker20
c44dc1c6f6 clean server template schema 2024-05-05 18:39:10 -07:00
mbecker20
cd2b9ec4ed validate name not taken on LaunchServer 2024-05-05 16:35:55 -07:00
mbecker20
dc62442c00 LaunchServer revise 2024-05-05 14:16:19 -07:00
mbecker20
81a90f7ae4 launch server 2024-05-05 14:08:49 -07:00
mbecker20
af092e0d88 include server template in monrun export / import 2024-05-05 13:18:50 -07:00
mbecker20
0e70015fd1 add server template page 2024-05-05 13:02:25 -07:00
mbecker20
a214deef86 fix GetServerTemplatesSummary 2024-05-05 13:00:46 -07:00
mbecker20
0efb2966b0 set build version to 0.0.0 on Copy 2024-05-04 23:54:09 -07:00
mbecker20
1e8422a506 custom placeholder for AccountSelector 2024-05-04 23:10:46 -07:00
mbecker20
7bb386e6d0 Account Selector can set back to None 2024-05-04 18:57:42 -07:00
mbecker20
c464ca5612 resources clear their config after update completes 2024-05-04 18:11:23 -07:00
mbecker20
742630fdee create new resources navs to created resource 2024-05-04 17:54:21 -07:00
mbecker20
154fd899fe fix all the links 2024-05-04 17:49:06 -07:00
mbecker20
aa25f9d4c9 servers init disabled 2024-05-04 17:33:05 -07:00
mbecker20
eeed94c8fd === PERIPHERY INSTALLER === 2024-05-04 16:45:39 -07:00
mbecker20
b2f771199d default repos path 2024-05-04 16:39:20 -07:00
mbecker20
4bceb97a66 comment out fields that are just being set to the defautl 2024-05-04 16:25:55 -07:00
mbecker20
8734e6fc4c clean up periphery example config 2024-05-04 16:22:01 -07:00
589 changed files with 64532 additions and 22590 deletions

2
.cargo/config.toml Normal file
View File

@@ -0,0 +1,2 @@
[build]
rustflags = ["-Wunused-crate-dependencies"]

4
.gitignore vendored
View File

@@ -6,4 +6,6 @@ dist
.env
.env.development
creds.toml
core.config.toml
core.config.toml
.syncs
.stacks

View File

@@ -3,7 +3,6 @@
"scope": "rust",
"prefix": "resolve",
"body": [
"#[async_trait]",
"impl Resolve<${1}, User> for State {",
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
"\t\ttodo!()",

93
.vscode/tasks.json vendored
View File

@@ -1,93 +0,0 @@
{
"version": "2.0.0",
"tasks": [
{
"type": "cargo",
"command": "build",
"group": {
"kind": "build",
"isDefault": true
},
"label": "rust: cargo build"
},
{
"type": "cargo",
"command": "fmt",
"label": "rust: cargo fmt"
},
{
"type": "cargo",
"command": "check",
"label": "rust: cargo check"
},
{
"label": "start dev",
"dependsOn": [
"run core",
"start frontend"
],
"problemMatcher": []
},
{
"type": "shell",
"command": "yarn start",
"label": "start frontend",
"options": {
"cwd": "${workspaceFolder}/frontend"
},
"presentation": {
"group": "start"
}
},
{
"type": "cargo",
"command": "run",
"label": "run core",
"options": {
"cwd": "${workspaceFolder}/bin/core"
},
"presentation": {
"group": "start"
}
},
{
"type": "cargo",
"command": "run",
"label": "run periphery",
"options": {
"cwd": "${workspaceFolder}/bin/periphery"
}
},
{
"type": "cargo",
"command": "run",
"label": "run tests",
"options": {
"cwd": "${workspaceFolder}/bin/tests"
}
},
{
"type": "cargo",
"command": "publish",
"args": ["--allow-dirty"],
"label": "publish types",
"options": {
"cwd": "${workspaceFolder}/lib/types"
}
},
{
"type": "cargo",
"command": "publish",
"label": "publish rs client",
"options": {
"cwd": "${workspaceFolder}/lib/rs_client"
}
},
{
"type": "shell",
"command": "node ./client/ts/generate_types.mjs",
"label": "generate typescript types",
"problemMatcher": []
}
]
}

3050
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -3,92 +3,107 @@ resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.0.0"
version = "1.14.1"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/mbecker20/komodo"
homepage = "https://komo.do"
[patch.crates-io]
# komodo_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_macros = { path = "lib/macros" }
monitor_client = { path = "client/core/rs" }
# komodo_client = "1.14.1"
komodo_client = { path = "client/core/rs" }
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
logger = { path = "lib/logger" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.3.4", features = ["axum"] }
serror = { version = "0.4.6", default-features = false }
slack = { version = "0.1.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
termination_signal = "0.1.3"
async_timing_util = "0.1.14"
partial_derive2 = "0.3.2"
derive_variants = "0.1.3"
mongo_indexed = "0.2.2"
resolver_api = "0.1.9"
parse_csl = "0.1.0"
mungos = "0.5.4"
svi = "0.1.4"
async_timing_util = "1.0.0"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "2.0.1"
resolver_api = "1.1.1"
toml_pretty = "1.1.2"
mungos = "1.1.0"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.37.0", features = ["full"] }
reqwest = { version = "0.12.3", features = ["json"] }
tokio-util = "0.7.10"
tokio = { version = "1.40.0", features = ["full"] }
reqwest = { version = "0.12.7", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
futures-util = "0.3.30"
async-trait = "0.1.80"
async-recursion = "1.1.0"
# SERVER
axum = { version = "0.7.5", features = ["ws", "json"] }
axum-extra = { version = "0.9.3", features = ["typed-header"] }
tower = { version = "0.4.13", features = ["timeout"] }
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
tokio-tungstenite = "0.21.0"
tokio-tungstenite = "0.23.1"
# SER/DE
serde = { version = "1.0.198", features = ["derive"] }
strum = { version = "0.26.2", features = ["derive"] }
serde_json = "1.0.116"
toml = "0.8.12"
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.209", features = ["derive"] }
strum = { version = "0.26.3", features = ["derive"] }
serde_json = "1.0.127"
serde_yaml = "0.9.34"
toml = "0.8.19"
# ERROR
anyhow = "1.0.82"
thiserror = "1.0.58"
anyhow = "1.0.86"
thiserror = "1.0.63"
# LOGGING
opentelemetry_sdk = { version = "0.22.1", features = ["rt-tokio"] }
opentelemetry_sdk = { version = "0.24.1", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.18", features = ["json"] }
tracing-opentelemetry = "0.23.0"
opentelemetry-otlp = "0.15.0"
opentelemetry = "0.22.0"
opentelemetry-semantic-conventions = "0.16.0"
tracing-opentelemetry = "0.25.0"
opentelemetry-otlp = "0.17.0"
opentelemetry = "0.24.0"
tracing = "0.1.40"
# CONFIG
clap = { version = "4.5.4", features = ["derive"] }
dotenv = "0.15.0"
clap = { version = "4.5.16", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
uuid = { version = "1.10.0", features = ["v4", "fast-rng", "serde"] }
urlencoding = "2.1.3"
rand = "0.8.5"
jwt = "0.16.0"
nom_pem = "4.0.0"
bcrypt = "0.15.1"
base64 = "0.22.1"
hmac = "0.12.1"
sha2 = "0.10.8"
bcrypt = "0.15.1"
rand = "0.8.5"
jwt = "0.16.0"
hex = "0.4.3"
# SYSTEM
bollard = "0.16.1"
sysinfo = "0.30.11"
bollard = "0.17.1"
sysinfo = "0.31.4"
# CLOUD
aws-config = "1.2.0"
aws-sdk-ec2 = "1.34.0"
aws-config = "1.5.5"
aws-sdk-ec2 = "1.70.0"
aws-sdk-ecr = "1.42.0"
# MISC
derive_builder = "0.20.0"
typeshare = "1.0.2"
derive_builder = "0.20.1"
typeshare = "1.0.3"
octorust = "0.7.0"
colored = "2.1.0"
regex = "1.10.6"
bson = "2.11.0"

View File

@@ -1,23 +1,23 @@
[package]
name = "alert_logger"
name = "alerter"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# local
monitor_client.workspace = true
komodo_client.workspace = true
logger.workspace = true
# mogh
termination_signal.workspace = true
# external
tokio.workspace = true
tracing.workspace = true
axum.workspace = true
anyhow.workspace = true
serde.workspace = true
dotenv.workspace = true
dotenvy.workspace = true
envy.workspace = true

View File

@@ -1,11 +1,11 @@
FROM rust:1.71.1 as builder
FROM rust:1.80.1 as builder
WORKDIR /builder
COPY . .
RUN cargo build -p alert_logger --release
FROM gcr.io/distroless/cc
FROM gcr.io/distroless/debian-cc
COPY --from=builder /builder/target/release/alert_logger /

4
bin/alerter/README.md Normal file
View File

@@ -0,0 +1,4 @@
# Alerter
This crate sets up a basic axum server that listens for incoming alert POSTs.
It can be used as a Komodo alerting endpoint, and serves as a template for other custom alerter implementations.

View File

@@ -5,11 +5,8 @@ use std::{net::SocketAddr, str::FromStr};
use anyhow::Context;
use axum::{routing::post, Json, Router};
use monitor_client::entities::{
alert::Alert, server::stats::SeverityLevel,
};
use komodo_client::entities::alert::{Alert, SeverityLevel};
use serde::Deserialize;
use termination_signal::tokio::immediate_term_handle;
#[derive(Deserialize)]
struct Env {
@@ -22,7 +19,7 @@ fn default_port() -> u16 {
}
async fn app() -> anyhow::Result<()> {
dotenv::dotenv().ok();
dotenvy::dotenv().ok();
logger::init(&Default::default())?;
let Env { port } =
@@ -57,13 +54,15 @@ async fn app() -> anyhow::Result<()> {
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let term_signal = immediate_term_handle()?;
let mut term_signal = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::terminate(),
)?;
let app = tokio::spawn(app());
tokio::select! {
res = app => return res?,
_ = term_signal => {},
res = app => return res?,
_ = term_signal.recv() => {},
}
Ok(())

View File

@@ -1,24 +1,29 @@
[package]
name = "monrun"
name = "komodo_cli"
description = "Command line tool to execute Komodo actions"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
[[bin]]
name = "komodo"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# local client
monitor_client.workspace = true
# mogh
partial_derive2.workspace = true
# local
komodo_client.workspace = true
# external
tracing-subscriber.workspace = true
merge_config_files.workspace = true
futures.workspace = true
tracing.workspace = true
colored.workspace = true
anyhow.workspace = true
tokio.workspace = true
serde.workspace = true
strum.workspace = true
toml.workspace = true
clap.workspace = true
futures.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true

118
bin/cli/README.md Normal file
View File

@@ -0,0 +1,118 @@
# Komodo CLI
Komodo CLI is a tool to execute actions on your Komodo instance from shell scripts.
## Install
```sh
cargo install komodo_cli
```
Note: On Ubuntu, also requires `apt install build-essential pkg-config libssl-dev`.
## Usage
### Credentials
Configure a file `~/.config/komodo/creds.toml` file with contents:
```toml
url = "https://your.komodo.address"
key = "YOUR-API-KEY"
secret = "YOUR-API-SECRET"
```
Note. You can specify a different creds file by using `--creds ./other/path.toml`.
You can also bypass using any file and pass the information using `--url`, `--key`, `--secret`:
```sh
komodo --url "https://your.komodo.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
```
### Run Executions
```sh
# Triggers an example build
komodo execute run-build test_build
```
#### Manual
`komodo --help`
```md
Command line tool to execute Komodo actions
Usage: komodo [OPTIONS] <COMMAND>
Commands:
execute Runs an execution
help Print this message or the help of the given subcommand(s)
Options:
--creds <CREDS> The path to a creds file [default: /Users/max/.config/komodo/creds.toml]
--url <URL> Pass url in args instead of creds file
--key <KEY> Pass api key in args instead of creds file
--secret <SECRET> Pass api secret in args instead of creds file
-y, --yes Always continue on user confirmation prompts
-h, --help Print help (see more with '--help')
-V, --version Print version
```
`komodo execute --help`
```md
Runs an execution
Usage: komodo execute <COMMAND>
Commands:
none The "null" execution. Does nothing
run-procedure Runs the target procedure. Response: [Update]
run-build Runs the target build. Response: [Update]
cancel-build Cancels the target build. Only does anything if the build is `building` when called. Response: [Update]
deploy Deploys the container for the target deployment. Response: [Update]
start-deployment Starts the container for the target deployment. Response: [Update]
restart-deployment Restarts the container for the target deployment. Response: [Update]
pause-deployment Pauses the container for the target deployment. Response: [Update]
unpause-deployment Unpauses the container for the target deployment. Response: [Update]
stop-deployment Stops the container for the target deployment. Response: [Update]
destroy-deployment Stops and destroys the container for the target deployment. Reponse: [Update]
clone-repo Clones the target repo. Response: [Update]
pull-repo Pulls the target repo. Response: [Update]
build-repo Builds the target repo, using the attached builder. Response: [Update]
cancel-repo-build Cancels the target repo build. Only does anything if the repo build is `building` when called. Response: [Update]
start-container Starts the container on the target server. Response: [Update]
restart-container Restarts the container on the target server. Response: [Update]
pause-container Pauses the container on the target server. Response: [Update]
unpause-container Unpauses the container on the target server. Response: [Update]
stop-container Stops the container on the target server. Response: [Update]
destroy-container Stops and destroys the container on the target server. Reponse: [Update]
start-all-containers Starts all containers on the target server. Response: [Update]
restart-all-containers Restarts all containers on the target server. Response: [Update]
pause-all-containers Pauses all containers on the target server. Response: [Update]
unpause-all-containers Unpauses all containers on the target server. Response: [Update]
stop-all-containers Stops all containers on the target server. Response: [Update]
prune-containers Prunes the docker containers on the target server. Response: [Update]
delete-network Delete a docker network. Response: [Update]
prune-networks Prunes the docker networks on the target server. Response: [Update]
delete-image Delete a docker image. Response: [Update]
prune-images Prunes the docker images on the target server. Response: [Update]
delete-volume Delete a docker volume. Response: [Update]
prune-volumes Prunes the docker volumes on the target server. Response: [Update]
prune-system Prunes the docker system on the target server, including volumes. Response: [Update]
run-sync Runs the target resource sync. Response: [Update]
deploy-stack Deploys the target stack. `docker compose up`. Response: [Update]
start-stack Starts the target stack. `docker compose start`. Response: [Update]
restart-stack Restarts the target stack. `docker compose restart`. Response: [Update]
pause-stack Pauses the target stack. `docker compose pause`. Response: [Update]
unpause-stack Unpauses the target stack. `docker compose unpause`. Response: [Update]
stop-stack Starts the target stack. `docker compose stop`. Response: [Update]
destroy-stack Destoys the target stack. `docker compose down`. Response: [Update]
sleep
help Print this message or the help of the given subcommand(s)
Options:
-h, --help Print help
```
### --yes
You can use `--yes` to avoid any human prompt to continue, for use in automated environments.

55
bin/cli/src/args.rs Normal file
View File

@@ -0,0 +1,55 @@
use clap::{Parser, Subcommand};
use komodo_client::api::execute::Execution;
use serde::Deserialize;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
pub struct CliArgs {
/// Sync or Exec
#[command(subcommand)]
pub command: Command,
/// The path to a creds file.
///
/// Note: If each of `url`, `key` and `secret` are passed,
/// no file is required at this path.
#[arg(long, default_value_t = default_creds())]
pub creds: String,
/// Pass url in args instead of creds file
#[arg(long)]
pub url: Option<String>,
/// Pass api key in args instead of creds file
#[arg(long)]
pub key: Option<String>,
/// Pass api secret in args instead of creds file
#[arg(long)]
pub secret: Option<String>,
/// Always continue on user confirmation prompts.
#[arg(long, short, default_value_t = false)]
pub yes: bool,
}
fn default_creds() -> String {
let home =
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
format!("{home}/.config/komodo/creds.toml")
}
#[derive(Debug, Clone, Subcommand)]
pub enum Command {
/// Runs an execution
Execute {
#[command(subcommand)]
execution: Execution,
},
// Room for more
}
#[derive(Debug, Deserialize)]
pub struct CredsFile {
pub url: String,
pub key: String,
pub secret: String,
}

298
bin/cli/src/exec.rs Normal file
View File

@@ -0,0 +1,298 @@
use std::time::Duration;
use colored::Colorize;
use komodo_client::api::execute::Execution;
use crate::{
helpers::wait_for_enter,
state::{cli_args, komodo_client},
};
pub async fn run(execution: Execution) -> anyhow::Result<()> {
if matches!(execution, Execution::None(_)) {
println!("Got 'none' execution. Doing nothing...");
tokio::time::sleep(Duration::from_secs(3)).await;
println!("Finished doing nothing. Exiting...");
std::process::exit(0);
}
println!("\n{}: Execution", "Mode".dimmed());
match &execution {
Execution::None(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunProcedure(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Deploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DestroyDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelRepoBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DestroyContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeleteNetwork(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneNetworks(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeleteImage(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneImages(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeleteVolume(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneVolumes(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneSystem(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunSync(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DestroyStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Sleep(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
}
if !cli_args().yes {
wait_for_enter("run execution")?;
}
info!("Running Execution...");
let res = match execution {
Execution::RunProcedure(request) => {
komodo_client().execute(request).await
}
Execution::RunBuild(request) => {
komodo_client().execute(request).await
}
Execution::CancelBuild(request) => {
komodo_client().execute(request).await
}
Execution::Deploy(request) => {
komodo_client().execute(request).await
}
Execution::StartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::RestartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::PauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::StopDeployment(request) => {
komodo_client().execute(request).await
}
Execution::DestroyDeployment(request) => {
komodo_client().execute(request).await
}
Execution::CloneRepo(request) => {
komodo_client().execute(request).await
}
Execution::PullRepo(request) => {
komodo_client().execute(request).await
}
Execution::BuildRepo(request) => {
komodo_client().execute(request).await
}
Execution::CancelRepoBuild(request) => {
komodo_client().execute(request).await
}
Execution::StartContainer(request) => {
komodo_client().execute(request).await
}
Execution::RestartContainer(request) => {
komodo_client().execute(request).await
}
Execution::PauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::StopContainer(request) => {
komodo_client().execute(request).await
}
Execution::DestroyContainer(request) => {
komodo_client().execute(request).await
}
Execution::StartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::RestartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::StopAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PruneContainers(request) => {
komodo_client().execute(request).await
}
Execution::DeleteNetwork(request) => {
komodo_client().execute(request).await
}
Execution::PruneNetworks(request) => {
komodo_client().execute(request).await
}
Execution::DeleteImage(request) => {
komodo_client().execute(request).await
}
Execution::PruneImages(request) => {
komodo_client().execute(request).await
}
Execution::DeleteVolume(request) => {
komodo_client().execute(request).await
}
Execution::PruneVolumes(request) => {
komodo_client().execute(request).await
}
Execution::PruneSystem(request) => {
komodo_client().execute(request).await
}
Execution::RunSync(request) => {
komodo_client().execute(request).await
}
Execution::DeployStack(request) => {
komodo_client().execute(request).await
}
Execution::StartStack(request) => {
komodo_client().execute(request).await
}
Execution::RestartStack(request) => {
komodo_client().execute(request).await
}
Execution::PauseStack(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseStack(request) => {
komodo_client().execute(request).await
}
Execution::StopStack(request) => {
komodo_client().execute(request).await
}
Execution::DestroyStack(request) => {
komodo_client().execute(request).await
}
Execution::Sleep(request) => {
let duration =
Duration::from_millis(request.duration_ms as u64);
tokio::time::sleep(duration).await;
println!("Finished sleeping!");
std::process::exit(0)
}
Execution::None(_) => unreachable!(),
};
match res {
Ok(update) => println!("\n{}: {update:#?}", "SUCCESS".green()),
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
}
Ok(())
}

17
bin/cli/src/helpers.rs Normal file
View File

@@ -0,0 +1,17 @@
use std::io::Read;
use anyhow::Context;
use colored::Colorize;
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
println!(
"\nPress {} to {}\n",
"ENTER".green(),
press_enter_to.bold()
);
let buffer = &mut [0u8];
std::io::stdin()
.read_exact(buffer)
.context("failed to read ENTER")?;
Ok(())
}

32
bin/cli/src/main.rs Normal file
View File

@@ -0,0 +1,32 @@
#[macro_use]
extern crate tracing;
use colored::Colorize;
use komodo_client::api::read::GetVersion;
mod args;
mod exec;
mod helpers;
mod state;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt().with_target(false).init();
info!(
"Komodo CLI version: {}",
env!("CARGO_PKG_VERSION").blue().bold()
);
let version =
state::komodo_client().read(GetVersion {}).await?.version;
info!("Komodo Core version: {}", version.blue().bold());
match &state::cli_args().command {
args::Command::Execute { execution } => {
exec::run(execution.to_owned()).await?
}
}
Ok(())
}

46
bin/cli/src/state.rs Normal file
View File

@@ -0,0 +1,46 @@
use std::sync::OnceLock;
use clap::Parser;
use komodo_client::KomodoClient;
use merge_config_files::parse_config_file;
pub fn cli_args() -> &'static crate::args::CliArgs {
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
}
pub fn komodo_client() -> &'static KomodoClient {
static KOMODO_CLIENT: OnceLock<KomodoClient> = OnceLock::new();
KOMODO_CLIENT.get_or_init(|| {
let args = cli_args();
let crate::args::CredsFile { url, key, secret } =
match (&args.url, &args.key, &args.secret) {
(Some(url), Some(key), Some(secret)) => {
crate::args::CredsFile {
url: url.clone(),
key: key.clone(),
secret: secret.clone(),
}
}
(url, key, secret) => {
let mut creds: crate::args::CredsFile =
parse_config_file(cli_args().creds.as_str())
.expect("failed to parse Komodo credentials");
if let Some(url) = url {
creds.url.clone_from(url);
}
if let Some(key) = key {
creds.key.clone_from(key);
}
if let Some(secret) = secret {
creds.secret.clone_from(secret);
}
creds
}
};
futures::executor::block_on(KomodoClient::new(url, key, secret))
.expect("failed to initialize Komodo client")
})
}

View File

@@ -1,9 +1,11 @@
[package]
name = "monitor_core"
name = "komodo_core"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
[[bin]]
name = "core"
@@ -13,45 +15,54 @@ path = "src/main.rs"
[dependencies]
# local
monitor_client.workspace = true
komodo_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
formatting.workspace = true
logger.workspace = true
git.workspace = true
# mogh
async_timing_util.workspace = true
serror = { workspace = true, features = ["axum"] }
merge_config_files.workspace = true
parse_csl.workspace = true
termination_signal.workspace = true
resolver_api.workspace = true
mungos.workspace = true
async_timing_util.workspace = true
partial_derive2.workspace = true
derive_variants.workspace = true
mongo_indexed.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
run_command.workspace = true
mungos.workspace = true
slack.workspace = true
serror.workspace = true
svi.workspace = true
# external
tokio.workspace = true
ordered_hash_map.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-sdk-ecr.workspace = true
aws-config.workspace = true
tokio-util.workspace = true
axum.workspace = true
axum-extra.workspace = true
tower.workspace = true
tower-http.workspace = true
serde.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
typeshare.workspace = true
octorust.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
nom_pem.workspace = true
anyhow.workspace = true
dotenvy.workspace = true
bcrypt.workspace = true
base64.workspace = true
tokio.workspace = true
serde.workspace = true
regex.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true
anyhow.workspace = true
tracing.workspace = true
dotenv.workspace = true
envy.workspace = true
reqwest.workspace = true
urlencoding.workspace = true
rand.workspace = true
jwt.workspace = true
hmac.workspace = true
sha2.workspace = true
bcrypt.workspace = true
jwt.workspace = true
hex.workspace = true
async-trait.workspace = true
async-recursion.workspace = true
futures.workspace = true
aws-config.workspace = true
aws-sdk-ec2.workspace = true
typeshare.workspace = true

View File

@@ -1,23 +1,39 @@
# Build Core
FROM rust:1.77.2-bullseye as core-builder
FROM rust:1.80.1-bookworm AS core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_core --release
RUN cargo build -p komodo_core --release
# Build Frontend
FROM node:20.12-alpine as frontend-builder
FROM node:20.12-alpine AS frontend-builder
WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @monitor/client && yarn && yarn build
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
# Final Image
# FROM gcr.io/distroless/cc
FROM debian:bullseye-slim
RUN apt update && apt install -y ca-certificates
FROM debian:bookworm-slim
# Install Deps
RUN apt update && apt install -y git curl unzip ca-certificates && \
curl -SL https://github.com/docker/compose/releases/download/v2.29.1/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose && \
chmod +x /usr/local/bin/docker-compose && \
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install
# Copy
COPY ./config_example/core.config.example.toml /config/config.toml
COPY --from=core-builder /builder/target/release/core /
COPY --from=frontend-builder /builder/frontend/dist /frontend
# Hint at the port
EXPOSE 9000
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
CMD ["./core"]

View File

@@ -1,13 +1,13 @@
use std::{sync::OnceLock, time::Instant};
use anyhow::anyhow;
use async_trait::async_trait;
use axum::{http::HeaderMap, routing::post, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::auth::*, entities::user::User};
use komodo_client::{api::auth::*, entities::user::User};
use reqwest::StatusCode;
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use serror::{AddStatusCode, Json};
use typeshare::typeshare;
use uuid::Uuid;
@@ -71,7 +71,10 @@ async fn handler(
}
let elapsed = timer.elapsed();
debug!("/auth request {req_id} | resolve time: {elapsed:?}");
Ok((TypedHeader(ContentType::json()), res?))
Ok((
TypedHeader(ContentType::json()),
res.status_code(StatusCode::UNAUTHORIZED)?,
))
}
fn login_options_reponse() -> &'static GetLoginOptionsResponse {
@@ -92,7 +95,6 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
})
}
#[async_trait]
impl Resolve<GetLoginOptions, HeaderMap> for State {
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
async fn resolve(
@@ -104,7 +106,6 @@ impl Resolve<GetLoginOptions, HeaderMap> for State {
}
}
#[async_trait]
impl Resolve<ExchangeForJwt, HeaderMap> for State {
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
async fn resolve(
@@ -118,7 +119,6 @@ impl Resolve<ExchangeForJwt, HeaderMap> for State {
}
}
#[async_trait]
impl Resolve<GetUser, HeaderMap> for State {
#[instrument(name = "GetUser", level = "debug", skip(self))]
async fn resolve(

View File

@@ -1,71 +1,78 @@
use std::{str::FromStr, time::Duration};
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::execute::{
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
},
use komodo_client::{
api::execute::{CancelBuild, Deploy, RunBuild},
entities::{
alert::{Alert, AlertData, SeverityLevel},
all_logs_success,
build::Build,
builder::{AwsBuilderConfig, Builder, BuilderConfig},
deployment::DockerContainerState,
monitor_timestamp,
build::{Build, ImageRegistry, StandardRegistryConfig},
builder::{Builder, BuilderConfig},
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
deployment::DeploymentState,
komodo_timestamp,
permission::PermissionLevel,
server::Server,
server_template::AwsServerTemplateConfig,
to_komodo_name,
update::{Log, Update},
user::{auto_redeploy_user, User},
Operation,
},
};
use mungos::{
by_id::update_one_by_id,
find::find_collect,
mongodb::bson::{doc, oid::ObjectId, to_bson},
};
use periphery_client::{
api::{self, GetVersionResponse},
PeripheryClient,
mongodb::{
bson::{doc, to_bson, to_document},
options::FindOneOptions,
},
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::{serialize_error, serialize_error_pretty};
use tokio_util::sync::CancellationToken;
use crate::{
cloud::{
aws::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
BuildCleanupData,
},
cloud::aws::ecr,
config::core_config,
helpers::{
alert::send_alerts,
builder::{cleanup_builder_instance, get_builder_periphery},
channel::build_cancel_channel,
periphery_client,
query::get_deployment_state,
resource::StateResource,
update::{add_update, make_update, update_update},
git_token,
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_extra_args,
interpolate_variables_secrets_into_system_command,
},
query::{get_deployment_state, get_variables_and_secrets},
registry_token,
update::{init_execution_update, update_update},
},
resource::{self, refresh_build_state_cache},
state::{action_states, db_client, State},
};
#[async_trait]
impl Resolve<RunBuild, User> for State {
#[instrument(name = "RunBuild", skip(self, user))]
use super::ExecuteRequest;
impl Resolve<RunBuild, (User, Update)> for State {
#[instrument(name = "RunBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunBuild { build }: RunBuild,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let mut build = Build::get_resource_check_permissions(
let mut build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Execute,
)
.await?;
let vars_and_secrets = get_variables_and_secrets().await?;
if build.config.builder_id.is_empty() {
return Err(anyhow!("Must attach builder to RunBuild"));
}
// get the action state for the build (or insert default).
let action_state =
@@ -76,11 +83,24 @@ impl Resolve<RunBuild, User> for State {
let _action_guard =
action_state.update(|state| state.building = true)?;
build.config.version.increment();
if build.config.auto_increment_version {
build.config.version.increment();
}
update.version = build.config.version;
update_update(update.clone()).await?;
let mut update = make_update(&build, Operation::RunBuild, &user);
update.in_progress();
update.version = build.config.version.clone();
let git_token = git_token(
&build.config.git_provider,
&build.config.git_account,
|https| build.config.git_https = https,
)
.await
.with_context(
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", build.config.git_provider, build.config.git_account),
)?;
let (registry_token, aws_ecr) =
validate_account_extract_registry_token_aws_ecr(&build).await?;
let cancel = CancellationToken::new();
let cancel_clone = cancel.clone();
@@ -88,6 +108,12 @@ impl Resolve<RunBuild, User> for State {
build_cancel_channel().receiver.resubscribe();
let build_id = build.id.clone();
let builder =
resource::get::<Builder>(&build.config.builder_id).await?;
let is_server_builder =
matches!(&builder.config, BuilderConfig::Server(_));
tokio::spawn(async move {
let poll = async {
loop {
@@ -96,17 +122,19 @@ impl Resolve<RunBuild, User> for State {
id = cancel_recv.recv() => id?
};
if incoming_build_id == build_id {
info!("build cancel acknowledged");
update.push_simple_log(
"cancel acknowledged",
"the build cancellation has been queud, it may still take some time",
);
if is_server_builder {
update.push_error_log("Cancel acknowledged", "Build cancellation is not possible on server builders at this time. Use an AWS builder to enable this feature.");
} else {
update.push_simple_log("Cancel acknowledged", "The build cancellation has been queued, it may still take some time.");
}
update.finalize();
let id = update.id.clone();
if let Err(e) = update_update(update).await {
warn!("failed to update Update {id} | {e:#}");
warn!("failed to modify Update {id} on db | {e:#}");
}
if !is_server_builder {
cancel_clone.cancel();
}
cancel_clone.cancel();
return Ok(());
}
}
@@ -114,346 +142,402 @@ impl Resolve<RunBuild, User> for State {
anyhow::Ok(())
};
tokio::select! {
_ = cancel_clone.cancelled() => {}
_ = poll => {}
_ = cancel_clone.cancelled() => {}
_ = poll => {}
}
});
update.id = add_update(update.clone()).await?;
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) =
match get_build_builder(&build, &mut update).await {
Ok(builder) => {
info!("got builder for build");
builder
}
Err(e) => {
warn!("failed to get builder | {e:#}");
update.logs.push(Log::error(
"get builder",
serialize_error_pretty(&e),
));
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
};
let core_config = core_config();
let (periphery, cleanup_data) = match get_builder_periphery(
build.name.clone(),
Some(build.config.version),
builder,
&mut update,
)
.await
{
Ok(builder) => builder,
Err(e) => {
warn!(
"failed to get builder for build {} | {e:#}",
build.name
);
update.logs.push(Log::error(
"get builder",
format_serror(&e.context("failed to get builder").into()),
));
return handle_early_return(
update, build.id, build.name, false,
)
.await;
}
};
// CLONE REPO
let github_token = core_config
.github_accounts
.get(&build.config.github_account)
.cloned();
let secret_replacers = if !build.config.skip_secret_interp {
// Interpolate variables / secrets into pre build command
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut build.config.pre_build,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
} else {
Default::default()
};
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
args: (&build).into(),
github_token,
git_token,
environment: Default::default(),
env_file_path: Default::default(),
skip_secret_interp: Default::default(),
replacers: secret_replacers.into_iter().collect(),
}) => res,
_ = cancel.cancelled() => {
info!("build cancelled during clone, cleaning up builder");
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
update.finalize();
update_update(update.clone()).await?;
return Ok(update)
return handle_early_return(update, build.id, build.name, true).await
},
};
match res {
Ok(clone_logs) => {
info!("finished repo clone");
update.logs.extend(clone_logs);
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
update.logs.extend(res.logs);
update.commit_hash =
res.commit_hash.unwrap_or_default().to_string();
res.commit_message.unwrap_or_default()
}
Err(e) => {
warn!("failed build at clone repo | {e:#}");
update.push_error_log("clone repo", serialize_error(&e));
update.push_error_log(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
Default::default()
}
}
};
update_update(update.clone()).await?;
if all_logs_success(&update.logs) {
let docker_token = core_config
.docker_accounts
.get(&build.config.docker_account)
.cloned();
let secret_replacers = if !build.config.skip_secret_interp {
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut build.config.build_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut build.config.secret_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut build.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
} else {
Default::default()
};
let res = tokio::select! {
res = periphery
.request(api::build::Build {
build: build.clone(),
docker_token,
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
// Push a commit hash tagged image
additional_tags: if update.commit_hash.is_empty() {
Default::default()
} else {
vec![update.commit_hash.clone()]
},
}) => res.context("failed at call to periphery to build"),
_ = cancel.cancelled() => {
info!("build cancelled during build, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
update.finalize();
update_update(update.clone()).await?;
return Ok(update)
return handle_early_return(update, build.id, build.name, true).await
},
};
match res {
Ok(logs) => {
info!("finished build");
debug!("finished build");
update.logs.extend(logs);
}
Err(e) => {
warn!("error in build | {e:#}");
update.push_error_log("build", serialize_error(&e))
update.push_error_log(
"build",
format_serror(&e.context("failed to build").into()),
)
}
};
}
update.finalize();
let db = db_client().await;
if update.success {
let _ = db_client()
.await
let _ = db
.builds
.update_one(
doc! { "_id": ObjectId::from_str(&build.id)? },
doc! {
"$set": {
"config.version": to_bson(&build.config.version)
.context("failed at converting version to bson")?,
"info.last_built_at": monitor_timestamp(),
}
},
None,
doc! { "name": &build.name },
doc! { "$set": {
"config.version": to_bson(&build.config.version)
.context("failed at converting version to bson")?,
"info.last_built_at": komodo_timestamp(),
"info.built_hash": &update.commit_hash,
"info.built_message": commit_message
}},
)
.await;
}
// stop the cancel listening task from going forever
cancel.cancel();
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder instance cleaned up");
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_build_state_cache().await;
}
update_update(update.clone()).await?;
if update.success {
handle_post_build_redeploy(&build.id).await;
info!("post build redeploy handled");
// don't hold response up for user
tokio::spawn(async move {
handle_post_build_redeploy(&build.id).await;
});
} else {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build.id,
name: build.name,
version,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
}
#[async_trait]
impl Resolve<CancelBuild, User> for State {
#[instrument(name = "CancelBuild", skip(self, user))]
#[instrument(skip(update))]
async fn handle_early_return(
mut update: Update,
build_id: String,
build_name: String,
is_cancel: bool,
) -> anyhow::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db_client().await.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_build_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build_id,
name: build_name,
version,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
if let ExecuteRequest::CancelBuild(req) = request {
let build = resource::get::<Build>(&req.build).await?;
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates
.find_one(doc! {
"operation": "RunBuild",
"target.id": &build.id,
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future(),
db.updates
.find_one(doc! {
"operation": "CancelBuild",
"target.id": &build.id,
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future()
)?;
match (latest_build, latest_cancel) {
(Some(build), Some(cancel)) => {
if cancel.start_ts > build.start_ts {
return Err(anyhow!("Build has already been cancelled"));
}
}
(None, _) => return Err(anyhow!("No build in progress")),
_ => {}
};
}
Ok(())
}
impl Resolve<CancelBuild, (User, Update)> for State {
#[instrument(name = "CancelBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
CancelBuild { build }: CancelBuild,
user: User,
) -> anyhow::Result<CancelBuildResponse> {
let build = Build::get_resource_check_permissions(
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Execute,
)
.await?;
// check if theres already an open cancel build update
if db_client()
// make sure the build is building
if !action_states()
.build
.get(&build.id)
.await
.updates
.find_one(
doc! {
"operation": "CancelBuild",
"status": "InProgress",
"target.id": &build.id,
},
None,
)
.await
.context("failed to query updates")?
.is_some()
.and_then(|s| s.get().ok().map(|s| s.building))
.unwrap_or_default()
{
return Err(anyhow!("Build cancel is already in progress"));
return Err(anyhow!("Build is not building."));
}
let mut update =
make_update(&build, Operation::CancelBuild, &user);
update.push_simple_log(
"cancel triggered",
"the build cancel has been triggered",
);
update.in_progress();
update.id =
add_update(make_update(&build, Operation::CancelBuild, &user))
.await?;
update_update(update.clone()).await?;
build_cancel_channel()
.sender
.lock()
.await
.send((build.id, update))?;
.send((build.id, update.clone()))?;
Ok(CancelBuildResponse {})
}
}
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
#[instrument]
async fn get_build_builder(
build: &Build,
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
if build.config.builder_id.is_empty() {
return Err(anyhow!("build has not configured a builder"));
}
let builder =
Builder::get_resource(&build.config.builder_id).await?;
match builder.config {
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
return Err(anyhow!("builder has not configured a server"));
}
let server = Server::get_resource(&config.server_id).await?;
let periphery = periphery_client(&server)?;
Ok((
periphery,
BuildCleanupData::Server {
repo_name: build.name.clone(),
},
))
}
BuilderConfig::Aws(config) => {
get_aws_builder(build, config, update).await
}
}
}
#[instrument]
async fn get_aws_builder(
build: &Build,
config: AwsBuilderConfig,
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
let start_create_ts = monitor_timestamp();
let instance_name = format!(
"BUILDER-{}-v{}",
build.name,
build.config.version.to_string()
);
let Ec2Instance { instance_id, ip } = launch_ec2_instance(
&instance_name,
AwsServerTemplateConfig::from_builder_config(&config),
)
.await?;
info!("ec2 instance launched");
let log = Log {
stage: "start build instance".to_string(),
success: true,
stdout: start_aws_builder_log(&instance_id, &ip, &config),
start_ts: start_create_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
update.logs.push(log);
update_update(update.clone()).await?;
let periphery_address = format!("http://{ip}:{}", config.port);
let periphery =
PeripheryClient::new(&periphery_address, &core_config().passkey);
let start_connect_ts = monitor_timestamp();
let mut res = Ok(GetVersionResponse {
version: String::new(),
});
for _ in 0..BUILDER_POLL_MAX_TRIES {
let version = periphery
.request(api::GetVersion {})
// Make sure cancel is set to complete after some time in case
// no reciever is there to do it. Prevents update stuck in InProgress.
let update_id = update.id.clone();
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(60)).await;
if let Err(e) = update_one_by_id(
&db_client().await.updates,
&update_id,
doc! { "$set": { "status": "Complete" } },
None,
)
.await
.context("failed to reach periphery client on builder");
if let Ok(GetVersionResponse { version }) = &version {
let connect_log = Log {
stage: "build instance connected".to_string(),
success: true,
stdout: format!(
"established contact with periphery on builder\nperiphery version: v{}",
version
),
start_ts: start_connect_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
update.logs.push(connect_log);
update_update(update.clone()).await?;
return Ok((
periphery,
BuildCleanupData::Aws {
instance_id,
region: config.region,
},
));
}
res = version;
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
.await;
}
tokio::spawn(async move {
let _ =
terminate_ec2_instance_with_retry(config.region, &instance_id)
.await;
});
{
warn!("failed to set CancelBuild Update status Complete after timeout | {e:#}")
}
});
// Unwrap is safe, only way to get here is after check Ok / early return, so it must be err
Err(res.err().unwrap())
}
#[instrument(skip(periphery))]
async fn cleanup_builder_instance(
periphery: PeripheryClient,
cleanup_data: BuildCleanupData,
update: &mut Update,
) {
match cleanup_data {
BuildCleanupData::Server { repo_name } => {
let _ = periphery
.request(api::git::DeleteRepo { name: repo_name })
.await;
}
BuildCleanupData::Aws {
instance_id,
region,
} => {
let _instance_id = instance_id.clone();
tokio::spawn(async move {
let _ =
terminate_ec2_instance_with_retry(region, &_instance_id)
.await;
});
update.push_simple_log(
"terminate instance",
format!("termination queued for instance id {instance_id}"),
);
}
Ok(update)
}
}
@@ -478,57 +562,103 @@ async fn handle_post_build_redeploy(build_id: &str) {
.map(|deployment| async move {
let state =
get_deployment_state(&deployment).await.unwrap_or_default();
if state == DockerContainerState::Running {
let res = State
.resolve(
Deploy {
deployment: deployment.id.clone(),
stop_signal: None,
stop_time: None,
},
auto_redeploy_user().to_owned(),
)
.await;
if state == DeploymentState::Running {
let req = super::ExecuteRequest::Deploy(Deploy {
deployment: deployment.id.clone(),
stop_signal: None,
stop_time: None,
});
let user = auto_redeploy_user().to_owned();
let res = async {
let update = init_execution_update(&req, &user).await?;
State
.resolve(
Deploy {
deployment: deployment.id.clone(),
stop_signal: None,
stop_time: None,
},
(user, update),
)
.await
}
.await;
Some((deployment.id.clone(), res))
} else {
None
}
});
let redeploy_results = join_all(futures).await;
let mut redeploys = Vec::<String>::new();
let mut redeploy_failures = Vec::<String>::new();
for res in redeploy_results {
if res.is_none() {
for res in join_all(futures).await {
let Some((id, res)) = res else {
continue;
}
let (id, res) = res.unwrap();
match res {
Ok(_) => redeploys.push(id),
Err(e) => redeploy_failures.push(format!("{id}: {e:#?}")),
};
if let Err(e) = res {
warn!("failed post build redeploy for deployment {id}: {e:#}");
}
}
}
fn start_aws_builder_log(
instance_id: &str,
ip: &str,
config: &AwsBuilderConfig,
) -> String {
let AwsBuilderConfig {
ami_id,
instance_type,
volume_gb,
subnet_id,
assign_public_ip,
security_group_ids,
use_public_ip,
..
} = config;
/// This will make sure that a build with non-none image registry has an account attached,
/// and will check the core config for a token / aws ecr config matching requirements.
/// Otherwise it is left to periphery.
async fn validate_account_extract_registry_token_aws_ecr(
build: &Build,
) -> anyhow::Result<(Option<String>, Option<AwsEcrConfig>)> {
let (domain, account) = match &build.config.image_registry {
// Early return for None
ImageRegistry::None(_) => return Ok((None, None)),
// Early return for AwsEcr
ImageRegistry::AwsEcr(label) => {
// Note that aws ecr config still only lives in config file
let config = core_config()
.aws_ecr_registries
.iter()
.find(|reg| &reg.label == label);
let token = match config {
Some(AwsEcrConfigWithCredentials {
region,
access_key_id,
secret_access_key,
..
}) => {
let token = ecr::get_ecr_token(
region,
access_key_id,
secret_access_key,
)
.await
.context("failed to get aws ecr token")?;
ecr::maybe_create_repo(
&to_komodo_name(&build.name),
region.to_string(),
access_key_id,
secret_access_key,
)
.await
.context("failed to create aws ecr repo")?;
Some(token)
}
None => None,
};
return Ok((token, config.map(AwsEcrConfig::from)));
}
ImageRegistry::Standard(StandardRegistryConfig {
domain,
account,
..
}) => (domain.as_str(), account),
};
let readable_sec_group_ids = security_group_ids.join(", ");
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use registry provider {domain}"
));
}
format!("instance id: {instance_id}\nip: {ip}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}\nassign public ip: {assign_public_ip}\nuse public ip: {use_public_ip}")
let registry_token = registry_token(domain, account).await.with_context(
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
)?;
Ok((registry_token, None))
}

View File

@@ -1,39 +1,69 @@
use std::collections::HashSet;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use futures::future::join_all;
use monitor_client::{
use formatting::format_serror;
use komodo_client::{
api::execute::*,
entities::{
build::Build,
deployment::{Deployment, DeploymentImage},
get_image_name, monitor_timestamp,
build::{Build, ImageRegistry},
config::core::AwsEcrConfig,
deployment::{
extract_registry_domain, Deployment, DeploymentImage,
},
get_image_name,
permission::PermissionLevel,
server::ServerStatus,
update::{Log, ResourceTarget, Update, UpdateStatus},
server::Server,
update::{Log, Update},
user::User,
Operation, Version,
Version,
},
};
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
cloud::aws::ecr,
config::core_config,
helpers::{
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_container_command,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_extra_args,
},
periphery_client,
query::get_server_with_status,
resource::StateResource,
update::{add_update, make_update, update_update},
query::get_variables_and_secrets,
registry_token,
update::update_update,
},
monitor::update_cache_for_server,
state::{action_states, db_client, State},
resource,
state::{action_states, State},
};
#[async_trait]
impl Resolve<Deploy, User> for State {
#[instrument(name = "Deploy", skip(self, user))]
async fn setup_deployment_execution(
deployment: &str,
user: &User,
) -> anyhow::Result<(Deployment, Server)> {
let deployment = resource::get_check_permissions::<Deployment>(
deployment,
user,
PermissionLevel::Execute,
)
.await?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
Ok((deployment, server))
}
impl Resolve<Deploy, (User, Update)> for State {
#[instrument(name = "Deploy", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
Deploy {
@@ -41,14 +71,10 @@ impl Resolve<Deploy, User> for State {
stop_signal,
stop_time,
}: Deploy,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let mut deployment = Deployment::get_resource_check_permissions(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
let (mut deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -61,59 +87,161 @@ impl Resolve<Deploy, User> for State {
let _action_guard =
action_state.update(|state| state.deploying = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerStatus::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let version = match deployment.config.image {
periphery
.health_check()
.await
.context("Failed server health check, stopping run.")?;
// This block resolves the attached Build to an actual versioned image
let (version, registry_token, aws_ecr) = match &deployment
.config
.image
{
DeploymentImage::Build { build_id, version } => {
let build = Build::get_resource(&build_id).await?;
let image_name = get_image_name(&build);
let build = resource::get::<Build>(build_id).await?;
let image_name = get_image_name(&build, |label| {
core_config()
.aws_ecr_registries
.iter()
.find(|reg| &reg.label == label)
.map(AwsEcrConfig::from)
})
.context("failed to create image name")?;
let version = if version.is_none() {
build.config.version
} else {
version
*version
};
// Remove ending patch if it is 0, this means use latest patch.
let version_str = if version.patch == 0 {
format!("{}.{}", version.major, version.minor)
} else {
version.to_string()
};
// Potentially add the build image_tag postfix
let version_str = if build.config.image_tag.is_empty() {
version_str
} else {
format!("{version_str}-{}", build.config.image_tag)
};
// replace image with corresponding build image.
deployment.config.image = DeploymentImage::Image {
image: format!("{image_name}:{}", version.to_string()),
image: format!("{image_name}:{version_str}"),
};
if deployment.config.docker_account.is_empty() {
deployment.config.docker_account =
build.config.docker_account;
match build.config.image_registry {
ImageRegistry::None(_) => (version, None, None),
ImageRegistry::AwsEcr(label) => {
let config = core_config()
.aws_ecr_registries
.iter()
.find(|reg| reg.label == label)
.with_context(|| {
format!(
"did not find config for aws ecr registry {label}"
)
})?;
let token = ecr::get_ecr_token(
&config.region,
&config.access_key_id,
&config.secret_access_key,
)
.await
.context("failed to create aws ecr login token")?;
(version, Some(token), Some(AwsEcrConfig::from(config)))
}
ImageRegistry::Standard(params) => {
if deployment.config.image_registry_account.is_empty() {
deployment.config.image_registry_account =
params.account
}
let token = if !deployment
.config
.image_registry_account
.is_empty()
{
registry_token(&params.domain, &deployment.config.image_registry_account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {}", params.domain, deployment.config.image_registry_account),
)?
} else {
None
};
(version, token, None)
}
}
version
}
DeploymentImage::Image { .. } => Version::default(),
DeploymentImage::Image { image } => {
let domain = extract_registry_domain(image)?;
let token = if !deployment
.config
.image_registry_account
.is_empty()
{
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
)?
} else {
None
};
(Version::default(), token, None)
}
};
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers = if !deployment.config.skip_secret_interp {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut deployment.config.environment,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut deployment.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_container_command(
&vars_and_secrets,
&mut deployment.config.command,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
} else {
Default::default()
};
let mut update =
make_update(&deployment, Operation::DeployContainer, &user);
update.in_progress();
update.version = version;
update.id = add_update(update.clone()).await?;
let docker_token = core_config()
.docker_accounts
.get(&deployment.config.docker_account)
.cloned();
update_update(update.clone()).await?;
match periphery
.request(api::container::Deploy {
deployment,
stop_signal,
stop_time,
docker_token,
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
@@ -121,7 +249,9 @@ impl Resolve<Deploy, User> for State {
Err(e) => {
update.push_error_log(
"deploy container",
serialize_error_pretty(&e),
format_serror(
&e.context("failed to deploy container").into(),
),
);
}
};
@@ -135,20 +265,15 @@ impl Resolve<Deploy, User> for State {
}
}
#[async_trait]
impl Resolve<StartContainer, User> for State {
#[instrument(name = "StartContainer", skip(self, user))]
impl Resolve<StartDeployment, (User, Update)> for State {
#[instrument(name = "StartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartContainer { deployment }: StartContainer,
user: User,
StartDeployment { deployment }: StartDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = Deployment::get_resource_check_permissions(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -161,73 +286,194 @@ impl Resolve<StartContainer, User> for State {
let _action_guard =
action_state.update(|state| state.starting = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerStatus::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: ResourceTarget::Deployment(deployment.id.clone()),
operation: Operation::StartContainer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::container::StartContainer {
name: deployment.name.clone(),
name: deployment.name,
})
.await
{
Ok(log) => log,
Err(e) => {
Log::error("start container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"start container",
format_serror(&e.context("failed to start container").into()),
),
};
update.logs.push(log);
update.finalize();
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
#[async_trait]
impl Resolve<StopContainer, User> for State {
#[instrument(name = "StopContainer", skip(self, user))]
impl Resolve<RestartDeployment, (User, Update)> for State {
#[instrument(name = "RestartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopContainer {
RestartDeployment { deployment }: RestartDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.restarting = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::RestartContainer {
name: deployment.name,
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"restart container",
format_serror(
&e.context("failed to restart container").into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PauseDeployment, (User, Update)> for State {
#[instrument(name = "PauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PauseDeployment { deployment }: PauseDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pausing = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::PauseContainer {
name: deployment.name,
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"pause container",
format_serror(&e.context("failed to pause container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<UnpauseDeployment, (User, Update)> for State {
#[instrument(name = "UnpauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
UnpauseDeployment { deployment }: UnpauseDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.unpausing = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::UnpauseContainer {
name: deployment.name,
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"unpause container",
format_serror(
&e.context("failed to unpause container").into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StopDeployment, (User, Update)> for State {
#[instrument(name = "StopDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopDeployment {
deployment,
signal,
time,
}: StopContainer,
user: User,
}: StopDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = Deployment::get_resource_check_permissions(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -240,28 +486,14 @@ impl Resolve<StopContainer, User> for State {
let _action_guard =
action_state.update(|state| state.stopping = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerStatus::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let mut update =
make_update(&deployment, Operation::StopContainer, &user);
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::container::StopContainer {
name: deployment.name.clone(),
name: deployment.name,
signal: signal
.unwrap_or(deployment.config.termination_signal)
.into(),
@@ -272,96 +504,14 @@ impl Resolve<StopContainer, User> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
update.finalize();
update_cache_for_server(&server).await;
update_update(update.clone()).await?;
Ok(update)
}
}
#[async_trait]
impl Resolve<StopAllContainers, User> for State {
#[instrument(name = "StopAllContainers", skip(self, user))]
async fn resolve(
&self,
StopAllContainers { server }: StopAllContainers,
user: User,
) -> anyhow::Result<Update> {
let (server, status) = get_server_with_status(&server).await?;
if status != ServerStatus::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.stopping_containers = true)?;
let deployments = find_collect(
&db_client().await.deployments,
doc! {
"config.server_id": &server.id
},
None,
)
.await
.context("failed to find deployments on server")?;
let mut update =
make_update(&server, Operation::StopAllContainers, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
let futures = deployments.iter().map(|deployment| async {
(
self
.resolve(
StopContainer {
deployment: deployment.id.clone(),
signal: None,
time: None,
},
user.clone(),
)
.await,
deployment.name.clone(),
deployment.id.clone(),
)
});
let results = join_all(futures).await;
let deployment_names = deployments
.iter()
.map(|d| format!("{} ({})", d.name, d.id))
.collect::<Vec<_>>()
.join("\n");
update.push_simple_log("stopping containers", deployment_names);
for (res, name, id) in results {
if let Err(e) = res {
update.push_error_log(
"stop container failure",
format!(
"failed to stop container {name} ({id})\n\n{}",
serialize_error_pretty(&e)
),
);
}
}
update.finalize();
update_update(update.clone()).await?;
@@ -369,24 +519,19 @@ impl Resolve<StopAllContainers, User> for State {
}
}
#[async_trait]
impl Resolve<RemoveContainer, User> for State {
#[instrument(name = "RemoveContainer", skip(self, user))]
impl Resolve<DestroyDeployment, (User, Update)> for State {
#[instrument(name = "DestroyDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RemoveContainer {
DestroyDeployment {
deployment,
signal,
time,
}: RemoveContainer,
user: User,
}: DestroyDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = Deployment::get_resource_check_permissions(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
let (deployment, server) =
setup_deployment_execution(&deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -397,39 +542,16 @@ impl Resolve<RemoveContainer, User> for State {
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.removing = true)?;
action_state.update(|state| state.destroying = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerStatus::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: ResourceTarget::Deployment(deployment.id.clone()),
operation: Operation::RemoveContainer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::container::RemoveContainer {
name: deployment.name.clone(),
name: deployment.name,
signal: signal
.unwrap_or(deployment.config.termination_signal)
.into(),
@@ -440,9 +562,10 @@ impl Resolve<RemoveContainer, User> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);

View File

@@ -2,15 +2,26 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::execute::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use formatting::format_serror;
use komodo_client::{
api::execute::*,
entities::{
update::{Log, Update},
user::User,
},
};
use mungos::by_id::find_one_by_id;
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{auth::auth_request, state::State};
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
state::{db_client, State},
};
mod build;
mod deployment;
@@ -18,24 +29,53 @@ mod procedure;
mod repo;
mod server;
mod server_template;
mod stack;
mod sync;
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[resolver_args((User, Update))]
#[serde(tag = "type", content = "params")]
enum ExecuteRequest {
pub enum ExecuteRequest {
// ==== SERVER ====
PruneContainers(PruneDockerContainers),
PruneImages(PruneDockerImages),
PruneNetworks(PruneDockerNetworks),
StartContainer(StartContainer),
RestartContainer(RestartContainer),
PauseContainer(PauseContainer),
UnpauseContainer(UnpauseContainer),
StopContainer(StopContainer),
DestroyContainer(DestroyContainer),
StartAllContainers(StartAllContainers),
RestartAllContainers(RestartAllContainers),
PauseAllContainers(PauseAllContainers),
UnpauseAllContainers(UnpauseAllContainers),
StopAllContainers(StopAllContainers),
PruneContainers(PruneContainers),
DeleteNetwork(DeleteNetwork),
PruneNetworks(PruneNetworks),
DeleteImage(DeleteImage),
PruneImages(PruneImages),
DeleteVolume(DeleteVolume),
PruneVolumes(PruneVolumes),
PruneSystem(PruneSystem),
// ==== DEPLOYMENT ====
Deploy(Deploy),
StartContainer(StartContainer),
StopContainer(StopContainer),
StopAllContainers(StopAllContainers),
RemoveContainer(RemoveContainer),
StartDeployment(StartDeployment),
RestartDeployment(RestartDeployment),
PauseDeployment(PauseDeployment),
UnpauseDeployment(UnpauseDeployment),
StopDeployment(StopDeployment),
DestroyDeployment(DestroyDeployment),
// ==== STACK ====
DeployStack(DeployStack),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
DestroyStack(DestroyStack),
// ==== BUILD ====
RunBuild(RunBuild),
@@ -44,12 +84,17 @@ enum ExecuteRequest {
// ==== REPO ====
CloneRepo(CloneRepo),
PullRepo(PullRepo),
BuildRepo(BuildRepo),
CancelRepoBuild(CancelRepoBuild),
// ==== PROCEDURE ====
RunProcedure(RunProcedure),
// ==== SERVER TEMPLATE ====
LaunchServer(LaunchServer),
// ==== SYNC ====
RunSync(RunSync),
}
pub fn router() -> Router {
@@ -61,49 +106,78 @@ pub fn router() -> Router {
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ExecuteRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
) -> serror::Result<Json<Update>> {
let req_id = Uuid::new_v4();
let res = tokio::spawn(task(req_id, request, user))
.await
.context("failure in spawned execute task");
// need to validate no cancel is active before any update is created.
build::validate_cancel_build(&request).await?;
if let Err(e) = &res {
warn!("/execute request {req_id} spawn error: {e:#}",);
}
let update = init_execution_update(&request, &user).await?;
Ok((TypedHeader(ContentType::json()), res??))
let handle =
tokio::spawn(task(req_id, request, user, update.clone()));
tokio::spawn({
let update_id = update.id.clone();
async move {
let log = match handle.await {
Ok(Err(e)) => {
warn!("/execute request {req_id} task error: {e:#}",);
Log::error("task error", format_serror(&e.into()))
}
Err(e) => {
warn!("/execute request {req_id} spawn error: {e:?}",);
Log::error("spawn error", format!("{e:#?}"))
}
_ => return,
};
let res = async {
let mut update =
find_one_by_id(&db_client().await.updates, &update_id)
.await
.context("failed to query to db")?
.context("no update exists with given id")?;
update.logs.push(log);
update.finalize();
update_update(update).await
}
.await;
if let Err(e) = res {
warn!("failed to update update with task error log | {e:#}");
}
}
});
Ok(Json(update))
}
#[instrument(name = "ExecuteRequest", skip(user))]
#[instrument(name = "ExecuteRequest", skip(user, update), fields(user_id = user.id, update_id = update.id))]
async fn task(
req_id: Uuid,
request: ExecuteRequest,
user: User,
update: Update,
) -> anyhow::Result<String> {
info!(
"/execute request {req_id} | user: {} ({})",
user.username, user.id
);
info!("/execute request {req_id} | user: {}", user.username);
let timer = Instant::now();
let res =
State
.resolve_request(request, user)
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
let res = State
.resolve_request(request, (user, update))
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/execute request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
info!("/execute request {req_id} | resolve time: {elapsed:?}");
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
res
}

View File

@@ -1,39 +1,63 @@
use async_trait::async_trait;
use monitor_client::{
use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use komodo_client::{
api::execute::RunProcedure,
entities::{
permission::PermissionLevel, procedure::Procedure,
update::Update, user::User, Operation,
update::Update, user::User,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use tokio::sync::Mutex;
use crate::{
helpers::{
procedure::execute_procedure,
resource::StateResource,
update::{add_update, make_update, update_update},
},
state::{action_states, State},
helpers::{procedure::execute_procedure, update::update_update},
resource::{self, refresh_procedure_state_cache},
state::{action_states, db_client, State},
};
#[async_trait]
impl Resolve<RunProcedure, User> for State {
#[instrument(name = "RunProcedure", skip(self, user))]
impl Resolve<RunProcedure, (User, Update)> for State {
#[instrument(name = "RunProcedure", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunProcedure { procedure }: RunProcedure,
user: User,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
let procedure = Procedure::get_resource_check_permissions(
resolve_inner(procedure, user, update).await
}
}
fn resolve_inner(
procedure: String,
user: User,
mut update: Update,
) -> Pin<
Box<
dyn std::future::Future<Output = anyhow::Result<Update>> + Send,
>,
> {
Box::pin(async move {
let procedure = resource::get_check_permissions::<Procedure>(
&procedure,
&user,
PermissionLevel::Execute,
)
.await?;
// Need to push the initial log, as execute_procedure
// assumes first log is already created
// and will panic otherwise.
update.push_simple_log(
"execute_procedure",
format!(
"{}: executing procedure '{}'",
muted("INFO"),
bold(&procedure.name)
),
);
// get the action state for the procedure (or insert default).
let action_state = action_states()
.procedure
@@ -45,15 +69,7 @@ impl Resolve<RunProcedure, User> for State {
let _action_guard =
action_state.update(|state| state.running = true)?;
let mut update =
make_update(&procedure, Operation::RunProcedure, &user);
update.in_progress();
update.push_simple_log(
"execute procedure",
format!("Executing procedure: {}", procedure.name),
);
update.id = add_update(update.clone()).await?;
update_update(update.clone()).await?;
let update = Mutex::new(update);
@@ -65,19 +81,36 @@ impl Resolve<RunProcedure, User> for State {
Ok(_) => {
update.push_simple_log(
"execution ok",
"the procedure has completed with no errors",
format!(
"{}: the procedure has {} with no errors",
muted("INFO"),
colored("completed", Color::Green)
),
);
}
Err(e) => update.push_error_log(
"execution error",
serialize_error_pretty(&e),
),
Err(e) => update
.push_error_log("execution error", format_serror(&e.into())),
}
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db_client().await.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_procedure_state_cache().await;
}
update_update(update.clone()).await?;
Ok(update)
}
})
}

View File

@@ -1,43 +1,60 @@
use std::str::FromStr;
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::anyhow;
use async_trait::async_trait;
use monitor_client::{
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::execute::*,
entities::{
monitor_timestamp, optional_string,
alert::{Alert, AlertData, SeverityLevel},
builder::{Builder, BuilderConfig},
komodo_timestamp, optional_string,
permission::PermissionLevel,
repo::Repo,
server::Server,
update::{Log, ResourceTarget, Update, UpdateStatus},
update::{Log, Update},
user::User,
Operation,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::{
by_id::update_one_by_id,
mongodb::{
bson::{doc, to_document},
options::FindOneOptions,
},
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use tokio_util::sync::CancellationToken;
use crate::{
config::core_config,
helpers::{
alert::send_alerts,
builder::{cleanup_builder_instance, get_builder_periphery},
channel::repo_cancel_channel,
git_token,
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_system_command,
},
periphery_client,
resource::StateResource,
update::{add_update, update_update},
query::get_variables_and_secrets,
update::update_update,
},
resource::{self, refresh_repo_state_cache},
state::{action_states, db_client, State},
};
#[async_trait]
impl Resolve<CloneRepo, User> for State {
#[instrument(name = "CloneRepo", skip(self, user))]
use super::ExecuteRequest;
impl Resolve<CloneRepo, (User, Update)> for State {
#[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
CloneRepo { repo }: CloneRepo,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = Repo::get_resource_check_permissions(
let mut repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
@@ -53,43 +70,49 @@ impl Resolve<CloneRepo, User> for State {
let _action_guard =
action_state.update(|state| state.cloning = true)?;
update_update(update.clone()).await?;
let git_token = git_token(
&repo.config.git_provider,
&repo.config.git_account,
|https| repo.config.git_https = https,
)
.await
.with_context(
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account),
)?;
if repo.config.server_id.is_empty() {
return Err(anyhow!("repo has no server attached"));
}
let server = Server::get_resource(&repo.config.server_id).await?;
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
operation: Operation::CloneRepo,
target: ResourceTarget::Repo(repo.id.clone()),
start_ts,
status: UpdateStatus::InProgress,
operator: user.id.clone(),
success: true,
..Default::default()
};
update.id = add_update(update.clone()).await?;
let github_token = core_config()
.github_accounts
.get(&repo.config.github_account)
.cloned();
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers =
interpolate(&mut repo, &mut update).await?;
let logs = match periphery
.request(api::git::CloneRepo {
args: (&repo).into(),
github_token,
git_token,
environment: repo.config.environment,
env_file_path: repo.config.env_file_path,
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
Ok(logs) => logs,
Ok(res) => res.logs,
Err(e) => {
vec![Log::error("clone repo", serialize_error_pretty(&e))]
vec![Log::error(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
)]
}
};
@@ -97,39 +120,24 @@ impl Resolve<CloneRepo, User> for State {
update.finalize();
if update.success {
let res = db_client().await
.repos
.update_one(
doc! { "_id": ObjectId::from_str(&repo.id)? },
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
None,
)
.await;
if let Err(e) = res {
warn!(
"failed to update repo last_pulled_at | repo id: {} | {e:#}",
repo.id
);
}
update_last_pulled_time(&repo.name).await;
}
update_update(update.clone()).await?;
Ok(update)
handle_server_update_return(update).await
}
}
#[async_trait]
impl Resolve<PullRepo, User> for State {
#[instrument(name = "PullRepo", skip(self, user))]
impl Resolve<PullRepo, (User, Update)> for State {
#[instrument(name = "PullRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PullRepo { repo }: PullRepo,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = Repo::get_resource_check_permissions(
let mut repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Write,
PermissionLevel::Execute,
)
.await?;
@@ -142,39 +150,45 @@ impl Resolve<PullRepo, User> for State {
let _action_guard =
action_state.update(|state| state.pulling = true)?;
update_update(update.clone()).await?;
if repo.config.server_id.is_empty() {
return Err(anyhow!("repo has no server attached"));
}
let server = Server::get_resource(&repo.config.server_id).await?;
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
operation: Operation::PullRepo,
target: ResourceTarget::Repo(repo.id.clone()),
start_ts,
status: UpdateStatus::InProgress,
operator: user.id.clone(),
success: true,
..Default::default()
};
update.id = add_update(update.clone()).await?;
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers =
interpolate(&mut repo, &mut update).await?;
let logs = match periphery
.request(api::git::PullRepo {
name: repo.name,
name: repo.name.clone(),
branch: optional_string(&repo.config.branch),
commit: optional_string(&repo.config.commit),
path: optional_string(&repo.config.path),
on_pull: repo.config.on_pull.into_option(),
environment: repo.config.environment,
env_file_path: repo.config.env_file_path,
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
Ok(logs) => logs,
Ok(res) => {
update.commit_hash = res.commit_hash.unwrap_or_default();
res.logs
}
Err(e) => {
vec![Log::error("pull repo", serialize_error_pretty(&e))]
vec![Log::error(
"pull repo",
format_serror(&e.context("failed to pull repo").into()),
)]
}
};
@@ -183,23 +197,459 @@ impl Resolve<PullRepo, User> for State {
update.finalize();
if update.success {
let res = db_client().await
.repos
.update_one(
doc! { "_id": ObjectId::from_str(&repo.id)? },
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
None,
)
.await;
if let Err(e) = res {
warn!(
"failed to update repo last_pulled_at | repo id: {} | {e:#}",
repo.id
);
update_last_pulled_time(&repo.name).await;
}
handle_server_update_return(update).await
}
}
#[instrument(skip_all, fields(update_id = update.id))]
async fn handle_server_update_return(
  update: Update,
) -> anyhow::Result<Update> {
  // Persist the finalized update directly, then refresh the repo state
  // cache, BEFORE broadcasting with update_update — the cache must see
  // the terminal state first. Serializing an Update to a document should
  // never fail; if it somehow does, the write and cache refresh are
  // skipped (matching the prior best-effort behavior).
  match to_document(&update) {
    Ok(doc) => {
      let _ = update_one_by_id(
        &db_client().await.updates,
        &update.id,
        mungos::update::Update::Set(doc),
        None,
      )
      .await;
      refresh_repo_state_cache().await;
    }
    Err(_) => {}
  }
  update_update(update.clone()).await?;
  Ok(update)
}
#[instrument]
async fn update_last_pulled_time(repo_name: &str) {
  // Best-effort timestamp bump: stamps `info.last_pulled_at` on the repo
  // document matched by name. A db failure is only logged, never
  // propagated — a missed timestamp should not fail the pull itself.
  if let Err(e) = db_client()
    .await
    .repos
    .update_one(
      doc! { "name": repo_name },
      doc! { "$set": { "info.last_pulled_at": komodo_timestamp() } },
    )
    .await
  {
    warn!(
      "failed to update repo last_pulled_at | repo: {repo_name} | {e:#}",
    );
  }
}
impl Resolve<BuildRepo, (User, Update)> for State {
  /// Clones / builds a repo on its attached builder, with support for
  /// user-triggered cancellation via the repo cancel channel. Appends
  /// all periphery logs to the execution `Update`, alerts on failure,
  /// and always cleans up the builder instance.
  #[instrument(name = "BuildRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    BuildRepo { repo }: BuildRepo,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    let mut repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // A builder is mandatory for BuildRepo (unlike plain CloneRepo).
    if repo.config.builder_id.is_empty() {
      return Err(anyhow!("Must attach builder to BuildRepo"));
    }
    // get the action state for the repo (or insert default).
    let action_state =
      action_states().repo.get_or_insert_default(&repo.id).await;
    // This will set action state back to default when dropped.
    // Will also check to ensure repo not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.building = true)?;
    // Broadcast the update so the frontend shows the building state.
    update_update(update.clone()).await?;
    // The callback lets the token lookup also flip repo.config.git_https.
    let git_token = git_token(
      &repo.config.git_provider,
      &repo.config.git_account,
      |https| repo.config.git_https = https,
    )
    .await
    .with_context(
      || format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account),
    )?;
    // Cancellation plumbing: `cancel` aborts the clone below; the
    // spawned task listens on the global repo cancel channel for a
    // cancel aimed at this repo id.
    let cancel = CancellationToken::new();
    let cancel_clone = cancel.clone();
    let mut cancel_recv =
      repo_cancel_channel().receiver.resubscribe();
    let repo_id = repo.id.clone();
    let builder =
      resource::get::<Builder>(&repo.config.builder_id).await?;
    let is_server_builder =
      matches!(&builder.config, BuilderConfig::Server(_));
    // Background task: wait for a matching cancel message (or for the
    // build to finish, which cancels this task via the token).
    tokio::spawn(async move {
      let poll = async {
        loop {
          let (incoming_repo_id, mut update) = tokio::select! {
            _ = cancel_clone.cancelled() => return Ok(()),
            id = cancel_recv.recv() => id?
          };
          if incoming_repo_id == repo_id {
            // Server builders cannot be cancelled mid-build; only log
            // acknowledgement in that case.
            if is_server_builder {
              update.push_error_log("Cancel acknowledged", "Repo Build cancellation is not possible on server builders at this time. Use an AWS builder to enable this feature.");
            } else {
              update.push_simple_log("Cancel acknowledged", "The repo build cancellation has been queued, it may still take some time.");
            }
            update.finalize();
            let id = update.id.clone();
            if let Err(e) = update_update(update).await {
              warn!("failed to modify Update {id} on db | {e:#}");
            }
            if !is_server_builder {
              cancel_clone.cancel();
            }
            return Ok(());
          }
        }
        #[allow(unreachable_code)]
        anyhow::Ok(())
      };
      tokio::select! {
        _ = cancel_clone.cancelled() => {}
        _ = poll => {}
      }
    });
    // GET BUILDER PERIPHERY
    // May provision infrastructure (e.g. an AWS instance) — failure is
    // logged onto the update and returned via the early-return helper.
    let (periphery, cleanup_data) = match get_builder_periphery(
      repo.name.clone(),
      None,
      builder,
      &mut update,
    )
    .await
    {
      Ok(builder) => builder,
      Err(e) => {
        warn!("failed to get builder for repo {} | {e:#}", repo.name);
        update.logs.push(Log::error(
          "get builder",
          format_serror(&e.context("failed to get builder").into()),
        ));
        return handle_builder_early_return(
          update, repo.id, repo.name, false,
        )
        .await;
      }
    };
    // CLONE REPO
    // interpolate variables / secrets, returning the sanitizing replacers to send to
    // periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
    let secret_replacers =
      interpolate(&mut repo, &mut update).await?;
    // Race the clone against cancellation; on cancel, clean up the
    // builder before returning.
    let res = tokio::select! {
      res = periphery
        .request(api::git::CloneRepo {
          args: (&repo).into(),
          git_token,
          environment: repo.config.environment,
          env_file_path: repo.config.env_file_path,
          skip_secret_interp: repo.config.skip_secret_interp,
          replacers: secret_replacers.into_iter().collect()
        }) => res,
      _ = cancel.cancelled() => {
        debug!("build cancelled during clone, cleaning up builder");
        update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
        cleanup_builder_instance(periphery, cleanup_data, &mut update)
          .await;
        info!("builder cleaned up");
        return handle_builder_early_return(update, repo.id, repo.name, true).await
      },
    };
    // Collect logs / commit info; clone failure becomes an error log
    // (and an empty commit message) rather than an early return.
    let commit_message = match res {
      Ok(res) => {
        debug!("finished repo clone");
        update.logs.extend(res.logs);
        update.commit_hash = res.commit_hash.unwrap_or_default();
        res.commit_message.unwrap_or_default()
      }
      Err(e) => {
        update.push_error_log(
          "clone repo",
          format_serror(&e.context("failed to clone repo").into()),
        );
        Default::default()
      }
    };
    update.finalize();
    let db = db_client().await;
    // On success, record build metadata on the repo document (best-effort).
    if update.success {
      let _ = db
        .repos
        .update_one(
          doc! { "name": &repo.name },
          doc! { "$set": {
            "info.last_built_at": komodo_timestamp(),
            "info.built_hash": &update.commit_hash,
            "info.built_message": commit_message
          }},
        )
        .await;
    }
    // stop the cancel listening task from going forever
    cancel.cancel();
    cleanup_builder_instance(periphery, cleanup_data, &mut update)
      .await;
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db.updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_repo_state_cache().await;
    }
    update_update(update.clone()).await?;
    // Fire-and-forget a (pre-resolved) warning alert on build failure.
    if !update.success {
      warn!("repo build unsuccessful, alerting...");
      let target = update.target.clone();
      tokio::spawn(async move {
        let alert = Alert {
          id: Default::default(),
          target,
          ts: komodo_timestamp(),
          resolved_ts: Some(komodo_timestamp()),
          resolved: true,
          level: SeverityLevel::Warning,
          data: AlertData::RepoBuildFailed {
            id: repo.id,
            name: repo.name,
          },
        };
        send_alerts(&[alert]).await
      });
    }
    Ok(update)
  }
}
#[instrument(skip(update))]
async fn handle_builder_early_return(
  mut update: Update,
  repo_id: String,
  repo_name: String,
  is_cancel: bool,
) -> anyhow::Result<Update> {
  update.finalize();
  // Persist the finalized update and refresh the repo state cache
  // before broadcasting, so the cache reflects the terminal state.
  // Serializing an Update should never fail; if it does, the write and
  // refresh are skipped (best-effort, as before).
  match to_document(&update) {
    Ok(doc) => {
      let _ = update_one_by_id(
        &db_client().await.updates,
        &update.id,
        mungos::update::Update::Set(doc),
        None,
      )
      .await;
      refresh_repo_state_cache().await;
    }
    Err(_) => {}
  }
  update_update(update.clone()).await?;
  // Alert only on genuine failure — a user-requested cancel is not one.
  let failed = !update.success;
  if failed && !is_cancel {
    warn!("repo build unsuccessful, alerting...");
    let target = update.target.clone();
    tokio::spawn(async move {
      send_alerts(&[Alert {
        id: Default::default(),
        target,
        ts: komodo_timestamp(),
        resolved_ts: Some(komodo_timestamp()),
        resolved: true,
        level: SeverityLevel::Warning,
        data: AlertData::RepoBuildFailed {
          id: repo_id,
          name: repo_name,
        },
      }])
      .await
    });
  }
  Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_repo_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
if let ExecuteRequest::CancelRepoBuild(req) = request {
let repo = resource::get::<Repo>(&req.repo).await?;
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates
.find_one(doc! {
"operation": "BuildRepo",
"target.id": &repo.id,
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future(),
db.updates
.find_one(doc! {
"operation": "CancelRepoBuild",
"target.id": &repo.id,
},)
.with_options(
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build()
)
.into_future()
)?;
match (latest_build, latest_cancel) {
(Some(build), Some(cancel)) => {
if cancel.start_ts > build.start_ts {
return Err(anyhow!(
"Repo build has already been cancelled"
));
}
}
(None, _) => return Err(anyhow!("No repo build in progress")),
_ => {}
};
}
Ok(())
}
/// Triggers cancellation of an in-progress repo build by sending the
/// (repo id, update) pair over the repo cancel channel. The build task
/// listening on that channel performs the actual cancellation and
/// finalizes the update.
impl Resolve<CancelRepoBuild, (User, Update)> for State {
  #[instrument(name = "CancelRepoBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    CancelRepoBuild { repo }: CancelRepoBuild,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // Caller must have Execute permission on the target repo.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // make sure the build is building
    if !action_states()
      .repo
      .get(&repo.id)
      .await
      .and_then(|s| s.get().ok().map(|s| s.building))
      .unwrap_or_default()
    {
      return Err(anyhow!("Repo is not building."));
    }
    update.push_simple_log(
      "cancel triggered",
      "the repo build cancel has been triggered",
    );
    update_update(update.clone()).await?;
    // Notify the running build task via the cancel channel. The sender
    // is behind an async Mutex, so lock it for the send.
    repo_cancel_channel()
      .sender
      .lock()
      .await
      .send((repo.id, update.clone()))?;
    // Make sure cancel is set to complete after some time in case
    // no receiver is there to do it. Prevents update stuck in InProgress.
    let update_id = update.id.clone();
    tokio::spawn(async move {
      tokio::time::sleep(Duration::from_secs(60)).await;
      if let Err(e) = update_one_by_id(
        &db_client().await.updates,
        &update_id,
        doc! { "$set": { "status": "Complete" } },
        None,
      )
      .await
      {
        warn!("failed to set CancelRepoBuild Update status Complete after timeout | {e:#}")
      }
    });
    Ok(update)
  }
}
/// Interpolate global variables and secrets into the repo's environment
/// and its on_clone / on_pull commands, and append a log entry to the
/// update describing the replacements made. Returns the set of secret
/// replacers so downstream logging can be sanitized. Returns an empty
/// set without doing any work when `skip_secret_interp` is set.
async fn interpolate(
  repo: &mut Repo,
  update: &mut Update,
) -> anyhow::Result<HashSet<(String, String)>> {
  // Guard clause: interpolation disabled on this repo.
  if repo.config.skip_secret_interp {
    return Ok(Default::default());
  }
  let vars_and_secrets = get_variables_and_secrets().await?;
  let mut global_replacers = HashSet::new();
  let mut secret_replacers = HashSet::new();
  interpolate_variables_secrets_into_environment(
    &vars_and_secrets,
    &mut repo.config.environment,
    &mut global_replacers,
    &mut secret_replacers,
  )?;
  // Both lifecycle commands accumulate into the same replacer sets.
  for command in
    [&mut repo.config.on_clone, &mut repo.config.on_pull]
  {
    interpolate_variables_secrets_into_system_command(
      &vars_and_secrets,
      command,
      &mut global_replacers,
      &mut secret_replacers,
    )?;
  }
  add_interp_update_log(update, &global_replacers, &secret_replacers);
  Ok(secret_replacers)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,6 @@
use axum::async_trait;
use monitor_client::{
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::{execute::LaunchServer, write::CreateServer},
entities::{
permission::PermissionLevel,
@@ -7,58 +8,73 @@ use monitor_client::{
server_template::{ServerTemplate, ServerTemplateConfig},
update::Update,
user::User,
Operation,
},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
cloud::aws::launch_ec2_instance,
helpers::{
resource::StateResource,
update::{add_update, make_update, update_update},
cloud::{
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
},
state::State,
helpers::update::update_update,
resource,
state::{db_client, State},
};
#[async_trait]
impl Resolve<LaunchServer, User> for State {
#[instrument(name = "LaunchServer", skip(self, user))]
impl Resolve<LaunchServer, (User, Update)> for State {
#[instrument(name = "LaunchServer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
LaunchServer {
name,
server_template,
}: LaunchServer,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let template = ServerTemplate::get_resource_check_permissions(
// validate name isn't already taken by another server
if db_client()
.await
.servers
.find_one(doc! {
"name": &name
})
.await
.context("failed to query db for servers")?
.is_some()
{
return Err(anyhow!("name is already taken"));
}
let template = resource::get_check_permissions::<ServerTemplate>(
&server_template,
&user,
PermissionLevel::Execute,
)
.await?;
let mut update =
make_update(&template, Operation::LaunchServer, &user);
update.push_simple_log(
"launching server",
format!("{:#?}", template.config),
);
update.id = add_update(update.clone()).await?;
match template.config {
update_update(update.clone()).await?;
let config = match template.config {
ServerTemplateConfig::Aws(config) => {
let region = config.region.clone();
let instance = launch_ec2_instance(&name, config).await;
if let Err(e) = &instance {
update.push_error_log(
"launch server",
format!("failed to launch aws instance\n\n{e:#?}"),
);
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
let instance = instance.unwrap();
let instance = match launch_ec2_instance(&name, config).await
{
Ok(instance) => instance,
Err(e) => {
update.push_error_log(
"launch server",
format!("failed to launch aws instance\n\n{e:#?}"),
);
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
};
update.push_simple_log(
"launch server",
format!(
@@ -66,22 +82,58 @@ impl Resolve<LaunchServer, User> for State {
instance.ip
),
);
let _ = self
.resolve(
CreateServer {
name,
config: PartialServerConfig {
address: format!("http://{}:8120", instance.ip)
.into(),
region: region.into(),
..Default::default()
},
},
user,
)
.await;
PartialServerConfig {
address: format!("http://{}:8120", instance.ip).into(),
region: region.into(),
..Default::default()
}
}
}
ServerTemplateConfig::Hetzner(config) => {
let datacenter = config.datacenter;
let server = match launch_hetzner_server(&name, config).await
{
Ok(server) => server,
Err(e) => {
update.push_error_log(
"launch server",
format!("failed to launch hetzner server\n\n{e:#?}"),
);
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
};
update.push_simple_log(
"launch server",
format!(
"successfully launched server {name} on ip {}",
server.ip
),
);
PartialServerConfig {
address: format!("http://{}:8120", server.ip).into(),
region: datacenter.as_ref().to_string().into(),
..Default::default()
}
}
};
match self.resolve(CreateServer { name, config }, user).await {
Ok(server) => {
update.push_simple_log(
"create server",
format!("created server {} ({})", server.name, server.id),
);
update.other_data = server.id;
}
Err(e) => {
update.push_error_log(
"create server",
format_serror(&e.context("failed to create server").into()),
);
}
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)

View File

@@ -0,0 +1,359 @@
use std::collections::HashSet;
use anyhow::Context;
use formatting::format_serror;
use komodo_client::{
api::execute::*,
entities::{
permission::PermissionLevel, stack::StackInfo, update::Update,
user::User,
},
};
use mungos::mongodb::bson::{doc, to_document};
use periphery_client::api::compose::*;
use resolver_api::Resolve;
use crate::{
helpers::{
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_extra_args,
},
periphery_client,
query::get_variables_and_secrets,
stack::{
execute::execute_compose, get_stack_and_server,
services::extract_services_into_res,
},
update::update_update,
},
monitor::update_cache_for_server,
state::{action_states, db_client, State},
};
/// Deploys a stack: acquires the action-state guard, gathers git /
/// registry tokens, interpolates secrets, sends ComposeUp to the
/// periphery agent on the stack's server, then refreshes the stack's
/// persisted info and the server cache before finalizing the update.
impl Resolve<DeployStack, (User, Update)> for State {
  #[instrument(name = "DeployStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    DeployStack { stack, stop_time }: DeployStack,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // NOTE(review): `stop_time` is destructured but never used in this
    // body — confirm whether it should be forwarded to periphery.
    let (mut stack, server) = get_stack_and_server(
      &stack,
      &user,
      PermissionLevel::Execute,
      true,
    )
    .await?;
    // get the action state for the stack (or insert default).
    let action_state =
      action_states().stack.get_or_insert_default(&stack.id).await;
    // Will check to ensure stack not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.deploying = true)?;
    update_update(update.clone()).await?;
    let git_token = crate::helpers::git_token(
      &stack.config.git_provider,
      &stack.config.git_account,
      |https| stack.config.git_https = https,
    ).await.with_context(
      || format!("Failed to get git token in call to db. Stopping run. | {} | {}", stack.config.git_provider, stack.config.git_account),
    )?;
    let registry_token = crate::helpers::registry_token(
      &stack.config.registry_provider,
      &stack.config.registry_account,
    ).await.with_context(
      || format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account),
    )?;
    // interpolate variables / secrets, returning the sanitizing replacers to send to
    // periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
    let secret_replacers = if !stack.config.skip_secret_interp {
      let vars_and_secrets = get_variables_and_secrets().await?;
      let mut global_replacers = HashSet::new();
      let mut secret_replacers = HashSet::new();
      interpolate_variables_secrets_into_environment(
        &vars_and_secrets,
        &mut stack.config.environment,
        &mut global_replacers,
        &mut secret_replacers,
      )?;
      interpolate_variables_secrets_into_extra_args(
        &vars_and_secrets,
        &mut stack.config.extra_args,
        &mut global_replacers,
        &mut secret_replacers,
      )?;
      interpolate_variables_secrets_into_extra_args(
        &vars_and_secrets,
        &mut stack.config.build_extra_args,
        &mut global_replacers,
        &mut secret_replacers,
      )?;
      add_interp_update_log(
        &mut update,
        &global_replacers,
        &secret_replacers,
      );
      secret_replacers
    } else {
      Default::default()
    };
    let ComposeUpResponse {
      logs,
      deployed,
      file_contents,
      missing_files,
      remote_errors,
      commit_hash,
      commit_message,
    } = periphery_client(&server)?
      .request(ComposeUp {
        stack: stack.clone(),
        service: None,
        git_token,
        registry_token,
        replacers: secret_replacers.into_iter().collect(),
      })
      .await?;
    update.logs.extend(logs);
    // Recompute the stack's persisted StackInfo from the compose
    // response. Kept as a fallible async block so one push_error_log
    // covers every failure path inside it.
    let update_info = async {
      let latest_services = if !file_contents.is_empty() {
        let mut services = Vec::new();
        for contents in &file_contents {
          if let Err(e) = extract_services_into_res(
            &stack.project_name(true),
            &contents.contents,
            &mut services,
          ) {
            update.push_error_log(
              "extract services",
              format_serror(&e.context(format!("Failed to extract stack services for compose file path {}. Things probably won't work correctly", contents.path)).into())
            );
          }
        }
        services
      } else {
        // maybe better to do something else here for services.
        stack.info.latest_services.clone()
      };
      let project_name = stack.project_name(true);
      // Only advance the deployed_* fields when periphery actually
      // deployed; otherwise carry the previous values forward.
      let (
        deployed_services,
        deployed_contents,
        deployed_hash,
        deployed_message,
      ) = if deployed {
        (
          Some(latest_services.clone()),
          Some(file_contents.clone()),
          commit_hash.clone(),
          commit_message.clone(),
        )
      } else {
        (
          stack.info.deployed_services,
          stack.info.deployed_contents,
          stack.info.deployed_hash,
          stack.info.deployed_message,
        )
      };
      let info = StackInfo {
        missing_files,
        deployed_project_name: project_name.into(),
        deployed_services,
        deployed_contents,
        deployed_hash,
        deployed_message,
        latest_services,
        // remote_* only apply when file contents are not UI-defined.
        remote_contents: stack
          .config
          .file_contents
          .is_empty()
          .then_some(file_contents),
        remote_errors: stack
          .config
          .file_contents
          .is_empty()
          .then_some(remote_errors),
        latest_hash: commit_hash,
        latest_message: commit_message,
      };
      let info = to_document(&info)
        .context("failed to serialize stack info to bson")?;
      db_client()
        .await
        .stacks
        .update_one(
          doc! { "name": &stack.name },
          doc! { "$set": { "info": info } },
        )
        .await
        .context("failed to update stack info on db")?;
      anyhow::Ok(())
    };
    // This will be weird with single service deploys. Come back to it.
    if let Err(e) = update_info.await {
      update.push_error_log(
        "refresh stack info",
        format_serror(
          &e.context("failed to refresh stack info on db").into(),
        ),
      )
    }
    // Ensure cached stack state up to date by updating server cache
    update_cache_for_server(&server).await;
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
/// Starts a stack (optionally a single service) by delegating to the
/// shared compose executor, marking the action state as `starting`.
impl Resolve<StartStack, (User, Update)> for State {
  #[instrument(name = "StartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    StartStack { stack, service }: StartStack,
    (user, update): (User, Update),
  ) -> anyhow::Result<Update> {
    // No extra args are needed for a plain start.
    execute_compose::<StartStack>(
      &stack,
      service,
      &user,
      |state| {
        state.starting = true;
      },
      update,
      (),
    )
    .await
  }
}
/// Restarts a stack (optionally a single service) via the shared
/// compose executor, marking the action state as `restarting`.
impl Resolve<RestartStack, (User, Update)> for State {
  #[instrument(name = "RestartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    RestartStack { stack, service }: RestartStack,
    (user, update): (User, Update),
  ) -> anyhow::Result<Update> {
    // No extra args are needed for a restart.
    execute_compose::<RestartStack>(
      &stack,
      service,
      &user,
      |state| state.restarting = true,
      update,
      (),
    )
    .await
  }
}
/// Pauses a stack (optionally a single service) via the shared compose
/// executor, marking the action state as `pausing`.
impl Resolve<PauseStack, (User, Update)> for State {
  #[instrument(name = "PauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    PauseStack { stack, service }: PauseStack,
    (user, update): (User, Update),
  ) -> anyhow::Result<Update> {
    // No extra args are needed for a pause.
    execute_compose::<PauseStack>(
      &stack,
      service,
      &user,
      |state| {
        state.pausing = true;
      },
      update,
      (),
    )
    .await
  }
}
/// Unpauses a stack (optionally a single service) via the shared
/// compose executor, marking the action state as `unpausing`.
impl Resolve<UnpauseStack, (User, Update)> for State {
  #[instrument(name = "UnpauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    UnpauseStack { stack, service }: UnpauseStack,
    (user, update): (User, Update),
  ) -> anyhow::Result<Update> {
    // No extra args are needed for an unpause.
    execute_compose::<UnpauseStack>(
      &stack,
      service,
      &user,
      |state| {
        state.unpausing = true;
      },
      update,
      (),
    )
    .await
  }
}
/// Stops a stack (optionally a single service) via the shared compose
/// executor, marking the action state as `stopping`. The optional
/// `stop_time` is forwarded as the executor's extra argument.
impl Resolve<StopStack, (User, Update)> for State {
  #[instrument(name = "StopStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    StopStack {
      stack,
      stop_time,
      service,
    }: StopStack,
    (user, update): (User, Update),
  ) -> anyhow::Result<Update> {
    execute_compose::<StopStack>(
      &stack,
      service,
      &user,
      |state| {
        state.stopping = true;
      },
      update,
      stop_time,
    )
    .await
  }
}
/// Destroys a stack via the shared compose executor, marking the
/// action state as `destroying`. The whole stack is torn down, so no
/// single service is targeted (`None`); the stop timeout and the
/// orphan-removal flag travel as the executor's extra arguments.
impl Resolve<DestroyStack, (User, Update)> for State {
  #[instrument(name = "DestroyStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    DestroyStack {
      stack,
      remove_orphans,
      stop_time,
    }: DestroyStack,
    (user, update): (User, Update),
  ) -> anyhow::Result<Update> {
    execute_compose::<DestroyStack>(
      &stack,
      None,
      &user,
      |state| {
        state.destroying = true;
      },
      update,
      (stop_time, remove_orphans),
    )
    .await
  }
}

View File

@@ -0,0 +1,448 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use mongo_indexed::doc;
use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{
self,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
komodo_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
update::{Log, Update},
user::{sync_user, User},
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use crate::{
helpers::{
query::get_id_to_tags,
sync::{
deploy::{
build_deploy_cache, deploy_from_cache, SyncDeployParams,
},
resource::{
get_updates_for_execution, AllResourcesById, ResourceSync,
},
},
update::update_update,
},
resource::{self, refresh_resource_sync_state_cache},
state::{db_client, State},
};
/// Executes a resource sync: fetches the declared resources from the
/// sync's repo, diffs them against the current state of every resource
/// type, then applies creates/updates/deletes in dependency order and
/// runs any pending deploys. Finally records the sync hash/message and
/// refreshes the sync's pending-state cache.
impl Resolve<RunSync, (User, Update)> for State {
  #[instrument(name = "RunSync", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    RunSync { sync }: RunSync,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // Caller must have Execute permission on the sync.
    let sync = resource::get_check_permissions::<
      entities::sync::ResourceSync,
    >(&sync, &user, PermissionLevel::Execute)
    .await?;
    if sync.config.repo.is_empty() {
      return Err(anyhow!("resource sync repo not configured"));
    }
    // Send update here for FE to recheck action state
    update_update(update.clone()).await?;
    // Clone the repo and parse the declared resources. The clone logs
    // are attached to the update even if parsing (`res`) failed.
    let (res, logs, hash, message) =
      crate::helpers::sync::remote::get_remote_resources(&sync)
        .await
        .context("failed to get remote resources")?;
    update.logs.extend(logs);
    update_update(update.clone()).await?;
    let resources = res?;
    let id_to_tags = get_id_to_tags(None).await?;
    let all_resources = AllResourcesById::load().await?;
    // Name-keyed lookup maps used to match declared deployments /
    // stacks against existing ones when building the deploy cache.
    let deployments_by_name = all_resources
      .deployments
      .values()
      .map(|deployment| (deployment.name.clone(), deployment.clone()))
      .collect::<HashMap<_, _>>();
    let stacks_by_name = all_resources
      .stacks
      .values()
      .map(|stack| (stack.name.clone(), stack.clone()))
      .collect::<HashMap<_, _>>();
    let deploy_cache = build_deploy_cache(SyncDeployParams {
      deployments: &resources.deployments,
      deployment_map: &deployments_by_name,
      stacks: &resources.stacks,
      stack_map: &stacks_by_name,
      all_resources: &all_resources,
    })
    .await?;
    // Compute (create, update, delete) sets for every resource type.
    let (servers_to_create, servers_to_update, servers_to_delete) =
      get_updates_for_execution::<Server>(
        resources.servers,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (
      deployments_to_create,
      deployments_to_update,
      deployments_to_delete,
    ) = get_updates_for_execution::<Deployment>(
      resources.deployments,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    let (stacks_to_create, stacks_to_update, stacks_to_delete) =
      get_updates_for_execution::<Stack>(
        resources.stacks,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (builds_to_create, builds_to_update, builds_to_delete) =
      get_updates_for_execution::<Build>(
        resources.builds,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (repos_to_create, repos_to_update, repos_to_delete) =
      get_updates_for_execution::<Repo>(
        resources.repos,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (
      procedures_to_create,
      procedures_to_update,
      procedures_to_delete,
    ) = get_updates_for_execution::<Procedure>(
      resources.procedures,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    let (builders_to_create, builders_to_update, builders_to_delete) =
      get_updates_for_execution::<Builder>(
        resources.builders,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (alerters_to_create, alerters_to_update, alerters_to_delete) =
      get_updates_for_execution::<Alerter>(
        resources.alerters,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (
      server_templates_to_create,
      server_templates_to_update,
      server_templates_to_delete,
    ) = get_updates_for_execution::<ServerTemplate>(
      resources.server_templates,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    let (
      resource_syncs_to_create,
      resource_syncs_to_update,
      resource_syncs_to_delete,
    ) = get_updates_for_execution::<entities::sync::ResourceSync>(
      resources.resource_syncs,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    // Variables and user groups have their own diffing helpers.
    let (
      variables_to_create,
      variables_to_update,
      variables_to_delete,
    ) = crate::helpers::sync::variables::get_updates_for_execution(
      resources.variables,
      sync.config.delete,
    )
    .await?;
    let (
      user_groups_to_create,
      user_groups_to_update,
      user_groups_to_delete,
    ) = crate::helpers::sync::user_groups::get_updates_for_execution(
      resources.user_groups,
      sync.config.delete,
      &all_resources,
    )
    .await?;
    // Short-circuit when every diff set is empty: nothing to apply.
    if deploy_cache.is_empty()
      && resource_syncs_to_create.is_empty()
      && resource_syncs_to_update.is_empty()
      && resource_syncs_to_delete.is_empty()
      && server_templates_to_create.is_empty()
      && server_templates_to_update.is_empty()
      && server_templates_to_delete.is_empty()
      && servers_to_create.is_empty()
      && servers_to_update.is_empty()
      && servers_to_delete.is_empty()
      && deployments_to_create.is_empty()
      && deployments_to_update.is_empty()
      && deployments_to_delete.is_empty()
      && stacks_to_create.is_empty()
      && stacks_to_update.is_empty()
      && stacks_to_delete.is_empty()
      && builds_to_create.is_empty()
      && builds_to_update.is_empty()
      && builds_to_delete.is_empty()
      && builders_to_create.is_empty()
      && builders_to_update.is_empty()
      && builders_to_delete.is_empty()
      && alerters_to_create.is_empty()
      && alerters_to_update.is_empty()
      && alerters_to_delete.is_empty()
      && repos_to_create.is_empty()
      && repos_to_update.is_empty()
      && repos_to_delete.is_empty()
      && procedures_to_create.is_empty()
      && procedures_to_update.is_empty()
      && procedures_to_delete.is_empty()
      && user_groups_to_create.is_empty()
      && user_groups_to_update.is_empty()
      && user_groups_to_delete.is_empty()
      && variables_to_create.is_empty()
      && variables_to_update.is_empty()
      && variables_to_delete.is_empty()
    {
      update.push_simple_log(
        "No Changes",
        format!(
          "{}. exiting.",
          colored("nothing to do", Color::Green)
        ),
      );
      update.finalize();
      update_update(update.clone()).await?;
      return Ok(update);
    }
    // =================
    // No deps
    maybe_extend(
      &mut update.logs,
      crate::helpers::sync::variables::run_updates(
        variables_to_create,
        variables_to_update,
        variables_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      crate::helpers::sync::user_groups::run_updates(
        user_groups_to_create,
        user_groups_to_update,
        user_groups_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      entities::sync::ResourceSync::run_updates(
        resource_syncs_to_create,
        resource_syncs_to_update,
        resource_syncs_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      ServerTemplate::run_updates(
        server_templates_to_create,
        server_templates_to_update,
        server_templates_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      Server::run_updates(
        servers_to_create,
        servers_to_update,
        servers_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      Alerter::run_updates(
        alerters_to_create,
        alerters_to_update,
        alerters_to_delete,
      )
      .await,
    );
    // Dependent on server
    maybe_extend(
      &mut update.logs,
      Builder::run_updates(
        builders_to_create,
        builders_to_update,
        builders_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      Repo::run_updates(
        repos_to_create,
        repos_to_update,
        repos_to_delete,
      )
      .await,
    );
    // Dependant on builder
    maybe_extend(
      &mut update.logs,
      Build::run_updates(
        builds_to_create,
        builds_to_update,
        builds_to_delete,
      )
      .await,
    );
    // Dependant on server / build
    maybe_extend(
      &mut update.logs,
      Deployment::run_updates(
        deployments_to_create,
        deployments_to_update,
        deployments_to_delete,
      )
      .await,
    );
    // stack only depends on server, but maybe will depend on build later.
    maybe_extend(
      &mut update.logs,
      Stack::run_updates(
        stacks_to_create,
        stacks_to_update,
        stacks_to_delete,
      )
      .await,
    );
    // Dependant on everything
    maybe_extend(
      &mut update.logs,
      Procedure::run_updates(
        procedures_to_create,
        procedures_to_update,
        procedures_to_delete,
      )
      .await,
    );
    // Execute the deploy cache
    deploy_from_cache(deploy_cache, &mut update.logs).await;
    // Record the synced commit on the sync resource. Failure here is
    // logged but does not fail the run.
    let db = db_client().await;
    if let Err(e) = update_one_by_id(
      &db.resource_syncs,
      &sync.id,
      doc! {
        "$set": {
          "info.last_sync_ts": komodo_timestamp(),
          "info.last_sync_hash": hash,
          "info.last_sync_message": message,
        }
      },
      None,
    )
    .await
    {
      warn!(
        "failed to update resource sync {} info after sync | {e:#}",
        sync.name
      )
    }
    if let Err(e) = State
      .resolve(
        RefreshResourceSyncPending { sync: sync.id },
        sync_user().to_owned(),
      )
      .await
    {
      warn!("failed to refresh sync {} after run | {e:#}", sync.name);
      update.push_error_log(
        "refresh sync",
        format_serror(
          &e.context("failed to refresh sync pending after run")
            .into(),
        ),
      );
    }
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db.updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_resource_sync_state_cache().await;
    }
    update_update(update.clone()).await?;
    Ok(update)
  }
}
/// Push `log` onto `logs` when it is `Some`; no-op on `None`.
fn maybe_extend(logs: &mut Vec<Log>, log: Option<Log>) {
  // Option<T> iterates over zero or one items, so `extend` covers
  // both cases without the manual `if let` branch.
  logs.extend(log);
}

View File

@@ -1,4 +1,5 @@
pub mod auth;
pub mod execute;
pub mod read;
pub mod user;
pub mod write;

View File

@@ -1,6 +1,5 @@
use anyhow::Context;
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
@@ -14,13 +13,13 @@ use mungos::{
use resolver_api::Resolve;
use crate::{
helpers::resource::StateResource,
config::core_config,
resource::get_resource_ids_for_user,
state::{db_client, State},
};
const NUM_ALERTS_PER_PAGE: u64 = 20;
const NUM_ALERTS_PER_PAGE: u64 = 100;
#[async_trait]
impl Resolve<ListAlerts, User> for State {
async fn resolve(
&self,
@@ -28,11 +27,11 @@ impl Resolve<ListAlerts, User> for State {
user: User,
) -> anyhow::Result<ListAlertsResponse> {
let mut query = query.unwrap_or_default();
if !user.admin {
if !user.admin && !core_config().transparent_mode {
let server_ids =
Server::get_resource_ids_for_non_admin(&user.id).await?;
get_resource_ids_for_user::<Server>(&user).await?;
let deployment_ids =
Deployment::get_resource_ids_for_non_admin(&user.id).await?;
get_resource_ids_for_user::<Deployment>(&user).await?;
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
@@ -65,7 +64,6 @@ impl Resolve<ListAlerts, User> for State {
}
}
#[async_trait]
impl Resolve<GetAlert, User> for State {
async fn resolve(
&self,

View File

@@ -1,34 +1,28 @@
use std::str::FromStr;
use anyhow::Context;
use async_trait::async_trait;
use monitor_client::{
use mongo_indexed::Document;
use komodo_client::{
api::read::*,
entities::{
alerter::{Alerter, AlerterListItem},
permission::PermissionLevel,
update::ResourceTargetVariant,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::resource::{
get_resource_ids_for_non_admin, StateResource,
},
resource,
state::{db_client, State},
};
#[async_trait]
impl Resolve<GetAlerter, User> for State {
async fn resolve(
&self,
GetAlerter { alerter }: GetAlerter,
user: User,
) -> anyhow::Result<Alerter> {
Alerter::get_resource_check_permissions(
resource::get_check_permissions::<Alerter>(
&alerter,
&user,
PermissionLevel::Read,
@@ -37,44 +31,45 @@ impl Resolve<GetAlerter, User> for State {
}
}
#[async_trait]
impl Resolve<ListAlerters, User> for State {
async fn resolve(
&self,
ListAlerters { query }: ListAlerters,
user: User,
) -> anyhow::Result<Vec<AlerterListItem>> {
Alerter::list_resource_list_items_for_user(query, &user).await
resource::list_for_user::<Alerter>(query, &user).await
}
}
impl Resolve<ListFullAlerters, User> for State {
async fn resolve(
&self,
ListFullAlerters { query }: ListFullAlerters,
user: User,
) -> anyhow::Result<ListFullAlertersResponse> {
resource::list_full_for_user::<Alerter>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetAlertersSummary, User> for State {
async fn resolve(
&self,
GetAlertersSummary {}: GetAlertersSummary,
user: User,
) -> anyhow::Result<GetAlertersSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
let query =
match resource::get_resource_ids_for_user::<Alerter>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
Some(query)
};
let total = db_client()
.await
.alerters
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all alerter documents")?;
let res = GetAlertersSummaryResponse {

View File

@@ -1,44 +1,40 @@
use std::{collections::HashMap, str::FromStr, sync::OnceLock};
use std::collections::{HashMap, HashSet};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
use async_trait::async_trait;
use futures::TryStreamExt;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
build::{Build, BuildActionState, BuildListItem},
build::{Build, BuildActionState, BuildListItem, BuildState},
config::core::CoreConfig,
permission::PermissionLevel,
update::{ResourceTargetVariant, UpdateStatus},
update::UpdateStatus,
user::User,
Operation,
},
};
use mungos::{
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId},
options::FindOptions,
},
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::{Resolve, ResolveToString};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::resource::{
get_resource_ids_for_non_admin, StateResource,
resource,
state::{
action_states, build_state_cache, db_client, github_client, State,
},
state::{action_states, db_client, State},
};
#[async_trait]
impl Resolve<GetBuild, User> for State {
async fn resolve(
&self,
GetBuild { build }: GetBuild,
user: User,
) -> anyhow::Result<Build> {
Build::get_resource_check_permissions(
resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Read,
@@ -47,25 +43,33 @@ impl Resolve<GetBuild, User> for State {
}
}
#[async_trait]
impl Resolve<ListBuilds, User> for State {
async fn resolve(
&self,
ListBuilds { query }: ListBuilds,
user: User,
) -> anyhow::Result<Vec<BuildListItem>> {
Build::list_resource_list_items_for_user(query, &user).await
resource::list_for_user::<Build>(query, &user).await
}
}
impl Resolve<ListFullBuilds, User> for State {
async fn resolve(
&self,
ListFullBuilds { query }: ListFullBuilds,
user: User,
) -> anyhow::Result<ListFullBuildsResponse> {
resource::list_full_for_user::<Build>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetBuildActionState, User> for State {
async fn resolve(
&self,
GetBuildActionState { build }: GetBuildActionState,
user: User,
) -> anyhow::Result<BuildActionState> {
let build = Build::get_resource_check_permissions(
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Read,
@@ -81,45 +85,53 @@ impl Resolve<GetBuildActionState, User> for State {
}
}
#[async_trait]
impl Resolve<GetBuildsSummary, User> for State {
async fn resolve(
&self,
GetBuildsSummary {}: GetBuildsSummary,
user: User,
) -> anyhow::Result<GetBuildsSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Build,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let total = db_client()
.await
.builds
.count_documents(query, None)
.await
.context("failed to count all build documents")?;
let res = GetBuildsSummaryResponse {
total: total as u32,
};
let builds = resource::list_full_for_user::<Build>(
Default::default(),
&user,
)
.await
.context("failed to get all builds")?;
let mut res = GetBuildsSummaryResponse::default();
let cache = build_state_cache();
let action_states = action_states();
for build in builds {
res.total += 1;
match (
cache.get(&build.id).await.unwrap_or_default(),
action_states
.build
.get(&build.id)
.await
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.building => {
res.building += 1;
}
(BuildState::Ok, _) => res.ok += 1,
(BuildState::Failed, _) => res.failed += 1,
(BuildState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the building state, since that comes from action states
(BuildState::Building, _) => unreachable!(),
}
}
Ok(res)
}
}
const ONE_DAY_MS: i64 = 86400000;
#[async_trait]
impl Resolve<GetBuildMonthlyStats, User> for State {
async fn resolve(
&self,
@@ -135,16 +147,13 @@ impl Resolve<GetBuildMonthlyStats, User> for State {
let mut build_updates = db_client()
.await
.updates
.find(
doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
"operation": Operation::RunBuild.to_string(),
.find(doc! {
"start_ts": {
"$gte": open_ts,
"$lt": close_ts
},
None,
)
"operation": Operation::RunBuild.to_string(),
})
.await
.context("failed to get updates cursor")?;
@@ -181,22 +190,19 @@ fn ms_to_hour(duration: i64) -> f64 {
duration as f64 / MS_TO_HOUR_DIVISOR
}
const NUM_VERSIONS_PER_PAGE: u64 = 10;
#[async_trait]
impl Resolve<GetBuildVersions, User> for State {
impl Resolve<ListBuildVersions, User> for State {
async fn resolve(
&self,
GetBuildVersions {
ListBuildVersions {
build,
page,
major,
minor,
patch,
}: GetBuildVersions,
limit,
}: ListBuildVersions,
user: User,
) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
let build = Build::get_resource_check_permissions(
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Read,
@@ -227,8 +233,7 @@ impl Resolve<GetBuildVersions, User> for State {
filter,
FindOptions::builder()
.sort(doc! { "_id": -1 })
.limit(NUM_VERSIONS_PER_PAGE as i64)
.skip(page as u64 * NUM_VERSIONS_PER_PAGE)
.limit(limit)
.build(),
)
.await
@@ -242,21 +247,102 @@ impl Resolve<GetBuildVersions, User> for State {
}
}
fn docker_organizations() -> &'static String {
static DOCKER_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
DOCKER_ORGANIZATIONS.get_or_init(|| {
serde_json::to_string(&core_config().docker_organizations)
.expect("failed to serialize docker organizations")
})
}
#[async_trait]
impl ResolveToString<ListDockerOrganizations, User> for State {
async fn resolve_to_string(
impl Resolve<ListCommonBuildExtraArgs, User> for State {
async fn resolve(
&self,
ListDockerOrganizations {}: ListDockerOrganizations,
_: User,
) -> anyhow::Result<String> {
Ok(docker_organizations().clone())
ListCommonBuildExtraArgs { query }: ListCommonBuildExtraArgs,
user: User,
) -> anyhow::Result<ListCommonBuildExtraArgsResponse> {
let builds = resource::list_full_for_user::<Build>(query, &user)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
for build in builds {
for extra_arg in build.config.extra_args {
res.insert(extra_arg);
}
}
let mut res = res.into_iter().collect::<Vec<_>>();
res.sort();
Ok(res)
}
}
impl Resolve<GetBuildWebhookEnabled, User> for State {
async fn resolve(
&self,
GetBuildWebhookEnabled { build }: GetBuildWebhookEnabled,
user: User,
) -> anyhow::Result<GetBuildWebhookEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Read,
)
.await?;
if build.config.git_provider != "github.com"
|| build.config.repo.is_empty()
{
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: true,
});
}
}
Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: false,
})
}
}

View File

@@ -1,35 +1,28 @@
use std::{collections::HashSet, str::FromStr};
use anyhow::Context;
use async_trait::async_trait;
use monitor_client::{
api::read::{self, *},
use mongo_indexed::Document;
use komodo_client::{
api::read::*,
entities::{
builder::{Builder, BuilderConfig, BuilderListItem},
builder::{Builder, BuilderListItem},
permission::PermissionLevel,
update::ResourceTargetVariant,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::resource::{
get_resource_ids_for_non_admin, StateResource,
},
resource,
state::{db_client, State},
};
#[async_trait]
impl Resolve<GetBuilder, User> for State {
async fn resolve(
&self,
GetBuilder { builder }: GetBuilder,
user: User,
) -> anyhow::Result<Builder> {
Builder::get_resource_check_permissions(
resource::get_check_permissions::<Builder>(
&builder,
&user,
PermissionLevel::Read,
@@ -38,44 +31,45 @@ impl Resolve<GetBuilder, User> for State {
}
}
#[async_trait]
impl Resolve<ListBuilders, User> for State {
async fn resolve(
&self,
ListBuilders { query }: ListBuilders,
user: User,
) -> anyhow::Result<Vec<BuilderListItem>> {
Builder::list_resource_list_items_for_user(query, &user).await
resource::list_for_user::<Builder>(query, &user).await
}
}
impl Resolve<ListFullBuilders, User> for State {
async fn resolve(
&self,
ListFullBuilders { query }: ListFullBuilders,
user: User,
) -> anyhow::Result<ListFullBuildersResponse> {
resource::list_full_for_user::<Builder>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetBuildersSummary, User> for State {
async fn resolve(
&self,
GetBuildersSummary {}: GetBuildersSummary,
user: User,
) -> anyhow::Result<GetBuildersSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Builder,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
let query =
match resource::get_resource_ids_for_user::<Builder>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
Some(query)
};
let total = db_client()
.await
.builders
.count_documents(query, None)
.count_documents(query)
.await
.context("failed to count all builder documents")?;
let res = GetBuildersSummaryResponse {
@@ -84,53 +78,3 @@ impl Resolve<GetBuildersSummary, User> for State {
Ok(res)
}
}
#[async_trait]
impl Resolve<GetBuilderAvailableAccounts, User> for State {
async fn resolve(
&self,
GetBuilderAvailableAccounts { builder }: GetBuilderAvailableAccounts,
user: User,
) -> anyhow::Result<GetBuilderAvailableAccountsResponse> {
let builder = Builder::get_resource_check_permissions(
&builder,
&user,
PermissionLevel::Read,
)
.await?;
let (github, docker) = match builder.config {
BuilderConfig::Aws(config) => {
(config.github_accounts, config.docker_accounts)
}
BuilderConfig::Server(config) => {
let res = self
.resolve(
read::GetAvailableAccounts {
server: config.server_id,
},
user,
)
.await?;
(res.github, res.docker)
}
};
let mut github_set = HashSet::<String>::new();
github_set.extend(core_config().github_accounts.keys().cloned());
github_set.extend(github);
let mut github = github_set.into_iter().collect::<Vec<_>>();
github.sort();
let mut docker_set = HashSet::<String>::new();
docker_set.extend(core_config().docker_accounts.keys().cloned());
docker_set.extend(docker);
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
docker.sort();
Ok(GetBuilderAvailableAccountsResponse { github, docker })
}
}

View File

@@ -1,43 +1,36 @@
use std::{cmp, collections::HashSet, str::FromStr};
use std::{cmp, collections::HashSet};
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
deployment::{
Deployment, DeploymentActionState, DeploymentConfig,
DeploymentListItem, DockerContainerState, DockerContainerStats,
DeploymentListItem, DeploymentState,
},
docker::container::ContainerStats,
permission::PermissionLevel,
server::Server,
update::{Log, ResourceTargetVariant},
update::Log,
user::User,
},
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId},
};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client,
resource::{get_resource_ids_for_non_admin, StateResource},
},
state::{action_states, db_client, deployment_status_cache, State},
helpers::periphery_client,
resource,
state::{action_states, deployment_status_cache, State},
};
#[async_trait]
impl Resolve<GetDeployment, User> for State {
async fn resolve(
&self,
GetDeployment { deployment }: GetDeployment,
user: User,
) -> anyhow::Result<Deployment> {
Deployment::get_resource_check_permissions(
resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read,
@@ -46,25 +39,33 @@ impl Resolve<GetDeployment, User> for State {
}
}
#[async_trait]
impl Resolve<ListDeployments, User> for State {
async fn resolve(
&self,
ListDeployments { query }: ListDeployments,
user: User,
) -> anyhow::Result<Vec<DeploymentListItem>> {
Deployment::list_resource_list_items_for_user(query, &user).await
resource::list_for_user::<Deployment>(query, &user).await
}
}
impl Resolve<ListFullDeployments, User> for State {
async fn resolve(
&self,
ListFullDeployments { query }: ListFullDeployments,
user: User,
) -> anyhow::Result<ListFullDeploymentsResponse> {
resource::list_full_for_user::<Deployment>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetDeploymentContainer, User> for State {
async fn resolve(
&self,
GetDeploymentContainer { deployment }: GetDeploymentContainer,
user: User,
) -> anyhow::Result<GetDeploymentContainerResponse> {
let deployment = Deployment::get_resource_check_permissions(
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read,
@@ -84,18 +85,17 @@ impl Resolve<GetDeploymentContainer, User> for State {
const MAX_LOG_LENGTH: u64 = 5000;
#[async_trait]
impl Resolve<GetLog, User> for State {
impl Resolve<GetDeploymentLog, User> for State {
async fn resolve(
&self,
GetLog { deployment, tail }: GetLog,
GetDeploymentLog { deployment, tail }: GetDeploymentLog,
user: User,
) -> anyhow::Result<Log> {
let Deployment {
name,
config: DeploymentConfig { server_id, .. },
..
} = Deployment::get_resource_check_permissions(
} = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read,
@@ -104,7 +104,7 @@ impl Resolve<GetLog, User> for State {
if server_id.is_empty() {
return Ok(Log::default());
}
let server = Server::get_resource(&server_id).await?;
let server = resource::get::<Server>(&server_id).await?;
periphery_client(&server)?
.request(api::container::GetContainerLog {
name,
@@ -115,22 +115,22 @@ impl Resolve<GetLog, User> for State {
}
}
#[async_trait]
impl Resolve<SearchLog, User> for State {
impl Resolve<SearchDeploymentLog, User> for State {
async fn resolve(
&self,
SearchLog {
SearchDeploymentLog {
deployment,
terms,
combinator,
}: SearchLog,
invert,
}: SearchDeploymentLog,
user: User,
) -> anyhow::Result<Log> {
let Deployment {
name,
config: DeploymentConfig { server_id, .. },
..
} = Deployment::get_resource_check_permissions(
} = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read,
@@ -139,30 +139,30 @@ impl Resolve<SearchLog, User> for State {
if server_id.is_empty() {
return Ok(Log::default());
}
let server = Server::get_resource(&server_id).await?;
let server = resource::get::<Server>(&server_id).await?;
periphery_client(&server)?
.request(api::container::GetContainerLogSearch {
name,
terms,
combinator,
invert,
})
.await
.context("failed at call to periphery")
}
}
#[async_trait]
impl Resolve<GetDeploymentStats, User> for State {
async fn resolve(
&self,
GetDeploymentStats { deployment }: GetDeploymentStats,
user: User,
) -> anyhow::Result<DockerContainerStats> {
) -> anyhow::Result<ContainerStats> {
let Deployment {
name,
config: DeploymentConfig { server_id, .. },
..
} = Deployment::get_resource_check_permissions(
} = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read,
@@ -171,7 +171,7 @@ impl Resolve<GetDeploymentStats, User> for State {
if server_id.is_empty() {
return Err(anyhow!("deployment has no server attached"));
}
let server = Server::get_resource(&server_id).await?;
let server = resource::get::<Server>(&server_id).await?;
periphery_client(&server)?
.request(api::container::GetContainerStats { name })
.await
@@ -179,14 +179,13 @@ impl Resolve<GetDeploymentStats, User> for State {
}
}
#[async_trait]
impl Resolve<GetDeploymentActionState, User> for State {
async fn resolve(
&self,
GetDeploymentActionState { deployment }: GetDeploymentActionState,
user: User,
) -> anyhow::Result<DeploymentActionState> {
let deployment = Deployment::get_resource_check_permissions(
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read,
@@ -202,34 +201,18 @@ impl Resolve<GetDeploymentActionState, User> for State {
}
}
#[async_trait]
impl Resolve<GetDeploymentsSummary, User> for State {
async fn resolve(
&self,
GetDeploymentsSummary {}: GetDeploymentsSummary,
user: User,
) -> anyhow::Result<GetDeploymentsSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Deployment,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let deployments =
find_collect(&db_client().await.deployments, query, None)
.await
.context("failed to count all deployment documents")?;
let deployments = resource::list_full_for_user::<Deployment>(
Default::default(),
&user,
)
.await
.context("failed to get deployments from db")?;
let mut res = GetDeploymentsSummaryResponse::default();
let status_cache = deployment_status_cache();
for deployment in deployments {
@@ -237,13 +220,13 @@ impl Resolve<GetDeploymentsSummary, User> for State {
let status =
status_cache.get(&deployment.id).await.unwrap_or_default();
match status.curr.state {
DockerContainerState::Running => {
DeploymentState::Running => {
res.running += 1;
}
DockerContainerState::Unknown => {
DeploymentState::Unknown => {
res.unknown += 1;
}
DockerContainerState::NotDeployed => {
DeploymentState::NotDeployed => {
res.not_deployed += 1;
}
_ => {
@@ -255,15 +238,14 @@ impl Resolve<GetDeploymentsSummary, User> for State {
}
}
#[async_trait]
impl Resolve<ListCommonExtraArgs, User> for State {
impl Resolve<ListCommonDeploymentExtraArgs, User> for State {
async fn resolve(
&self,
ListCommonExtraArgs { query }: ListCommonExtraArgs,
ListCommonDeploymentExtraArgs { query }: ListCommonDeploymentExtraArgs,
user: User,
) -> anyhow::Result<ListCommonExtraArgsResponse> {
) -> anyhow::Result<ListCommonDeploymentExtraArgsResponse> {
let deployments =
Deployment::list_resources_for_user(query, &user)
resource::list_full_for_user::<Deployment>(query, &user)
.await
.context("failed to get resources matching query")?;
@@ -276,6 +258,8 @@ impl Resolve<ListCommonExtraArgs, User> for State {
}
}
Ok(res.into_iter().collect())
let mut res = res.into_iter().collect::<Vec<_>>();
res.sort();
Ok(res)
}
}

View File

@@ -1,10 +1,21 @@
use std::time::Instant;
use std::{collections::HashSet, sync::OnceLock, time::Instant};
use anyhow::anyhow;
use async_trait::async_trait;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::read::*, entities::user::User};
use komodo_client::{
api::read::*,
entities::{
build::Build,
builder::{Builder, BuilderConfig},
config::{DockerRegistry, GitProvider},
repo::Repo,
server::Server,
sync::ResourceSync,
user::User,
ResourceTarget,
},
};
use resolver_api::{
derive::Resolver, Resolve, ResolveToString, Resolver,
};
@@ -13,7 +24,10 @@ use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{auth::auth_request, config::core_config, state::State};
use crate::{
auth::auth_request, config::core_config, helpers::periphery_client,
resource, state::State,
};
mod alert;
mod alerter;
@@ -22,15 +36,19 @@ mod builder;
mod deployment;
mod permission;
mod procedure;
mod provider;
mod repo;
mod search;
mod server;
mod server_template;
mod stack;
mod sync;
mod tag;
mod toml;
mod update;
mod user;
mod user_group;
mod variable;
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
@@ -38,16 +56,24 @@ mod user_group;
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum ReadRequest {
#[to_string_resolver]
GetVersion(GetVersion),
#[to_string_resolver]
GetCoreInfo(GetCoreInfo),
#[to_string_resolver]
ListAwsEcrLabels(ListAwsEcrLabels),
ListSecrets(ListSecrets),
ListGitProvidersFromConfig(ListGitProvidersFromConfig),
ListDockerRegistriesFromConfig(ListDockerRegistriesFromConfig),
// ==== USER ====
ListUsers(ListUsers),
GetUsername(GetUsername),
GetPermissionLevel(GetPermissionLevel),
FindUser(FindUser),
ListUsers(ListUsers),
ListApiKeys(ListApiKeys),
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
ListPermissions(ListPermissions),
GetPermissionLevel(GetPermissionLevel),
ListUserTargetPermissions(ListUserTargetPermissions),
// ==== USER GROUP ====
@@ -62,63 +88,105 @@ enum ReadRequest {
GetProcedure(GetProcedure),
GetProcedureActionState(GetProcedureActionState),
ListProcedures(ListProcedures),
ListFullProcedures(ListFullProcedures),
// ==== SERVER TEMPLATE ====
GetServerTemplate(GetServerTemplate),
GetServerTemplatesSummary(GetServerTemplatesSummary),
ListServerTemplates(ListServerTemplates),
GetServerTemplateSummary(GetServerTemplatesSummary),
ListFullServerTemplates(ListFullServerTemplates),
// ==== SERVER ====
GetServersSummary(GetServersSummary),
GetServer(GetServer),
ListServers(ListServers),
GetServerStatus(GetServerStatus),
GetServerState(GetServerState),
GetPeripheryVersion(GetPeripheryVersion),
GetDockerContainers(GetDockerContainers),
GetDockerImages(GetDockerImages),
GetDockerNetworks(GetDockerNetworks),
GetServerActionState(GetServerActionState),
GetHistoricalServerStats(GetHistoricalServerStats),
GetAvailableAccounts(GetAvailableAccounts),
GetAvailableSecrets(GetAvailableSecrets),
ListServers(ListServers),
ListFullServers(ListFullServers),
InspectDockerContainer(InspectDockerContainer),
GetResourceMatchingContainer(GetResourceMatchingContainer),
GetContainerLog(GetContainerLog),
SearchContainerLog(SearchContainerLog),
InspectDockerNetwork(InspectDockerNetwork),
InspectDockerImage(InspectDockerImage),
ListDockerImageHistory(ListDockerImageHistory),
InspectDockerVolume(InspectDockerVolume),
#[to_string_resolver]
ListDockerContainers(ListDockerContainers),
#[to_string_resolver]
ListDockerNetworks(ListDockerNetworks),
#[to_string_resolver]
ListDockerImages(ListDockerImages),
#[to_string_resolver]
ListDockerVolumes(ListDockerVolumes),
#[to_string_resolver]
ListComposeProjects(ListComposeProjects),
// ==== DEPLOYMENT ====
GetDeploymentsSummary(GetDeploymentsSummary),
GetDeployment(GetDeployment),
ListDeployments(ListDeployments),
GetDeploymentContainer(GetDeploymentContainer),
GetDeploymentActionState(GetDeploymentActionState),
GetDeploymentStats(GetDeploymentStats),
GetLog(GetLog),
SearchLog(SearchLog),
ListCommonExtraArgs(ListCommonExtraArgs),
GetDeploymentLog(GetDeploymentLog),
SearchDeploymentLog(SearchDeploymentLog),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
// ==== BUILD ====
GetBuildsSummary(GetBuildsSummary),
GetBuild(GetBuild),
ListBuilds(ListBuilds),
GetBuildActionState(GetBuildActionState),
GetBuildMonthlyStats(GetBuildMonthlyStats),
GetBuildVersions(GetBuildVersions),
#[to_string_resolver]
ListDockerOrganizations(ListDockerOrganizations),
ListBuildVersions(ListBuildVersions),
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
// ==== REPO ====
GetReposSummary(GetReposSummary),
GetRepo(GetRepo),
ListRepos(ListRepos),
GetRepoActionState(GetRepoActionState),
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
ListRepos(ListRepos),
ListFullRepos(ListFullRepos),
// ==== SYNC ====
GetResourceSyncsSummary(GetResourceSyncsSummary),
GetResourceSync(GetResourceSync),
GetResourceSyncActionState(GetResourceSyncActionState),
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
ListResourceSyncs(ListResourceSyncs),
ListFullResourceSyncs(ListFullResourceSyncs),
// ==== STACK ====
GetStacksSummary(GetStacksSummary),
GetStack(GetStack),
GetStackActionState(GetStackActionState),
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
GetStackServiceLog(GetStackServiceLog),
SearchStackServiceLog(SearchStackServiceLog),
ListStacks(ListStacks),
ListFullStacks(ListFullStacks),
ListStackServices(ListStackServices),
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
// ==== BUILDER ====
GetBuildersSummary(GetBuildersSummary),
GetBuilder(GetBuilder),
ListBuilders(ListBuilders),
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
ListFullBuilders(ListFullBuilders),
// ==== ALERTER ====
GetAlertersSummary(GetAlertersSummary),
GetAlerter(GetAlerter),
ListAlerters(ListAlerters),
ListFullAlerters(ListFullAlerters),
// ==== TOML ====
ExportAllResourcesToToml(ExportAllResourcesToToml),
@@ -142,7 +210,17 @@ enum ReadRequest {
#[to_string_resolver]
GetSystemStats(GetSystemStats),
#[to_string_resolver]
GetSystemProcesses(GetSystemProcesses),
ListSystemProcesses(ListSystemProcesses),
// ==== VARIABLE ====
GetVariable(GetVariable),
ListVariables(ListVariables),
// ==== PROVIDER ====
GetGitProviderAccount(GetGitProviderAccount),
ListGitProviderAccounts(ListGitProviderAccounts),
GetDockerRegistryAccount(GetDockerRegistryAccount),
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
}
pub fn router() -> Router {
@@ -151,17 +229,14 @@ pub fn router() -> Router {
.layer(middleware::from_fn(auth_request))
}
#[instrument(name = "ReadHandler", level = "debug", skip(user))]
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ReadRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
debug!(
"/read request {req_id} | user: {} ({})",
user.username, user.id
);
debug!("/read request | user: {}", user.username);
let res =
State
.resolve_request(request, user)
@@ -173,43 +248,351 @@ async fn handler(
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/read request {req_id} error: {e:#}");
debug!("/read request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/read request {req_id} | resolve time: {elapsed:?}");
Ok((TypedHeader(ContentType::json()), res?))
}
#[async_trait]
impl Resolve<GetVersion, User> for State {
#[instrument(name = "GetVersion", level = "debug", skip(self))]
async fn resolve(
fn version() -> &'static String {
static VERSION: OnceLock<String> = OnceLock::new();
VERSION.get_or_init(|| {
serde_json::to_string(&GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
.context("failed to serialize GetVersionResponse")
.unwrap()
})
}
impl ResolveToString<GetVersion, User> for State {
async fn resolve_to_string(
&self,
GetVersion {}: GetVersion,
_: User,
) -> anyhow::Result<GetVersionResponse> {
Ok(GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
) -> anyhow::Result<String> {
Ok(version().to_string())
}
}
#[async_trait]
impl Resolve<GetCoreInfo, User> for State {
#[instrument(name = "GetCoreInfo", level = "debug", skip(self))]
async fn resolve(
fn core_info() -> &'static String {
static CORE_INFO: OnceLock<String> = OnceLock::new();
CORE_INFO.get_or_init(|| {
let config = core_config();
let info = GetCoreInfoResponse {
title: config.title.clone(),
monitoring_interval: config.monitoring_interval,
webhook_base_url: config
.webhook_base_url
.clone()
.unwrap_or_else(|| config.host.clone()),
transparent_mode: config.transparent_mode,
ui_write_disabled: config.ui_write_disabled,
github_webhook_owners: config
.github_webhook_app
.installations
.iter()
.map(|i| i.namespace.to_string())
.collect(),
};
serde_json::to_string(&info)
.context("failed to serialize GetCoreInfoResponse")
.unwrap()
})
}
impl ResolveToString<GetCoreInfo, User> for State {
async fn resolve_to_string(
&self,
GetCoreInfo {}: GetCoreInfo,
_: User,
) -> anyhow::Result<GetCoreInfoResponse> {
let config = core_config();
Ok(GetCoreInfoResponse {
title: config.title.clone(),
monitoring_interval: config.monitoring_interval,
github_webhook_base_url: config
.github_webhook_base_url
.clone()
.unwrap_or_else(|| config.host.clone()),
})
) -> anyhow::Result<String> {
Ok(core_info().to_string())
}
}
fn ecr_labels() -> &'static String {
static ECR_LABELS: OnceLock<String> = OnceLock::new();
ECR_LABELS.get_or_init(|| {
serde_json::to_string(
&core_config()
.aws_ecr_registries
.iter()
.map(|reg| reg.label.clone())
.collect::<Vec<_>>(),
)
.context("failed to serialize ecr registries")
.unwrap()
})
}
impl ResolveToString<ListAwsEcrLabels, User> for State {
async fn resolve_to_string(
&self,
ListAwsEcrLabels {}: ListAwsEcrLabels,
_: User,
) -> anyhow::Result<String> {
Ok(ecr_labels().to_string())
}
}
impl Resolve<ListSecrets, User> for State {
async fn resolve(
&self,
ListSecrets { target }: ListSecrets,
_: User,
) -> anyhow::Result<ListSecretsResponse> {
let mut secrets = core_config()
.secrets
.keys()
.cloned()
.collect::<HashSet<_>>();
if let Some(target) = target {
let server_id = match target {
ResourceTarget::Server(id) => Some(id),
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => Some(config.server_id),
BuilderConfig::Aws(config) => {
secrets.extend(config.secrets);
None
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
};
if let Some(id) = server_id {
let server = resource::get::<Server>(&id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListSecrets {})
.await
.with_context(|| {
format!(
"failed to get secrets from server {}",
server.name
)
})?;
secrets.extend(more);
}
}
let mut secrets = secrets.into_iter().collect::<Vec<_>>();
secrets.sort();
Ok(secrets)
}
}
impl Resolve<ListGitProvidersFromConfig, User> for State {
async fn resolve(
&self,
ListGitProvidersFromConfig { target }: ListGitProvidersFromConfig,
user: User,
) -> anyhow::Result<ListGitProvidersFromConfigResponse> {
let mut providers = core_config().git_providers.clone();
if let Some(target) = target {
match target {
ResourceTarget::Server(id) => {
merge_git_providers_for_server(&mut providers, &id).await?;
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => {
merge_git_providers_for_server(
&mut providers,
&config.server_id,
)
.await?;
}
BuilderConfig::Aws(config) => {
merge_git_providers(
&mut providers,
config.git_providers,
);
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
}
}
let (builds, repos, syncs) = tokio::try_join!(
resource::list_full_for_user::<Build>(
Default::default(),
&user
),
resource::list_full_for_user::<Repo>(Default::default(), &user),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user
),
)?;
for build in builds {
if !providers
.iter()
.any(|provider| provider.domain == build.config.git_provider)
{
providers.push(GitProvider {
domain: build.config.git_provider,
https: build.config.git_https,
accounts: Default::default(),
});
}
}
for repo in repos {
if !providers
.iter()
.any(|provider| provider.domain == repo.config.git_provider)
{
providers.push(GitProvider {
domain: repo.config.git_provider,
https: repo.config.git_https,
accounts: Default::default(),
});
}
}
for sync in syncs {
if !providers
.iter()
.any(|provider| provider.domain == sync.config.git_provider)
{
providers.push(GitProvider {
domain: sync.config.git_provider,
https: sync.config.git_https,
accounts: Default::default(),
});
}
}
providers.sort();
Ok(providers)
}
}
impl Resolve<ListDockerRegistriesFromConfig, User> for State {
async fn resolve(
&self,
ListDockerRegistriesFromConfig { target }: ListDockerRegistriesFromConfig,
_: User,
) -> anyhow::Result<ListDockerRegistriesFromConfigResponse> {
let mut registries = core_config().docker_registries.clone();
if let Some(target) = target {
match target {
ResourceTarget::Server(id) => {
merge_docker_registries_for_server(&mut registries, &id)
.await?;
}
ResourceTarget::Builder(id) => {
match resource::get::<Builder>(&id).await?.config {
BuilderConfig::Server(config) => {
merge_docker_registries_for_server(
&mut registries,
&config.server_id,
)
.await?;
}
BuilderConfig::Aws(config) => {
merge_docker_registries(
&mut registries,
config.docker_registries,
);
}
}
}
_ => {
return Err(anyhow!("target must be `Server` or `Builder`"))
}
}
}
registries.sort();
Ok(registries)
}
}
async fn merge_git_providers_for_server(
providers: &mut Vec<GitProvider>,
server_id: &str,
) -> anyhow::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListGitProviders {})
.await
.with_context(|| {
format!(
"failed to get git providers from server {}",
server.name
)
})?;
merge_git_providers(providers, more);
Ok(())
}
fn merge_git_providers(
providers: &mut Vec<GitProvider>,
more: Vec<GitProvider>,
) {
for incoming_provider in more {
if let Some(provider) = providers
.iter_mut()
.find(|provider| provider.domain == incoming_provider.domain)
{
for account in incoming_provider.accounts {
if !provider.accounts.contains(&account) {
provider.accounts.push(account);
}
}
} else {
providers.push(incoming_provider);
}
}
}
async fn merge_docker_registries_for_server(
registries: &mut Vec<DockerRegistry>,
server_id: &str,
) -> anyhow::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
.request(periphery_client::api::ListDockerRegistries {})
.await
.with_context(|| {
format!(
"failed to get docker registries from server {}",
server.name
)
})?;
merge_docker_registries(registries, more);
Ok(())
}
fn merge_docker_registries(
registries: &mut Vec<DockerRegistry>,
more: Vec<DockerRegistry>,
) {
for incoming_registry in more {
if let Some(registry) = registries
.iter_mut()
.find(|registry| registry.domain == incoming_registry.domain)
{
for account in incoming_registry.accounts {
if !registry.accounts.contains(&account) {
registry.accounts.push(account);
}
}
} else {
registries.push(incoming_registry);
}
}
}

View File

@@ -1,6 +1,5 @@
use anyhow::{anyhow, Context};
use axum::async_trait;
use monitor_client::{
use komodo_client::{
api::read::{
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
ListPermissionsResponse, ListUserTargetPermissions,
@@ -12,11 +11,10 @@ use mungos::{find::find_collect, mongodb::bson::doc};
use resolver_api::Resolve;
use crate::{
helpers::resource::get_user_permission_on_resource,
helpers::query::get_user_permission_on_target,
state::{db_client, State},
};
#[async_trait]
impl Resolve<ListPermissions, User> for State {
async fn resolve(
&self,
@@ -36,7 +34,6 @@ impl Resolve<ListPermissions, User> for State {
}
}
#[async_trait]
impl Resolve<GetPermissionLevel, User> for State {
async fn resolve(
&self,
@@ -46,12 +43,10 @@ impl Resolve<GetPermissionLevel, User> for State {
if user.admin {
return Ok(PermissionLevel::Write);
}
let (variant, id) = target.extract_variant_id();
get_user_permission_on_resource(&user.id, variant, id).await
get_user_permission_on_target(&user, &target).await
}
}
#[async_trait]
impl Resolve<ListUserTargetPermissions, User> for State {
async fn resolve(
&self,

View File

@@ -1,37 +1,26 @@
use std::str::FromStr;
use anyhow::Context;
use async_trait::async_trait;
use monitor_client::{
api::read::{
GetProcedure, GetProcedureActionState,
GetProcedureActionStateResponse, GetProcedureResponse,
GetProceduresSummary, GetProceduresSummaryResponse,
ListProcedures, ListProceduresResponse,
},
use komodo_client::{
api::read::*,
entities::{
permission::PermissionLevel, procedure::Procedure,
update::ResourceTargetVariant, user::User,
permission::PermissionLevel,
procedure::{Procedure, ProcedureState},
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
helpers::resource::{
get_resource_ids_for_non_admin, StateResource,
},
state::{action_states, db_client, State},
resource,
state::{action_states, procedure_state_cache, State},
};
#[async_trait]
impl Resolve<GetProcedure, User> for State {
async fn resolve(
&self,
GetProcedure { procedure }: GetProcedure,
user: User,
) -> anyhow::Result<GetProcedureResponse> {
Procedure::get_resource_check_permissions(
resource::get_check_permissions::<Procedure>(
&procedure,
&user,
PermissionLevel::Read,
@@ -40,61 +29,78 @@ impl Resolve<GetProcedure, User> for State {
}
}
#[async_trait]
impl Resolve<ListProcedures, User> for State {
async fn resolve(
&self,
ListProcedures { query }: ListProcedures,
user: User,
) -> anyhow::Result<ListProceduresResponse> {
Procedure::list_resource_list_items_for_user(query, &user).await
resource::list_for_user::<Procedure>(query, &user).await
}
}
impl Resolve<ListFullProcedures, User> for State {
async fn resolve(
&self,
ListFullProcedures { query }: ListFullProcedures,
user: User,
) -> anyhow::Result<ListFullProceduresResponse> {
resource::list_full_for_user::<Procedure>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetProceduresSummary, User> for State {
async fn resolve(
&self,
GetProceduresSummary {}: GetProceduresSummary,
user: User,
) -> anyhow::Result<GetProceduresSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Procedure,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let total = db_client()
.await
.procedures
.count_documents(query, None)
.await
.context("failed to count all procedure documents")?;
let res = GetProceduresSummaryResponse {
total: total as u32,
};
let procedures = resource::list_full_for_user::<Procedure>(
Default::default(),
&user,
)
.await
.context("failed to get procedures from db")?;
let mut res = GetProceduresSummaryResponse::default();
let cache = procedure_state_cache();
let action_states = action_states();
for procedure in procedures {
res.total += 1;
match (
cache.get(&procedure.id).await.unwrap_or_default(),
action_states
.procedure
.get(&procedure.id)
.await
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.running => {
res.running += 1;
}
(ProcedureState::Ok, _) => res.ok += 1,
(ProcedureState::Failed, _) => res.failed += 1,
(ProcedureState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the running state, since that comes from action states
(ProcedureState::Running, _) => unreachable!(),
}
}
Ok(res)
}
}
#[async_trait]
impl Resolve<GetProcedureActionState, User> for State {
async fn resolve(
&self,
GetProcedureActionState { procedure }: GetProcedureActionState,
user: User,
) -> anyhow::Result<GetProcedureActionStateResponse> {
let procedure = Procedure::get_resource_check_permissions(
let procedure = resource::get_check_permissions::<Procedure>(
&procedure,
&user,
PermissionLevel::Read,

View File

@@ -0,0 +1,116 @@
use anyhow::{anyhow, Context};
use mongo_indexed::{doc, Document};
use komodo_client::{
api::read::{
GetDockerRegistryAccount, GetDockerRegistryAccountResponse,
GetGitProviderAccount, GetGitProviderAccountResponse,
ListDockerRegistryAccounts, ListDockerRegistryAccountsResponse,
ListGitProviderAccounts, ListGitProviderAccountsResponse,
},
entities::user::User,
};
use mungos::{
by_id::find_one_by_id, find::find_collect,
mongodb::options::FindOptions,
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
impl Resolve<GetGitProviderAccount, User> for State {
  /// Look up a single git provider account by id.
  /// Git provider accounts hold credentials, so reads are admin-only.
  async fn resolve(
    &self,
    GetGitProviderAccount { id }: GetGitProviderAccount,
    user: User,
  ) -> anyhow::Result<GetGitProviderAccountResponse> {
    if !user.admin {
      return Err(anyhow!(
        "Only admins can read git provider accounts"
      ));
    }
    let db = db_client().await;
    let account = find_one_by_id(&db.git_accounts, &id)
      .await
      .context("failed to query db for git provider accounts")?;
    // A successful query can still come back empty — surface that
    // as its own error rather than a generic db failure.
    account
      .context("did not find git provider account with the given id")
  }
}
impl Resolve<ListGitProviderAccounts, User> for State {
  /// List git provider accounts, optionally filtered by domain and/or
  /// username, sorted by (domain, username). Admin-only.
  async fn resolve(
    &self,
    ListGitProviderAccounts { domain, username }: ListGitProviderAccounts,
    user: User,
  ) -> anyhow::Result<ListGitProviderAccountsResponse> {
    if !user.admin {
      return Err(anyhow!(
        "Only admins can read git provider accounts"
      ));
    }
    // Build the mongo filter from whichever optional fields were given.
    let mut query = Document::new();
    if let Some(domain) = domain {
      query.insert("domain", domain);
    }
    if let Some(username) = username {
      query.insert("username", username);
    }
    let options = FindOptions::builder()
      .sort(doc! { "domain": 1, "username": 1 })
      .build();
    find_collect(&db_client().await.git_accounts, query, options)
      .await
      .context("failed to query db for git provider accounts")
  }
}
impl Resolve<GetDockerRegistryAccount, User> for State {
  /// Look up a single docker registry account by id.
  /// Registry accounts hold credentials, so reads are admin-only.
  async fn resolve(
    &self,
    GetDockerRegistryAccount { id }: GetDockerRegistryAccount,
    user: User,
  ) -> anyhow::Result<GetDockerRegistryAccountResponse> {
    if !user.admin {
      return Err(anyhow!(
        "Only admins can read docker registry accounts"
      ));
    }
    let db = db_client().await;
    let account = find_one_by_id(&db.registry_accounts, &id)
      .await
      .context("failed to query db for docker registry accounts")?;
    // Distinguish "not found" from an actual db error.
    account.context(
      "did not find docker registry account with the given id",
    )
  }
}
impl Resolve<ListDockerRegistryAccounts, User> for State {
  /// List docker registry accounts, optionally filtered by domain and/or
  /// username, sorted by (domain, username). Admin-only.
  async fn resolve(
    &self,
    ListDockerRegistryAccounts { domain, username }: ListDockerRegistryAccounts,
    user: User,
  ) -> anyhow::Result<ListDockerRegistryAccountsResponse> {
    if !user.admin {
      return Err(anyhow!(
        "Only admins can read docker registry accounts"
      ));
    }
    // Build the mongo filter from whichever optional fields were given.
    let mut query = Document::new();
    if let Some(domain) = domain {
      query.insert("domain", domain);
    }
    if let Some(username) = username {
      query.insert("username", username);
    }
    let options = FindOptions::builder()
      .sort(doc! { "domain": 1, "username": 1 })
      .build();
    find_collect(&db_client().await.registry_accounts, query, options)
      .await
      .context("failed to query db for docker registry accounts")
  }
}

View File

@@ -1,34 +1,28 @@
use std::str::FromStr;
use anyhow::Context;
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{Repo, RepoActionState, RepoListItem},
update::ResourceTargetVariant,
repo::{Repo, RepoActionState, RepoListItem, RepoState},
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
helpers::resource::{
get_resource_ids_for_non_admin, StateResource,
},
state::{action_states, db_client, State},
config::core_config,
resource,
state::{action_states, github_client, repo_state_cache, State},
};
#[async_trait]
impl Resolve<GetRepo, User> for State {
async fn resolve(
&self,
GetRepo { repo }: GetRepo,
user: User,
) -> anyhow::Result<Repo> {
Repo::get_resource_check_permissions(
resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Read,
@@ -37,25 +31,33 @@ impl Resolve<GetRepo, User> for State {
}
}
#[async_trait]
impl Resolve<ListRepos, User> for State {
async fn resolve(
&self,
ListRepos { query }: ListRepos,
user: User,
) -> anyhow::Result<Vec<RepoListItem>> {
Repo::list_resource_list_items_for_user(query, &user).await
resource::list_for_user::<Repo>(query, &user).await
}
}
impl Resolve<ListFullRepos, User> for State {
async fn resolve(
&self,
ListFullRepos { query }: ListFullRepos,
user: User,
) -> anyhow::Result<ListFullReposResponse> {
resource::list_full_for_user::<Repo>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetRepoActionState, User> for State {
async fn resolve(
&self,
GetRepoActionState { repo }: GetRepoActionState,
user: User,
) -> anyhow::Result<RepoActionState> {
let repo = Repo::get_resource_check_permissions(
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Read,
@@ -71,38 +73,153 @@ impl Resolve<GetRepoActionState, User> for State {
}
}
#[async_trait]
impl Resolve<GetReposSummary, User> for State {
async fn resolve(
&self,
GetReposSummary {}: GetReposSummary,
user: User,
) -> anyhow::Result<GetReposSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let total = db_client()
.await
.repos
.count_documents(query, None)
.await
.context("failed to count all build documents")?;
let res = GetReposSummaryResponse {
total: total as u32,
};
let repos =
resource::list_full_for_user::<Repo>(Default::default(), &user)
.await
.context("failed to get repos from db")?;
let mut res = GetReposSummaryResponse::default();
let cache = repo_state_cache();
let action_states = action_states();
for repo in repos {
res.total += 1;
match (
cache.get(&repo.id).await.unwrap_or_default(),
action_states
.repo
.get(&repo.id)
.await
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.cloning => {
res.cloning += 1;
}
(_, action_states) if action_states.pulling => {
res.pulling += 1;
}
(_, action_states) if action_states.building => {
res.building += 1;
}
(RepoState::Ok, _) => res.ok += 1,
(RepoState::Failed, _) => res.failed += 1,
(RepoState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the building state, since that comes from action states
(RepoState::Cloning, _)
| (RepoState::Pulling, _)
| (RepoState::Building, _) => {
unreachable!()
}
}
}
Ok(res)
}
}
impl Resolve<GetRepoWebhooksEnabled, User> for State {
async fn resolve(
&self,
GetRepoWebhooksEnabled { repo }: GetRepoWebhooksEnabled,
user: User,
) -> anyhow::Result<GetRepoWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Read,
)
.await?;
if repo.config.git_provider != "github.com"
|| repo.config.repo.is_empty()
{
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let clone_url =
format!("{host}/listener/github/repo/{}/clone", repo.id);
let pull_url =
format!("{host}/listener/github/repo/{}/pull", repo.id);
let build_url =
format!("{host}/listener/github/repo/{}/build", repo.id);
let mut clone_enabled = false;
let mut pull_enabled = false;
let mut build_enabled = false;
for webhook in webhooks {
if !webhook.active {
continue;
}
if webhook.config.url == clone_url {
clone_enabled = true
}
if webhook.config.url == pull_url {
pull_enabled = true
}
if webhook.config.url == build_url {
build_enabled = true
}
}
Ok(GetRepoWebhooksEnabledResponse {
managed: true,
clone_enabled,
pull_enabled,
build_enabled,
})
}
}

View File

@@ -1,20 +1,22 @@
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::{FindResources, FindResourcesResponse},
entities::{
build, deployment, procedure, repo, server,
update::ResourceTargetVariant::{self, *},
user::User,
build::Build, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, user::User, ResourceTargetVariant,
},
};
use resolver_api::Resolve;
use crate::{helpers::resource::StateResource, state::State};
use crate::{resource, state::State};
const FIND_RESOURCE_TYPES: [ResourceTargetVariant; 5] =
[Server, Build, Deployment, Repo, Procedure];
const FIND_RESOURCE_TYPES: [ResourceTargetVariant; 5] = [
ResourceTargetVariant::Server,
ResourceTargetVariant::Build,
ResourceTargetVariant::Deployment,
ResourceTargetVariant::Repo,
ResourceTargetVariant::Procedure,
];
#[async_trait]
impl Resolve<FindResources, User> for State {
async fn resolve(
&self,
@@ -27,49 +29,50 @@ impl Resolve<FindResources, User> for State {
} else {
resources
.into_iter()
.filter(|r| !matches!(r, System | Builder | Alerter))
.filter(|r| {
!matches!(
r,
ResourceTargetVariant::System
| ResourceTargetVariant::Builder
| ResourceTargetVariant::Alerter
)
})
.collect()
};
for resource_type in resource_types {
match resource_type {
Server => {
res.servers =
server::Server::query_resource_list_items_for_user(
query.clone(),
&user,
)
.await?;
ResourceTargetVariant::Server => {
res.servers = resource::list_for_user_using_document::<
Server,
>(query.clone(), &user)
.await?;
}
Deployment => {
res.deployments =
deployment::Deployment::query_resource_list_items_for_user(
query.clone(),
&user,
)
.await?;
ResourceTargetVariant::Deployment => {
res.deployments = resource::list_for_user_using_document::<
Deployment,
>(query.clone(), &user)
.await?;
}
Build => {
ResourceTargetVariant::Build => {
res.builds =
build::Build::query_resource_list_items_for_user(
resource::list_for_user_using_document::<Build>(
query.clone(),
&user,
)
.await?;
}
Repo => {
res.repos = repo::Repo::query_resource_list_items_for_user(
ResourceTargetVariant::Repo => {
res.repos = resource::list_for_user_using_document::<Repo>(
query.clone(),
&user,
)
.await?;
}
Procedure => {
res.procedures =
procedure::Procedure::query_resource_list_items_for_user(
query.clone(),
&user,
)
.await?;
ResourceTargetVariant::Procedure => {
res.procedures = resource::list_for_user_using_document::<
Procedure,
>(query.clone(), &user)
.await?;
}
_ => {}
}

View File

@@ -1,5 +1,6 @@
use std::{
collections::{HashMap, HashSet},
cmp,
collections::HashMap,
sync::{Arc, OnceLock},
};
@@ -7,54 +8,66 @@ use anyhow::{anyhow, Context};
use async_timing_util::{
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
deployment::ContainerSummary,
deployment::Deployment,
docker::{
container::Container,
image::{Image, ImageHistoryResponseItem},
network::Network,
volume::Volume,
},
permission::PermissionLevel,
server::{
docker_image::ImageSummary, docker_network::DockerNetwork,
Server, ServerActionState, ServerListItem, ServerStatus,
Server, ServerActionState, ServerListItem, ServerState,
},
stack::{Stack, StackServiceNames},
update::Log,
user::User,
ResourceTarget,
},
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use periphery_client::api::{self, GetAccountsResponse};
use periphery_client::api::{
self as periphery,
container::InspectContainer,
image::{ImageHistory, InspectImage},
network::InspectNetwork,
volume::InspectVolume,
};
use resolver_api::{Resolve, ResolveToString};
use tokio::sync::Mutex;
use crate::{
config::core_config, helpers::{periphery_client, resource::StateResource}, state::{action_states, db_client, server_status_cache, State}
helpers::{periphery_client, stack::compose_container_match_regex},
resource,
state::{action_states, db_client, server_status_cache, State},
};
#[async_trait]
impl Resolve<GetServersSummary, User> for State {
async fn resolve(
&self,
GetServersSummary {}: GetServersSummary,
user: User,
) -> anyhow::Result<GetServersSummaryResponse> {
let servers = Server::list_resource_list_items_for_user(
Default::default(),
&user,
)
.await?;
let servers =
resource::list_for_user::<Server>(Default::default(), &user)
.await?;
let mut res = GetServersSummaryResponse::default();
for server in servers {
res.total += 1;
match server.info.status {
ServerStatus::Ok => {
match server.info.state {
ServerState::Ok => {
res.healthy += 1;
}
ServerStatus::NotOk => {
ServerState::NotOk => {
res.unhealthy += 1;
}
ServerStatus::Disabled => {
ServerState::Disabled => {
res.disabled += 1;
}
}
@@ -63,14 +76,13 @@ impl Resolve<GetServersSummary, User> for State {
}
}
#[async_trait]
impl Resolve<GetPeripheryVersion, User> for State {
async fn resolve(
&self,
req: GetPeripheryVersion,
user: User,
) -> anyhow::Result<GetPeripheryVersionResponse> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&req.server,
&user,
PermissionLevel::Read,
@@ -85,14 +97,13 @@ impl Resolve<GetPeripheryVersion, User> for State {
}
}
#[async_trait]
impl Resolve<GetServer, User> for State {
async fn resolve(
&self,
req: GetServer,
user: User,
) -> anyhow::Result<Server> {
Server::get_resource_check_permissions(
resource::get_check_permissions::<Server>(
&req.server,
&user,
PermissionLevel::Read,
@@ -101,25 +112,33 @@ impl Resolve<GetServer, User> for State {
}
}
#[async_trait]
impl Resolve<ListServers, User> for State {
async fn resolve(
&self,
ListServers { query }: ListServers,
user: User,
) -> anyhow::Result<Vec<ServerListItem>> {
Server::list_resource_list_items_for_user(query, &user).await
resource::list_for_user::<Server>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetServerStatus, User> for State {
impl Resolve<ListFullServers, User> for State {
async fn resolve(
&self,
GetServerStatus { server }: GetServerStatus,
ListFullServers { query }: ListFullServers,
user: User,
) -> anyhow::Result<GetServerStatusResponse> {
let server = Server::get_resource_check_permissions(
) -> anyhow::Result<ListFullServersResponse> {
resource::list_full_for_user::<Server>(query, &user).await
}
}
impl Resolve<GetServerState, User> for State {
async fn resolve(
&self,
GetServerState { server }: GetServerState,
user: User,
) -> anyhow::Result<GetServerStateResponse> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
@@ -129,21 +148,20 @@ impl Resolve<GetServerStatus, User> for State {
.get(&server.id)
.await
.ok_or(anyhow!("did not find cached status for server"))?;
let response = GetServerStatusResponse {
status: status.status,
let response = GetServerStateResponse {
status: status.state,
};
Ok(response)
}
}
#[async_trait]
impl Resolve<GetServerActionState, User> for State {
async fn resolve(
&self,
GetServerActionState { server }: GetServerActionState,
user: User,
) -> anyhow::Result<ServerActionState> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
@@ -168,14 +186,13 @@ fn system_info_cache() -> &'static SystemInfoCache {
SYSTEM_INFO_CACHE.get_or_init(Default::default)
}
#[async_trait]
impl ResolveToString<GetSystemInformation, User> for State {
async fn resolve_to_string(
&self,
GetSystemInformation { server }: GetSystemInformation,
user: User,
) -> anyhow::Result<String> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
@@ -189,7 +206,7 @@ impl ResolveToString<GetSystemInformation, User> for State {
}
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemInformation {})
.request(periphery::stats::GetSystemInformation {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
@@ -204,14 +221,13 @@ impl ResolveToString<GetSystemInformation, User> for State {
}
}
#[async_trait]
impl ResolveToString<GetSystemStats, User> for State {
async fn resolve_to_string(
&self,
GetSystemStats { server }: GetSystemStats,
user: User,
) -> anyhow::Result<String> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
@@ -238,14 +254,13 @@ fn processes_cache() -> &'static ProcessesCache {
PROCESSES_CACHE.get_or_init(Default::default)
}
#[async_trait]
impl ResolveToString<GetSystemProcesses, User> for State {
impl ResolveToString<ListSystemProcesses, User> for State {
async fn resolve_to_string(
&self,
GetSystemProcesses { server }: GetSystemProcesses,
ListSystemProcesses { server }: ListSystemProcesses,
user: User,
) -> anyhow::Result<String> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
@@ -258,7 +273,7 @@ impl ResolveToString<GetSystemProcesses, User> for State {
}
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemProcesses {})
.request(periphery::stats::GetSystemProcesses {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
@@ -275,7 +290,6 @@ impl ResolveToString<GetSystemProcesses, User> for State {
const STATS_PER_PAGE: i64 = 500;
#[async_trait]
impl Resolve<GetHistoricalServerStats, User> for State {
async fn resolve(
&self,
@@ -286,7 +300,7 @@ impl Resolve<GetHistoricalServerStats, User> for State {
}: GetHistoricalServerStats,
user: User,
) -> anyhow::Result<GetHistoricalServerStatsResponse> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
@@ -329,121 +343,376 @@ impl Resolve<GetHistoricalServerStats, User> for State {
}
}
#[async_trait]
impl Resolve<GetDockerImages, User> for State {
impl ResolveToString<ListDockerContainers, User> for State {
async fn resolve_to_string(
&self,
ListDockerContainers { server }: ListDockerContainers,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(containers) = &cache.containers {
serde_json::to_string(containers)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
impl Resolve<InspectDockerContainer, User> for State {
async fn resolve(
&self,
GetDockerImages { server }: GetDockerImages,
InspectDockerContainer { server, container }: InspectDockerContainer,
user: User,
) -> anyhow::Result<Vec<ImageSummary>> {
let server = Server::get_resource_check_permissions(
) -> anyhow::Result<Container> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect container: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(InspectContainer { name: container })
.await
}
}
const MAX_LOG_LENGTH: u64 = 5000;
impl Resolve<GetContainerLog, User> for State {
async fn resolve(
&self,
GetContainerLog {
server,
container,
tail,
}: GetContainerLog,
user: User,
) -> anyhow::Result<Log> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
periphery_client(&server)?
.request(api::build::GetImageList {})
.request(periphery::container::GetContainerLog {
name: container,
tail: cmp::min(tail, MAX_LOG_LENGTH),
})
.await
.context("failed at call to periphery")
}
}
#[async_trait]
impl Resolve<GetDockerNetworks, User> for State {
impl Resolve<SearchContainerLog, User> for State {
async fn resolve(
&self,
GetDockerNetworks { server }: GetDockerNetworks,
SearchContainerLog {
server,
container,
terms,
combinator,
invert,
}: SearchContainerLog,
user: User,
) -> anyhow::Result<Vec<DockerNetwork>> {
let server = Server::get_resource_check_permissions(
) -> anyhow::Result<Log> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
periphery_client(&server)?
.request(api::network::GetNetworkList {})
.request(periphery::container::GetContainerLogSearch {
name: container,
terms,
combinator,
invert,
})
.await
.context("failed at call to periphery")
}
}
#[async_trait]
impl Resolve<GetDockerContainers, User> for State {
impl Resolve<GetResourceMatchingContainer, User> for State {
async fn resolve(
&self,
GetDockerContainers { server }: GetDockerContainers,
GetResourceMatchingContainer { server, container }: GetResourceMatchingContainer,
user: User,
) -> anyhow::Result<Vec<ContainerSummary>> {
let server = Server::get_resource_check_permissions(
) -> anyhow::Result<GetResourceMatchingContainerResponse> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
// first check deployments
if let Ok(deployment) =
resource::get::<Deployment>(&container).await
{
return Ok(GetResourceMatchingContainerResponse {
resource: ResourceTarget::Deployment(deployment.id).into(),
});
}
// then check stacks
let stacks =
resource::list_full_for_user_using_document::<Stack>(
doc! { "config.server_id": &server.id },
&user,
)
.await?;
// check matching stack
for stack in stacks {
for StackServiceNames {
service_name,
container_name,
} in stack
.info
.deployed_services
.unwrap_or(stack.info.latest_services)
{
let is_match = match compose_container_match_regex(&container_name)
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
{
Ok(regex) => regex,
Err(e) => {
warn!("{e:#}");
continue;
}
}.is_match(&container);
if is_match {
return Ok(GetResourceMatchingContainerResponse {
resource: ResourceTarget::Stack(stack.id).into(),
});
}
}
}
Ok(GetResourceMatchingContainerResponse { resource: None })
}
}
impl ResolveToString<ListDockerNetworks, User> for State {
async fn resolve_to_string(
&self,
ListDockerNetworks { server }: ListDockerNetworks,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(networks) = &cache.networks {
serde_json::to_string(networks)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
impl Resolve<InspectDockerNetwork, User> for State {
async fn resolve(
&self,
InspectDockerNetwork { server, network }: InspectDockerNetwork,
user: User,
) -> anyhow::Result<Network> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect network: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(api::container::GetContainerList {})
.request(InspectNetwork { name: network })
.await
}
}
#[async_trait]
impl Resolve<GetAvailableAccounts, User> for State {
async fn resolve(
impl ResolveToString<ListDockerImages, User> for State {
async fn resolve_to_string(
&self,
GetAvailableAccounts { server }: GetAvailableAccounts,
ListDockerImages { server }: ListDockerImages,
user: User,
) -> anyhow::Result<GetAvailableAccountsResponse> {
let server = Server::get_resource_check_permissions(
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let GetAccountsResponse { github, docker } =
periphery_client(&server)?
.request(api::GetAccounts {})
.await
.context("failed to get accounts from periphery")?;
let mut github_set = HashSet::<String>::new();
github_set.extend(core_config().github_accounts.keys().cloned());
github_set.extend(github);
let mut github = github_set.into_iter().collect::<Vec<_>>();
github.sort();
let mut docker_set = HashSet::<String>::new();
docker_set.extend(core_config().docker_accounts.keys().cloned());
docker_set.extend(docker);
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
docker.sort();
let res = GetAvailableAccountsResponse { github, docker };
Ok(res)
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(images) = &cache.images {
serde_json::to_string(images)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
#[async_trait]
impl Resolve<GetAvailableSecrets, User> for State {
impl Resolve<InspectDockerImage, User> for State {
async fn resolve(
&self,
GetAvailableSecrets { server }: GetAvailableSecrets,
InspectDockerImage { server, image }: InspectDockerImage,
user: User,
) -> anyhow::Result<GetAvailableSecretsResponse> {
let server = Server::get_resource_check_permissions(
) -> anyhow::Result<Image> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let secrets = periphery_client(&server)?
.request(api::GetSecrets {})
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect image: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(InspectImage { name: image })
.await
.context("failed to get accounts from periphery")?;
Ok(secrets)
}
}
impl Resolve<ListDockerImageHistory, User> for State {
async fn resolve(
&self,
ListDockerImageHistory { server, image }: ListDockerImageHistory,
user: User,
) -> anyhow::Result<Vec<ImageHistoryResponseItem>> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot get image history: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(ImageHistory { name: image })
.await
}
}
impl ResolveToString<ListDockerVolumes, User> for State {
async fn resolve_to_string(
&self,
ListDockerVolumes { server }: ListDockerVolumes,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(volumes) = &cache.volumes {
serde_json::to_string(volumes)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
impl Resolve<InspectDockerVolume, User> for State {
async fn resolve(
&self,
InspectDockerVolume { server, volume }: InspectDockerVolume,
user: User,
) -> anyhow::Result<Volume> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect volume: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(InspectVolume { name: volume })
.await
}
}
impl ResolveToString<ListComposeProjects, User> for State {
async fn resolve_to_string(
&self,
ListComposeProjects { server }: ListComposeProjects,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(projects) = &cache.projects {
serde_json::to_string(projects)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}

View File

@@ -1,36 +1,27 @@
use std::str::FromStr;
use anyhow::Context;
use axum::async_trait;
use monitor_client::{
api::read::{
GetServerTemplate, GetServerTemplateResponse,
GetServerTemplatesSummary, GetServerTemplatesSummaryResponse,
ListServerTemplates, ListServerTemplatesResponse,
},
use mongo_indexed::Document;
use komodo_client::{
api::read::*,
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
update::ResourceTargetVariant, user::User,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::resource::{
get_resource_ids_for_non_admin, StateResource,
},
resource,
state::{db_client, State},
};
#[async_trait]
impl Resolve<GetServerTemplate, User> for State {
async fn resolve(
&self,
GetServerTemplate { server_template }: GetServerTemplate,
user: User,
) -> anyhow::Result<GetServerTemplateResponse> {
ServerTemplate::get_resource_check_permissions(
resource::get_check_permissions::<ServerTemplate>(
&server_template,
&user,
PermissionLevel::Read,
@@ -39,47 +30,48 @@ impl Resolve<GetServerTemplate, User> for State {
}
}
#[async_trait]
impl Resolve<ListServerTemplates, User> for State {
async fn resolve(
&self,
ListServerTemplates { query }: ListServerTemplates,
user: User,
) -> anyhow::Result<ListServerTemplatesResponse> {
ServerTemplate::list_resource_list_items_for_user(query, &user)
.await
resource::list_for_user::<ServerTemplate>(query, &user).await
}
}
impl Resolve<ListFullServerTemplates, User> for State {
async fn resolve(
&self,
ListFullServerTemplates { query }: ListFullServerTemplates,
user: User,
) -> anyhow::Result<ListFullServerTemplatesResponse> {
resource::list_full_for_user::<ServerTemplate>(query, &user).await
}
}
#[async_trait]
impl Resolve<GetServerTemplatesSummary, User> for State {
async fn resolve(
&self,
GetServerTemplatesSummary {}: GetServerTemplatesSummary,
user: User,
) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::ServerTemplate,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
let query = match resource::get_resource_ids_for_user::<
ServerTemplate,
>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
};
Some(query)
},
None => Document::new(),
};
let total = db_client()
.await
.builders
.count_documents(query, None)
.server_templates
.count_documents(query)
.await
.context("failed to count all builder documents")?;
.context("failed to count all server template documents")?;
let res = GetServerTemplatesSummaryResponse {
total: total as u32,
};

View File

@@ -0,0 +1,338 @@
use std::collections::HashSet;
use anyhow::Context;
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
stack::{Stack, StackActionState, StackListItem, StackState},
user::User,
},
};
use periphery_client::api::compose::{
GetComposeServiceLog, GetComposeServiceLogSearch,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{periphery_client, stack::get_stack_and_server},
resource,
state::{action_states, github_client, stack_status_cache, State},
};
impl Resolve<GetStack, User> for State {
  /// Fetch the full Stack config, enforcing that `user` has at least
  /// Read permission on it. Pure delegation to the shared resource helper.
  async fn resolve(
    &self,
    GetStack { stack }: GetStack,
    user: User,
  ) -> anyhow::Result<Stack> {
    resource::get_check_permissions::<Stack>(
      &stack,
      &user,
      PermissionLevel::Read,
    )
    .await
  }
}
/// Lists the services of a stack from the in-memory status cache.
impl Resolve<ListStackServices, User> for State {
  async fn resolve(
    &self,
    ListStackServices { stack }: ListStackServices,
    user: User,
  ) -> anyhow::Result<ListStackServicesResponse> {
    let stack = resource::get_check_permissions::<Stack>(
      &stack,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A stack with no cache entry yet yields the default (empty) list.
    let status = stack_status_cache().get(&stack.id).await;
    Ok(status.unwrap_or_default().curr.services.clone())
  }
}
/// Retrieves the log tail for one service of a stack via the periphery
/// agent on the stack's server.
impl Resolve<GetStackServiceLog, User> for State {
  async fn resolve(
    &self,
    GetStackServiceLog {
      stack,
      service,
      tail,
    }: GetStackServiceLog,
    user: User,
  ) -> anyhow::Result<GetStackServiceLogResponse> {
    let (stack, server) = get_stack_and_server(
      &stack,
      &user,
      PermissionLevel::Read,
      true,
    )
    .await?;
    // Build the periphery request before dispatch for readability.
    let request = GetComposeServiceLog {
      project: stack.project_name(false),
      service,
      tail,
    };
    periphery_client(&server)?
      .request(request)
      .await
      .context("failed to get stack service log from periphery")
  }
}
impl Resolve<SearchStackServiceLog, User> for State {
  /// Searches one stack service's log on the periphery agent with the
  /// given terms / combinator, optionally inverting the match.
  async fn resolve(
    &self,
    SearchStackServiceLog {
      stack,
      service,
      terms,
      combinator,
      invert,
    }: SearchStackServiceLog,
    user: User,
  ) -> anyhow::Result<SearchStackServiceLogResponse> {
    // Resolves the stack plus its attached server with a Read check.
    // NOTE(review): the trailing `true` flag's meaning is defined in
    // get_stack_and_server — confirm before relying on it here.
    let (stack, server) = get_stack_and_server(
      &stack,
      &user,
      PermissionLevel::Read,
      true,
    )
    .await?;
    periphery_client(&server)?
      .request(GetComposeServiceLogSearch {
        project: stack.project_name(false),
        service,
        terms,
        combinator,
        invert,
      })
      .await
      .context("failed to get stack service log from periphery")
  }
}
/// Aggregates the distinct `extra_args` across all stacks the user can
/// read (matching `query`), returned in sorted order.
impl Resolve<ListCommonStackExtraArgs, User> for State {
  async fn resolve(
    &self,
    ListCommonStackExtraArgs { query }: ListCommonStackExtraArgs,
    user: User,
  ) -> anyhow::Result<ListCommonStackExtraArgsResponse> {
    let stacks = resource::list_full_for_user::<Stack>(query, &user)
      .await
      .context("failed to get resources matching query")?;
    // Deduplicate through a HashSet, then sort for stable output.
    let mut args = stacks
      .into_iter()
      .flat_map(|stack| stack.config.extra_args)
      .collect::<HashSet<String>>()
      .into_iter()
      .collect::<Vec<_>>();
    args.sort();
    Ok(args)
  }
}
/// Aggregates the distinct `build_extra_args` across all stacks the user
/// can read (matching `query`), returned in sorted order.
impl Resolve<ListCommonStackBuildExtraArgs, User> for State {
  async fn resolve(
    &self,
    ListCommonStackBuildExtraArgs { query }: ListCommonStackBuildExtraArgs,
    user: User,
  ) -> anyhow::Result<ListCommonStackBuildExtraArgsResponse> {
    let stacks = resource::list_full_for_user::<Stack>(query, &user)
      .await
      .context("failed to get resources matching query")?;
    // Deduplicate through a HashSet, then sort for stable output.
    let mut args = stacks
      .into_iter()
      .flat_map(|stack| stack.config.build_extra_args)
      .collect::<HashSet<String>>()
      .into_iter()
      .collect::<Vec<_>>();
    args.sort();
    Ok(args)
  }
}
impl Resolve<ListStacks, User> for State {
  /// List stacks matching `query` as lightweight list items, filtered
  /// to those the user may read.
  async fn resolve(
    &self,
    ListStacks { query }: ListStacks,
    user: User,
  ) -> anyhow::Result<Vec<StackListItem>> {
    resource::list_for_user::<Stack>(query, &user).await
  }
}
impl Resolve<ListFullStacks, User> for State {
  /// List full stack documents matching `query`, filtered to those the
  /// user may read.
  async fn resolve(
    &self,
    ListFullStacks { query }: ListFullStacks,
    user: User,
  ) -> anyhow::Result<ListFullStacksResponse> {
    resource::list_full_for_user::<Stack>(query, &user).await
  }
}
/// Reports the in-flight action state for a stack (which executions are
/// currently running against it).
impl Resolve<GetStackActionState, User> for State {
  async fn resolve(
    &self,
    GetStackActionState { stack }: GetStackActionState,
    user: User,
  ) -> anyhow::Result<StackActionState> {
    let stack = resource::get_check_permissions::<Stack>(
      &stack,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A missing entry means nothing has run against this stack yet,
    // so fall back to the default state.
    let entry = action_states().stack.get(&stack.id).await;
    let action_state = entry.unwrap_or_default().get()?;
    Ok(action_state)
  }
}
impl Resolve<GetStacksSummary, User> for State {
  /// Counts the user's visible stacks bucketed by their cached state
  /// (running / stopped / unhealthy / ...), plus a grand total.
  async fn resolve(
    &self,
    GetStacksSummary {}: GetStacksSummary,
    user: User,
  ) -> anyhow::Result<GetStacksSummaryResponse> {
    // Default query: every stack the user can read.
    let stacks = resource::list_full_for_user::<Stack>(
      Default::default(),
      &user,
    )
    .await
    .context("failed to get stacks from db")?;
    let mut res = GetStacksSummaryResponse::default();
    let cache = stack_status_cache();
    for stack in stacks {
      res.total += 1;
      // Stacks missing from the cache fall to the default status,
      // which lands in one of the buckets below (Unknown by default —
      // TODO confirm the Default impl of the cached status).
      match cache.get(&stack.id).await.unwrap_or_default().curr.state
      {
        StackState::Running => res.running += 1,
        StackState::Paused => res.paused += 1,
        StackState::Stopped => res.stopped += 1,
        StackState::Restarting => res.restarting += 1,
        StackState::Created => res.created += 1,
        StackState::Removing => res.removing += 1,
        StackState::Dead => res.dead += 1,
        StackState::Unhealthy => res.unhealthy += 1,
        StackState::Down => res.down += 1,
        StackState::Unknown => res.unknown += 1,
      }
    }
    Ok(res)
  }
}
impl Resolve<GetStackWebhooksEnabled, User> for State {
  /// Checks whether github webhooks (refresh / deploy) are registered on
  /// the stack's repo via the configured github client.
  ///
  /// Returns `managed: false` (all flags off) when no github client is
  /// configured at all, the stack does not use github.com, the stack has
  /// no repo, or no client exists for the repo's owner.
  async fn resolve(
    &self,
    GetStackWebhooksEnabled { stack }: GetStackWebhooksEnabled,
    user: User,
  ) -> anyhow::Result<GetStackWebhooksEnabledResponse> {
    let Some(github) = github_client() else {
      return Ok(GetStackWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        deploy_enabled: false,
      });
    };
    let stack = resource::get_check_permissions::<Stack>(
      &stack,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    if stack.config.git_provider != "github.com"
      || stack.config.repo.is_empty()
    {
      return Ok(GetStackWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        deploy_enabled: false,
      });
    }
    // Repo is "owner/name" — split once and validate both halves.
    // Error messages fixed: they previously said "Sync repo ..." /
    // "Repo repo ...", copy-pasted from the sync handler.
    let mut split = stack.config.repo.split('/');
    let owner = split.next().context("Stack repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Ok(GetStackWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        deploy_enabled: false,
      });
    };
    let repo_name = split
      .next()
      .context("Stack repo has no repo name after the /")?;
    let github_repos = github.repos();
    let webhooks = github_repos
      .list_all_webhooks(owner, repo_name)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      ..
    } = core_config();
    // Webhooks may be registered against a dedicated base url rather
    // than the main host.
    let host = webhook_base_url.as_ref().unwrap_or(host);
    let refresh_url =
      format!("{host}/listener/github/stack/{}/refresh", stack.id);
    let deploy_url =
      format!("{host}/listener/github/stack/{}/deploy", stack.id);
    let mut refresh_enabled = false;
    let mut deploy_enabled = false;
    for webhook in webhooks {
      if webhook.active && webhook.config.url == refresh_url {
        refresh_enabled = true
      }
      if webhook.active && webhook.config.url == deploy_url {
        deploy_enabled = true
      }
    }
    Ok(GetStackWebhooksEnabledResponse {
      managed: true,
      refresh_enabled,
      deploy_enabled,
    })
  }
}

View File

@@ -0,0 +1,228 @@
use anyhow::Context;
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
sync::{
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
ResourceSyncListItem, ResourceSyncState,
},
user::User,
},
};
use resolver_api::Resolve;
use crate::{
config::core_config,
resource,
state::{
action_states, github_client, resource_sync_state_cache, State,
},
};
impl Resolve<GetResourceSync, User> for State {
  /// Fetch the full ResourceSync, enforcing that `user` has at least
  /// Read permission on it.
  async fn resolve(
    &self,
    GetResourceSync { sync }: GetResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Read,
    )
    .await
  }
}
impl Resolve<ListResourceSyncs, User> for State {
  /// List resource syncs matching `query` as lightweight list items,
  /// filtered to those the user may read.
  async fn resolve(
    &self,
    ListResourceSyncs { query }: ListResourceSyncs,
    user: User,
  ) -> anyhow::Result<Vec<ResourceSyncListItem>> {
    resource::list_for_user::<ResourceSync>(query, &user).await
  }
}
impl Resolve<ListFullResourceSyncs, User> for State {
  /// List full resource sync documents matching `query`, filtered to
  /// those the user may read.
  async fn resolve(
    &self,
    ListFullResourceSyncs { query }: ListFullResourceSyncs,
    user: User,
  ) -> anyhow::Result<ListFullResourceSyncsResponse> {
    resource::list_full_for_user::<ResourceSync>(query, &user).await
  }
}
/// Reports the in-flight action state for a resource sync (e.g. whether
/// a sync execution is currently running).
impl Resolve<GetResourceSyncActionState, User> for State {
  async fn resolve(
    &self,
    GetResourceSyncActionState { sync }: GetResourceSyncActionState,
    user: User,
  ) -> anyhow::Result<ResourceSyncActionState> {
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A missing entry means nothing has run against this sync yet,
    // so fall back to the default state.
    let entry = action_states().resource_sync.get(&sync.id).await;
    let action_state = entry.unwrap_or_default().get()?;
    Ok(action_state)
  }
}
impl Resolve<GetResourceSyncsSummary, User> for State {
  /// Counts the user's visible resource syncs bucketed by state:
  /// pending / failed (from the stored pending-updates data), then
  /// syncing / ok / failed / unknown (from caches and action states).
  async fn resolve(
    &self,
    GetResourceSyncsSummary {}: GetResourceSyncsSummary,
    user: User,
  ) -> anyhow::Result<GetResourceSyncsSummaryResponse> {
    let resource_syncs =
      resource::list_full_for_user::<ResourceSync>(
        Default::default(),
        &user,
      )
      .await
      .context("failed to get resource_syncs from db")?;
    let mut res = GetResourceSyncsSummaryResponse::default();
    let cache = resource_sync_state_cache();
    let action_states = action_states();
    for resource_sync in resource_syncs {
      res.total += 1;
      // First classify from the stored pending data: updates waiting
      // => pending, broken pending computation => failed. Both skip
      // the cache-based classification below.
      match resource_sync.info.pending.data {
        PendingSyncUpdatesData::Ok(data) => {
          if !data.no_updates() {
            res.pending += 1;
            continue;
          }
        }
        PendingSyncUpdatesData::Err(_) => {
          res.failed += 1;
          continue;
        }
      }
      // An actively-running sync (per action state) wins over the
      // cached state regardless of what the cache says.
      match (
        cache.get(&resource_sync.id).await.unwrap_or_default(),
        action_states
          .resource_sync
          .get(&resource_sync.id)
          .await
          .unwrap_or_default()
          .get()?,
      ) {
        (_, action_states) if action_states.syncing => {
          res.syncing += 1;
        }
        (ResourceSyncState::Ok, _) => res.ok += 1,
        (ResourceSyncState::Failed, _) => res.failed += 1,
        (ResourceSyncState::Unknown, _) => res.unknown += 1,
        // The cache is assumed never to hold Syncing (comes only from
        // action states) or Pending (handled from pending data above).
        (ResourceSyncState::Syncing, _) => {
          unreachable!()
        }
        (ResourceSyncState::Pending, _) => {
          unreachable!()
        }
      }
    }
    Ok(res)
  }
}
impl Resolve<GetSyncWebhooksEnabled, User> for State {
  /// Checks whether github webhooks (refresh / sync) are registered on
  /// the resource sync's repo via the configured github client.
  ///
  /// Returns `managed: false` (all flags off) when no github client is
  /// configured at all, the sync does not use github.com, the sync has
  /// no repo, or no client exists for the repo's owner.
  async fn resolve(
    &self,
    GetSyncWebhooksEnabled { sync }: GetSyncWebhooksEnabled,
    user: User,
  ) -> anyhow::Result<GetSyncWebhooksEnabledResponse> {
    let Some(github) = github_client() else {
      return Ok(GetSyncWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        sync_enabled: false,
      });
    };
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    if sync.config.git_provider != "github.com"
      || sync.config.repo.is_empty()
    {
      return Ok(GetSyncWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        sync_enabled: false,
      });
    }
    // Repo is "owner/name" — split once and validate both halves.
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Ok(GetSyncWebhooksEnabledResponse {
        managed: false,
        refresh_enabled: false,
        sync_enabled: false,
      });
    };
    // Error message fixed: previously the garbled
    // "Repo repo has no repo after the /".
    let repo_name = split
      .next()
      .context("Sync repo has no repo name after the /")?;
    let github_repos = github.repos();
    let webhooks = github_repos
      .list_all_webhooks(owner, repo_name)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      ..
    } = core_config();
    // Webhooks may be registered against a dedicated base url rather
    // than the main host.
    let host = webhook_base_url.as_ref().unwrap_or(host);
    let refresh_url =
      format!("{host}/listener/github/sync/{}/refresh", sync.id);
    let sync_url =
      format!("{host}/listener/github/sync/{}/sync", sync.id);
    let mut refresh_enabled = false;
    let mut sync_enabled = false;
    for webhook in webhooks {
      if webhook.active && webhook.config.url == refresh_url {
        refresh_enabled = true
      }
      if webhook.active && webhook.config.url == sync_url {
        sync_enabled = true
      }
    }
    Ok(GetSyncWebhooksEnabledResponse {
      managed: true,
      refresh_enabled,
      sync_enabled,
    })
  }
}

View File

@@ -1,10 +1,10 @@
use anyhow::Context;
use async_trait::async_trait;
use monitor_client::{
use mongo_indexed::doc;
use komodo_client::{
api::read::{GetTag, ListTags},
entities::{tag::Tag, user::User},
};
use mungos::find::find_collect;
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
@@ -12,7 +12,6 @@ use crate::{
state::{db_client, State},
};
#[async_trait]
impl Resolve<GetTag, User> for State {
async fn resolve(
&self,
@@ -23,15 +22,18 @@ impl Resolve<GetTag, User> for State {
}
}
#[async_trait]
impl Resolve<ListTags, User> for State {
async fn resolve(
&self,
ListTags { query }: ListTags,
_: User,
) -> anyhow::Result<Vec<Tag>> {
find_collect(&db_client().await.tags, query, None)
.await
.context("failed to get tags from db")
find_collect(
&db_client().await.tags,
query,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to get tags from db")
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,7 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
alerter::Alerter,
@@ -14,8 +13,11 @@ use monitor_client::{
repo::Repo,
server::Server,
server_template::ServerTemplate,
update::{ResourceTarget, Update, UpdateListItem},
stack::Stack,
sync::ResourceSync,
update::{Update, UpdateListItem},
user::User,
ResourceTarget,
},
};
use mungos::{
@@ -26,46 +28,137 @@ use mungos::{
use resolver_api::Resolve;
use crate::{
helpers::resource::StateResource,
config::core_config,
resource,
state::{db_client, State},
};
const UPDATES_PER_PAGE: i64 = 20;
const UPDATES_PER_PAGE: i64 = 100;
#[async_trait]
impl Resolve<ListUpdates, User> for State {
async fn resolve(
&self,
ListUpdates { query, page }: ListUpdates,
user: User,
) -> anyhow::Result<ListUpdatesResponse> {
let query = if user.admin {
let query = if user.admin || core_config().transparent_mode {
query
} else {
let server_ids =
Server::get_resource_ids_for_non_admin(&user.id).await?;
let deployment_ids =
Deployment::get_resource_ids_for_non_admin(&user.id).await?;
let build_ids =
Build::get_resource_ids_for_non_admin(&user.id).await?;
let repo_ids =
Repo::get_resource_ids_for_non_admin(&user.id).await?;
let procedure_ids =
Procedure::get_resource_ids_for_non_admin(&user.id).await?;
let builder_ids =
Builder::get_resource_ids_for_non_admin(&user.id).await?;
let alerter_ids =
Alerter::get_resource_ids_for_non_admin(&user.id).await?;
let server_query =
resource::get_resource_ids_for_user::<Server>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
resource::get_resource_ids_for_user::<Deployment>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query =
resource::get_resource_ids_for_user::<Stack>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query =
resource::get_resource_ids_for_user::<Build>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query =
resource::get_resource_ids_for_user::<Repo>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
resource::get_resource_ids_for_user::<Procedure>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let builder_query =
resource::get_resource_ids_for_user::<Builder>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query =
resource::get_resource_ids_for_user::<Alerter>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_query = resource::get_resource_ids_for_user::<ServerTemplate>(
&user,
)
.await?
.map(|ids| {
doc! {
"target.type": "ServerTemplate", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
let resource_sync_query = resource::get_resource_ids_for_user::<ResourceSync>(
&user,
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = query.unwrap_or_default();
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
{ "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
{ "target.type": "Build", "target.id": { "$in": &build_ids } },
{ "target.type": "Repo", "target.id": { "$in": &repo_ids } },
{ "target.type": "Procedure", "target.id": { "$in": &procedure_ids } },
{ "target.type": "Builder", "target.id": { "$in": &builder_ids } },
{ "target.type": "Alerter", "target.id": { "$in": &alerter_ids } },
server_query,
deployment_query,
stack_query,
build_query,
repo_query,
procedure_query,
alerter_query,
builder_query,
server_template_query,
resource_sync_query,
]
});
query.into()
@@ -110,6 +203,7 @@ impl Resolve<ListUpdates, User> for State {
target: u.target,
status: u.status,
version: u.version,
other_data: u.other_data,
}
})
.collect::<Vec<_>>();
@@ -124,7 +218,6 @@ impl Resolve<ListUpdates, User> for State {
}
}
#[async_trait]
impl Resolve<GetUpdate, User> for State {
async fn resolve(
&self,
@@ -135,7 +228,7 @@ impl Resolve<GetUpdate, User> for State {
.await
.context("failed to query to db")?
.context("no update exists with given id")?;
if user.admin {
if user.admin || core_config().transparent_mode {
return Ok(update);
}
match &update.target {
@@ -145,7 +238,7 @@ impl Resolve<GetUpdate, User> for State {
))
}
ResourceTarget::Server(id) => {
Server::get_resource_check_permissions(
resource::get_check_permissions::<Server>(
id,
&user,
PermissionLevel::Read,
@@ -153,7 +246,7 @@ impl Resolve<GetUpdate, User> for State {
.await?;
}
ResourceTarget::Deployment(id) => {
Deployment::get_resource_check_permissions(
resource::get_check_permissions::<Deployment>(
id,
&user,
PermissionLevel::Read,
@@ -161,7 +254,7 @@ impl Resolve<GetUpdate, User> for State {
.await?;
}
ResourceTarget::Build(id) => {
Build::get_resource_check_permissions(
resource::get_check_permissions::<Build>(
id,
&user,
PermissionLevel::Read,
@@ -169,7 +262,7 @@ impl Resolve<GetUpdate, User> for State {
.await?;
}
ResourceTarget::Repo(id) => {
Repo::get_resource_check_permissions(
resource::get_check_permissions::<Repo>(
id,
&user,
PermissionLevel::Read,
@@ -177,7 +270,7 @@ impl Resolve<GetUpdate, User> for State {
.await?;
}
ResourceTarget::Builder(id) => {
Builder::get_resource_check_permissions(
resource::get_check_permissions::<Builder>(
id,
&user,
PermissionLevel::Read,
@@ -185,7 +278,7 @@ impl Resolve<GetUpdate, User> for State {
.await?;
}
ResourceTarget::Alerter(id) => {
Alerter::get_resource_check_permissions(
resource::get_check_permissions::<Alerter>(
id,
&user,
PermissionLevel::Read,
@@ -193,7 +286,7 @@ impl Resolve<GetUpdate, User> for State {
.await?;
}
ResourceTarget::Procedure(id) => {
Procedure::get_resource_check_permissions(
resource::get_check_permissions::<Procedure>(
id,
&user,
PermissionLevel::Read,
@@ -201,7 +294,23 @@ impl Resolve<GetUpdate, User> for State {
.await?;
}
ResourceTarget::ServerTemplate(id) => {
ServerTemplate::get_resource_check_permissions(
resource::get_check_permissions::<ServerTemplate>(
id,
&user,
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
resource::get_check_permissions::<ResourceSync>(
id,
&user,
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Stack(id) => {
resource::get_check_permissions::<Stack>(
id,
&user,
PermissionLevel::Read,

View File

@@ -1,21 +1,25 @@
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::{
GetUsername, GetUsernameResponse, ListApiKeys,
ListApiKeysForServiceUser, ListApiKeysForServiceUserResponse,
ListApiKeysResponse, ListUsers, ListUsersResponse,
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
ListApiKeys, ListApiKeysForServiceUser,
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
ListUsers, ListUsersResponse,
},
entities::user::{User, UserConfig},
};
use mungos::{
by_id::find_one_by_id, find::find_collect, mongodb::bson::doc,
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
use crate::{
helpers::query::get_user,
state::{db_client, State},
};
#[async_trait]
impl Resolve<GetUsername, User> for State {
async fn resolve(
&self,
@@ -27,13 +31,32 @@ impl Resolve<GetUsername, User> for State {
.context("failed at mongo query for user")?
.context("no user found with id")?;
let avatar = match user.config {
UserConfig::Github { avatar, .. } => Some(avatar),
UserConfig::Google { avatar, .. } => Some(avatar),
_ => None,
};
Ok(GetUsernameResponse {
username: user.username,
avatar,
})
}
}
#[async_trait]
impl Resolve<FindUser, User> for State {
async fn resolve(
&self,
FindUser { user }: FindUser,
admin: User,
) -> anyhow::Result<FindUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
get_user(&user).await
}
}
impl Resolve<ListUsers, User> for State {
async fn resolve(
&self,
@@ -43,22 +66,56 @@ impl Resolve<ListUsers, User> for State {
if !user.admin {
return Err(anyhow!("this route is only accessable by admins"));
}
let mut users =
find_collect(&db_client().await.users, None, None)
.await
.context("failed to pull users from db")?;
let mut users = find_collect(
&db_client().await.users,
None,
FindOptions::builder().sort(doc! { "username": 1 }).build(),
)
.await
.context("failed to pull users from db")?;
users.iter_mut().for_each(|user| user.sanitize());
Ok(users)
}
}
#[async_trait]
impl Resolve<ListApiKeys, User> for State {
async fn resolve(
&self,
ListApiKeys {}: ListApiKeys,
user: User,
) -> anyhow::Result<ListApiKeysResponse> {
let api_keys = find_collect(
&db_client().await.api_keys,
doc! { "user_id": &user.id },
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for api keys")?
.into_iter()
.map(|mut api_keys| {
api_keys.sanitize();
api_keys
})
.collect();
Ok(api_keys)
}
}
impl Resolve<ListApiKeysForServiceUser, User> for State {
async fn resolve(
&self,
ListApiKeysForServiceUser { user }: ListApiKeysForServiceUser,
admin: User,
) -> anyhow::Result<ListApiKeysForServiceUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
let user = get_user(&user).await?;
let UserConfig::Service { .. } = user.config else {
return Err(anyhow!("Given user is not service user"));
};
let api_keys = find_collect(
&db_client().await.api_keys,
doc! { "user_id": &user.id },
@@ -75,37 +132,3 @@ impl Resolve<ListApiKeys, User> for State {
Ok(api_keys)
}
}
#[async_trait]
impl Resolve<ListApiKeysForServiceUser, User> for State {
async fn resolve(
&self,
ListApiKeysForServiceUser { user_id }: ListApiKeysForServiceUser,
admin: User,
) -> anyhow::Result<ListApiKeysForServiceUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only."));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query db for users")?
.context("user at id not found")?;
let UserConfig::Service { .. } = user.config else {
return Err(anyhow!("Given user is not service user"));
};
let api_keys = find_collect(
&db_client().await.api_keys,
doc! { "user_id": user_id },
None,
)
.await
.context("failed to query db for api keys")?
.into_iter()
.map(|mut api_keys| {
api_keys.sanitize();
api_keys
})
.collect();
Ok(api_keys)
}
}

View File

@@ -1,8 +1,7 @@
use std::str::FromStr;
use anyhow::Context;
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::read::{
GetUserGroup, GetUserGroupResponse, ListUserGroups,
ListUserGroupsResponse,
@@ -11,13 +10,15 @@ use monitor_client::{
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId, Document},
mongodb::{
bson::{doc, oid::ObjectId, Document},
options::FindOptions,
},
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
#[async_trait]
impl Resolve<GetUserGroup, User> for State {
async fn resolve(
&self,
@@ -36,14 +37,13 @@ impl Resolve<GetUserGroup, User> for State {
db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user groups")?
.context("no UserGroup found with given name or id")
}
}
#[async_trait]
impl Resolve<ListUserGroups, User> for State {
async fn resolve(
&self,
@@ -54,8 +54,12 @@ impl Resolve<ListUserGroups, User> for State {
if !user.admin {
filter.insert("users", &user.id);
}
find_collect(&db_client().await.user_groups, filter, None)
.await
.context("failed to query db for UserGroups")
find_collect(
&db_client().await.user_groups,
filter,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for UserGroups")
}
}

View File

@@ -0,0 +1,60 @@
use anyhow::Context;
use mongo_indexed::doc;
use komodo_client::{
api::read::{
GetVariable, GetVariableResponse, ListVariables,
ListVariablesResponse,
},
entities::user::User,
};
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
helpers::query::get_variable,
state::{db_client, State},
};
/// Fetches a variable by name. Secret values are masked with '#' for
/// non-admin users; admins always see the raw value.
impl Resolve<GetVariable, User> for State {
  async fn resolve(
    &self,
    GetVariable { name }: GetVariable,
    user: User,
  ) -> anyhow::Result<GetVariableResponse> {
    let mut variable = get_variable(&name).await?;
    if variable.is_secret && !user.admin {
      // Mask each character so the length still hints at the value size.
      variable.value = "#".repeat(variable.value.len());
    }
    Ok(variable)
  }
}
/// Lists all variables sorted by name. Secret values are masked with '#'
/// for non-admin users; admins see raw values.
impl Resolve<ListVariables, User> for State {
  async fn resolve(
    &self,
    ListVariables {}: ListVariables,
    user: User,
  ) -> anyhow::Result<ListVariablesResponse> {
    let mut variables = find_collect(
      &db_client().await.variables,
      None,
      FindOptions::builder().sort(doc! { "name": 1 }).build(),
    )
    .await
    .context("failed to query db for variables")?;
    // Mask secrets in place for non-admins; the mask preserves length.
    if !user.admin {
      for variable in variables.iter_mut() {
        if variable.is_secret {
          variable.value = "#".repeat(variable.value.len());
        }
      }
    }
    Ok(variables)
  }
}

212
bin/core/src/api/user.rs Normal file
View File

@@ -0,0 +1,212 @@
use std::{collections::VecDeque, time::Instant};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Json, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use komodo_client::{
api::user::{
CreateApiKey, CreateApiKeyResponse, DeleteApiKey,
DeleteApiKeyResponse, PushRecentlyViewed,
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse,
},
entities::{api_key::ApiKey, komodo_timestamp, user::User},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::{query::get_user, random_string},
state::{db_client, State},
};
/// The set of authenticated, user-scoped api requests served by the
/// `/user` router. Tagged-enum serde layout: `{ "type": ..., "params": ... }`.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum UserRequest {
  PushRecentlyViewed(PushRecentlyViewed),
  SetLastSeenUpdate(SetLastSeenUpdate),
  CreateApiKey(CreateApiKey),
  DeleteApiKey(DeleteApiKey),
}
/// Builds the `/user` router: a single POST endpoint, guarded by the
/// authentication middleware so `handler` always receives a valid User.
pub fn router() -> Router {
  Router::new()
    .route("/", post(handler))
    .layer(middleware::from_fn(auth_request))
}
/// Entry point for `/user` requests: deserializes a [UserRequest],
/// resolves it against global State for the authenticated user, and
/// returns the JSON response string. Logs a per-request uuid and the
/// resolution time at debug level; failures are logged at warn.
#[instrument(name = "UserHandler", level = "debug", skip(user))]
async fn handler(
  Extension(user): Extension<User>,
  Json(request): Json<UserRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
  let timer = Instant::now();
  // Random id to correlate the request's log lines.
  let req_id = Uuid::new_v4();
  debug!(
    "/user request {req_id} | user: {} ({})",
    user.username, user.id
  );
  let res =
    State
      .resolve_request(request, user)
      .await
      .map_err(|e| match e {
        // Serialization failures are wrapped so the caller sees a
        // proper error instead of a raw serde message.
        resolver_api::Error::Serialization(e) => {
          anyhow!("{e:?}").context("response serialization error")
        }
        resolver_api::Error::Inner(e) => e,
      });
  if let Err(e) = &res {
    warn!("/user request {req_id} error: {e:#}");
  }
  let elapsed = timer.elapsed();
  debug!("/user request {req_id} | resolve time: {elapsed:?}");
  Ok((TypedHeader(ContentType::json()), res?))
}
/// Cap on the number of entries kept per resource type in a user's
/// recently-viewed list.
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<PushRecentlyViewed, User> for State {
  /// Pushes `resource` to the front of the user's recently-viewed list
  /// for that resource type, deduplicating the id and capping the list
  /// at RECENTLY_VIEWED_MAX entries.
  #[instrument(
    name = "PushRecentlyViewed",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    PushRecentlyViewed { resource }: PushRecentlyViewed,
    user: User,
  ) -> anyhow::Result<PushRecentlyViewedResponse> {
    // Re-fetch the user to get the current recents (the extension
    // value may be stale).
    let user = get_user(&user.id).await?;
    let (resource_type, id) = resource.extract_variant_id();
    let update = match user.recents.get(&resource_type) {
      Some(recents) => {
        // Drop any existing occurrence of this id, keep at most
        // RECENTLY_VIEWED_MAX - 1 older entries, then push to front.
        let mut recents = recents
          .iter()
          .filter(|_id| !id.eq(*_id))
          .take(RECENTLY_VIEWED_MAX - 1)
          .collect::<VecDeque<_>>();
        recents.push_front(id);
        doc! { format!("recents.{resource_type}"): to_bson(&recents)? }
      }
      None => {
        // First view of this resource type: start a one-element list.
        doc! { format!("recents.{resource_type}"): [id] }
      }
    };
    update_one_by_id(
      &db_client().await.users,
      &user.id,
      mungos::update::Update::Set(update),
      None,
    )
    .await
    .with_context(|| {
      format!("failed to update recents.{resource_type}")
    })?;
    Ok(PushRecentlyViewedResponse {})
  }
}
impl Resolve<SetLastSeenUpdate, User> for State {
  #[instrument(
    name = "SetLastSeenUpdate",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    SetLastSeenUpdate {}: SetLastSeenUpdate,
    user: User,
  ) -> anyhow::Result<SetLastSeenUpdateResponse> {
    // Stamp the user's `last_update_view` field with the current time.
    let update = mungos::update::Update::Set(doc! {
      "last_update_view": komodo_timestamp()
    });
    let db = db_client().await;
    update_one_by_id(&db.users, &user.id, update, None)
      .await
      .context("failed to update user last_update_view")?;
    Ok(SetLastSeenUpdateResponse {})
  }
}
// Length of the random portion of generated api keys and secrets.
const SECRET_LENGTH: usize = 40;
// bcrypt work factor used when hashing api key secrets.
const BCRYPT_COST: u32 = 10;
impl Resolve<CreateApiKey, User> for State {
  #[instrument(
    name = "CreateApiKey",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    CreateApiKey { name, expires }: CreateApiKey,
    user: User,
  ) -> anyhow::Result<CreateApiKeyResponse> {
    // Re-load the user record from the db (errors propagate if the
    // lookup fails).
    let user = get_user(&user.id).await?;
    let key = format!("K-{}", random_string(SECRET_LENGTH));
    let secret = format!("S-{}", random_string(SECRET_LENGTH));
    // Only the bcrypt hash of the secret is persisted; the plaintext
    // secret is handed back to the caller exactly once, in the response.
    let hashed_secret = bcrypt::hash(&secret, BCRYPT_COST)
      .context("failed at hashing secret string")?;
    let api_key = ApiKey {
      key: key.clone(),
      secret: hashed_secret,
      name,
      user_id: user.id.clone(),
      expires,
      created_at: komodo_timestamp(),
    };
    let db = db_client().await;
    db.api_keys
      .insert_one(api_key)
      .await
      .context("failed to create api key on db")?;
    Ok(CreateApiKeyResponse { key, secret })
  }
}
impl Resolve<DeleteApiKey, User> for State {
  #[instrument(
    name = "DeleteApiKey",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    DeleteApiKey { key }: DeleteApiKey,
    user: User,
  ) -> anyhow::Result<DeleteApiKeyResponse> {
    let db = db_client().await;
    // Look up the api key record; a failed query and a missing key are
    // reported with distinct contexts.
    let api_key = db
      .api_keys
      .find_one(doc! { "key": &key })
      .await
      .context("failed at db query")?
      .context("no api key with key found")?;
    // A user may only delete api keys they own.
    if api_key.user_id != user.id {
      return Err(anyhow!("api key does not belong to user"));
    }
    db.api_keys
      .delete_one(doc! { "key": api_key.key })
      .await
      .context("failed to delete api key from db")?;
    Ok(DeleteApiKeyResponse {})
  }
}

View File

@@ -1,35 +1,15 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::write::{
CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter,
},
entities::{
alerter::{Alerter, AlerterInfo},
monitor_timestamp,
permission::PermissionLevel,
user::User,
Operation,
alerter::Alerter, permission::PermissionLevel, user::User,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId, to_bson},
};
use resolver_api::Resolve;
use crate::{
helpers::{
create_permission, remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
update::{add_update, make_update},
},
state::{db_client, State},
};
use crate::{resource, state::State};
#[async_trait]
impl Resolve<CreateAlerter, User> for State {
#[instrument(name = "CreateAlerter", skip(self, user))]
async fn resolve(
@@ -37,61 +17,10 @@ impl Resolve<CreateAlerter, User> for State {
CreateAlerter { name, config }: CreateAlerter,
user: User,
) -> anyhow::Result<Alerter> {
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
let start_ts = monitor_timestamp();
let is_default = db_client()
.await
.alerters
.find_one(None, None)
.await?
.is_none();
let alerter = Alerter {
id: Default::default(),
name,
updated_at: start_ts,
description: Default::default(),
tags: Default::default(),
config: config.into(),
info: AlerterInfo { is_default },
};
let alerter_id = db_client()
.await
.alerters
.insert_one(alerter, None)
.await
.context("failed to add alerter to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let alerter = Alerter::get_resource(&alerter_id).await?;
create_permission(&user, &alerter, PermissionLevel::Write).await;
let mut update =
make_update(&alerter, Operation::CreateAlerter, &user);
update.push_simple_log(
"create alerter",
format!(
"created alerter\nid: {}\nname: {}",
alerter.id, alerter.name
),
);
update
.push_simple_log("config", format!("{:#?}", alerter.config));
update.finalize();
add_update(update).await?;
Ok(alerter)
resource::create::<Alerter>(&name, config, &user).await
}
}
#[async_trait]
impl Resolve<CopyAlerter, User> for State {
#[instrument(name = "CopyAlerter", skip(self, user))]
async fn resolve(
@@ -99,63 +28,16 @@ impl Resolve<CopyAlerter, User> for State {
CopyAlerter { name, id }: CopyAlerter,
user: User,
) -> anyhow::Result<Alerter> {
let Alerter {
config,
description,
..
} = Alerter::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
let Alerter { config, .. } = resource::get_check_permissions::<
Alerter,
>(
&id, &user, PermissionLevel::Write
)
.await?;
let start_ts = monitor_timestamp();
let alerter = Alerter {
id: Default::default(),
name,
updated_at: start_ts,
description,
config,
tags: Default::default(),
info: Default::default(),
};
let alerter_id = db_client()
.await
.alerters
.insert_one(alerter, None)
.await
.context("failed to add alerter to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let alerter = Alerter::get_resource(&alerter_id).await?;
create_permission(&user, &alerter, PermissionLevel::Write).await;
let mut update =
make_update(&alerter, Operation::CreateAlerter, &user);
update.push_simple_log(
"create alerter",
format!(
"created alerter\nid: {}\nname: {}",
alerter.id, alerter.name
),
);
update
.push_simple_log("config", format!("{:#?}", alerter.config));
update.finalize();
add_update(update).await?;
Ok(alerter)
resource::create::<Alerter>(&name, config.into(), &user).await
}
}
#[async_trait]
impl Resolve<DeleteAlerter, User> for State {
#[instrument(name = "DeleteAlerter", skip(self, user))]
async fn resolve(
@@ -163,38 +45,10 @@ impl Resolve<DeleteAlerter, User> for State {
DeleteAlerter { id }: DeleteAlerter,
user: User,
) -> anyhow::Result<Alerter> {
let alerter = Alerter::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&alerter, Operation::DeleteAlerter, &user);
delete_one_by_id(&db_client().await.alerters, &id, None)
.await
.context("failed to delete alerter from database")?;
delete_all_permissions_on_resource(&alerter).await;
update.push_simple_log(
"delete alerter",
format!("deleted alerter {}", alerter.name),
);
update.finalize();
add_update(update).await?;
remove_from_recently_viewed(&alerter).await?;
Ok(alerter)
resource::delete::<Alerter>(&id, &user).await
}
}
#[async_trait]
impl Resolve<UpdateAlerter, User> for State {
#[instrument(name = "UpdateAlerter", skip(self, user))]
async fn resolve(
@@ -202,39 +56,6 @@ impl Resolve<UpdateAlerter, User> for State {
UpdateAlerter { id, config }: UpdateAlerter,
user: User,
) -> anyhow::Result<Alerter> {
let alerter = Alerter::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&alerter, Operation::UpdateAlerter, &user);
update.push_simple_log(
"alerter update",
serde_json::to_string_pretty(&config)?,
);
let config = alerter.config.merge_partial(config);
update_one_by_id(
&db_client().await.alerters,
&id,
mungos::update::Update::FlattenSet(
doc! { "config": to_bson(&config)? },
),
None,
)
.await
.with_context(|| format!("failed to update alerter {id}"))?;
let alerter = Alerter::get_resource(&id).await?;
update.finalize();
add_update(update).await?;
Ok(alerter)
resource::update::<Alerter>(&id, config, &user).await
}
}

View File

@@ -1,152 +0,0 @@
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
api::write::*,
entities::{
api_key::ApiKey,
monitor_timestamp,
user::{User, UserConfig},
},
};
use mungos::{by_id::find_one_by_id, mongodb::bson::doc};
use resolver_api::Resolve;
use crate::{
auth::random_string,
helpers::query::get_user,
state::{db_client, State},
};
const SECRET_LENGTH: usize = 40;
const BCRYPT_COST: u32 = 10;
#[async_trait]
impl Resolve<CreateApiKey, User> for State {
#[instrument(
name = "CreateApiKey",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
CreateApiKey { name, expires }: CreateApiKey,
user: User,
) -> anyhow::Result<CreateApiKeyResponse> {
let user = get_user(&user.id).await?;
let key = format!("K-{}", random_string(SECRET_LENGTH));
let secret = format!("S-{}", random_string(SECRET_LENGTH));
let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
.context("failed at hashing secret string")?;
let api_key = ApiKey {
name,
key: key.clone(),
secret: secret_hash,
user_id: user.id.clone(),
created_at: monitor_timestamp(),
expires,
};
db_client()
.await
.api_keys
.insert_one(api_key, None)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
}
}
#[async_trait]
impl Resolve<DeleteApiKey, User> for State {
#[instrument(
name = "DeleteApiKey",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
DeleteApiKey { key }: DeleteApiKey,
user: User,
) -> anyhow::Result<DeleteApiKeyResponse> {
let client = db_client().await;
let key = client
.api_keys
.find_one(doc! { "key": &key }, None)
.await
.context("failed at db query")?
.context("no api key with key found")?;
if user.id != key.user_id {
return Err(anyhow!("api key does not belong to user"));
}
client
.api_keys
.delete_one(doc! { "key": key.key }, None)
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})
}
}
#[async_trait]
impl Resolve<CreateApiKeyForServiceUser, User> for State {
#[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
async fn resolve(
&self,
CreateApiKeyForServiceUser {
user_id,
name,
expires,
}: CreateApiKeyForServiceUser,
user: User,
) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
let service_user =
find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(anyhow!("user is not service user"));
};
self
.resolve(CreateApiKey { name, expires }, service_user)
.await
}
}
#[async_trait]
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
#[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
async fn resolve(
&self,
DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
user: User,
) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
let db = db_client().await;
let api_key = db
.api_keys
.find_one(doc! { "key": &key }, None)
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
let service_user =
find_one_by_id(&db_client().await.users, &api_key.user_id)
.await
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(anyhow!("user is not service user"));
};
db.api_keys
.delete_one(doc! { "key": key }, None)
.await
.context("failed to delete api key on db")?;
Ok(DeleteApiKeyForServiceUserResponse {})
}
}

View File

@@ -1,105 +1,39 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use mongo_indexed::doc;
use komodo_client::{
api::write::*,
entities::{
build::Build,
builder::Builder,
monitor_timestamp,
build::{Build, BuildInfo, PartialBuildConfig},
config::core::CoreConfig,
permission::PermissionLevel,
to_monitor_name,
update::{Log, UpdateStatus},
user::User,
Operation,
CloneArgs, NoData,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, oid::ObjectId, to_document},
use mungos::mongodb::bson::to_document;
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{
helpers::{
create_permission, empty_or_only_spaces,
remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
update::{add_update, make_update, update_update},
},
state::{action_states, db_client, State},
config::core_config,
helpers::{git_token, random_string},
resource,
state::{db_client, github_client, State},
};
#[async_trait]
impl Resolve<CreateBuild, User> for State {
#[instrument(name = "CreateBuild", skip(self, user))]
async fn resolve(
&self,
CreateBuild { name, mut config }: CreateBuild,
CreateBuild { name, config }: CreateBuild,
user: User,
) -> anyhow::Result<Build> {
if !user.admin && !user.create_build_permissions {
return Err(anyhow!(
"User does not have create build permissions."
));
}
let name = to_monitor_name(&name);
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
if let Some(builder_id) = &config.builder_id {
let builder = Builder::get_resource_check_permissions(builder_id, &user, PermissionLevel::Read)
.await
.context("cannot create build using this builder. user must have at least read permissions on the builder.")?;
config.builder_id = Some(builder.id)
}
let start_ts = monitor_timestamp();
let build = Build {
id: Default::default(),
name,
updated_at: start_ts,
description: Default::default(),
tags: Default::default(),
config: config.into(),
info: Default::default(),
};
let build_id = db_client()
.await
.builds
.insert_one(build, None)
.await
.context("failed to add build to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let build = Build::get_resource(&build_id).await?;
create_permission(&user, &build, PermissionLevel::Write).await;
let mut update =
make_update(&build, Operation::CreateBuild, &user);
update.push_simple_log(
"create build",
format!(
"created build\nid: {}\nname: {}",
build.id, build.name
),
);
update.push_simple_log("config", format!("{:#?}", build.config));
update.finalize();
add_update(update).await?;
Ok(build)
resource::create::<Build>(&name, config, &user).await
}
}
#[async_trait]
impl Resolve<CopyBuild, User> for State {
#[instrument(name = "CopyBuild", skip(self, user))]
async fn resolve(
@@ -107,74 +41,19 @@ impl Resolve<CopyBuild, User> for State {
CopyBuild { name, id }: CopyBuild,
user: User,
) -> anyhow::Result<Build> {
if !user.admin && !user.create_build_permissions {
return Err(anyhow!(
"User does not have create build permissions."
));
}
let name = to_monitor_name(&name);
let Build {
config,
description,
tags,
..
} = Build::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Builder::get_resource_check_permissions(&config.builder_id, &user, PermissionLevel::Read)
.await
.context("cannot create build using this builder. user must have at least read permissions on the builder.")?;
let start_ts = monitor_timestamp();
let build = Build {
id: Default::default(),
name,
updated_at: start_ts,
description,
tags,
config,
info: Default::default(),
};
let build_id = db_client()
.await
.builds
.insert_one(build, None)
.await
.context("failed to add build to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let build = Build::get_resource(&build_id).await?;
create_permission(&user, &build, PermissionLevel::Write).await;
let mut update =
make_update(&build, Operation::CreateBuild, &user);
update.push_simple_log(
"create build",
format!(
"created build\nid: {}\nname: {}",
build.id, build.name
),
);
update.push_simple_log(
"config",
serde_json::to_string_pretty(&build)?,
);
update.finalize();
add_update(update).await?;
Ok(build)
let Build { mut config, .. } =
resource::get_check_permissions::<Build>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
// reset version to 0.0.0
config.version = Default::default();
resource::create::<Build>(&name, config.into(), &user).await
}
}
#[async_trait]
impl Resolve<DeleteBuild, User> for State {
#[instrument(name = "DeleteBuild", skip(self, user))]
async fn resolve(
@@ -182,128 +61,297 @@ impl Resolve<DeleteBuild, User> for State {
DeleteBuild { id }: DeleteBuild,
user: User,
) -> anyhow::Result<Build> {
if action_states()
.build
.get(&id)
.await
.unwrap_or_default()
.busy()?
{
return Err(anyhow!("build busy"));
}
let build = Build::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&build, Operation::DeleteBuild, &user);
update.status = UpdateStatus::InProgress;
update.id = add_update(update.clone()).await?;
let res = db_client()
.await
.builds
.delete_one(doc! { "_id": ObjectId::from_str(&id)? }, None)
.await
.context("failed to delete build from database");
delete_all_permissions_on_resource(&build).await;
let log = match res {
Ok(_) => Log::simple(
"delete build",
format!("deleted build {}", build.name),
),
Err(e) => Log::error(
"delete build",
format!("failed to delete build\n{e:#?}"),
),
};
update.logs.push(log);
update.finalize();
update_update(update).await?;
remove_from_recently_viewed(&build).await?;
Ok(build)
resource::delete::<Build>(&id, &user).await
}
}
#[async_trait]
impl Resolve<UpdateBuild, User> for State {
#[instrument(name = "UpdateBuild", skip(self, user))]
async fn resolve(
&self,
UpdateBuild { id, mut config }: UpdateBuild,
UpdateBuild { id, config }: UpdateBuild,
user: User,
) -> anyhow::Result<Build> {
if action_states()
.build
.get(&id)
.await
.unwrap_or_default()
.busy()?
{
return Err(anyhow!("build busy"));
resource::update::<Build>(&id, config, &user).await
}
}
impl Resolve<RefreshBuildCache, User> for State {
#[instrument(
name = "RefreshBuildCache",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
RefreshBuildCache { build }: RefreshBuildCache,
user: User,
) -> anyhow::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// build should be able to do this.
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Execute,
)
.await?;
if build.config.repo.is_empty() {
// Nothing to do here
return Ok(NoData {})
}
let build = Build::get_resource_check_permissions(
&id,
let config = core_config();
let repo_dir = config.repo_directory.join(random_string(10));
let mut clone_args: CloneArgs = (&build).into();
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
clone_args.destination = Some(repo_dir.display().to_string());
let access_token = match (&clone_args.account, &clone_args.provider)
{
(None, _) => None,
(Some(_), None) => {
return Err(anyhow!(
"Account is configured, but provider is empty"
))
}
(Some(username), Some(provider)) => {
git_token(provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"),
)?
}
};
let (_, latest_hash, latest_message, _) = git::clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.context("failed to clone build repo")?;
let info = BuildInfo {
last_built_at: build.info.last_built_at,
built_hash: build.info.built_hash,
built_message: build.info.built_message,
latest_hash,
latest_message,
};
let info = to_document(&info)
.context("failed to serialize build info to bson")?;
db_client()
.await
.builds
.update_one(
doc! { "name": &build.name },
doc! { "$set": { "info": info } },
)
.await
.context("failed to update build info on db")?;
if repo_dir.exists() {
if let Err(e) = std::fs::remove_dir_all(&repo_dir) {
warn!("failed to remove build cache update repo directory | {e:?}")
}
}
Ok(NoData {})
}
}
impl Resolve<CreateBuildWebhook, User> for State {
#[instrument(name = "CreateBuildWebhook", skip(self, user))]
async fn resolve(
&self,
CreateBuildWebhook { build }: CreateBuildWebhook,
user: User,
) -> anyhow::Result<CreateBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Write,
)
.await?;
if let Some(builder_id) = &config.builder_id {
let builder = Builder::get_resource_check_permissions(builder_id, &user, PermissionLevel::Read)
if build.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let webhook_secret = if build.config.webhook_secret.is_empty() {
webhook_secret
} else {
&build.config.webhook_secret
};
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo, &request)
.await
.context("failed to create webhook")?;
if !build.config.webhook_enabled {
self
.resolve(
UpdateBuild {
id: build.id,
config: PartialBuildConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("cannot create build using this builder. user must have at least read permissions on the builder.")?;
config.builder_id = Some(builder.id)
.context("failed to update build to enable webhook")?;
}
if let Some(build_args) = &mut config.build_args {
build_args.retain(|v| {
!empty_or_only_spaces(&v.variable)
&& !empty_or_only_spaces(&v.value)
})
}
if let Some(extra_args) = &mut config.extra_args {
extra_args.retain(|v| !empty_or_only_spaces(v))
}
let config_doc = to_document(&config)
.context("failed to serialize config to bson document")?;
update_one_by_id(
&db_client().await.builds,
&build.id,
mungos::update::Update::FlattenSet(
doc! { "config": config_doc },
),
None,
)
.await
.context("failed to update build on database")?;
let mut update =
make_update(&build, Operation::UpdateBuild, &user);
update.push_simple_log(
"build update",
serde_json::to_string_pretty(&config)
.context("failed to serialize config to json")?,
);
update.finalize();
add_update(update).await?;
let build = Build::get_resource(&build.id).await?;
Ok(build)
Ok(NoData {})
}
}
impl Resolve<DeleteBuildWebhook, User> for State {
#[instrument(name = "DeleteBuildWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteBuildWebhook { build }: DeleteBuildWebhook,
user: User,
) -> anyhow::Result<DeleteBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let build = resource::get_check_permissions::<Build>(
&build,
&user,
PermissionLevel::Write,
)
.await?;
if build.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if build.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't delete webhook"
));
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}

View File

@@ -1,120 +1,24 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
builder::{
Builder, PartialBuilderConfig, PartialServerBuilderConfig,
},
monitor_timestamp,
permission::PermissionLevel,
server::Server,
update::{Log, ResourceTarget, Update},
user::User,
Operation,
builder::Builder, permission::PermissionLevel, user::User,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::{
create_permission, remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
update::{add_update, make_update},
},
state::{db_client, State},
};
use crate::{resource, state::State};
#[instrument(skip(user))]
async fn validate_config(
config: &mut PartialBuilderConfig,
user: &User,
) -> anyhow::Result<()> {
match config {
PartialBuilderConfig::Server(PartialServerBuilderConfig {
server_id: Some(server_id),
}) if !server_id.is_empty() => {
let server = Server::get_resource_check_permissions(
server_id,
user,
PermissionLevel::Write,
)
.await?;
*server_id = server.id;
}
_ => {}
}
Ok(())
}
#[async_trait]
impl Resolve<CreateBuilder, User> for State {
#[instrument(name = "CreateBuilder", skip(self, user))]
async fn resolve(
&self,
CreateBuilder { name, mut config }: CreateBuilder,
CreateBuilder { name, config }: CreateBuilder,
user: User,
) -> anyhow::Result<Builder> {
let start_ts = monitor_timestamp();
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
validate_config(&mut config, &user).await?;
let builder = Builder {
id: Default::default(),
name,
updated_at: start_ts,
description: Default::default(),
tags: Default::default(),
config: config.into(),
info: Default::default(),
};
let builder_id = db_client()
.await
.builders
.insert_one(builder, None)
.await
.context("failed to add builder to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let builder = Builder::get_resource(&builder_id).await?;
create_permission(&user, &builder, PermissionLevel::Write).await;
let update = Update {
target: ResourceTarget::Builder(builder_id),
operation: Operation::CreateBuilder,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
success: true,
logs: vec![
Log::simple(
"create builder",
format!(
"created builder\nid: {}\nname: {}",
builder.id, builder.name
),
),
Log::simple("config", format!("{:#?}", builder.config)),
],
..Default::default()
};
add_update(update).await?;
Ok(builder)
resource::create::<Builder>(&name, config, &user).await
}
}
#[async_trait]
impl Resolve<CopyBuilder, User> for State {
#[instrument(name = "CopyBuilder", skip(self, user))]
async fn resolve(
@@ -122,65 +26,16 @@ impl Resolve<CopyBuilder, User> for State {
CopyBuilder { name, id }: CopyBuilder,
user: User,
) -> anyhow::Result<Builder> {
let Builder {
config,
description,
..
} = Builder::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
let Builder { config, .. } = resource::get_check_permissions::<
Builder,
>(
&id, &user, PermissionLevel::Write
)
.await?;
let start_ts = monitor_timestamp();
let builder = Builder {
id: Default::default(),
name,
updated_at: start_ts,
description,
tags: Default::default(),
config,
info: (),
};
let builder_id = db_client()
.await
.builders
.insert_one(builder, None)
.await
.context("failed to add builder to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let builder = Builder::get_resource(&builder_id).await?;
create_permission(&user, &builder, PermissionLevel::Write).await;
let update = Update {
target: ResourceTarget::Builder(builder_id),
operation: Operation::CreateBuilder,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
success: true,
logs: vec![
Log::simple(
"create builder",
format!(
"created builder\nid: {}\nname: {}",
builder.id, builder.name
),
),
Log::simple("config", format!("{:#?}", builder.config)),
],
..Default::default()
};
add_update(update).await?;
Ok(builder)
resource::create::<Builder>(&name, config.into(), &user).await
}
}
#[async_trait]
impl Resolve<DeleteBuilder, User> for State {
#[instrument(name = "DeleteBuilder", skip(self, user))]
async fn resolve(
@@ -188,96 +43,17 @@ impl Resolve<DeleteBuilder, User> for State {
DeleteBuilder { id }: DeleteBuilder,
user: User,
) -> anyhow::Result<Builder> {
let builder = Builder::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
// remove the builder from any attached builds
db_client()
.await
.builds
.update_many(
doc! { "config.builder.params.builder_id": &id },
mungos::update::Update::Set(
doc! { "config.builder.params.builder_id": "" },
),
None,
)
.await?;
delete_one_by_id(&db_client().await.builders, &id, None)
.await
.context("failed to delete builder from database")?;
delete_all_permissions_on_resource(&builder).await;
let mut update =
make_update(&builder, Operation::DeleteBuilder, &user);
update.push_simple_log(
"delete builder",
format!("deleted builder {}", builder.name),
);
update.finalize();
add_update(update).await?;
remove_from_recently_viewed(&builder).await?;
Ok(builder)
resource::delete::<Builder>(&id, &user).await
}
}
#[async_trait]
impl Resolve<UpdateBuilder, User> for State {
#[instrument(name = "UpdateBuilder", skip(self, user))]
async fn resolve(
&self,
UpdateBuilder { id, mut config }: UpdateBuilder,
UpdateBuilder { id, config }: UpdateBuilder,
user: User,
) -> anyhow::Result<Builder> {
let builder = Builder::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
validate_config(&mut config, &user).await?;
let mut update = Update {
target: ResourceTarget::Builder(id.clone()),
operation: Operation::UpdateBuilder,
start_ts: monitor_timestamp(),
logs: vec![Log::simple(
"builder update",
serde_json::to_string_pretty(&config)
.context("failed to serialize config update")?,
)],
operator: user.id.clone(),
..Default::default()
};
let config = builder.config.merge_partial(config);
update_one_by_id(
&db_client().await.builders,
&id,
mungos::update::Update::FlattenSet(
doc! { "config": to_document(&config)? },
),
None,
)
.await?;
let builder = Builder::get_resource(&id).await?;
update.finalize();
add_update(update).await?;
Ok(builder)
resource::update::<Builder>(&id, config, &user).await
}
}

View File

@@ -1,150 +1,42 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
all_logs_success,
build::Build,
deployment::{
Deployment, DeploymentImage, DockerContainerState,
PartialDeploymentConfig,
},
monitor_timestamp,
deployment::{Deployment, DeploymentState},
komodo_timestamp,
permission::PermissionLevel,
server::Server,
to_monitor_name,
update::{Log, Update, UpdateStatus},
to_komodo_name,
update::Update,
user::User,
Operation,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId, to_document},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{
create_permission, empty_or_only_spaces, periphery_client,
periphery_client,
query::get_deployment_state,
remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
update::{add_update, make_update, update_update},
update::{add_update, make_update},
},
resource,
state::{action_states, db_client, State},
};
#[instrument(skip(user))]
async fn validate_config(
config: &mut PartialDeploymentConfig,
user: &User,
) -> anyhow::Result<()> {
if let Some(server_id) = &config.server_id {
if !server_id.is_empty() {
let server = Server::get_resource_check_permissions(server_id, user, PermissionLevel::Write)
.await
.context("cannot create deployment on this server. user must have update permissions on the server to perform this action.")?;
config.server_id = Some(server.id);
}
}
if let Some(DeploymentImage::Build { build_id, version }) =
&config.image
{
if !build_id.is_empty() {
let build = Build::get_resource_check_permissions(build_id, user, PermissionLevel::Read)
.await
.context("cannot create deployment with this build attached. user must have at least read permissions on the build to perform this action.")?;
config.image = Some(DeploymentImage::Build {
build_id: build.id,
version: version.clone(),
});
}
}
if let Some(volumes) = &mut config.volumes {
volumes.retain(|v| {
!empty_or_only_spaces(&v.local)
&& !empty_or_only_spaces(&v.container)
})
}
if let Some(ports) = &mut config.ports {
ports.retain(|v| {
!empty_or_only_spaces(&v.local)
&& !empty_or_only_spaces(&v.container)
})
}
if let Some(environment) = &mut config.environment {
environment.retain(|v| {
!empty_or_only_spaces(&v.variable)
&& !empty_or_only_spaces(&v.value)
})
}
if let Some(extra_args) = &mut config.extra_args {
extra_args.retain(|v| !empty_or_only_spaces(v))
}
Ok(())
}
#[async_trait]
impl Resolve<CreateDeployment, User> for State {
#[instrument(name = "CreateDeployment", skip(self, user))]
async fn resolve(
&self,
CreateDeployment { name, mut config }: CreateDeployment,
CreateDeployment { name, config }: CreateDeployment,
user: User,
) -> anyhow::Result<Deployment> {
let name = to_monitor_name(&name);
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
validate_config(&mut config, &user).await?;
let start_ts = monitor_timestamp();
let deployment = Deployment {
id: Default::default(),
name,
updated_at: start_ts,
description: Default::default(),
tags: Default::default(),
config: config.into(),
info: (),
};
let deployment_id = db_client()
.await
.deployments
.insert_one(&deployment, None)
.await
.context("failed to add deployment to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let deployment = Deployment::get_resource(&deployment_id).await?;
create_permission(&user, &deployment, PermissionLevel::Write)
.await;
let mut update =
make_update(&deployment, Operation::CreateDeployment, &user);
update.push_simple_log(
"create deployment",
format!(
"created deployment\nid: {}\nname: {}",
deployment.id, deployment.name
),
);
update
.push_simple_log("config", format!("{:#?}", deployment.config));
update.finalize();
add_update(update).await?;
Ok(deployment)
resource::create::<Deployment>(&name, config, &user).await
}
}
#[async_trait]
impl Resolve<CopyDeployment, User> for State {
#[instrument(name = "CopyDeployment", skip(self, user))]
async fn resolve(
@@ -152,75 +44,17 @@ impl Resolve<CopyDeployment, User> for State {
CopyDeployment { name, id }: CopyDeployment,
user: User,
) -> anyhow::Result<Deployment> {
let name = to_monitor_name(&name);
let Deployment {
config,
description,
tags,
..
} = Deployment::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
if !config.server_id.is_empty() {
Server::get_resource_check_permissions(&config.server_id, &user, PermissionLevel::Write)
.await
.context("cannot create deployment on this server. user must have update permissions on the server to perform this action.")?;
}
if let DeploymentImage::Build { build_id, .. } = &config.image {
if !build_id.is_empty() {
Build::get_resource_check_permissions(build_id, &user, PermissionLevel::Read)
.await
.context("cannot create deployment with this build attached. user must have at least read permissions on the build to perform this action.")?;
}
}
let start_ts = monitor_timestamp();
let deployment = Deployment {
id: Default::default(),
name,
updated_at: start_ts,
description,
tags,
config,
info: (),
};
let deployment_id = db_client()
.await
.deployments
.insert_one(&deployment, None)
.await
.context("failed to add deployment to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let deployment = Deployment::get_resource(&deployment_id).await?;
create_permission(&user, &deployment, PermissionLevel::Write)
.await;
let mut update =
make_update(&deployment, Operation::CreateDeployment, &user);
update.push_simple_log(
"create deployment",
format!(
"created deployment\nid: {}\nname: {}",
deployment.id, deployment.name
),
);
update
.push_simple_log("config", format!("{:#?}", deployment.config));
update.finalize();
add_update(update).await?;
Ok(deployment)
let Deployment { config, .. } =
resource::get_check_permissions::<Deployment>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::create::<Deployment>(&name, config.into(), &user).await
}
}
#[async_trait]
impl Resolve<DeleteDeployment, User> for State {
#[instrument(name = "DeleteDeployment", skip(self, user))]
async fn resolve(
@@ -228,172 +62,21 @@ impl Resolve<DeleteDeployment, User> for State {
DeleteDeployment { id }: DeleteDeployment,
user: User,
) -> anyhow::Result<Deployment> {
let deployment = Deployment::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.deleting = true)?;
let state = get_deployment_state(&deployment)
.await
.context("failed to get container state")?;
let mut update =
make_update(&deployment, Operation::DeleteDeployment, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
if !matches!(
state,
DockerContainerState::NotDeployed
| DockerContainerState::Unknown
) {
// container needs to be destroyed
let server =
Server::get_resource(&deployment.config.server_id).await;
if let Err(e) = server {
update.logs.push(Log::error(
"remove container",
format!(
"failed to retrieve server at {} from db | {e:#?}",
deployment.config.server_id
),
));
} else if let Ok(server) = server {
match periphery_client(&server) {
Ok(periphery) => match periphery
.request(api::container::RemoveContainer {
name: deployment.name.clone(),
signal: deployment.config.termination_signal.into(),
time: deployment.config.termination_timeout.into(),
})
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"remove container",
format!(
"failed to remove container on periphery | {e:#?}"
),
),
},
Err(e) => update.push_error_log(
"remove container",
format!(
"failed to remove container on periphery | {e:#?}"
),
),
};
}
}
let res = delete_one_by_id(
&db_client().await.deployments,
&deployment.id,
None,
)
.await
.context("failed to delete deployment from mongo");
let log = match res {
Ok(_) => Log::simple(
"delete deployment",
format!("deleted deployment {}", deployment.name),
),
Err(e) => Log::error(
"delete deployment",
format!("failed to delete deployment\n{e:#?}"),
),
};
delete_all_permissions_on_resource(&deployment).await;
update.logs.push(log);
update.end_ts = Some(monitor_timestamp());
update.status = UpdateStatus::Complete;
update.success = all_logs_success(&update.logs);
update_update(update).await?;
remove_from_recently_viewed(&deployment).await?;
Ok(deployment)
resource::delete::<Deployment>(&id, &user).await
}
}
#[async_trait]
impl Resolve<UpdateDeployment, User> for State {
#[instrument(name = "UpdateDeployment", skip(self, user))]
async fn resolve(
&self,
UpdateDeployment { id, mut config }: UpdateDeployment,
UpdateDeployment { id, config }: UpdateDeployment,
user: User,
) -> anyhow::Result<Deployment> {
if action_states()
.deployment
.get(&id)
.await
.unwrap_or_default()
.busy()?
{
return Err(anyhow!("deployment busy"));
}
let deployment = Deployment::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&deployment, Operation::UpdateDeployment, &user);
validate_config(&mut config, &user).await?;
let config_doc = to_document(&config)
.context("failed to serialize config to bson")?;
update_one_by_id(
&db_client().await.deployments,
&id,
mungos::update::Update::FlattenSet(
doc! { "config": config_doc },
),
None,
)
.await
.context("failed to update server on mongo")?;
update.push_simple_log(
"deployment update",
serde_json::to_string_pretty(&config)
.context("failed to serialize config to json")?,
);
update.finalize();
add_update(update).await?;
let deployment: Deployment =
Deployment::get_resource(&id).await?;
Ok(deployment)
resource::update::<Deployment>(&id, config, &user).await
}
}
#[async_trait]
impl Resolve<RenameDeployment, User> for State {
#[instrument(name = "RenameDeployment", skip(self, user))]
async fn resolve(
@@ -401,7 +84,7 @@ impl Resolve<RenameDeployment, User> for State {
RenameDeployment { id, name }: RenameDeployment,
user: User,
) -> anyhow::Result<Update> {
let deployment = Deployment::get_resource_check_permissions(
let deployment = resource::get_check_permissions::<Deployment>(
&id,
&user,
PermissionLevel::Write,
@@ -419,11 +102,11 @@ impl Resolve<RenameDeployment, User> for State {
let _action_guard =
action_state.update(|state| state.renaming = true)?;
let name = to_monitor_name(&name);
let name = to_komodo_name(&name);
let container_state = get_deployment_state(&deployment).await?;
if container_state == DockerContainerState::Unknown {
if container_state == DeploymentState::Unknown {
return Err(anyhow!(
"cannot rename deployment when container status is unknown"
));
@@ -436,16 +119,16 @@ impl Resolve<RenameDeployment, User> for State {
&db_client().await.deployments,
&deployment.id,
mungos::update::Update::Set(
doc! { "name": &name, "updated_at": monitor_timestamp() },
doc! { "name": &name, "updated_at": komodo_timestamp() },
),
None,
)
.await
.context("failed to update deployment name on db")?;
if container_state != DockerContainerState::NotDeployed {
if container_state != DeploymentState::NotDeployed {
let server =
Server::get_resource(&deployment.config.server_id).await?;
resource::get::<Server>(&deployment.config.server_id).await?;
let log = periphery_client(&server)?
.request(api::container::RenameContainer {
curr_name: deployment.name.clone(),

View File

@@ -1,19 +1,17 @@
use anyhow::anyhow;
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::write::{UpdateDescription, UpdateDescriptionResponse},
entities::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
update::ResourceTarget, user::User,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, user::User, ResourceTarget,
},
};
use resolver_api::Resolve;
use crate::{helpers::resource::StateResource, state::State};
use crate::{resource, state::State};
#[async_trait]
impl Resolve<UpdateDescription, User> for State {
#[instrument(name = "UpdateDescription", skip(self, user))]
async fn resolve(
@@ -31,31 +29,84 @@ impl Resolve<UpdateDescription, User> for State {
))
}
ResourceTarget::Server(id) => {
Server::update_description(&id, &description, &user).await?;
resource::update_description::<Server>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::Deployment(id) => {
Deployment::update_description(&id, &description, &user)
.await?;
resource::update_description::<Deployment>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::Build(id) => {
Build::update_description(&id, &description, &user).await?;
resource::update_description::<Build>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::Repo(id) => {
Repo::update_description(&id, &description, &user).await?;
resource::update_description::<Repo>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::Builder(id) => {
Builder::update_description(&id, &description, &user).await?;
resource::update_description::<Builder>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::Alerter(id) => {
Alerter::update_description(&id, &description, &user).await?;
resource::update_description::<Alerter>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::Procedure(id) => {
Procedure::update_description(&id, &description, &user)
.await?;
resource::update_description::<Procedure>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::ServerTemplate(id) => {
ServerTemplate::update_description(&id, &description, &user)
.await?;
resource::update_description::<ServerTemplate>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
resource::update_description::<ResourceSync>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::Stack(id) => {
resource::update_description::<Stack>(
&id,
&description,
&user,
)
.await?;
}
}
Ok(UpdateDescriptionResponse {})

View File

@@ -3,8 +3,9 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::write::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::write::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
@@ -13,37 +14,37 @@ use uuid::Uuid;
use crate::{auth::auth_request, state::State};
mod alerter;
mod api_key;
mod build;
mod builder;
mod deployment;
mod description;
mod permissions;
mod procedure;
mod provider;
mod repo;
mod server;
mod server_template;
mod service_user;
mod stack;
mod sync;
mod tag;
mod user;
mod user_group;
mod variable;
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolver, EnumVariants,
)]
#[variant_derive(Debug)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum WriteRequest {
// ==== API KEY ====
CreateApiKey(CreateApiKey),
DeleteApiKey(DeleteApiKey),
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
// ==== USER ====
PushRecentlyViewed(PushRecentlyViewed),
SetLastSeenUpdate(SetLastSeenUpdate),
pub enum WriteRequest {
// ==== SERVICE USER ====
CreateServiceUser(CreateServiceUser),
UpdateServiceUserDescription(UpdateServiceUserDescription),
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
// ==== USER GROUP ====
CreateUserGroup(CreateUserGroup),
@@ -55,6 +56,7 @@ enum WriteRequest {
// ==== PERMISSIONS ====
UpdateUserBasePermissions(UpdateUserBasePermissions),
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
// ==== DESCRIPTION ====
@@ -66,7 +68,6 @@ enum WriteRequest {
UpdateServer(UpdateServer),
RenameServer(RenameServer),
CreateNetwork(CreateNetwork),
DeleteNetwork(DeleteNetwork),
// ==== DEPLOYMENT ====
CreateDeployment(CreateDeployment),
@@ -80,6 +81,9 @@ enum WriteRequest {
CopyBuild(CopyBuild),
DeleteBuild(DeleteBuild),
UpdateBuild(UpdateBuild),
RefreshBuildCache(RefreshBuildCache),
CreateBuildWebhook(CreateBuildWebhook),
DeleteBuildWebhook(DeleteBuildWebhook),
// ==== BUILDER ====
CreateBuilder(CreateBuilder),
@@ -98,6 +102,9 @@ enum WriteRequest {
CopyRepo(CopyRepo),
DeleteRepo(DeleteRepo),
UpdateRepo(UpdateRepo),
RefreshRepoCache(RefreshRepoCache),
CreateRepoWebhook(CreateRepoWebhook),
DeleteRepoWebhook(DeleteRepoWebhook),
// ==== ALERTER ====
CreateAlerter(CreateAlerter),
@@ -111,11 +118,45 @@ enum WriteRequest {
DeleteProcedure(DeleteProcedure),
UpdateProcedure(UpdateProcedure),
// ==== SYNC ====
CreateResourceSync(CreateResourceSync),
CopyResourceSync(CopyResourceSync),
DeleteResourceSync(DeleteResourceSync),
UpdateResourceSync(UpdateResourceSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),
// ==== STACK ====
CreateStack(CreateStack),
CopyStack(CopyStack),
DeleteStack(DeleteStack),
UpdateStack(UpdateStack),
RenameStack(RenameStack),
RefreshStackCache(RefreshStackCache),
CreateStackWebhook(CreateStackWebhook),
DeleteStackWebhook(DeleteStackWebhook),
// ==== TAG ====
CreateTag(CreateTag),
DeleteTag(DeleteTag),
RenameTag(RenameTag),
UpdateTagsOnResource(UpdateTagsOnResource),
// ==== VARIABLE ====
CreateVariable(CreateVariable),
UpdateVariableValue(UpdateVariableValue),
UpdateVariableDescription(UpdateVariableDescription),
UpdateVariableIsSecret(UpdateVariableIsSecret),
DeleteVariable(DeleteVariable),
// ==== PROVIDERS ====
CreateGitProviderAccount(CreateGitProviderAccount),
UpdateGitProviderAccount(UpdateGitProviderAccount),
DeleteGitProviderAccount(DeleteGitProviderAccount),
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
}
pub fn router() -> Router {
@@ -141,16 +182,17 @@ async fn handler(
Ok((TypedHeader(ContentType::json()), res??))
}
#[instrument(name = "WriteRequest", skip(user))]
#[instrument(
name = "WriteRequest",
skip(user, request),
fields(user_id = user.id, request = format!("{:?}", request.extract_variant()))
)]
async fn task(
req_id: Uuid,
request: WriteRequest,
user: User,
) -> anyhow::Result<String> {
info!(
"/write request {req_id} | user: {} ({})",
user.username, user.id
);
info!("/write request | user: {}", user.username);
let timer = Instant::now();
@@ -170,7 +212,7 @@ async fn task(
}
let elapsed = timer.elapsed();
info!("/write request {req_id} | resolve time: {elapsed:?}");
debug!("/write request {req_id} | resolve time: {elapsed:?}");
res
}

View File

@@ -1,16 +1,17 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::write::{
UpdatePermissionOnTarget, UpdatePermissionOnTargetResponse,
UpdateUserBasePermissions, UpdateUserBasePermissionsResponse,
UpdatePermissionOnResourceType,
UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget,
UpdatePermissionOnTargetResponse, UpdateUserBasePermissions,
UpdateUserBasePermissionsResponse,
},
entities::{
permission::{UserTarget, UserTargetVariant},
update::{ResourceTarget, ResourceTargetVariant},
user::User,
ResourceTarget, ResourceTargetVariant,
},
};
use mungos::{
@@ -27,7 +28,6 @@ use crate::{
state::{db_client, State},
};
#[async_trait]
impl Resolve<UpdateUserBasePermissions, User> for State {
#[instrument(name = "UpdateUserBasePermissions", skip(self, admin))]
async fn resolve(
@@ -43,6 +43,7 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
if !admin.admin {
return Err(anyhow!("this method is admin only"));
}
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query mongo for user")?
@@ -75,7 +76,73 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
}
}
#[async_trait]
impl Resolve<UpdatePermissionOnResourceType, User> for State {
#[instrument(
name = "UpdatePermissionOnResourceType",
skip(self, admin)
)]
async fn resolve(
&self,
UpdatePermissionOnResourceType {
user_target,
resource_type,
permission,
}: UpdatePermissionOnResourceType,
admin: User,
) -> anyhow::Result<UpdatePermissionOnResourceTypeResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only"));
}
// Some extra checks if user target is an actual User
if let UserTarget::User(user_id) = &user_target {
let user = get_user(user_id).await?;
if user.admin {
return Err(anyhow!(
"cannot use this method to update other admins permissions"
));
}
if !user.enabled {
return Err(anyhow!("user not enabled"));
}
}
let (user_target_variant, user_target_id) =
extract_user_target_with_validation(&user_target).await?;
let id = ObjectId::from_str(&user_target_id)
.context("id is not ObjectId")?;
let field = format!("all.{resource_type}");
let filter = doc! { "_id": id };
let update = doc! { "$set": { &field: permission.as_ref() } };
match user_target_variant {
UserTargetVariant::User => {
db_client()
.await
.users
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {permission} on db")
})?;
}
UserTargetVariant::UserGroup => {
db_client()
.await
.user_groups
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {permission} on db")
})?;
}
}
Ok(UpdatePermissionOnResourceTypeResponse {})
}
}
impl Resolve<UpdatePermissionOnTarget, User> for State {
#[instrument(name = "UpdatePermissionOnTarget", skip(self, admin))]
async fn resolve(
@@ -132,8 +199,8 @@ impl Resolve<UpdatePermissionOnTarget, User> for State {
"level": permission.as_ref(),
}
},
UpdateOptions::builder().upsert(true).build(),
)
.with_options(UpdateOptions::builder().upsert(true).build())
.await?;
Ok(UpdatePermissionOnTargetResponse {})
@@ -153,7 +220,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for users")?
.context("no matching user found")?
@@ -168,7 +235,7 @@ async fn extract_user_target_with_validation(
let id = db_client()
.await
.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for user_groups")?
.context("no matching user_group found")?
@@ -195,7 +262,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builds
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builds")?
.context("no matching build found")?
@@ -210,7 +277,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.builders
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for builders")?
.context("no matching builder found")?
@@ -225,7 +292,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.deployments
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for deployments")?
.context("no matching deployment found")?
@@ -240,7 +307,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.servers
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for servers")?
.context("no matching server found")?
@@ -255,7 +322,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.repos
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for repos")?
.context("no matching repo found")?
@@ -270,7 +337,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.alerters
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for alerters")?
.context("no matching alerter found")?
@@ -285,7 +352,7 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.procedures
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for procedures")?
.context("no matching procedure found")?
@@ -300,12 +367,42 @@ async fn extract_resource_target_with_validation(
let id = db_client()
.await
.server_templates
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for server templates")?
.context("no matching server template found")?
.id;
Ok((ResourceTargetVariant::ServerTemplate, id))
}
ResourceTarget::ResourceSync(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.resource_syncs
.find_one(filter)
.await
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?
.id;
Ok((ResourceTargetVariant::ResourceSync, id))
}
ResourceTarget::Stack(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.stacks
.find_one(filter)
.await
.context("failed to query db for stacks")?
.context("no matching stack found")?
.id;
Ok((ResourceTargetVariant::Stack, id))
}
}
}

View File

@@ -1,233 +1,24 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
api::{execute::Execution, write::*},
use komodo_client::{
api::write::*,
entities::{
build::Build,
deployment::Deployment,
monitor_timestamp,
permission::PermissionLevel,
procedure::{PartialProcedureConfig, Procedure},
repo::Repo,
server::Server,
update::Log,
user::User,
Operation,
permission::PermissionLevel, procedure::Procedure, user::User,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::{
create_permission, remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
update::{add_update, make_update, update_update},
},
state::{action_states, db_client, State},
};
use crate::{resource, state::State};
#[async_trait]
impl Resolve<CreateProcedure, User> for State {
#[instrument(name = "CreateProcedure", skip(self, user))]
async fn resolve(
&self,
CreateProcedure { name, mut config }: CreateProcedure,
CreateProcedure { name, config }: CreateProcedure,
user: User,
) -> anyhow::Result<CreateProcedureResponse> {
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
let start_ts = monitor_timestamp();
validate_procedure_config(&mut config, &user, None).await?;
let procedure = Procedure {
id: Default::default(),
name,
updated_at: start_ts,
description: Default::default(),
tags: Default::default(),
info: Default::default(),
config: config.into(),
};
let procedure_id = db_client()
.await
.procedures
.insert_one(procedure, None)
.await
.context("failed to add procedure to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let procedure = Procedure::get_resource(&procedure_id).await?;
create_permission(&user, &procedure, PermissionLevel::Write)
.await;
let mut update =
make_update(&procedure, Operation::CreateProcedure, &user);
update.push_simple_log(
"create procedure",
format!(
"created procedure\nid: {}\nname: {}",
procedure.id, procedure.name
),
);
update
.push_simple_log("config", format!("{:#?}", procedure.config));
update.finalize();
add_update(update).await?;
Ok(procedure)
resource::create::<Procedure>(&name, config, &user).await
}
}
#[instrument]
async fn validate_procedure_config(
config: &mut PartialProcedureConfig,
user: &User,
id: Option<&str>,
) -> anyhow::Result<()> {
let Some(executions) = &mut config.executions else {
return Ok(());
};
for exec in executions {
match &mut exec.execution {
Execution::None(_) => {}
Execution::RunProcedure(params) => {
let procedure = Procedure::get_resource_check_permissions(
&params.procedure,
user,
PermissionLevel::Execute,
)
.await?;
match id {
Some(id) if procedure.id == id => {
return Err(anyhow!(
"Cannot have self-referential procedure"
))
}
_ => {}
}
params.procedure = procedure.id;
}
Execution::RunBuild(params) => {
let build = Build::get_resource_check_permissions(
&params.build,
user,
PermissionLevel::Execute,
)
.await?;
params.build = build.id;
}
Execution::Deploy(params) => {
let deployment = Deployment::get_resource_check_permissions(
&params.deployment,
user,
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
}
Execution::StartContainer(params) => {
let deployment = Deployment::get_resource_check_permissions(
&params.deployment,
user,
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
}
Execution::StopContainer(params) => {
let deployment = Deployment::get_resource_check_permissions(
&params.deployment,
user,
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
}
Execution::StopAllContainers(params) => {
let server = Server::get_resource_check_permissions(
&params.server,
user,
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
}
Execution::RemoveContainer(params) => {
let deployment = Deployment::get_resource_check_permissions(
&params.deployment,
user,
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
}
Execution::CloneRepo(params) => {
let repo = Repo::get_resource_check_permissions(
&params.repo,
user,
PermissionLevel::Execute,
)
.await?;
params.repo = repo.id;
}
Execution::PullRepo(params) => {
let repo = Repo::get_resource_check_permissions(
&params.repo,
user,
PermissionLevel::Execute,
)
.await?;
params.repo = repo.id;
}
Execution::PruneDockerNetworks(params) => {
let server = Server::get_resource_check_permissions(
&params.server,
user,
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
}
Execution::PruneDockerImages(params) => {
let server = Server::get_resource_check_permissions(
&params.server,
user,
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
}
Execution::PruneDockerContainers(params) => {
let server = Server::get_resource_check_permissions(
&params.server,
user,
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
}
}
}
Ok(())
}
#[async_trait]
impl Resolve<CopyProcedure, User> for State {
#[instrument(name = "CopyProcedure", skip(self, user))]
async fn resolve(
@@ -235,117 +26,28 @@ impl Resolve<CopyProcedure, User> for State {
CopyProcedure { name, id }: CopyProcedure,
user: User,
) -> anyhow::Result<CopyProcedureResponse> {
let Procedure {
config,
description,
tags,
..
} = Procedure::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let start_ts = monitor_timestamp();
let build = Procedure {
id: Default::default(),
name,
updated_at: start_ts,
description,
tags,
config,
info: Default::default(),
};
let procedure_id = db_client()
.await
.procedures
.insert_one(build, None)
.await
.context("failed to add build to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let procedure = Procedure::get_resource(&procedure_id).await?;
create_permission(&user, &procedure, PermissionLevel::Write)
.await;
let mut update =
make_update(&procedure, Operation::CreateProcedure, &user);
update.push_simple_log(
"create procedure",
format!(
"created procedure\nid: {}\nname: {}",
procedure.id, procedure.name
),
);
update.push_simple_log(
"config",
serde_json::to_string_pretty(&procedure)?,
);
update.finalize();
add_update(update).await?;
Ok(procedure)
let Procedure { config, .. } =
resource::get_check_permissions::<Procedure>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::create::<Procedure>(&name, config.into(), &user).await
}
}
#[async_trait]
impl Resolve<UpdateProcedure, User> for State {
#[instrument(name = "UpdateProcedure", skip(self, user))]
async fn resolve(
&self,
UpdateProcedure { id, mut config }: UpdateProcedure,
UpdateProcedure { id, config }: UpdateProcedure,
user: User,
) -> anyhow::Result<UpdateProcedureResponse> {
let procedure = Procedure::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
validate_procedure_config(
&mut config,
&user,
Some(&procedure.id),
)
.await?;
update_one_by_id(
&db_client().await.procedures,
&procedure.id,
mungos::update::Update::FlattenSet(
doc! { "config": to_document(&config)? },
),
None,
)
.await
.context("failed to update procedure on database")?;
let mut update =
make_update(&procedure, Operation::UpdateProcedure, &user);
update.push_simple_log(
"procedure update",
serde_json::to_string_pretty(&config)?,
);
update.finalize();
add_update(update).await?;
let procedure = Procedure::get_resource(&procedure.id).await?;
Ok(procedure)
resource::update::<Procedure>(&id, config, &user).await
}
}
#[async_trait]
impl Resolve<DeleteProcedure, User> for State {
#[instrument(name = "DeleteProcedure", skip(self, user))]
async fn resolve(
@@ -353,53 +55,6 @@ impl Resolve<DeleteProcedure, User> for State {
DeleteProcedure { id }: DeleteProcedure,
user: User,
) -> anyhow::Result<DeleteProcedureResponse> {
// needs to pull its id from all container procedures
if action_states()
.procedure
.get(&id)
.await
.unwrap_or_default()
.busy()?
{
return Err(anyhow!("procedure busy"));
}
let procedure = Procedure::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&procedure, Operation::DeleteProcedure, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
let res =
delete_one_by_id(&db_client().await.procedures, &id, None)
.await
.context("failed to delete build from database");
delete_all_permissions_on_resource(&procedure).await;
let log = match res {
Ok(_) => Log::simple(
"delete procedure",
format!("deleted procedure {}", procedure.name),
),
Err(e) => Log::error(
"delete procedure",
format!("failed to delete procedure\n{e:#?}"),
),
};
update.logs.push(log);
update.finalize();
update_update(update).await?;
remove_from_recently_viewed(&procedure).await?;
Ok(procedure)
resource::delete::<Procedure>(&id, &user).await
}
}

View File

@@ -0,0 +1,401 @@
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::*,
entities::{
provider::{DockerRegistryAccount, GitProviderAccount},
user::User,
Operation, ResourceTarget,
},
};
use mungos::{
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
mongodb::bson::{doc, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::update::{add_update, make_update},
state::{db_client, State},
};
impl Resolve<CreateGitProviderAccount, User> for State {
async fn resolve(
&self,
CreateGitProviderAccount { account }: CreateGitProviderAccount,
user: User,
) -> anyhow::Result<CreateGitProviderAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can create git provider accounts"
));
}
let mut account: GitProviderAccount = account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string."));
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string."));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateGitProviderAccount,
&user,
);
account.id = db_client()
.await
.git_accounts
.insert_one(&account)
.await
.context("failed to create git provider account on db")?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create git provider account",
format!(
"Created git provider account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create git provider account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<UpdateGitProviderAccount, User> for State {
async fn resolve(
&self,
UpdateGitProviderAccount { id, mut account }: UpdateGitProviderAccount,
user: User,
) -> anyhow::Result<UpdateGitProviderAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can update git provider accounts"
));
}
if let Some(domain) = &account.domain {
if domain.is_empty() {
return Err(anyhow!(
"cannot update git provider with empty domain"
));
}
}
if let Some(username) = &account.username {
if username.is_empty() {
return Err(anyhow!(
"cannot update git provider with empty username"
));
}
}
// Ensure update does not change id
account.id = None;
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
&user,
);
let account = to_document(&account).context(
"failed to serialize partial git provider account to bson",
)?;
let db = db_client().await;
update_one_by_id(
&db.git_accounts,
&id,
doc! { "$set": account },
None,
)
.await
.context("failed to update git provider account on db")?;
let Some(account) =
find_one_by_id(&db.git_accounts, &id)
.await
.context("failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
update.push_simple_log(
"update git provider account",
format!(
"Updated git provider account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update git provider account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<DeleteGitProviderAccount, User> for State {
async fn resolve(
&self,
DeleteGitProviderAccount { id }: DeleteGitProviderAccount,
user: User,
) -> anyhow::Result<DeleteGitProviderAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can delete git provider accounts"
));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
&user,
);
let db = db_client().await;
let Some(account) =
find_one_by_id(&db.git_accounts, &id)
.await
.context("failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
delete_one_by_id(&db.git_accounts, &id, None)
.await
.context("failed to delete git account on db")?;
update.push_simple_log(
"delete git provider account",
format!(
"Deleted git provider account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete git provider account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<CreateDockerRegistryAccount, User> for State {
async fn resolve(
&self,
CreateDockerRegistryAccount { account }: CreateDockerRegistryAccount,
user: User,
) -> anyhow::Result<CreateDockerRegistryAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can create docker registry account accounts"
));
}
let mut account: DockerRegistryAccount = account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string."));
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string."));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateDockerRegistryAccount,
&user,
);
account.id = db_client()
.await
.registry_accounts
.insert_one(&account)
.await
.context(
"failed to create docker registry account account on db",
)?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create docker registry account",
format!(
"Created docker registry account account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create docker registry account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<UpdateDockerRegistryAccount, User> for State {
async fn resolve(
&self,
UpdateDockerRegistryAccount { id, mut account }: UpdateDockerRegistryAccount,
user: User,
) -> anyhow::Result<UpdateDockerRegistryAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can update docker registry accounts"
));
}
if let Some(domain) = &account.domain {
if domain.is_empty() {
return Err(anyhow!(
"cannot update docker registry account with empty domain"
));
}
}
if let Some(username) = &account.username {
if username.is_empty() {
return Err(anyhow!(
"cannot update docker registry account with empty username"
));
}
}
account.id = None;
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
&user,
);
let account = to_document(&account).context(
"failed to serialize partial docker registry account account to bson",
)?;
let db = db_client().await;
update_one_by_id(
&db.registry_accounts,
&id,
doc! { "$set": account },
None,
)
.await
.context(
"failed to update docker registry account account on db",
)?;
let Some(account) = find_one_by_id(&db.registry_accounts, &id)
.await
.context("failed to query db for registry accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
update.push_simple_log(
"update docker registry account",
format!(
"Updated docker registry account account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update docker registry account | {e:#}")
})
.ok();
Ok(account)
}
}
impl Resolve<DeleteDockerRegistryAccount, User> for State {
async fn resolve(
&self,
DeleteDockerRegistryAccount { id }: DeleteDockerRegistryAccount,
user: User,
) -> anyhow::Result<DeleteDockerRegistryAccountResponse> {
if !user.admin {
return Err(anyhow!(
"only admins can delete docker registry accounts"
));
}
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
&user,
);
let db = db_client().await;
let Some(account) = find_one_by_id(&db.registry_accounts, &id)
.await
.context("failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id"));
};
delete_one_by_id(&db.registry_accounts, &id, None)
.await
.context("failed to delete registry account on db")?;
update.push_simple_log(
"delete registry account",
format!(
"Deleted registry account for {} with username {}",
account.domain, account.username
),
);
update.finalize();
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete docker registry account | {e:#}")
})
.ok();
Ok(account)
}
}

View File

@@ -1,136 +1,39 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
api::{execute, write::*},
use mongo_indexed::doc;
use komodo_client::{
api::write::*,
entities::{
monitor_timestamp,
config::core::CoreConfig,
permission::PermissionLevel,
repo::{PartialRepoConfig, Repo},
server::Server,
to_monitor_name,
update::{Log, ResourceTarget, Update},
repo::{PartialRepoConfig, Repo, RepoInfo},
user::User,
Operation,
CloneArgs, NoData,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId, to_bson},
use mungos::mongodb::bson::to_document;
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
create_permission, periphery_client, remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
update::{add_update, make_update, update_update},
},
state::{action_states, db_client, State},
config::core_config,
helpers::{git_token, random_string},
resource,
state::{db_client, github_client, State},
};
#[instrument(skip(user))]
async fn validate_config(
config: &mut PartialRepoConfig,
user: &User,
) -> anyhow::Result<()> {
match &config.server_id {
Some(server_id) if !server_id.is_empty() => {
let server = Server::get_resource_check_permissions(
server_id,
user,
PermissionLevel::Write,
)
.await
.context("cannot create repo on this server. user must have update permissions on the server.")?;
config.server_id = Some(server.id);
}
_ => {}
}
Ok(())
}
#[async_trait]
impl Resolve<CreateRepo, User> for State {
#[instrument(name = "CreateRepo", skip(self, user))]
async fn resolve(
&self,
CreateRepo { name, mut config }: CreateRepo,
CreateRepo { name, config }: CreateRepo,
user: User,
) -> anyhow::Result<Repo> {
let name = to_monitor_name(&name);
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
validate_config(&mut config, &user).await?;
let start_ts = monitor_timestamp();
let repo = Repo {
id: Default::default(),
name,
updated_at: start_ts,
description: Default::default(),
tags: Default::default(),
config: config.into(),
info: Default::default(),
};
let repo_id = db_client()
.await
.repos
.insert_one(repo, None)
.await
.context("failed to add repo to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let repo = Repo::get_resource(&repo_id).await?;
create_permission(&user, &repo, PermissionLevel::Write).await;
let update = Update {
target: ResourceTarget::Repo(repo_id),
operation: Operation::CreateRepo,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
success: true,
logs: vec![
Log::simple(
"create repo",
format!(
"created repo\nid: {}\nname: {}",
repo.id, repo.name
),
),
Log::simple("config", format!("{:#?}", repo.config)),
],
..Default::default()
};
add_update(update).await?;
if !repo.config.repo.is_empty()
&& !repo.config.server_id.is_empty()
{
let _ = self
.resolve(
execute::CloneRepo {
repo: repo.id.clone(),
},
user,
)
.await;
}
Ok(repo)
resource::create::<Repo>(&name, config, &user).await
}
}
#[async_trait]
impl Resolve<CopyRepo, User> for State {
#[instrument(name = "CopyRepo", skip(self, user))]
async fn resolve(
@@ -138,63 +41,17 @@ impl Resolve<CopyRepo, User> for State {
CopyRepo { name, id }: CopyRepo,
user: User,
) -> anyhow::Result<Repo> {
let Repo {
config,
description,
tags,
..
} = Repo::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
if !config.server_id.is_empty() {
Server::get_resource_check_permissions(
&config.server_id,
&user,
PermissionLevel::Write,
)
.await
.context("cannot create repo on this server. user must have update permissions on the server.")?;
}
let start_ts = monitor_timestamp();
let repo = Repo {
id: Default::default(),
name,
updated_at: start_ts,
description,
tags,
config,
info: Default::default(),
};
let repo_id = db_client()
.await
.repos
.insert_one(repo, None)
.await
.context("failed to add repo to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let repo = Repo::get_resource(&repo_id).await?;
create_permission(&user, &repo, PermissionLevel::Write).await;
let mut update = make_update(&repo, Operation::CreateRepo, &user);
update.push_simple_log(
"create repo",
format!("created repo\nid: {}\nname: {}", repo.id, repo.name),
);
update.push_simple_log("config", format!("{:#?}", repo.config));
update.finalize();
add_update(update).await?;
Ok(repo)
let Repo { config, .. } =
resource::get_check_permissions::<Repo>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::create::<Repo>(&name, config.into(), &user).await
}
}
#[async_trait]
impl Resolve<DeleteRepo, User> for State {
#[instrument(name = "DeleteRepo", skip(self, user))]
async fn resolve(
@@ -202,157 +59,321 @@ impl Resolve<DeleteRepo, User> for State {
DeleteRepo { id }: DeleteRepo,
user: User,
) -> anyhow::Result<Repo> {
let repo = Repo::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
// get the action state for the repo (or insert default).
let action_state =
action_states().repo.get_or_insert_default(&repo.id).await;
// This will set action state back to default when dropped.
// Will also check to ensure repo not already busy before updating.
let _action_guard =
action_state.update(|state| state.deleting = true)?;
let periphery = if repo.config.server_id.is_empty() {
None
} else {
let server =
Server::get_resource(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
Some(periphery)
};
let mut update = make_update(&repo, Operation::DeleteRepo, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
let res =
delete_one_by_id(&db_client().await.repos, &repo.id, None)
.await
.context("failed to delete repo from database");
delete_all_permissions_on_resource(&repo).await;
let log = match res {
Ok(_) => Log::simple(
"delete repo",
format!("deleted repo {}", repo.name),
),
Err(e) => Log::error(
"delete repo",
format!("failed to delete repo\n{e:#?}"),
),
};
update.logs.push(log);
if let Some(periphery) = periphery {
match periphery
.request(api::git::DeleteRepo {
name: repo.name.clone(),
})
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update.logs.push(Log::error(
"delete repo on periphery",
serialize_error_pretty(&e),
)),
}
}
update.finalize();
update_update(update).await?;
remove_from_recently_viewed(&repo).await?;
Ok(repo)
resource::delete::<Repo>(&id, &user).await
}
}
#[async_trait]
impl Resolve<UpdateRepo, User> for State {
#[instrument(name = "UpdateRepo", skip(self, user))]
async fn resolve(
&self,
UpdateRepo { id, mut config }: UpdateRepo,
UpdateRepo { id, config }: UpdateRepo,
user: User,
) -> anyhow::Result<Repo> {
validate_config(&mut config, &user).await?;
resource::update::<Repo>(&id, config, &user).await
}
}
let repo = Repo::get_resource_check_permissions(
&id,
impl Resolve<RefreshRepoCache, User> for State {
#[instrument(
name = "RefreshRepoCache",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
RefreshRepoCache { repo }: RefreshRepoCache,
user: User,
) -> anyhow::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// repo should be able to do this.
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
)
.await?;
if repo.config.repo.is_empty() {
// Nothing to do
return Ok(NoData {});
}
let config = core_config();
let repo_dir = config.repo_directory.join(random_string(10));
let mut clone_args: CloneArgs = (&repo).into();
// No reason to to the commands here.
clone_args.on_clone = None;
clone_args.on_pull = None;
clone_args.destination = Some(repo_dir.display().to_string());
let access_token = match (&clone_args.account, &clone_args.provider)
{
(None, _) => None,
(Some(_), None) => {
return Err(anyhow!(
"Account is configured, but provider is empty"
))
}
(Some(username), Some(provider)) => {
git_token(provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {provider} | {username}"),
)?
}
};
let (_, latest_hash, latest_message, _) = git::clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.context("failed to clone repo (the resource) repo")?;
let info = RepoInfo {
last_pulled_at: repo.info.last_pulled_at,
last_built_at: repo.info.last_built_at,
built_hash: repo.info.built_hash,
built_message: repo.info.built_message,
latest_hash,
latest_message,
};
let info = to_document(&info)
.context("failed to serialize repo info to bson")?;
db_client()
.await
.repos
.update_one(
doc! { "name": &repo.name },
doc! { "$set": { "info": info } },
)
.await
.context("failed to update repo info on db")?;
if repo_dir.exists() {
if let Err(e) = std::fs::remove_dir_all(&repo_dir) {
warn!(
"failed to remove repo (resource) cache update repo directory | {e:?}"
)
}
}
Ok(NoData {})
}
}
impl Resolve<CreateRepoWebhook, User> for State {
#[instrument(name = "CreateRepoWebhook", skip(self, user))]
async fn resolve(
&self,
CreateRepoWebhook { repo, action }: CreateRepoWebhook,
user: User,
) -> anyhow::Result<CreateRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Write,
)
.await?;
// get the action state for the repo (or insert default).
let action_state =
action_states().repo.get_or_insert_default(&repo.id).await;
if repo.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
// This will set action state back to default when dropped.
// Will also check to ensure repo not already busy before updating.
let _action_guard =
action_state.update(|state| state.updating = true)?;
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
update_one_by_id(
&db_client().await.repos,
&repo.id,
mungos::update::Update::FlattenSet(
doc! { "config": to_bson(&config)? },
),
None,
)
.await
.context("failed to update repo on database")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let mut update = make_update(&repo, Operation::UpdateRepo, &user);
update.in_progress();
update.push_simple_log(
"repo update",
serde_json::to_string_pretty(&config).unwrap(),
);
update.id = add_update(update.clone()).await?;
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
if let Some(new_server_id) = config.server_id {
if !repo.config.server_id.is_empty()
&& new_server_id != repo.config.server_id
{
let old_server: anyhow::Result<Server> =
Server::get_resource(&repo.config.server_id).await;
let periphery =
old_server.and_then(|server| periphery_client(&server));
match periphery {
Ok(periphery) => match periphery
.request(api::git::DeleteRepo { name: repo.name })
.await
{
Ok(mut log) => {
log.stage = String::from("cleanup previous server");
update.logs.push(log);
}
Err(e) => update.push_error_log(
"cleanup previous server",
format!("failed to cleanup previous server | {e:#?}"),
),
},
Err(e) => update.push_error_log(
"cleanup previous server",
format!("failed to cleanup previous server | {e:#?}"),
),
}
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let webhook_secret = if repo.config.webhook_secret.is_empty() {
webhook_secret
} else {
&repo.config.webhook_secret
};
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
RepoWebhookAction::Build => {
format!("{host}/listener/github/repo/{}/build", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
update.finalize();
update_update(update).await?;
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo_name, &request)
.await
.context("failed to create webhook")?;
Repo::get_resource(&repo.id).await
if !repo.config.webhook_enabled {
self
.resolve(
UpdateRepo {
id: repo.id,
config: PartialRepoConfig {
webhook_enabled: Some(true),
..Default::default()
},
},
user,
)
.await
.context("failed to update repo to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<DeleteRepoWebhook, User> for State {
#[instrument(name = "DeleteRepoWebhook", skip(self, user))]
async fn resolve(
&self,
DeleteRepoWebhook { repo, action }: DeleteRepoWebhook,
user: User,
) -> anyhow::Result<DeleteRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(anyhow!(
"github_webhook_app is not configured in core config toml"
));
};
let repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Write,
)
.await?;
if repo.config.git_provider != "github.com" {
return Err(anyhow!(
"Can only manage github.com repo webhooks"
));
}
if repo.config.repo.is_empty() {
return Err(anyhow!(
"No repo configured, can't create webhook"
));
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(anyhow!(
"Cannot manage repo webhooks under owner {owner}"
));
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = webhook_base_url.as_ref().unwrap_or(host);
let url = match action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
RepoWebhookAction::Build => {
format!("{host}/listener/github/repo/{}/build", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo_name, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}

View File

@@ -1,38 +1,29 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use async_trait::async_trait;
use monitor_client::{
use anyhow::Context;
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
monitor_timestamp,
komodo_timestamp,
permission::PermissionLevel,
server::Server,
update::{Log, ResourceTarget, Update, UpdateStatus},
update::{Update, UpdateStatus},
user::User,
Operation,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId, to_bson},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
create_permission, periphery_client, remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
periphery_client,
update::{add_update, make_update, update_update},
},
monitor::update_cache_for_server,
state::{action_states, db_client, server_status_cache, State},
resource,
state::{db_client, State},
};
#[async_trait]
impl Resolve<CreateServer, User> for State {
#[instrument(name = "CreateServer", skip(self, user))]
async fn resolve(
@@ -40,65 +31,10 @@ impl Resolve<CreateServer, User> for State {
CreateServer { name, config }: CreateServer,
user: User,
) -> anyhow::Result<Server> {
if !user.admin && !user.create_server_permissions {
return Err(anyhow!(
"user does not have create server permissions"
));
}
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
let start_ts = monitor_timestamp();
let server = Server {
id: Default::default(),
name,
updated_at: start_ts,
description: Default::default(),
tags: Default::default(),
config: config.into(),
info: (),
};
let server_id = db_client()
.await
.servers
.insert_one(&server, None)
.await
.context("failed to add server to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let server = Server::get_resource(&server_id).await?;
create_permission(&user, &server, PermissionLevel::Write).await;
let update = Update {
target: ResourceTarget::Server(server_id),
operation: Operation::CreateServer,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
success: true,
logs: vec![
Log::simple(
"create server",
format!(
"created server\nid: {}\nname: {}",
server.id, server.name
),
),
Log::simple("config", format!("{:#?}", server.config)),
],
..Default::default()
};
add_update(update).await?;
update_cache_for_server(&server).await;
Ok(server)
resource::create::<Server>(&name, config, &user).await
}
}
#[async_trait]
impl Resolve<DeleteServer, User> for State {
#[instrument(name = "DeleteServer", skip(self, user))]
async fn resolve(
@@ -106,95 +42,10 @@ impl Resolve<DeleteServer, User> for State {
DeleteServer { id }: DeleteServer,
user: User,
) -> anyhow::Result<Server> {
if action_states()
.server
.get(&id)
.await
.unwrap_or_default()
.busy()?
{
return Err(anyhow!("server busy"));
}
let server = Server::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
db_client()
.await
.builders
.update_many(
doc! { "config.params.server_id": &id },
doc! { "$set": { "config.params.server_id": "" } },
None,
)
.await
.context("failed to detach server from builders")?;
db_client()
.await
.deployments
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from deployments")?;
db_client()
.await
.repos
.update_many(
doc! { "config.server_id": &id },
doc! { "$set": { "config.server_id": "" } },
None,
)
.await
.context("failed to detach server from repos")?;
db_client()
.await
.alerts
.update_many(
doc! { "target.type": "Server", "target.id": &id },
doc! { "$set": {
"resolved": true,
"resolved_ts": unix_timestamp_ms() as i64
} },
None,
)
.await
.context("failed to detach server from repos")?;
delete_one_by_id(&db_client().await.servers, &id, None)
.await
.context("failed to delete server from mongo")?;
delete_all_permissions_on_resource(&server).await;
let mut update =
make_update(&server, Operation::DeleteServer, &user);
update.push_simple_log(
"delete server",
format!("deleted server {}", server.name),
);
update.finalize();
add_update(update).await?;
server_status_cache().remove(&id).await;
remove_from_recently_viewed(&server).await?;
Ok(server)
resource::delete::<Server>(&id, &user).await
}
}
#[async_trait]
impl Resolve<UpdateServer, User> for State {
#[instrument(name = "UpdateServer", skip(self, user))]
async fn resolve(
@@ -202,54 +53,10 @@ impl Resolve<UpdateServer, User> for State {
UpdateServer { id, config }: UpdateServer,
user: User,
) -> anyhow::Result<Server> {
if action_states()
.server
.get(&id)
.await
.unwrap_or_default()
.busy()?
{
return Err(anyhow!("server busy"));
}
let server = Server::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&server, Operation::UpdateServer, &user);
update_one_by_id(
&db_client().await.servers,
&id,
mungos::update::Update::FlattenSet(
doc! { "config": to_bson(&config)? },
),
None,
)
.await
.context("failed to update server on mongo")?;
update.push_simple_log(
"server update",
serde_json::to_string_pretty(&config)?,
);
let new_server = Server::get_resource(&id).await?;
update_cache_for_server(&new_server).await;
update.finalize();
add_update(update).await?;
Ok(new_server)
resource::update::<Server>(&id, config, &user).await
}
}
#[async_trait]
impl Resolve<RenameServer, User> for State {
#[instrument(name = "RenameServer", skip(self, user))]
async fn resolve(
@@ -257,7 +64,7 @@ impl Resolve<RenameServer, User> for State {
RenameServer { id, name }: RenameServer,
user: User,
) -> anyhow::Result<Update> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&id,
&user,
PermissionLevel::Write,
@@ -266,7 +73,7 @@ impl Resolve<RenameServer, User> for State {
let mut update =
make_update(&server, Operation::RenameServer, &user);
update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": monitor_timestamp() }), None)
update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": komodo_timestamp() }), None)
.await
.context("failed to update server on db. this name may already be taken.")?;
update.push_simple_log(
@@ -279,7 +86,6 @@ impl Resolve<RenameServer, User> for State {
}
}
#[async_trait]
impl Resolve<CreateNetwork, User> for State {
#[instrument(name = "CreateNetwork", skip(self, user))]
async fn resolve(
@@ -287,7 +93,7 @@ impl Resolve<CreateNetwork, User> for State {
CreateNetwork { server, name }: CreateNetwork,
user: User,
) -> anyhow::Result<Update> {
let server = Server::get_resource_check_permissions(
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Write,
@@ -306,46 +112,10 @@ impl Resolve<CreateNetwork, User> for State {
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("create network", serialize_error_pretty(&e)),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
#[async_trait]
impl Resolve<DeleteNetwork, User> for State {
#[instrument(name = "DeleteNetwork", skip(self, user))]
async fn resolve(
&self,
DeleteNetwork { server, name }: DeleteNetwork,
user: User,
) -> anyhow::Result<Update> {
let server = Server::get_resource_check_permissions(
&server,
&user,
PermissionLevel::Write,
)
.await?;
let periphery = periphery_client(&server)?;
let mut update =
make_update(&server, Operation::DeleteNetwork, &user);
update.status = UpdateStatus::InProgress;
update.id = add_update(update.clone()).await?;
match periphery
.request(api::network::DeleteNetwork { name })
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("delete network", serialize_error_pretty(&e)),
Err(e) => update.push_error_log(
"create network",
format_serror(&e.context("failed to create network").into()),
),
};
update.finalize();

View File

@@ -1,249 +1,65 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use axum::async_trait;
use monitor_client::{
use komodo_client::{
api::write::{
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
UpdateServerTemplate,
},
entities::{
monitor_timestamp, permission::PermissionLevel,
server_template::ServerTemplate, user::User, Operation,
permission::PermissionLevel, server_template::ServerTemplate,
user::User,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::{
create_permission, remove_from_recently_viewed,
resource::{delete_all_permissions_on_resource, StateResource},
update::{add_update, make_update},
},
state::{db_client, State},
};
use crate::{resource, state::State};
#[async_trait]
impl Resolve<CreateServerTemplate, User> for State {
#[instrument(name = "CreateServerTemplate", skip(self, user))]
async fn resolve(
&self,
CreateServerTemplate { name, config }: CreateServerTemplate,
user: User,
) -> anyhow::Result<ServerTemplate> {
if !user.admin {
return Err(anyhow!("only admins can create server templates"));
}
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names"));
}
let server_template = ServerTemplate {
id: Default::default(),
name,
updated_at: monitor_timestamp(),
description: Default::default(),
tags: Default::default(),
config: config.into(),
info: (),
};
let server_template_id = db_client()
.await
.server_templates
.insert_one(server_template, None)
.await
.context("failed to add server_template to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let server_template =
ServerTemplate::get_resource(&server_template_id).await?;
create_permission(
&user,
&server_template,
PermissionLevel::Write,
)
.await;
let mut update = make_update(
&server_template,
Operation::CreateServerTemplate,
&user,
);
update.push_simple_log(
"create server template",
format!(
"created server template\nid: {}\nname: {}",
server_template.id, server_template.name
),
);
update.push_simple_log(
"config",
format!("{:#?}", server_template.config),
);
update.finalize();
add_update(update).await?;
Ok(server_template)
resource::create::<ServerTemplate>(&name, config, &user).await
}
}
#[async_trait]
impl Resolve<CopyServerTemplate, User> for State {
#[instrument(name = "CopyServerTemplate", skip(self, user))]
async fn resolve(
&self,
CopyServerTemplate { name, id }: CopyServerTemplate,
user: User,
) -> anyhow::Result<ServerTemplate> {
let ServerTemplate {
config,
description,
..
} = ServerTemplate::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let server_template = ServerTemplate {
id: Default::default(),
name,
updated_at: monitor_timestamp(),
description,
tags: Default::default(),
config,
info: (),
};
let server_template_id = db_client()
let ServerTemplate { config, .. } =
resource::get_check_permissions::<ServerTemplate>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::create::<ServerTemplate>(&name, config.into(), &user)
.await
.server_templates
.insert_one(server_template, None)
.await
.context("failed to add server_template to db")?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.to_string();
let server_template =
ServerTemplate::get_resource(&server_template_id).await?;
create_permission(
&user,
&server_template,
PermissionLevel::Write,
)
.await;
let mut update = make_update(
&server_template,
Operation::CreateServerTemplate,
&user,
);
update.push_simple_log(
"create server template",
format!(
"created server template\nid: {}\nname: {}",
server_template.id, server_template.name
),
);
update.push_simple_log(
"config",
format!("{:#?}", server_template.config),
);
update.finalize();
add_update(update).await?;
Ok(server_template)
}
}
#[async_trait]
impl Resolve<DeleteServerTemplate, User> for State {
#[instrument(name = "DeleteServerTemplate", skip(self, user))]
async fn resolve(
&self,
DeleteServerTemplate { id }: DeleteServerTemplate,
user: User,
) -> anyhow::Result<ServerTemplate> {
let server_template =
ServerTemplate::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
delete_one_by_id(&db_client().await.server_templates, &id, None)
.await
.context("failed to delete server templates from database")?;
delete_all_permissions_on_resource(&server_template).await;
let mut update = make_update(
&server_template,
Operation::DeleteServerTemplate,
&user,
);
update.push_simple_log(
"delete server template",
format!("deleted server template {}", server_template.name),
);
update.finalize();
add_update(update).await?;
remove_from_recently_viewed(&server_template).await?;
Ok(server_template)
resource::delete::<ServerTemplate>(&id, &user).await
}
}
#[async_trait]
impl Resolve<UpdateServerTemplate, User> for State {
#[instrument(name = "UpdateServerTemplate", skip(self, user))]
async fn resolve(
&self,
UpdateServerTemplate { id, config }: UpdateServerTemplate,
user: User,
) -> anyhow::Result<ServerTemplate> {
let server_template =
ServerTemplate::get_resource_check_permissions(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update = make_update(
&server_template,
Operation::UpdateServerTemplate,
&user,
);
update.push_simple_log(
"server template update",
serde_json::to_string_pretty(&config)
.context("failed to serialize config update")?,
);
let config = server_template.config.merge_partial(config);
let config = to_document(&config)
.context("failed to serialize update to bson document")?;
update_one_by_id(
&db_client().await.server_templates,
&id,
mungos::update::Update::FlattenSet(doc! { "config": config }),
None,
)
.await?;
let server_template = ServerTemplate::get_resource(&id).await?;
update.finalize();
add_update(update).await?;
Ok(server_template)
resource::update::<ServerTemplate>(&id, config, &user).await
}
}

View File

@@ -0,0 +1,172 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use komodo_client::{
api::{
user::CreateApiKey,
write::{
CreateApiKeyForServiceUser, CreateApiKeyForServiceUserResponse,
CreateServiceUser, CreateServiceUserResponse,
DeleteApiKeyForServiceUser, DeleteApiKeyForServiceUserResponse,
UpdateServiceUserDescription,
UpdateServiceUserDescriptionResponse,
},
},
entities::{
komodo_timestamp,
user::{User, UserConfig},
},
};
use mungos::{
by_id::find_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
// Admin-only: create a new service-account user.
//
// Rejects usernames that parse as a valid ObjectId — presumably
// because lookups accept either name or id (matches the equivalent
// guard elsewhere in this codebase) — TODO confirm.
impl Resolve<CreateServiceUser, User> for State {
  #[instrument(name = "CreateServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    CreateServiceUser {
      username,
      description,
    }: CreateServiceUser,
    user: User,
  ) -> anyhow::Result<CreateServiceUserResponse> {
    // Guard: only admins may create service users.
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    // Guard: disallow ObjectId-shaped usernames.
    if ObjectId::from_str(&username).is_ok() {
      return Err(anyhow!("username cannot be valid ObjectId"));
    }
    let mut service_user = User {
      id: Default::default(),
      username,
      config: UserConfig::Service { description },
      enabled: true,
      admin: false,
      create_server_permissions: false,
      create_build_permissions: false,
      last_update_view: 0,
      recents: Default::default(),
      all: Default::default(),
      updated_at: komodo_timestamp(),
    };
    // Insert, then read back the generated ObjectId as the user id.
    let inserted = db_client()
      .await
      .users
      .insert_one(&service_user)
      .await
      .context("failed to create service user on db")?;
    service_user.id = inserted
      .inserted_id
      .as_object_id()
      .context("inserted id is not object id")?
      .to_string();
    Ok(service_user)
  }
}
// Admin-only: update the description of an existing service user,
// then return the freshly re-queried user document.
impl Resolve<UpdateServiceUserDescription, User> for State {
  #[instrument(
    name = "UpdateServiceUserDescription",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    UpdateServiceUserDescription {
      username,
      description,
    }: UpdateServiceUserDescription,
    user: User,
  ) -> anyhow::Result<UpdateServiceUserDescriptionResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    let db = db_client().await;
    let filter = doc! { "username": &username };
    // The target must exist and must be a service user.
    let target = db
      .users
      .find_one(filter.clone())
      .await
      .context("failed to query db for user")?
      .context("no user with given username")?;
    if !matches!(&target.config, UserConfig::Service { .. }) {
      return Err(anyhow!("user is not service user"));
    }
    db.users
      .update_one(
        filter.clone(),
        doc! { "$set": { "config.data.description": description } },
      )
      .await
      .context("failed to update user on db")?;
    // Return the post-update document.
    db.users
      .find_one(filter)
      .await
      .context("failed to query db for user")?
      .context("user with username not found")
  }
}
// Admin-only: mint an api key on behalf of a service user by
// delegating to the standard CreateApiKey resolver as that user.
impl Resolve<CreateApiKeyForServiceUser, User> for State {
  #[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    CreateApiKeyForServiceUser {
      user_id,
      name,
      expires,
    }: CreateApiKeyForServiceUser,
    user: User,
  ) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    // Target must exist and be a service user.
    let target = find_one_by_id(&db_client().await.users, &user_id)
      .await
      .context("failed to query db for user")?
      .context("no user found with id")?;
    if !matches!(&target.config, UserConfig::Service { .. }) {
      return Err(anyhow!("user is not service user"));
    }
    // Run the normal api key creation flow as the service user.
    let request = CreateApiKey { name, expires };
    self.resolve(request, target).await
  }
}
// Admin-only: delete an api key belonging to a service user.
//
// Looks up the key, verifies its owning user is a service user,
// then deletes the key from the db.
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
  #[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
    user: User,
  ) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    let db = db_client().await;
    let api_key = db
      .api_keys
      .find_one(doc! { "key": &key })
      .await
      .context("failed to query db for api key")?
      .context("did not find matching api key")?;
    // Reuse the db handle acquired above rather than awaiting
    // db_client() a second time.
    let service_user =
      find_one_by_id(&db.users, &api_key.user_id)
        .await
        .context("failed to query db for user")?
        .context("no user found with id")?;
    let UserConfig::Service { .. } = &service_user.config else {
      return Err(anyhow!("user is not service user"));
    };
    db.api_keys
      .delete_one(doc! { "key": key })
      .await
      .context("failed to delete api key on db")?;
    Ok(DeleteApiKeyForServiceUserResponse {})
  }
}

View File

@@ -0,0 +1,511 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
server::ServerState,
stack::{ComposeContents, PartialStackConfig, Stack, StackInfo},
update::Update,
user::User,
NoData, Operation,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::api::compose::{
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{
periphery_client,
query::get_server_with_state,
stack::{
remote::get_remote_compose_contents,
services::extract_services_into_res,
},
update::{add_update, make_update},
},
resource,
state::{db_client, github_client, State},
};
// Create a new Stack via the shared resource creation helper,
// which handles permission checks, db insert, and update logging.
impl Resolve<CreateStack, User> for State {
  #[instrument(name = "CreateStack", skip(self, user))]
  async fn resolve(
    &self,
    CreateStack { name, config }: CreateStack,
    user: User,
  ) -> anyhow::Result<Stack> {
    let stack = resource::create::<Stack>(&name, config, &user).await?;
    Ok(stack)
  }
}
// Copy an existing stack's config into a brand new Stack.
// Requires write permission on the source stack.
impl Resolve<CopyStack, User> for State {
  #[instrument(name = "CopyStack", skip(self, user))]
  async fn resolve(
    &self,
    CopyStack { name, id }: CopyStack,
    user: User,
  ) -> anyhow::Result<Stack> {
    let source = resource::get_check_permissions::<Stack>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // Convert the full config into the partial form expected by create.
    resource::create::<Stack>(&name, source.config.into(), &user)
      .await
  }
}
// Delete a Stack by id via the shared resource deletion helper.
impl Resolve<DeleteStack, User> for State {
  #[instrument(name = "DeleteStack", skip(self, user))]
  async fn resolve(
    &self,
    DeleteStack { id }: DeleteStack,
    user: User,
  ) -> anyhow::Result<Stack> {
    let deleted = resource::delete::<Stack>(&id, &user).await?;
    Ok(deleted)
  }
}
// Apply a partial config update to a Stack via the shared helper.
impl Resolve<UpdateStack, User> for State {
  #[instrument(name = "UpdateStack", skip(self, user))]
  async fn resolve(
    &self,
    UpdateStack { id, config }: UpdateStack,
    user: User,
  ) -> anyhow::Result<Stack> {
    let updated = resource::update::<Stack>(&id, config, &user).await?;
    Ok(updated)
  }
}
// Rename a stack. Requires write permission; records the rename as
// an Update entry and bumps updated_at on the db document.
impl Resolve<RenameStack, User> for State {
  #[instrument(name = "RenameStack", skip(self, user))]
  async fn resolve(
    &self,
    RenameStack { id, name }: RenameStack,
    user: User,
  ) -> anyhow::Result<Update> {
    let stack = resource::get_check_permissions::<Stack>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    let mut update =
      make_update(&stack, Operation::RenameStack, &user);
    // Persist the new name, bumping updated_at at the same time.
    let set = mungos::update::Update::Set(
      doc! { "name": &name, "updated_at": komodo_timestamp() },
    );
    update_one_by_id(&db_client().await.stacks, &stack.id, set, None)
      .await
      .context("failed to update stack name on db")?;
    let log_body =
      format!("renamed stack from {} to {}", stack.name, name);
    update.push_simple_log("rename stack", log_body);
    update.finalize();
    add_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<RefreshStackCache, User> for State {
#[instrument(
name = "RefreshStackCache",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
RefreshStackCache { stack }: RefreshStackCache,
user: User,
) -> anyhow::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// stack should be able to do this.
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Execute,
)
.await?;
let file_contents_empty = stack.config.file_contents.is_empty();
if !stack.config.files_on_host
&& file_contents_empty
&& stack.config.repo.is_empty()
{
// Nothing to do without one of these
return Ok(NoData {});
}
let mut missing_files = Vec::new();
let (
latest_services,
remote_contents,
remote_errors,
latest_hash,
latest_message,
) = if stack.config.files_on_host {
// =============
// FILES ON HOST
// =============
if stack.config.server_id.is_empty() {
(vec![], None, None, None, None)
} else {
let (server, status) =
get_server_with_state(&stack.config.server_id).await?;
if status != ServerState::Ok {
(vec![], None, None, None, None)
} else {
let GetComposeContentsOnHostResponse { contents, errors } =
match periphery_client(&server)?
.request(GetComposeContentsOnHost {
file_paths: stack.file_paths().to_vec(),
name: stack.name.clone(),
run_directory: stack.config.run_directory.clone(),
})
.await
.context(
"failed to get compose file contents from host",
) {
Ok(res) => res,
Err(e) => GetComposeContentsOnHostResponse {
contents: Default::default(),
errors: vec![ComposeContents {
path: stack.config.run_directory.clone(),
contents: format_serror(&e.into()),
}],
},
};
let project_name = stack.project_name(true);
let mut services = Vec::new();
for contents in &contents {
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
&mut services,
) {
warn!(
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
stack.name
);
}
}
(services, Some(contents), Some(errors), None, None)
}
}
} else if file_contents_empty {
// ================
// REPO BASED STACK
// ================
let (
remote_contents,
remote_errors,
_,
latest_hash,
latest_message,
) =
get_remote_compose_contents(&stack, Some(&mut missing_files))
.await
.context("failed to clone remote compose file")?;
let project_name = stack.project_name(true);
let mut services = Vec::new();
for contents in &remote_contents {
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
&mut services,
) {
warn!(
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
stack.name
);
}
}
(
services,
Some(remote_contents),
Some(remote_errors),
latest_hash,
latest_message,
)
} else {
// =============
// UI BASED FILE
// =============
let mut services = Vec::new();
if let Err(e) = extract_services_into_res(
// this should latest (not deployed), so make the project name fresh.
&stack.project_name(true),
&stack.config.file_contents,
&mut services,
) {
warn!(
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
stack.name
);
services.extend(stack.info.latest_services);
};
(services, None, None, None, None)
};
let info = StackInfo {
missing_files,
deployed_services: stack.info.deployed_services,
deployed_project_name: stack.info.deployed_project_name,
deployed_contents: stack.info.deployed_contents,
deployed_hash: stack.info.deployed_hash,
deployed_message: stack.info.deployed_message,
latest_services,
remote_contents,
remote_errors,
latest_hash,
latest_message,
};
let info = to_document(&info)
.context("failed to serialize stack info to bson")?;
db_client()
.await
.stacks
.update_one(
doc! { "name": &stack.name },
doc! { "$set": { "info": info } },
)
.await
.context("failed to update stack info on db")?;
Ok(NoData {})
}
}
// Create a github push webhook pointing at this stack's listener
// url for the given action (Refresh or Deploy). If an active
// webhook with the same url already exists, does nothing. Also
// flips webhook_enabled on in the stack config if it was off.
//
// NOTE(review): unlike DeleteStackWebhook, this handler does not
// check stack.config.git_provider == "github.com" — confirm whether
// that guard was intentionally omitted here.
impl Resolve<CreateStackWebhook, User> for State {
  #[instrument(name = "CreateStackWebhook", skip(self, user))]
  async fn resolve(
    &self,
    CreateStackWebhook { stack, action }: CreateStackWebhook,
    user: User,
  ) -> anyhow::Result<CreateStackWebhookResponse> {
    // Webhook management requires the github app to be configured.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    let stack = resource::get_check_permissions::<Stack>(
      &stack,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if stack.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't create webhook"
      ));
    }
    // stack.config.repo is expected as "owner/repo".
    let mut split = stack.config.repo.split('/');
    let owner = split.next().context("Stack repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo =
      split.next().context("Stack repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      webhook_secret,
      ..
    } = core_config();
    // A stack-level webhook secret overrides the global one.
    let webhook_secret = if stack.config.webhook_secret.is_empty() {
      webhook_secret
    } else {
      &stack.config.webhook_secret
    };
    let host = webhook_base_url.as_ref().unwrap_or(host);
    let url = match action {
      StackWebhookAction::Refresh => {
        format!("{host}/listener/github/stack/{}/refresh", stack.id)
      }
      StackWebhookAction::Deploy => {
        format!("{host}/listener/github/stack/{}/deploy", stack.id)
      }
    };
    // An active webhook with this exact url already exists: done.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        return Ok(NoData {});
      }
    }
    // Now good to create the webhook
    let request = ReposCreateWebhookRequest {
      active: Some(true),
      config: Some(ReposCreateWebhookRequestConfig {
        url,
        secret: webhook_secret.to_string(),
        content_type: String::from("json"),
        insecure_ssl: None,
        digest: Default::default(),
        token: Default::default(),
      }),
      events: vec![String::from("push")],
      name: String::from("web"),
    };
    github_repos
      .create_webhook(owner, repo, &request)
      .await
      .context("failed to create webhook")?;
    // Make sure the stack will actually act on incoming webhooks.
    if !stack.config.webhook_enabled {
      self
        .resolve(
          UpdateStack {
            id: stack.id,
            config: PartialStackConfig {
              webhook_enabled: Some(true),
              ..Default::default()
            },
          },
          user,
        )
        .await
        .context("failed to update stack to enable webhook")?;
    }
    Ok(NoData {})
  }
}
// Delete the github webhook pointing at this stack's listener url
// for the given action. A no-op if no matching active webhook is
// found. Requires write permission on the stack.
//
// Fixes two copy-paste error strings: "Sync repo has no repo after
// the /" (this handler operates on a Stack) and "can't create
// webhook" (this is the delete handler).
impl Resolve<DeleteStackWebhook, User> for State {
  #[instrument(name = "DeleteStackWebhook", skip(self, user))]
  async fn resolve(
    &self,
    DeleteStackWebhook { stack, action }: DeleteStackWebhook,
    user: User,
  ) -> anyhow::Result<DeleteStackWebhookResponse> {
    // Webhook management requires the github app to be configured.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    let stack = resource::get_check_permissions::<Stack>(
      &stack,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if stack.config.git_provider != "github.com" {
      return Err(anyhow!(
        "Can only manage github.com repo webhooks"
      ));
    }
    if stack.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't delete webhook"
      ));
    }
    // stack.config.repo is expected as "owner/repo".
    let mut split = stack.config.repo.split('/');
    let owner = split.next().context("Stack repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo =
      split.next().context("Stack repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      ..
    } = core_config();
    let host = webhook_base_url.as_ref().unwrap_or(host);
    let url = match action {
      StackWebhookAction::Refresh => {
        format!("{host}/listener/github/stack/{}/refresh", stack.id)
      }
      StackWebhookAction::Deploy => {
        format!("{host}/listener/github/stack/{}/deploy", stack.id)
      }
    };
    // Delete the first active webhook whose url matches.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        github_repos
          .delete_webhook(owner, repo, webhook.id)
          .await
          .context("failed to delete webhook")?;
        return Ok(NoData {});
      }
    }
    // No webhook to delete, all good
    Ok(NoData {})
  }
}

View File

@@ -0,0 +1,570 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
self,
alert::{Alert, AlertData, SeverityLevel},
alerter::Alerter,
build::Build,
builder::Builder,
config::core::CoreConfig,
deployment::Deployment,
komodo_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::{
PartialResourceSyncConfig, PendingSyncUpdates,
PendingSyncUpdatesData, PendingSyncUpdatesDataErr,
PendingSyncUpdatesDataOk, ResourceSync,
},
ResourceTarget,
user::User,
NoData,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{
alert::send_alerts,
query::get_id_to_tags,
sync::{
deploy::SyncDeployParams,
resource::{get_updates_for_view, AllResourcesById},
},
},
resource,
state::{db_client, github_client, State},
};
// Create a new ResourceSync via the shared resource creation helper.
impl Resolve<CreateResourceSync, User> for State {
  #[instrument(name = "CreateResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    CreateResourceSync { name, config }: CreateResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    let sync =
      resource::create::<ResourceSync>(&name, config, &user).await?;
    Ok(sync)
  }
}
// Copy an existing sync's config into a brand new ResourceSync.
// Requires write permission on the source sync.
impl Resolve<CopyResourceSync, User> for State {
  #[instrument(name = "CopyResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    CopyResourceSync { name, id }: CopyResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    let source = resource::get_check_permissions::<ResourceSync>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // Convert the full config into the partial form expected by create.
    resource::create::<ResourceSync>(
      &name,
      source.config.into(),
      &user,
    )
    .await
  }
}
// Delete a ResourceSync by id via the shared resource deletion helper.
impl Resolve<DeleteResourceSync, User> for State {
  #[instrument(name = "DeleteResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    DeleteResourceSync { id }: DeleteResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    let deleted = resource::delete::<ResourceSync>(&id, &user).await?;
    Ok(deleted)
  }
}
// Apply a partial config update to a ResourceSync via the shared helper.
impl Resolve<UpdateResourceSync, User> for State {
  #[instrument(name = "UpdateResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    UpdateResourceSync { id, config }: UpdateResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    let updated =
      resource::update::<ResourceSync>(&id, config, &user).await?;
    Ok(updated)
  }
}
// Recompute the pending-updates view for a resource sync: pull the
// remote resource declarations, diff them against everything
// currently in the db, persist the result to info.pending, and
// open/close the "pending updates" alert in a background task.
//
// Fix: inside the spawned task, the alerts query awaited
// db_client() a second time even though `db` was already bound on
// the previous line (and is used for db.alerts further down) —
// reuse the existing handle. No other code changes.
impl Resolve<RefreshResourceSyncPending, User> for State {
  #[instrument(
    name = "RefreshResourceSyncPending",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    RefreshResourceSyncPending { sync }: RefreshResourceSyncPending,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    // Even though this is a write request, this doesn't change any config. Anyone that can execute the
    // sync should be able to do this.
    let sync = resource::get_check_permissions::<
      entities::sync::ResourceSync,
    >(&sync, &user, PermissionLevel::Execute)
    .await?;
    if sync.config.repo.is_empty() {
      return Err(anyhow!("resource sync repo not configured"));
    }
    // Compute the full pending-updates payload; any error inside
    // this block becomes a PendingSyncUpdatesData::Err below.
    let res = async {
      let (res, _, hash, message) =
        crate::helpers::sync::remote::get_remote_resources(&sync)
          .await
          .context("failed to get remote resources")?;
      let resources = res?;
      let id_to_tags = get_id_to_tags(None).await?;
      let all_resources = AllResourcesById::load().await?;
      // Name -> resource maps used by the deploy-diff computation.
      let deployments_by_name = all_resources
        .deployments
        .values()
        .map(|deployment| {
          (deployment.name.clone(), deployment.clone())
        })
        .collect::<HashMap<_, _>>();
      let stacks_by_name = all_resources
        .stacks
        .values()
        .map(|stack| (stack.name.clone(), stack.clone()))
        .collect::<HashMap<_, _>>();
      let deploy_updates =
        crate::helpers::sync::deploy::get_updates_for_view(
          SyncDeployParams {
            deployments: &resources.deployments,
            deployment_map: &deployments_by_name,
            stacks: &resources.stacks,
            stack_map: &stacks_by_name,
            all_resources: &all_resources,
          },
        )
        .await;
      // Diff each resource type independently.
      let data = PendingSyncUpdatesDataOk {
        server_updates: get_updates_for_view::<Server>(
          resources.servers,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get server updates")?,
        deployment_updates: get_updates_for_view::<Deployment>(
          resources.deployments,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get deployment updates")?,
        stack_updates: get_updates_for_view::<Stack>(
          resources.stacks,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get stack updates")?,
        build_updates: get_updates_for_view::<Build>(
          resources.builds,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get build updates")?,
        repo_updates: get_updates_for_view::<Repo>(
          resources.repos,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get repo updates")?,
        procedure_updates: get_updates_for_view::<Procedure>(
          resources.procedures,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get procedure updates")?,
        alerter_updates: get_updates_for_view::<Alerter>(
          resources.alerters,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get alerter updates")?,
        builder_updates: get_updates_for_view::<Builder>(
          resources.builders,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get builder updates")?,
        server_template_updates:
          get_updates_for_view::<ServerTemplate>(
            resources.server_templates,
            sync.config.delete,
            &all_resources,
            &id_to_tags,
          )
          .await
          .context("failed to get server template updates")?,
        resource_sync_updates: get_updates_for_view::<
          entities::sync::ResourceSync,
        >(
          resources.resource_syncs,
          sync.config.delete,
          &all_resources,
          &id_to_tags,
        )
        .await
        .context("failed to get resource sync updates")?,
        variable_updates:
          crate::helpers::sync::variables::get_updates_for_view(
            resources.variables,
            sync.config.delete,
          )
          .await
          .context("failed to get variable updates")?,
        user_group_updates:
          crate::helpers::sync::user_groups::get_updates_for_view(
            resources.user_groups,
            sync.config.delete,
            &all_resources,
          )
          .await
          .context("failed to get user group updates")?,
        deploy_updates,
      };
      anyhow::Ok((hash, message, data))
    }
    .await;
    // Fold the computation into the stored PendingSyncUpdates plus a
    // flag used for alert open/close below. Errors resolve to false.
    let (pending, has_updates) = match res {
      Ok((hash, message, data)) => {
        let has_updates = !data.no_updates();
        (
          PendingSyncUpdates {
            hash: Some(hash),
            message: Some(message),
            data: PendingSyncUpdatesData::Ok(data),
          },
          has_updates,
        )
      }
      Err(e) => (
        PendingSyncUpdates {
          hash: None,
          message: None,
          data: PendingSyncUpdatesData::Err(
            PendingSyncUpdatesDataErr {
              message: format_serror(&e.into()),
            },
          ),
        },
        false,
      ),
    };
    let pending = to_document(&pending)
      .context("failed to serialize pending to document")?;
    update_one_by_id(
      &db_client().await.resource_syncs,
      &sync.id,
      doc! { "$set": { "info.pending": pending } },
      None,
    )
    .await?;
    // check to update alert
    let id = sync.id.clone();
    let name = sync.name.clone();
    tokio::task::spawn(async move {
      let db = db_client().await;
      // Find an unresolved pending-updates alert for this sync, if any.
      let Some(existing) = db
        .alerts
        .find_one(doc! {
          "resolved": false,
          "target.type": "ResourceSync",
          "target.id": &id,
        })
        .await
        .context("failed to query db for alert")
        .inspect_err(|e| warn!("{e:#}"))
        .ok()
      else {
        return;
      };
      match (existing, has_updates) {
        // OPEN A NEW ALERT
        (None, true) => {
          let alert = Alert {
            id: Default::default(),
            ts: komodo_timestamp(),
            resolved: false,
            level: SeverityLevel::Ok,
            target: ResourceTarget::ResourceSync(id.clone()),
            data: AlertData::ResourceSyncPendingUpdates { id, name },
            resolved_ts: None,
          };
          db.alerts
            .insert_one(&alert)
            .await
            .context("failed to open existing pending resource sync updates alert")
            .inspect_err(|e| warn!("{e:#}"))
            .ok();
          send_alerts(&[alert]).await;
        }
        // CLOSE ALERT
        (Some(existing), false) => {
          update_one_by_id(
            &db.alerts,
            &existing.id,
            doc! {
              "$set": {
                "resolved": true,
                "resolved_ts": komodo_timestamp()
              }
            },
            None,
          )
          .await
          .context("failed to close existing pending resource sync updates alert")
          .inspect_err(|e| warn!("{e:#}"))
          .ok();
        }
        // NOTHING TO DO
        _ => {}
      }
    });
    crate::resource::get::<ResourceSync>(&sync.id).await
  }
}
// Create a github push webhook pointing at this sync's listener url
// for the given action (Refresh or Sync). If an active webhook with
// the same url already exists, does nothing. Also flips
// webhook_enabled on in the sync config if it was off.
//
// Fix: the "missing repo after /" error said "Repo repo" — a
// copy-paste slip; corrected to "Sync repo", consistent with the
// other error strings in this handler and the delete handler.
impl Resolve<CreateSyncWebhook, User> for State {
  #[instrument(name = "CreateSyncWebhook", skip(self, user))]
  async fn resolve(
    &self,
    CreateSyncWebhook { sync, action }: CreateSyncWebhook,
    user: User,
  ) -> anyhow::Result<CreateSyncWebhookResponse> {
    // Webhook management requires the github app to be configured.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if sync.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't create webhook"
      ));
    }
    // sync.config.repo is expected as "owner/repo".
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo =
      split.next().context("Sync repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      webhook_secret,
      ..
    } = core_config();
    // A sync-level webhook secret overrides the global one.
    let webhook_secret = if sync.config.webhook_secret.is_empty() {
      webhook_secret
    } else {
      &sync.config.webhook_secret
    };
    let host = webhook_base_url.as_ref().unwrap_or(host);
    let url = match action {
      SyncWebhookAction::Refresh => {
        format!("{host}/listener/github/sync/{}/refresh", sync.id)
      }
      SyncWebhookAction::Sync => {
        format!("{host}/listener/github/sync/{}/sync", sync.id)
      }
    };
    // An active webhook with this exact url already exists: done.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        return Ok(NoData {});
      }
    }
    // Now good to create the webhook
    let request = ReposCreateWebhookRequest {
      active: Some(true),
      config: Some(ReposCreateWebhookRequestConfig {
        url,
        secret: webhook_secret.to_string(),
        content_type: String::from("json"),
        insecure_ssl: None,
        digest: Default::default(),
        token: Default::default(),
      }),
      events: vec![String::from("push")],
      name: String::from("web"),
    };
    github_repos
      .create_webhook(owner, repo, &request)
      .await
      .context("failed to create webhook")?;
    // Make sure the sync will actually act on incoming webhooks.
    if !sync.config.webhook_enabled {
      self
        .resolve(
          UpdateResourceSync {
            id: sync.id,
            config: PartialResourceSyncConfig {
              webhook_enabled: Some(true),
              ..Default::default()
            },
          },
          user,
        )
        .await
        .context("failed to update sync to enable webhook")?;
    }
    Ok(NoData {})
  }
}
// Delete the github webhook pointing at this sync's listener url
// for the given action. A no-op if no matching active webhook is
// found. Requires write permission on the sync.
//
// Fix: the empty-repo error said "can't create webhook" in this
// delete handler — corrected to "can't delete webhook".
impl Resolve<DeleteSyncWebhook, User> for State {
  #[instrument(name = "DeleteSyncWebhook", skip(self, user))]
  async fn resolve(
    &self,
    DeleteSyncWebhook { sync, action }: DeleteSyncWebhook,
    user: User,
  ) -> anyhow::Result<DeleteSyncWebhookResponse> {
    // Webhook management requires the github app to be configured.
    let Some(github) = github_client() else {
      return Err(anyhow!(
        "github_webhook_app is not configured in core config toml"
      ));
    };
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    if sync.config.git_provider != "github.com" {
      return Err(anyhow!(
        "Can only manage github.com repo webhooks"
      ));
    }
    if sync.config.repo.is_empty() {
      return Err(anyhow!(
        "No repo configured, can't delete webhook"
      ));
    }
    // sync.config.repo is expected as "owner/repo".
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Err(anyhow!(
        "Cannot manage repo webhooks under owner {owner}"
      ));
    };
    let repo =
      split.next().context("Sync repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      ..
    } = core_config();
    let host = webhook_base_url.as_ref().unwrap_or(host);
    let url = match action {
      SyncWebhookAction::Refresh => {
        format!("{host}/listener/github/sync/{}/refresh", sync.id)
      }
      SyncWebhookAction::Sync => {
        format!("{host}/listener/github/sync/{}/sync", sync.id)
      }
    };
    // Delete the first active webhook whose url matches.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        github_repos
          .delete_webhook(owner, repo, webhook.id)
          .await
          .context("failed to delete webhook")?;
        return Ok(NoData {});
      }
    }
    // No webhook to delete, all good
    Ok(NoData {})
  }
}

View File

@@ -1,8 +1,7 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
use komodo_client::{
api::write::{
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
UpdateTagsOnResourceResponse,
@@ -11,8 +10,8 @@ use monitor_client::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, tag::Tag,
update::ResourceTarget, user::User,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, tag::Tag, user::User, ResourceTarget,
},
};
use mungos::{
@@ -22,14 +21,11 @@ use mungos::{
use resolver_api::Resolve;
use crate::{
helpers::{
query::{get_tag, get_tag_check_owner},
resource::StateResource,
},
helpers::query::{get_tag, get_tag_check_owner},
resource,
state::{db_client, State},
};
#[async_trait]
impl Resolve<CreateTag, User> for State {
#[instrument(name = "CreateTag", skip(self, user))]
async fn resolve(
@@ -50,7 +46,7 @@ impl Resolve<CreateTag, User> for State {
tag.id = db_client()
.await
.tags
.insert_one(&tag, None)
.insert_one(&tag)
.await
.context("failed to create tag on db")?
.inserted_id
@@ -62,8 +58,8 @@ impl Resolve<CreateTag, User> for State {
}
}
#[async_trait]
impl Resolve<RenameTag, User> for State {
#[instrument(name = "RenameTag", skip(self, user))]
async fn resolve(
&self,
RenameTag { id, name }: RenameTag,
@@ -88,7 +84,6 @@ impl Resolve<RenameTag, User> for State {
}
}
#[async_trait]
impl Resolve<DeleteTag, User> for State {
#[instrument(name = "DeleteTag", skip(self, user))]
async fn resolve(
@@ -99,13 +94,14 @@ impl Resolve<DeleteTag, User> for State {
let tag = get_tag_check_owner(&id, &user).await?;
tokio::try_join!(
Server::remove_tag_from_resources(&id,),
Deployment::remove_tag_from_resources(&id,),
Build::remove_tag_from_resources(&id,),
Repo::remove_tag_from_resources(&id,),
Builder::remove_tag_from_resources(&id,),
Alerter::remove_tag_from_resources(&id,),
Procedure::remove_tag_from_resources(&id,),
resource::remove_tag_from_all::<Server>(&id),
resource::remove_tag_from_all::<Deployment>(&id),
resource::remove_tag_from_all::<Build>(&id),
resource::remove_tag_from_all::<Repo>(&id),
resource::remove_tag_from_all::<Builder>(&id),
resource::remove_tag_from_all::<Alerter>(&id),
resource::remove_tag_from_all::<Procedure>(&id),
resource::remove_tag_from_all::<ServerTemplate>(&id),
)?;
delete_one_by_id(&db_client().await.tags, &id, None).await?;
@@ -114,7 +110,6 @@ impl Resolve<DeleteTag, User> for State {
}
}
#[async_trait]
impl Resolve<UpdateTagsOnResource, User> for State {
#[instrument(name = "UpdateTagsOnResource", skip(self, user))]
async fn resolve(
@@ -125,78 +120,96 @@ impl Resolve<UpdateTagsOnResource, User> for State {
match target {
ResourceTarget::System(_) => return Err(anyhow!("")),
ResourceTarget::Build(id) => {
Build::get_resource_check_permissions(
resource::get_check_permissions::<Build>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Build::update_tags_on_resource(&id, tags, user).await?;
resource::update_tags::<Build>(&id, tags, user).await?;
}
ResourceTarget::Builder(id) => {
Builder::get_resource_check_permissions(
resource::get_check_permissions::<Builder>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Builder::update_tags_on_resource(&id, tags, user).await?
resource::update_tags::<Builder>(&id, tags, user).await?
}
ResourceTarget::Deployment(id) => {
Deployment::get_resource_check_permissions(
resource::get_check_permissions::<Deployment>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Deployment::update_tags_on_resource(&id, tags, user).await?
resource::update_tags::<Deployment>(&id, tags, user).await?
}
ResourceTarget::Server(id) => {
Server::get_resource_check_permissions(
resource::get_check_permissions::<Server>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Server::update_tags_on_resource(&id, tags, user).await?
resource::update_tags::<Server>(&id, tags, user).await?
}
ResourceTarget::Repo(id) => {
Repo::get_resource_check_permissions(
resource::get_check_permissions::<Repo>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Repo::update_tags_on_resource(&id, tags, user).await?
resource::update_tags::<Repo>(&id, tags, user).await?
}
ResourceTarget::Alerter(id) => {
Alerter::get_resource_check_permissions(
resource::get_check_permissions::<Alerter>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Alerter::update_tags_on_resource(&id, tags, user).await?
resource::update_tags::<Alerter>(&id, tags, user).await?
}
ResourceTarget::Procedure(id) => {
Procedure::get_resource_check_permissions(
resource::get_check_permissions::<Procedure>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
Procedure::update_tags_on_resource(&id, tags, user).await?
resource::update_tags::<Procedure>(&id, tags, user).await?
}
ResourceTarget::ServerTemplate(id) => {
ServerTemplate::get_resource_check_permissions(
resource::get_check_permissions::<ServerTemplate>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
ServerTemplate::update_tags_on_resource(&id, tags, user)
resource::update_tags::<ServerTemplate>(&id, tags, user)
.await?
}
ResourceTarget::ResourceSync(id) => {
resource::get_check_permissions::<ResourceSync>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::update_tags::<ResourceSync>(&id, tags, user).await?
}
ResourceTarget::Stack(id) => {
resource::get_check_permissions::<Stack>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Stack>(&id, tags, user).await?
}
};
Ok(UpdateTagsOnResourceResponse {})
}

View File

@@ -1,173 +0,0 @@
use std::{collections::VecDeque, str::FromStr};
use anyhow::{anyhow, Context};
use async_trait::async_trait;
use monitor_client::{
api::write::{
CreateServiceUser, CreateServiceUserResponse, PushRecentlyViewed,
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse, UpdateServiceUserDescription,
UpdateServiceUserDescriptionResponse,
},
entities::{
monitor_timestamp,
user::{User, UserConfig},
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, oid::ObjectId, to_bson},
};
use resolver_api::Resolve;
use crate::{
helpers::query::get_user,
state::{db_client, State},
};
const RECENTLY_VIEWED_MAX: usize = 10;
#[async_trait]
impl Resolve<PushRecentlyViewed, User> for State {
#[instrument(name = "PushRecentlyViewed", skip(self, user))]
async fn resolve(
&self,
PushRecentlyViewed { resource }: PushRecentlyViewed,
user: User,
) -> anyhow::Result<PushRecentlyViewedResponse> {
let mut recently_viewed = get_user(&user.id)
.await?
.recently_viewed
.into_iter()
.filter(|r| !resource.eq(r))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recently_viewed.push_front(resource);
let recently_viewed = to_bson(&recently_viewed)
.context("failed to convert recently views to bson")?;
update_one_by_id(
&db_client().await.users,
&user.id,
mungos::update::Update::Set(doc! {
"recently_viewed": recently_viewed
}),
None,
)
.await
.context("context")?;
Ok(PushRecentlyViewedResponse {})
}
}
#[async_trait]
impl Resolve<SetLastSeenUpdate, User> for State {
#[instrument(name = "SetLastSeenUpdate", skip(self, user))]
async fn resolve(
&self,
SetLastSeenUpdate {}: SetLastSeenUpdate,
user: User,
) -> anyhow::Result<SetLastSeenUpdateResponse> {
update_one_by_id(
&db_client().await.users,
&user.id,
mungos::update::Update::Set(doc! {
"last_update_view": monitor_timestamp()
}),
None,
)
.await
.context("failed to update user last_update_view")?;
Ok(SetLastSeenUpdateResponse {})
}
}
#[async_trait]
impl Resolve<CreateServiceUser, User> for State {
#[instrument(name = "CreateServiceUser", skip(self, user))]
async fn resolve(
&self,
CreateServiceUser {
username,
description,
}: CreateServiceUser,
user: User,
) -> anyhow::Result<CreateServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
if ObjectId::from_str(&username).is_ok() {
return Err(anyhow!("username cannot be valid ObjectId"));
}
let config = UserConfig::Service { description };
let mut user = User {
id: Default::default(),
username,
config,
enabled: true,
admin: false,
create_server_permissions: false,
create_build_permissions: false,
last_update_view: 0,
recently_viewed: Vec::new(),
updated_at: monitor_timestamp(),
};
user.id = db_client()
.await
.users
.insert_one(&user, None)
.await
.context("failed to create service user on db")?
.inserted_id
.as_object_id()
.context("inserted id is not object id")?
.to_string();
Ok(user)
}
}
#[async_trait]
impl Resolve<UpdateServiceUserDescription, User> for State {
#[instrument(
name = "UpdateServiceUserDescription",
skip(self, user)
)]
async fn resolve(
&self,
UpdateServiceUserDescription {
username,
description,
}: UpdateServiceUserDescription,
user: User,
) -> anyhow::Result<UpdateServiceUserDescriptionResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
let db = db_client().await;
let service_user = db
.users
.find_one(doc! { "username": &username }, None)
.await
.context("failed to query db for user")?
.context("no user with given username")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(anyhow!("user is not service user"));
};
db.users
.update_one(
doc! { "username": &username },
doc! { "$set": { "config.data.description": description } },
None,
)
.await
.context("failed to update user on db")?;
db.users
.find_one(doc! { "username": &username }, None)
.await
.context("failed to query db for user")?
.context("user with username not found")
}
}

View File

@@ -1,13 +1,12 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use axum::async_trait;
use monitor_client::{
use komodo_client::{
api::write::{
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,
RemoveUserFromUserGroup, RenameUserGroup, SetUsersInUserGroup,
},
entities::{monitor_timestamp, user::User, user_group::UserGroup},
entities::{komodo_timestamp, user::User, user_group::UserGroup},
};
use mungos::{
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
@@ -18,7 +17,6 @@ use resolver_api::Resolve;
use crate::state::{db_client, State};
#[async_trait]
impl Resolve<CreateUserGroup, User> for State {
async fn resolve(
&self,
@@ -31,13 +29,14 @@ impl Resolve<CreateUserGroup, User> for State {
let user_group = UserGroup {
id: Default::default(),
users: Default::default(),
updated_at: monitor_timestamp(),
all: Default::default(),
updated_at: komodo_timestamp(),
name,
};
let db = db_client().await;
let id = db
.user_groups
.insert_one(user_group, None)
.insert_one(user_group)
.await
.context("failed to create UserGroup on db")?
.inserted_id
@@ -51,7 +50,6 @@ impl Resolve<CreateUserGroup, User> for State {
}
}
#[async_trait]
impl Resolve<RenameUserGroup, User> for State {
async fn resolve(
&self,
@@ -77,7 +75,6 @@ impl Resolve<RenameUserGroup, User> for State {
}
}
#[async_trait]
impl Resolve<DeleteUserGroup, User> for State {
async fn resolve(
&self,
@@ -103,7 +100,7 @@ impl Resolve<DeleteUserGroup, User> for State {
.delete_many(doc! {
"user_target.type": "UserGroup",
"user_target.id": id,
}, None)
})
.await
.context("failed to clean up UserGroups permissions. User Group has been deleted")?;
@@ -111,7 +108,6 @@ impl Resolve<DeleteUserGroup, User> for State {
}
}
#[async_trait]
impl Resolve<AddUserToUserGroup, User> for State {
async fn resolve(
&self,
@@ -130,7 +126,7 @@ impl Resolve<AddUserToUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -143,19 +139,17 @@ impl Resolve<AddUserToUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$addToSet": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
}
}
#[async_trait]
impl Resolve<RemoveUserFromUserGroup, User> for State {
async fn resolve(
&self,
@@ -177,7 +171,7 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
};
let user = db
.users
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query mongo for users")?
.context("no matching user found")?;
@@ -190,19 +184,17 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
.update_one(
filter.clone(),
doc! { "$pull": { "users": &user.id } },
None,
)
.await
.context("failed to add user to group on db")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")
}
}
#[async_trait]
impl Resolve<SetUsersInUserGroup, User> for State {
async fn resolve(
&self,
@@ -236,15 +228,11 @@ impl Resolve<SetUsersInUserGroup, User> for State {
Err(_) => doc! { "name": &user_group },
};
db.user_groups
.update_one(
filter.clone(),
doc! { "$set": { "users": users } },
None,
)
.update_one(filter.clone(), doc! { "$set": { "users": users } })
.await
.context("failed to add user to group on db")?;
.context("failed to set users on user group")?;
db.user_groups
.find_one(filter, None)
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")

View File

@@ -0,0 +1,202 @@
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::{
CreateVariable, CreateVariableResponse, DeleteVariable,
DeleteVariableResponse, UpdateVariableDescription,
UpdateVariableDescriptionResponse, UpdateVariableIsSecret,
UpdateVariableIsSecretResponse, UpdateVariableValue,
UpdateVariableValueResponse,
},
entities::{
user::User, variable::Variable, Operation, ResourceTarget,
},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::{
query::get_variable,
update::{add_update, make_update},
},
state::{db_client, State},
};
impl Resolve<CreateVariable, User> for State {
  /// Create a new global variable. Admin only.
  ///
  /// Inserts the variable on the db, records the creation on the
  /// system audit log, then returns the variable as read back
  /// from the database.
  #[instrument(name = "CreateVariable", skip(self, user, value))]
  async fn resolve(
    &self,
    CreateVariable {
      name,
      value,
      description,
      is_secret,
    }: CreateVariable,
    user: User,
  ) -> anyhow::Result<CreateVariableResponse> {
    if !user.admin {
      return Err(anyhow!("only admins can create variables"));
    }
    let new_variable = Variable {
      name,
      value,
      description,
      is_secret,
    };
    let db = db_client().await;
    db.variables
      .insert_one(&new_variable)
      .await
      .context("failed to create variable on db")?;
    // Record the creation on the system audit log.
    let mut audit = make_update(
      ResourceTarget::system(),
      Operation::CreateVariable,
      &user,
    );
    audit.push_simple_log(
      "create variable",
      format!("{new_variable:#?}"),
    );
    audit.finalize();
    add_update(audit).await?;
    // Respond with the variable as stored on the db.
    get_variable(&new_variable.name).await
  }
}
impl Resolve<UpdateVariableValue, User> for State {
  /// Update the value of an existing variable. Admin only.
  ///
  /// Errors with "no change" if the new value equals the current one.
  /// For secret variables, values are masked with '#' in the audit log.
  ///
  /// Fix: previously only the OLD value was masked for secret variables —
  /// the new value was written to the audit log in plaintext, leaking
  /// the secret. Both sides are now masked when `is_secret` is set.
  #[instrument(name = "UpdateVariableValue", skip(self, user, value))]
  async fn resolve(
    &self,
    UpdateVariableValue { name, value }: UpdateVariableValue,
    user: User,
  ) -> anyhow::Result<UpdateVariableValueResponse> {
    if !user.admin {
      return Err(anyhow!("only admins can update variables"));
    }
    let variable = get_variable(&name).await?;
    if value == variable.value {
      return Err(anyhow!("no change"));
    }
    db_client()
      .await
      .variables
      .update_one(
        doc! { "name": &name },
        doc! { "$set": { "value": &value } },
      )
      .await
      .context("failed to update variable value on db")?;
    let mut update = make_update(
      ResourceTarget::system(),
      Operation::UpdateVariableValue,
      &user,
    );
    // Mask both old and new values in the log when the variable is secret.
    let (previous, current) = if variable.is_secret {
      (
        variable.value.replace(|_| true, "#"),
        value.replace(|_| true, "#"),
      )
    } else {
      (variable.value.clone(), value.clone())
    };
    let log = format!(
      "<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{previous}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{current}</span>"
    );
    update.push_simple_log("update variable value", log);
    update.finalize();
    add_update(update).await?;
    get_variable(&name).await
  }
}
impl Resolve<UpdateVariableDescription, User> for State {
  /// Update a variable's description. Admin only.
  ///
  /// Returns the variable as read back from the database.
  #[instrument(name = "UpdateVariableDescription", skip(self, user))]
  async fn resolve(
    &self,
    UpdateVariableDescription { name, description }: UpdateVariableDescription,
    user: User,
  ) -> anyhow::Result<UpdateVariableDescriptionResponse> {
    if !user.admin {
      return Err(anyhow!("only admins can update variables"));
    }
    let db = db_client().await;
    let filter = doc! { "name": &name };
    let update = doc! { "$set": { "description": &description } };
    db.variables
      .update_one(filter, update)
      .await
      .context("failed to update variable description on db")?;
    // Respond with the updated variable.
    get_variable(&name).await
  }
}
impl Resolve<UpdateVariableIsSecret, User> for State {
  /// Toggle whether a variable is treated as a secret. Admin only.
  ///
  /// Returns the variable as read back from the database.
  #[instrument(name = "UpdateVariableIsSecret", skip(self, user))]
  async fn resolve(
    &self,
    UpdateVariableIsSecret { name, is_secret }: UpdateVariableIsSecret,
    user: User,
  ) -> anyhow::Result<UpdateVariableIsSecretResponse> {
    if !user.admin {
      return Err(anyhow!("only admins can update variables"));
    }
    let db = db_client().await;
    let filter = doc! { "name": &name };
    let update = doc! { "$set": { "is_secret": is_secret } };
    db.variables
      .update_one(filter, update)
      .await
      .context("failed to update variable is secret on db")?;
    // Respond with the updated variable.
    get_variable(&name).await
  }
}
impl Resolve<DeleteVariable, User> for State {
async fn resolve(
&self,
DeleteVariable { name }: DeleteVariable,
user: User,
) -> anyhow::Result<DeleteVariableResponse> {
if !user.admin {
return Err(anyhow!("only admins can delete variables"));
}
let variable = get_variable(&name).await?;
db_client()
.await
.variables
.delete_one(doc! { "name": &name })
.await
.context("failed to delete variable on db")?;
let mut update = make_update(
ResourceTarget::system(),
Operation::DeleteVariable,
&user,
);
update
.push_simple_log("delete variable", format!("{variable:#?}"));
update.finalize();
add_update(update).await?;
Ok(variable)
}
}

View File

@@ -1,7 +1,7 @@
use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use monitor_client::entities::config::core::{
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
@@ -9,8 +9,8 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio::sync::Mutex;
use crate::{
auth::{random_string, STATE_PREFIX_LENGTH},
config::core_config,
auth::STATE_PREFIX_LENGTH, config::core_config,
helpers::random_string,
};
pub fn github_oauth_client() -> &'static Option<GithubOauthClient> {
@@ -216,8 +216,8 @@ impl GithubOauthClient {
#[derive(Deserialize)]
pub struct AccessTokenResponse {
pub access_token: String,
pub scope: String,
pub token_type: String,
// pub scope: String,
// pub token_type: String,
}
#[derive(Deserialize)]
@@ -225,5 +225,5 @@ pub struct GithubUserResponse {
pub login: String,
pub id: u128,
pub avatar_url: String,
pub email: Option<String>,
// pub email: Option<String>,
}

View File

@@ -2,8 +2,9 @@ use anyhow::{anyhow, Context};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use monitor_client::entities::{
monitor_timestamp,
use mongo_indexed::Document;
use komodo_client::entities::{
komodo_timestamp,
user::{User, UserConfig},
};
use mungos::mongodb::bson::doc;
@@ -66,7 +67,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.github_id": &github_id }, None)
.find_one(doc! { "config.data.github_id": &github_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -74,19 +75,20 @@ async fn callback(
.generate(user.id)
.context("failed to generate jwt")?,
None => {
let ts = monitor_timestamp();
let ts = komodo_timestamp();
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: github_user.login,
enabled: no_users_exist,
enabled: no_users_exist || core_config().enable_new_users,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recently_viewed: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Github {
github_id,
avatar: github_user.avatar_url,
@@ -94,7 +96,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -2,15 +2,17 @@ use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use jwt::Token;
use monitor_client::entities::config::core::{CoreConfig, OauthCredentials};
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{de::DeserializeOwned, Deserialize};
use serde_json::Value;
use tokio::sync::Mutex;
use crate::{
auth::{random_string, STATE_PREFIX_LENGTH},
config::core_config,
auth::STATE_PREFIX_LENGTH, config::core_config,
helpers::random_string,
};
pub fn google_oauth_client() -> &'static Option<GoogleOauthClient> {
@@ -71,7 +73,7 @@ impl GoogleOauthClient {
client_id: id.clone(),
client_secret: secret.clone(),
redirect_uri: format!("{host}/auth/google/callback"),
user_agent: String::from("monitor"),
user_agent: String::from("komodo"),
states: Default::default(),
scopes,
}
@@ -183,10 +185,10 @@ impl GoogleOauthClient {
#[derive(Deserialize)]
pub struct AccessTokenResponse {
pub access_token: String,
// pub access_token: String,
pub id_token: String,
pub scope: String,
pub token_type: String,
// pub scope: String,
// pub token_type: String,
}
#[derive(Deserialize, Clone)]

View File

@@ -3,7 +3,8 @@ use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use monitor_client::entities::user::{User, UserConfig};
use mongo_indexed::Document;
use komodo_client::entities::user::{User, UserConfig};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
use serde::Deserialize;
@@ -75,7 +76,7 @@ async fn callback(
let db_client = db_client().await;
let user = db_client
.users
.find_one(doc! { "config.data.google_id": &google_id }, None)
.find_one(doc! { "config.data.google_id": &google_id })
.await
.context("failed at find user query from mongo")?;
let jwt = match user {
@@ -85,7 +86,7 @@ async fn callback(
None => {
let ts = unix_timestamp_ms() as i64;
let no_users_exist =
db_client.users.find_one(None, None).await?.is_none();
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {
id: Default::default(),
username: google_user
@@ -95,13 +96,14 @@ async fn callback(
.first()
.unwrap()
.to_string(),
enabled: no_users_exist,
enabled: no_users_exist || core_config().enable_new_users,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recently_viewed: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Google {
google_id,
avatar: google_user.picture,
@@ -109,7 +111,7 @@ async fn callback(
};
let user_id = db_client
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user on mongo")?
.inserted_id

View File

@@ -6,13 +6,13 @@ use async_timing_util::{
};
use hmac::{Hmac, Mac};
use jwt::SignWithKey;
use monitor_client::entities::config::core::CoreConfig;
use komodo_client::entities::config::core::CoreConfig;
use mungos::mongodb::bson::doc;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::sync::Mutex;
use super::random_string;
use crate::helpers::random_string;
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
@@ -25,26 +25,31 @@ pub struct JwtClaims {
pub struct JwtClient {
pub key: Hmac<Sha256>,
valid_for_ms: u128,
ttl_ms: u128,
exchange_tokens: ExchangeTokenMap,
}
impl JwtClient {
pub fn new(config: &CoreConfig) -> JwtClient {
let key = Hmac::new_from_slice(random_string(40).as_bytes())
.expect("failed at taking HmacSha256 of jwt secret");
JwtClient {
pub fn new(config: &CoreConfig) -> anyhow::Result<JwtClient> {
let secret = if config.jwt_secret.is_empty() {
random_string(40)
} else {
config.jwt_secret.clone()
};
let key = Hmac::new_from_slice(secret.as_bytes())
.context("failed at taking HmacSha256 of jwt secret")?;
Ok(JwtClient {
key,
valid_for_ms: get_timelength_in_ms(
config.jwt_valid_for.to_string().parse().unwrap(),
ttl_ms: get_timelength_in_ms(
config.jwt_ttl.to_string().parse()?,
),
exchange_tokens: Default::default(),
}
})
}
pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
let iat = unix_timestamp_ms();
let exp = iat + self.valid_for_ms;
let exp = iat + self.ttl_ms;
let claims = JwtClaims {
id: user_id,
iat,

View File

@@ -2,9 +2,9 @@ use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use async_trait::async_trait;
use axum::http::HeaderMap;
use monitor_client::{
use mongo_indexed::Document;
use komodo_client::{
api::auth::{
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
LoginLocalUserResponse,
@@ -22,7 +22,6 @@ use crate::{
const BCRYPT_COST: u32 = 10;
#[async_trait]
impl Resolve<CreateLocalUser, HeaderMap> for State {
#[instrument(name = "CreateLocalUser", skip(self))]
async fn resolve(
@@ -30,7 +29,9 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
CreateLocalUser { username, password }: CreateLocalUser,
_: HeaderMap,
) -> anyhow::Result<CreateLocalUserResponse> {
if !core_config().local_auth {
let core_config = core_config();
if !core_config.local_auth {
return Err(anyhow!("local auth is not enabled"));
}
@@ -42,13 +43,17 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
return Err(anyhow!("username cannot be valid ObjectId"));
}
if password.is_empty() {
return Err(anyhow!("password cannot be empty string"));
}
let password = bcrypt::hash(password, BCRYPT_COST)
.context("failed to hash password")?;
let no_users_exist = db_client()
.await
.users
.find_one(None, None)
.find_one(Document::new())
.await?
.is_none();
@@ -57,20 +62,21 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
let user = User {
id: Default::default(),
username,
enabled: no_users_exist,
enabled: no_users_exist || core_config.enable_new_users,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
updated_at: ts,
last_update_view: 0,
recently_viewed: Vec::new(),
recents: Default::default(),
all: Default::default(),
config: UserConfig::Local { password },
};
let user_id = db_client()
.await
.users
.insert_one(user, None)
.insert_one(user)
.await
.context("failed to create user")?
.inserted_id
@@ -86,7 +92,6 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
}
}
#[async_trait]
impl Resolve<LoginLocalUser, HeaderMap> for State {
#[instrument(name = "LoginLocalUser", level = "debug", skip(self))]
async fn resolve(
@@ -101,7 +106,7 @@ impl Resolve<LoginLocalUser, HeaderMap> for State {
let user = db_client()
.await
.users
.find_one(doc! { "username": &username }, None)
.find_one(doc! { "username": &username })
.await
.context("failed at db query for users")?
.with_context(|| {

View File

@@ -5,10 +5,11 @@ use axum::{
extract::Request, http::HeaderMap, middleware::Next,
response::Response,
};
use monitor_client::entities::{monitor_timestamp, user::User};
use komodo_client::entities::{komodo_timestamp, user::User};
use mungos::mongodb::bson::doc;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use reqwest::StatusCode;
use serde::Deserialize;
use serror::AddStatusCode;
use crate::{
helpers::query::get_user,
@@ -36,19 +37,13 @@ pub async fn auth_request(
mut req: Request,
next: Next,
) -> serror::Result<Response> {
let user = authenticate_check_enabled(&headers).await?;
let user = authenticate_check_enabled(&headers)
.await
.status_code(StatusCode::UNAUTHORIZED)?;
req.extensions_mut().insert(user);
Ok(next.run(req).await)
}
pub fn random_string(length: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(length)
.map(char::from)
.collect()
}
#[instrument(level = "debug")]
pub async fn get_user_id_from_headers(
headers: &HeaderMap,
@@ -123,11 +118,11 @@ pub async fn auth_api_key_get_user_id(
let key = db_client()
.await
.api_keys
.find_one(doc! { "key": key }, None)
.find_one(doc! { "key": key })
.await
.context("failed to query db")?
.context("no api key matching key")?;
if key.expires != 0 && key.expires < monitor_timestamp() {
if key.expires != 0 && key.expires < komodo_timestamp() {
return Err(anyhow!("api key expired"));
}
if bcrypt::verify(secret, &key.secret)

View File

@@ -1,9 +1,8 @@
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use aws_config::BehaviorVersion;
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ec2::{
config::Region,
types::{
BlockDeviceMapping, EbsBlockDevice,
InstanceNetworkInterfaceSpecification, InstanceStateChange,
@@ -12,12 +11,12 @@ use aws_sdk_ec2::{
},
Client,
};
use monitor_client::entities::{
alert::{Alert, AlertData, AlertDataVariant},
monitor_timestamp,
server::stats::SeverityLevel,
server_template::AwsServerTemplateConfig,
update::ResourceTarget,
use base64::Engine;
use komodo_client::entities::{
alert::{Alert, AlertData, SeverityLevel},
komodo_timestamp,
server_template::aws::AwsServerTemplateConfig,
ResourceTarget,
};
use crate::{config::core_config, helpers::alert::send_alerts};
@@ -42,7 +41,7 @@ async fn create_ec2_client(region: String) -> Client {
&core_config().aws.secret_access_key,
);
let region = Region::new(region);
let config = aws_config::defaults(BehaviorVersion::v2023_11_09())
let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
.region(region)
.load()
.await;
@@ -64,6 +63,7 @@ pub async fn launch_ec2_instance(
key_pair_name,
assign_public_ip,
use_public_ip,
user_data,
port: _,
} = config;
let instance_type = handle_unknown_instance_type(
@@ -90,23 +90,28 @@ pub async fn launch_ec2_instance(
.build(),
)
.min_count(1)
.max_count(1);
.max_count(1)
.user_data(
base64::engine::general_purpose::STANDARD_NO_PAD
.encode(user_data),
);
for volume in volumes {
let mut ebs = EbsBlockDevice::builder()
let ebs = EbsBlockDevice::builder()
.volume_size(volume.size_gb)
.set_iops(volume.iops)
.set_throughput(volume.throughput);
if let Some(volume_type) = &volume.volume_type {
ebs = ebs.volume_type(
VolumeType::from_str(volume_type)
.volume_type(
VolumeType::from_str(volume.volume_type.as_ref())
.context("invalid volume type")?,
);
}
)
.set_iops((volume.iops != 0).then_some(volume.iops))
.set_throughput(
(volume.throughput != 0).then_some(volume.throughput),
)
.build();
req = req.block_device_mappings(
BlockDeviceMapping::builder()
.set_device_name(volume.device_name.clone().into())
.set_ebs(ebs.build().into())
.set_device_name(volume.device_name.into())
.set_ebs(ebs.into())
.build(),
)
}
@@ -162,16 +167,16 @@ pub async fn terminate_ec2_instance_with_retry(
}
Err(e) => {
if i == MAX_TERMINATION_TRIES - 1 {
error!("failed to terminate instance {instance_id}.");
error!("failed to terminate aws instance {instance_id}.");
let alert = Alert {
id: Default::default(),
ts: monitor_timestamp(),
ts: komodo_timestamp(),
resolved: false,
level: SeverityLevel::Critical,
target: ResourceTarget::system(),
variant: AlertDataVariant::AwsBuilderTerminationFailed,
data: AlertData::AwsBuilderTerminationFailed {
instance_id: instance_id.to_string(),
message: format!("{e:#}"),
},
resolved_ts: None,
};
@@ -188,7 +193,7 @@ pub async fn terminate_ec2_instance_with_retry(
unreachable!()
}
#[instrument]
#[instrument(skip(client))]
async fn terminate_ec2_instance_inner(
client: &Client,
instance_id: &str,

View File

@@ -0,0 +1,82 @@
use anyhow::{anyhow, Context};
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ecr::Client as EcrClient;
use run_command::async_run_command;
#[tracing::instrument(skip(access_key_id, secret_access_key))]
async fn make_ecr_client(
region: String,
access_key_id: &str,
secret_access_key: &str,
) -> EcrClient {
std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
let region = Region::new(region);
let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
.region(region)
.load()
.await;
EcrClient::new(&config)
}
/// Ensures an ECR repository named `repo` exists in `region`,
/// creating it if it does not already exist.
///
/// NOTE(review): `describe_repositories` is paginated; only the first
/// page is inspected here — confirm this is acceptable for accounts
/// with many repositories.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn maybe_create_repo(
  repo: &str,
  region: String,
  access_key_id: &str,
  secret_access_key: &str,
) -> anyhow::Result<()> {
  let client =
    make_ecr_client(region, access_key_id, secret_access_key).await;
  // Check whether the repository already exists before creating.
  let already_exists = client
    .describe_repositories()
    .send()
    .await
    .context("failed to describe existing repositories")?
    .repositories
    .unwrap_or_default()
    .iter()
    .any(|r| r.repository_name() == Some(repo));
  if !already_exists {
    client
      .create_repository()
      .repository_name(repo)
      .send()
      .await
      .context("failed to create repository")?;
  }
  Ok(())
}
/// Gets a token for docker login to ECR.
///
/// Requires the aws cli be installed on the host.
///
/// NOTE(review): the credentials are interpolated directly into the
/// shell command line, so they may appear in process listings while
/// the command runs, and values containing shell metacharacters would
/// break the command — consider passing them via the child process
/// environment instead. Left unchanged here.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn get_ecr_token(
  region: &str,
  access_key_id: &str,
  secret_access_key: &str,
) -> anyhow::Result<String> {
  let log = async_run_command(&format!(
    "AWS_ACCESS_KEY_ID={access_key_id} AWS_SECRET_ACCESS_KEY={secret_access_key} aws ecr get-login-password --region {region}"
  ))
  .await;
  if log.success() {
    // stdout holds the login password printed by the aws cli.
    Ok(log.stdout)
  } else {
    Err(
      anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr)
        .context("failed to get aws ecr login token"),
    )
  }
}

View File

@@ -0,0 +1,2 @@
pub mod ec2;
pub mod ecr;

View File

@@ -0,0 +1,157 @@
use anyhow::{anyhow, Context};
use axum::http::{HeaderName, HeaderValue};
use reqwest::{RequestBuilder, StatusCode};
use serde::{de::DeserializeOwned, Serialize};
use super::{
common::{
HetznerActionResponse, HetznerDatacenterResponse,
HetznerServerResponse, HetznerVolumeResponse,
},
create_server::{CreateServerBody, CreateServerResponse},
create_volume::{CreateVolumeBody, CreateVolumeResponse},
};
const BASE_URL: &str = "https://api.hetzner.cloud/v1";
/// Thin typed wrapper around a `reqwest::Client` preconfigured with
/// the Hetzner Cloud API bearer token in its default headers.
pub struct HetznerClient(reqwest::Client);
impl HetznerClient {
  /// Builds a client that sends `Authorization: Bearer <token>` on
  /// every request.
  ///
  /// # Panics
  /// Panics if `token` cannot be encoded as an HTTP header value, or
  /// if the underlying reqwest client fails to build.
  pub fn new(token: &str) -> HetznerClient {
    HetznerClient(
      reqwest::ClientBuilder::new()
        .default_headers(
          [(
            HeaderName::from_static("authorization"),
            HeaderValue::from_str(&format!("Bearer {token}"))
              .unwrap(),
          )]
          .into_iter()
          .collect(),
        )
        .build()
        .context("failed to build Hetzner request client")
        .unwrap(),
    )
  }
  /// GET /servers/{id}
  pub async fn get_server(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerServerResponse> {
    self.get(&format!("/servers/{id}")).await
  }
  /// POST /servers
  pub async fn create_server(
    &self,
    body: &CreateServerBody,
  ) -> anyhow::Result<CreateServerResponse> {
    self.post("/servers", body).await
  }
  /// DELETE /servers/{id}
  #[allow(unused)]
  pub async fn delete_server(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerActionResponse> {
    self.delete(&format!("/servers/{id}")).await
  }
  /// GET /volumes/{id}
  pub async fn get_volume(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerVolumeResponse> {
    self.get(&format!("/volumes/{id}")).await
  }
  /// POST /volumes
  pub async fn create_volume(
    &self,
    body: &CreateVolumeBody,
  ) -> anyhow::Result<CreateVolumeResponse> {
    self.post("/volumes", body).await
  }
  /// DELETE /volumes/{id}
  ///
  /// Handled manually rather than via the generic `delete` helper
  /// because success is a 204 No Content with an empty body, which
  /// cannot be deserialized as JSON.
  #[allow(unused)]
  pub async fn delete_volume(&self, id: i64) -> anyhow::Result<()> {
    let res = self
      .0
      .delete(format!("{BASE_URL}/volumes/{id}"))
      .send()
      .await
      .context("failed at request to delete volume")?;
    let status = res.status();
    if status == StatusCode::NO_CONTENT {
      Ok(())
    } else {
      let text = res
        .text()
        .await
        .context("failed to get response body as text")?;
      Err(anyhow!("{status} | {text}"))
    }
  }
  /// GET /datacenters
  #[allow(unused)]
  pub async fn list_datacenters(
    &self,
  ) -> anyhow::Result<HetznerDatacenterResponse> {
    self.get("/datacenters").await
  }
  // Generic GET helper: deserializes the JSON response body.
  async fn get<Res: DeserializeOwned>(
    &self,
    path: &str,
  ) -> anyhow::Result<Res> {
    let req = self.0.get(format!("{BASE_URL}{path}"));
    handle_req(req).await.with_context(|| {
      format!("failed at GET request to Hetzner | path: {path}")
    })
  }
  // Generic POST helper: serializes `body` as JSON, deserializes the
  // JSON response body.
  async fn post<Body: Serialize, Res: DeserializeOwned>(
    &self,
    path: &str,
    body: &Body,
  ) -> anyhow::Result<Res> {
    let req = self.0.post(format!("{BASE_URL}{path}")).json(&body);
    handle_req(req).await.with_context(|| {
      format!("failed at POST request to Hetzner | path: {path}")
    })
  }
  // Generic DELETE helper: deserializes the JSON response body.
  #[allow(unused)]
  async fn delete<Res: DeserializeOwned>(
    &self,
    path: &str,
  ) -> anyhow::Result<Res> {
    let req = self.0.delete(format!("{BASE_URL}{path}"));
    handle_req(req).await.with_context(|| {
      format!("failed at DELETE request to Hetzner | path: {path}")
    })
  }
}
/// Sends the request and deserializes a successful JSON response.
///
/// On a non-success status, reads the body as text and, when the body
/// itself is valid JSON, includes the parsed value in the error for
/// readability; otherwise includes the raw text.
async fn handle_req<Res: DeserializeOwned>(
  req: RequestBuilder,
) -> anyhow::Result<Res> {
  let response = req.send().await?;
  let status = response.status();
  // Guard clause: bubble up non-success responses as errors.
  if !status.is_success() {
    let body = response
      .text()
      .await
      .context("failed to get response body as text")?;
    let err = match serde_json::from_str::<serde_json::Value>(&body) {
      Ok(json_error) => anyhow!("{status} | {json_error:?}"),
      Err(_) => anyhow!("{status} | {body}"),
    };
    return Err(err);
  }
  response
    .json()
    .await
    .context("failed to parse response to json")
}

View File

@@ -0,0 +1,280 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
/// Response wrapper for Hetzner's "get a Server" endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerResponse {
  pub server: HetznerServer,
}
/// A Hetzner Cloud server as returned by the API.
/// Only the fields this crate reads are modeled.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServer {
  pub id: i64,
  pub name: String,
  pub primary_disk_size: f64,
  pub image: Option<HetznerImage>,
  // Private networks the server is attached to.
  pub private_net: Vec<HetznerPrivateNet>,
  pub public_net: HetznerPublicNet,
  pub server_type: HetznerServerTypeDetails,
  pub status: HetznerServerStatus,
  // IDs of attached volumes; empty if absent from the response.
  #[serde(default)]
  pub volumes: Vec<i64>,
}
/// Hardware spec details of a Hetzner server type.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerTypeDetails {
  pub architecture: String,
  pub cores: i64,
  pub cpu_type: String,
  pub description: String,
  pub disk: f64,
  pub id: i64,
  pub memory: f64,
  pub name: String,
}
/// A private network attachment on a server.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPrivateNet {
  pub alias_ips: Vec<String>,
  pub ip: String,
  pub mac_address: String,
  pub network: i64,
}
/// Public network configuration of a server.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPublicNet {
  // Empty if absent from the response.
  #[serde(default)]
  pub firewalls: Vec<HetznerFirewall>,
  pub floating_ips: Vec<i64>,
  // None when the server has no public IPv4 attached.
  pub ipv4: Option<HetznerIpv4>,
}
/// A firewall applied to a server's public interface.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerFirewall {
  pub id: i64,
  pub status: String,
}
/// A public IPv4 address attached to a server.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerIpv4 {
  pub id: Option<i64>,
  pub blocked: bool,
  pub dns_ptr: String,
  pub ip: String,
}
/// The OS image a server was created from.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerImage {
  pub id: i64,
  pub description: String,
  pub name: Option<String>,
  pub os_flavor: String,
  pub os_version: Option<String>,
  pub rapid_deploy: Option<bool>,
}
/// Response wrapper containing a single Hetzner action.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerActionResponse {
  pub action: HetznerAction,
}
/// An asynchronous action tracked by the Hetzner API.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerAction {
  pub command: String,
  // Set when the action finished with an error.
  pub error: Option<HetznerError>,
  // Completion timestamp; None while the action is still running.
  pub finished: Option<String>,
  pub id: i64,
  pub progress: i32,
  pub resources: Vec<HetznerResource>,
  pub started: String,
  pub status: HetznerActionStatus,
}
/// Error detail attached to a failed action.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerError {
  pub code: String,
  pub message: String,
}
/// A resource referenced by an action.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerResource {
  pub id: i64,
  // "type" is a Rust keyword, hence the rename.
  #[serde(rename = "type")]
  pub ty: String,
}
/// Response wrapper for Hetzner's "get a Volume" endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolumeResponse {
  pub volume: HetznerVolume,
}
/// A Hetzner Cloud volume as returned by the API.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolume {
  /// Name of the Resource. Must be unique per Project.
  pub name: String,
  /// Point in time when the Resource was created (in ISO-8601 format).
  pub created: String,
  /// Filesystem of the Volume if formatted on creation, null if not formatted on creation
  pub format: Option<HetznerVolumeFormat>,
  /// ID of the Volume.
  pub id: i64,
  /// User-defined labels ( key/value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Device path on the file system for the Volume
  pub linux_device: String,
  /// Protection configuration for the Resource.
  pub protection: HetznerProtection,
  /// ID of the Server the Volume is attached to, null if it is not attached at all
  pub server: Option<i64>,
  /// Size in GB of the Volume
  pub size: i64,
  /// Current status of the Volume. Allowed: `creating`, `available`
  pub status: HetznerVolumeStatus,
}
/// Deletion protection settings for a resource.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerProtection {
  /// Prevent the Resource from being deleted.
  pub delete: bool,
}
/// Response wrapper for Hetzner's "list Datacenters" endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterResponse {
  pub datacenters: Vec<HetznerDatacenterDetails>,
}
/// Summary details of a Hetzner datacenter.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterDetails {
  pub id: i64,
  pub name: String,
  // Kept as raw JSON since this crate does not read its fields.
  pub location: serde_json::Map<String, serde_json::Value>,
}
/// Hetzner locations, serialized as the short codes the API
/// expects (e.g. "nbg1").
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum HetznerLocation {
  #[serde(rename = "nbg1")]
  Nuremberg1,
  #[serde(rename = "hel1")]
  Helsinki1,
  #[serde(rename = "fsn1")]
  Falkenstein1,
  #[serde(rename = "ash")]
  Ashburn,
  #[serde(rename = "hil")]
  Hillsboro,
  #[serde(rename = "sin")]
  Singapore,
}
/// Specific Hetzner datacenters, serialized as the codes the API
/// expects (e.g. "nbg1-dc3"). Convertible to the containing
/// [`HetznerLocation`] via `From`.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum HetznerDatacenter {
  #[serde(rename = "nbg1-dc3")]
  Nuremberg1Dc3,
  #[serde(rename = "hel1-dc2")]
  Helsinki1Dc2,
  #[serde(rename = "fsn1-dc14")]
  Falkenstein1Dc14,
  #[serde(rename = "ash-dc1")]
  AshburnDc1,
  #[serde(rename = "hil-dc1")]
  HillsboroDc1,
  #[serde(rename = "sin-dc1")]
  SingaporeDc1,
}
impl From<HetznerDatacenter> for HetznerLocation {
fn from(value: HetznerDatacenter) -> Self {
match value {
HetznerDatacenter::Nuremberg1Dc3 => HetznerLocation::Nuremberg1,
HetznerDatacenter::Helsinki1Dc2 => HetznerLocation::Helsinki1,
HetznerDatacenter::Falkenstein1Dc14 => {
HetznerLocation::Falkenstein1
}
HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn,
HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro,
HetznerDatacenter::SingaporeDc1 => HetznerLocation::Singapore,
}
}
}
/// Filesystem a Volume may be formatted with on creation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeFormat {
  Xfs,
  Ext4,
}
/// Volume lifecycle status reported by the API.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeStatus {
  Creating,
  Available,
}
/// Server lifecycle status reported by the API.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerServerStatus {
  Running,
  Initializing,
  Starting,
  Stopping,
  Off,
  Deleting,
  Migrating,
  Rebuilding,
  Unknown,
}
/// Status of an asynchronous Hetzner action.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerActionStatus {
  Running,
  Success,
  Error,
}
/// Hetzner server plans, serialized as the plan codes the API expects
/// (e.g. "cpx11"). Variant names spell out cores / RAM / disk so the
/// plan's hardware is readable at the call site.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
#[allow(clippy::enum_variant_names)]
pub enum HetznerServerType {
  // Shared
  #[serde(rename = "cpx11")]
  SharedAmd2Core2Ram40Disk,
  #[serde(rename = "cax11")]
  SharedArm2Core4Ram40Disk,
  #[serde(rename = "cx22")]
  SharedIntel2Core4Ram40Disk,
  #[serde(rename = "cpx21")]
  SharedAmd3Core4Ram80Disk,
  #[serde(rename = "cax21")]
  SharedArm4Core8Ram80Disk,
  #[serde(rename = "cx32")]
  SharedIntel4Core8Ram80Disk,
  #[serde(rename = "cpx31")]
  SharedAmd4Core8Ram160Disk,
  #[serde(rename = "cax31")]
  SharedArm8Core16Ram160Disk,
  #[serde(rename = "cx42")]
  SharedIntel8Core16Ram160Disk,
  #[serde(rename = "cpx41")]
  SharedAmd8Core16Ram240Disk,
  #[serde(rename = "cax41")]
  SharedArm16Core32Ram320Disk,
  #[serde(rename = "cx52")]
  SharedIntel16Core32Ram320Disk,
  #[serde(rename = "cpx51")]
  SharedAmd16Core32Ram360Disk,
  // Dedicated
  #[serde(rename = "ccx13")]
  DedicatedAmd2Core8Ram80Disk,
  #[serde(rename = "ccx23")]
  DedicatedAmd4Core16Ram160Disk,
  #[serde(rename = "ccx33")]
  DedicatedAmd8Core32Ram240Disk,
  #[serde(rename = "ccx43")]
  DedicatedAmd16Core64Ram360Disk,
  #[serde(rename = "ccx53")]
  DedicatedAmd32Core128Ram600Disk,
  #[serde(rename = "ccx63")]
  DedicatedAmd48Core192Ram960Disk,
}

View File

@@ -0,0 +1,75 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerDatacenter, HetznerLocation, HetznerServer,
HetznerServerType,
};
/// Request body for Hetzner's create-server endpoint.
#[derive(Debug, Clone, Serialize)]
pub struct CreateServerBody {
  /// Name of the Server to create (must be unique per Project and a valid hostname as per RFC 1123)
  pub name: String,
  /// Auto-mount Volumes after attach
  #[serde(skip_serializing_if = "Option::is_none")]
  pub automount: Option<bool>,
  /// ID or name of Datacenter to create Server in (must not be used together with location)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub datacenter: Option<HetznerDatacenter>,
  /// ID or name of Location to create Server in (must not be used together with datacenter)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub location: Option<HetznerLocation>,
  /// Firewalls which should be applied on the Server's public network interface at creation time
  pub firewalls: Vec<Firewall>,
  /// ID or name of the Image the Server is created from
  pub image: String,
  /// User-defined labels (key-value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Network IDs which should be attached to the Server private network interface at the creation time
  pub networks: Vec<i64>,
  /// ID of the Placement Group the server should be in
  #[serde(skip_serializing_if = "Option::is_none")]
  pub placement_group: Option<i64>,
  /// Public Network options
  pub public_net: PublicNet,
  /// ID or name of the Server type this Server should be created with
  pub server_type: HetznerServerType,
  /// SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time
  pub ssh_keys: Vec<String>,
  /// This automatically triggers a Power on a Server-Server Action after the creation is finished and is returned in the next_actions response object.
  pub start_after_create: bool,
  /// Cloud-Init user data to use during Server creation. This field is limited to 32KiB.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub user_data: Option<String>,
  /// Volume IDs which should be attached to the Server at the creation time. Volumes must be in the same Location.
  pub volumes: Vec<i64>,
}
/// Firewall to apply to the new Server's public interface.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct Firewall {
  /// ID of the Firewall
  pub firewall: i64,
}
/// Public network options for the new Server.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct PublicNet {
  /// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached.
  pub enable_ipv4: bool,
  /// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached.
  pub enable_ipv6: bool,
  /// ID of the ipv4 Primary IP to use. If omitted and enable_ipv4 is true, a new ipv4 Primary IP will automatically be created.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub ipv4: Option<i64>,
  /// ID of the ipv6 Primary IP to use. If omitted and enable_ipv6 is true, a new ipv6 Primary IP will automatically be created.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub ipv6: Option<i64>,
}
/// Response body of Hetzner's create-server endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct CreateServerResponse {
  pub action: HetznerAction,
  pub next_actions: Vec<HetznerAction>,
  // NOTE(review): presumably only set when no SSH keys were provided
  // — confirm against the Hetzner API docs.
  pub root_password: Option<String>,
  pub server: HetznerServer,
}

View File

@@ -0,0 +1,36 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerLocation, HetznerVolume, HetznerVolumeFormat,
};
/// Request body for Hetzner's create-volume endpoint.
#[derive(Debug, Clone, Serialize)]
pub struct CreateVolumeBody {
  /// Name of the volume
  pub name: String,
  /// Auto-mount Volume after attach. server must be provided.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub automount: Option<bool>,
  /// Format Volume after creation. One of: xfs, ext4
  #[serde(skip_serializing_if = "Option::is_none")]
  pub format: Option<HetznerVolumeFormat>,
  /// User-defined labels (key-value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Location to create the Volume in (can be omitted if Server is specified)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub location: Option<HetznerLocation>,
  /// Server to which to attach the Volume once it's created (Volume will be created in the same Location as the server)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub server: Option<i64>,
  /// Size of the Volume in GB
  pub size: i64,
}
/// Response body of Hetzner's create-volume endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct CreateVolumeResponse {
  pub action: HetznerAction,
  pub next_actions: Vec<HetznerAction>,
  pub volume: HetznerVolume,
}

View File

@@ -0,0 +1,280 @@
use std::{
sync::{Arc, Mutex, OnceLock},
time::Duration,
};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use komodo_client::entities::server_template::hetzner::{
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,
HetznerVolumeFormat,
};
use crate::{
cloud::hetzner::{
common::HetznerServerStatus, create_server::CreateServerBody,
create_volume::CreateVolumeBody,
},
config::core_config,
};
use self::{client::HetznerClient, common::HetznerVolumeStatus};
mod client;
mod common;
mod create_server;
mod create_volume;
/// Lazily-initialized global Hetzner client.
///
/// Returns `None` when no Hetzner API token is configured, so callers
/// can surface a clear "not configured" error.
fn hetzner() -> Option<&'static HetznerClient> {
  static CLIENT: OnceLock<Option<HetznerClient>> = OnceLock::new();
  let client = CLIENT.get_or_init(|| {
    let token = &core_config().hetzner.token;
    if token.is_empty() {
      None
    } else {
      Some(HetznerClient::new(token))
    }
  });
  client.as_ref()
}
/// The minimal info `launch_hetzner_server` returns about the new
/// server: its Hetzner id and the IP address selected for it.
pub struct HetznerServerMinimal {
  pub id: i64,
  pub ip: String,
}
// Seconds between status polls while waiting on volumes / the server.
const POLL_RATE_SECS: u64 = 3;
// Max poll iterations before giving up (100 * 3s = 5 minutes).
const MAX_POLL_TRIES: usize = 100;
/// Launches a Hetzner server from the given template config, returning
/// once the server reports `Running`.
///
/// Steps:
/// 1. Create any configured volumes.
/// 2. Poll until every volume reports `Available`.
/// 3. Create the server with the volumes attached.
/// 4. Poll until the server reports `Running`, then return its id and
///    the public or private IP depending on `use_public_ip`.
///
/// Fixes a typo in the create-server error message
/// ("hetnzer" -> "hetzner").
///
/// NOTE(review): volumes created in step 1 are not cleaned up if a
/// later step fails — they would be left orphaned in Hetzner.
#[instrument]
pub async fn launch_hetzner_server(
  name: &str,
  config: HetznerServerTemplateConfig,
) -> anyhow::Result<HetznerServerMinimal> {
  let hetzner =
    *hetzner().as_ref().context("Hetzner token not configured")?;
  let HetznerServerTemplateConfig {
    image,
    datacenter,
    private_network_ids,
    placement_group,
    enable_public_ipv4,
    enable_public_ipv6,
    firewall_ids,
    server_type,
    ssh_keys,
    user_data,
    use_public_ip,
    labels,
    volumes,
    port: _,
  } = config;
  let datacenter = hetzner_datacenter(datacenter);
  // Create volumes and get their ids
  let mut volume_ids = Vec::new();
  for volume in volumes {
    let body = CreateVolumeBody {
      name: volume.name,
      format: Some(hetzner_format(volume.format)),
      location: Some(datacenter.into()),
      labels: volume.labels,
      size: volume.size_gb,
      automount: None,
      server: None,
    };
    let id = hetzner
      .create_volume(&body)
      .await
      .context("failed to create hetzner volume")?
      .volume
      .id;
    volume_ids.push(id);
  }
  // Make sure volumes are available before continuing. Ids are removed
  // from the shared list as each volume becomes Available; the list is
  // behind Arc<Mutex> because the per-volume status checks run
  // concurrently via join_all.
  let vol_ids_poll = Arc::new(Mutex::new(volume_ids.clone()));
  for _ in 0..MAX_POLL_TRIES {
    if vol_ids_poll.lock().unwrap().is_empty() {
      break;
    }
    tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
    let ids = vol_ids_poll.lock().unwrap().clone();
    let futures = ids.into_iter().map(|id| {
      let vol_ids = vol_ids_poll.clone();
      async move {
        let Ok(res) = hetzner.get_volume(id).await else {
          return;
        };
        if matches!(res.volume.status, HetznerVolumeStatus::Available)
        {
          vol_ids.lock().unwrap().retain(|_id| *_id != id);
        }
      }
    });
    join_all(futures).await;
  }
  if !vol_ids_poll.lock().unwrap().is_empty() {
    return Err(anyhow!("Volumes not ready after poll"));
  }
  let body = CreateServerBody {
    name: name.to_string(),
    automount: None,
    datacenter: Some(datacenter),
    location: None,
    firewalls: firewall_ids
      .into_iter()
      .map(|firewall| create_server::Firewall { firewall })
      .collect(),
    image,
    labels,
    networks: private_network_ids,
    // A placement group id of 0 means "none configured".
    placement_group: (placement_group > 0).then_some(placement_group),
    public_net: create_server::PublicNet {
      enable_ipv4: enable_public_ipv4,
      enable_ipv6: enable_public_ipv6,
      ipv4: None,
      ipv6: None,
    },
    server_type: hetzner_server_type(server_type),
    ssh_keys,
    start_after_create: true,
    // Empty user_data means "no cloud-init payload".
    user_data: (!user_data.is_empty()).then_some(user_data),
    volumes: volume_ids,
  };
  let server_id = hetzner
    .create_server(&body)
    .await
    .context("failed to create hetzner server")?
    .server
    .id;
  // Poll until the server reports Running, then pick its IP.
  for _ in 0..MAX_POLL_TRIES {
    tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
    let Ok(res) = hetzner.get_server(server_id).await else {
      continue;
    };
    if matches!(res.server.status, HetznerServerStatus::Running) {
      let ip = if use_public_ip {
        res
          .server
          .public_net
          .ipv4
          .context("instance does not have public ipv4 attached")?
          .ip
      } else {
        res
          .server
          .private_net
          .first()
          .context("no private networks attached")?
          .ip
          .to_string()
      };
      let server = HetznerServerMinimal { id: server_id, ip };
      return Ok(server);
    }
  }
  Err(anyhow!(
    "failed to verify server running after polling status"
  ))
}
fn hetzner_format(
format: HetznerVolumeFormat,
) -> common::HetznerVolumeFormat {
match format {
HetznerVolumeFormat::Xfs => common::HetznerVolumeFormat::Xfs,
HetznerVolumeFormat::Ext4 => common::HetznerVolumeFormat::Ext4,
}
}
fn hetzner_datacenter(
datacenter: HetznerDatacenter,
) -> common::HetznerDatacenter {
match datacenter {
HetznerDatacenter::Nuremberg1Dc3 => {
common::HetznerDatacenter::Nuremberg1Dc3
}
HetznerDatacenter::Helsinki1Dc2 => {
common::HetznerDatacenter::Helsinki1Dc2
}
HetznerDatacenter::Falkenstein1Dc14 => {
common::HetznerDatacenter::Falkenstein1Dc14
}
HetznerDatacenter::AshburnDc1 => {
common::HetznerDatacenter::AshburnDc1
}
HetznerDatacenter::HillsboroDc1 => {
common::HetznerDatacenter::HillsboroDc1
}
HetznerDatacenter::SingaporeDc1 => {
common::HetznerDatacenter::SingaporeDc1
}
}
}
fn hetzner_server_type(
server_type: HetznerServerType,
) -> common::HetznerServerType {
match server_type {
HetznerServerType::SharedAmd2Core2Ram40Disk => {
common::HetznerServerType::SharedAmd2Core2Ram40Disk
}
HetznerServerType::SharedArm2Core4Ram40Disk => {
common::HetznerServerType::SharedArm2Core4Ram40Disk
}
HetznerServerType::SharedIntel2Core4Ram40Disk => {
common::HetznerServerType::SharedIntel2Core4Ram40Disk
}
HetznerServerType::SharedAmd3Core4Ram80Disk => {
common::HetznerServerType::SharedAmd3Core4Ram80Disk
}
HetznerServerType::SharedArm4Core8Ram80Disk => {
common::HetznerServerType::SharedArm4Core8Ram80Disk
}
HetznerServerType::SharedIntel4Core8Ram80Disk => {
common::HetznerServerType::SharedIntel4Core8Ram80Disk
}
HetznerServerType::SharedAmd4Core8Ram160Disk => {
common::HetznerServerType::SharedAmd4Core8Ram160Disk
}
HetznerServerType::SharedArm8Core16Ram160Disk => {
common::HetznerServerType::SharedArm8Core16Ram160Disk
}
HetznerServerType::SharedIntel8Core16Ram160Disk => {
common::HetznerServerType::SharedIntel8Core16Ram160Disk
}
HetznerServerType::SharedAmd8Core16Ram240Disk => {
common::HetznerServerType::SharedAmd8Core16Ram240Disk
}
HetznerServerType::SharedArm16Core32Ram320Disk => {
common::HetznerServerType::SharedArm16Core32Ram320Disk
}
HetznerServerType::SharedIntel16Core32Ram320Disk => {
common::HetznerServerType::SharedIntel16Core32Ram320Disk
}
HetznerServerType::SharedAmd16Core32Ram360Disk => {
common::HetznerServerType::SharedAmd16Core32Ram360Disk
}
HetznerServerType::DedicatedAmd2Core8Ram80Disk => {
common::HetznerServerType::DedicatedAmd2Core8Ram80Disk
}
HetznerServerType::DedicatedAmd4Core16Ram160Disk => {
common::HetznerServerType::DedicatedAmd4Core16Ram160Disk
}
HetznerServerType::DedicatedAmd8Core32Ram240Disk => {
common::HetznerServerType::DedicatedAmd8Core32Ram240Disk
}
HetznerServerType::DedicatedAmd16Core64Ram360Disk => {
common::HetznerServerType::DedicatedAmd16Core64Ram360Disk
}
HetznerServerType::DedicatedAmd32Core128Ram600Disk => {
common::HetznerServerType::DedicatedAmd32Core128Ram600Disk
}
HetznerServerType::DedicatedAmd48Core192Ram960Disk => {
common::HetznerServerType::DedicatedAmd48Core192Ram960Disk
}
}
}

View File

@@ -1,5 +1,8 @@
pub mod aws;
#[allow(unused)]
pub mod hetzner;
#[derive(Debug)]
pub enum BuildCleanupData {
Server { repo_name: String },

View File

@@ -2,14 +2,21 @@ use std::sync::OnceLock;
use anyhow::Context;
use merge_config_files::parse_config_file;
use monitor_client::entities::config::core::{CoreConfig, Env};
use komodo_client::entities::{
config::core::{
AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig,
GithubWebhookAppInstallationConfig, HetznerCredentials,
MongoConfig, OauthCredentials,
},
logger::LogConfig,
};
use serde::Deserialize;
pub fn frontend_path() -> &'static String {
#[derive(Deserialize)]
struct FrontendEnv {
#[serde(default = "default_frontend_path")]
monitor_frontend_path: String,
komodo_frontend_path: String,
}
fn default_frontend_path() -> String {
@@ -19,106 +26,184 @@ pub fn frontend_path() -> &'static String {
static FRONTEND_PATH: OnceLock<String> = OnceLock::new();
FRONTEND_PATH.get_or_init(|| {
let FrontendEnv {
monitor_frontend_path,
komodo_frontend_path,
} = envy::from_env()
.context("failed to parse FrontendEnv")
.unwrap();
monitor_frontend_path
komodo_frontend_path
})
}
pub fn core_config() -> &'static CoreConfig {
static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
CORE_CONFIG.get_or_init(|| {
let env: Env = envy::from_env()
.context("failed to parse core Env")
.unwrap();
let config_path = &env.monitor_config_path;
let mut config =
let env: Env = match envy::from_env()
.context("failed to parse core Env") {
Ok(env) => env,
Err(e) => {
panic!("{e:#?}");
}
};
let config_path = &env.komodo_config_path;
let config =
parse_config_file::<CoreConfig>(config_path.as_str())
.unwrap_or_else(|e| {
panic!("failed at parsing config at {config_path} | {e:#}")
});
let installations = match (env.komodo_github_webhook_app_installations_ids, env.komodo_github_webhook_app_installations_namespaces) {
(Some(ids), Some(namespaces)) => {
if ids.len() != namespaces.len() {
panic!("KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
}
ids
.into_iter()
.zip(namespaces)
.map(|(id, namespace)| GithubWebhookAppInstallationConfig {
id,
namespace
})
.collect()
},
(Some(_), None) | (None, Some(_)) => {
panic!("Got only one of KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided");
}
(None, None) => {
config.github_webhook_app.installations
}
};
// recreating CoreConfig here makes sure we apply all env overrides.
CoreConfig {
title: env.komodo_title.unwrap_or(config.title),
host: env.komodo_host.unwrap_or(config.host),
port: env.komodo_port.unwrap_or(config.port),
passkey: env.komodo_passkey.unwrap_or(config.passkey),
ensure_server: env.komodo_ensure_server.unwrap_or(config.ensure_server),
jwt_secret: env.komodo_jwt_secret.unwrap_or(config.jwt_secret),
jwt_ttl: env
.komodo_jwt_ttl
.unwrap_or(config.jwt_ttl),
repo_directory: env
.komodo_repo_directory
.map(|dir|
dir.parse()
.context("failed to parse env komodo_REPO_DIRECTORY as valid path").unwrap())
.unwrap_or(config.repo_directory),
stack_poll_interval: env
.komodo_stack_poll_interval
.unwrap_or(config.stack_poll_interval),
sync_poll_interval: env
.komodo_sync_poll_interval
.unwrap_or(config.sync_poll_interval),
build_poll_interval: env
.komodo_build_poll_interval
.unwrap_or(config.build_poll_interval),
repo_poll_interval: env
.komodo_repo_poll_interval
.unwrap_or(config.repo_poll_interval),
monitoring_interval: env
.komodo_monitoring_interval
.unwrap_or(config.monitoring_interval),
keep_stats_for_days: env
.komodo_keep_stats_for_days
.unwrap_or(config.keep_stats_for_days),
keep_alerts_for_days: env
.komodo_keep_alerts_for_days
.unwrap_or(config.keep_alerts_for_days),
webhook_secret: env
.komodo_webhook_secret
.unwrap_or(config.webhook_secret),
webhook_base_url: env
.komodo_webhook_base_url
.or(config.webhook_base_url),
transparent_mode: env
.komodo_transparent_mode
.unwrap_or(config.transparent_mode),
ui_write_disabled: env
.komodo_ui_write_disabled
.unwrap_or(config.ui_write_disabled),
enable_new_users: env.komodo_enable_new_users
.unwrap_or(config.enable_new_users),
local_auth: env.komodo_local_auth.unwrap_or(config.local_auth),
google_oauth: OauthCredentials {
enabled: env
.komodo_google_oauth_enabled
.unwrap_or(config.google_oauth.enabled),
id: env
.komodo_google_oauth_id
.unwrap_or(config.google_oauth.id),
secret: env
.komodo_google_oauth_secret
.unwrap_or(config.google_oauth.secret),
},
github_oauth: OauthCredentials {
enabled: env
.komodo_github_oauth_enabled
.unwrap_or(config.github_oauth.enabled),
id: env
.komodo_github_oauth_id
.unwrap_or(config.github_oauth.id),
secret: env
.komodo_github_oauth_secret
.unwrap_or(config.github_oauth.secret),
},
github_webhook_app: GithubWebhookAppConfig {
app_id: env
.komodo_github_webhook_app_app_id
.unwrap_or(config.github_webhook_app.app_id),
pk_path: env
.komodo_github_webhook_app_pk_path
.unwrap_or(config.github_webhook_app.pk_path),
installations,
},
aws: AwsCredentials {
access_key_id: env
.komodo_aws_access_key_id
.unwrap_or(config.aws.access_key_id),
secret_access_key: env
.komodo_aws_secret_access_key
.unwrap_or(config.aws.secret_access_key),
},
hetzner: HetznerCredentials {
token: env
.komodo_hetzner_token
.unwrap_or(config.hetzner.token),
},
mongo: MongoConfig {
uri: env.komodo_mongo_uri.or(config.mongo.uri),
address: env.komodo_mongo_address.or(config.mongo.address),
username: env
.komodo_mongo_username
.or(config.mongo.username),
password: env
.komodo_mongo_password
.or(config.mongo.password),
app_name: env
.komodo_mongo_app_name
.unwrap_or(config.mongo.app_name),
db_name: env
.komodo_mongo_db_name
.unwrap_or(config.mongo.db_name),
},
logging: LogConfig {
level: env
.komodo_logging_level
.unwrap_or(config.logging.level),
stdio: env
.komodo_logging_stdio
.unwrap_or(config.logging.stdio),
otlp_endpoint: env
.komodo_logging_otlp_endpoint
.or(config.logging.otlp_endpoint),
opentelemetry_service_name: env
.komodo_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name),
},
// Overrides
config.title = env.monitor_title.unwrap_or(config.title);
config.host = env.monitor_host.unwrap_or(config.host);
config.port = env.monitor_port.unwrap_or(config.port);
config.passkey = env.monitor_passkey.unwrap_or(config.passkey);
config.jwt_valid_for =
env.monitor_jwt_valid_for.unwrap_or(config.jwt_valid_for);
config.monitoring_interval = env
.monitor_monitoring_interval
.unwrap_or(config.monitoring_interval);
config.keep_stats_for_days = env
.monitor_keep_stats_for_days
.unwrap_or(config.keep_stats_for_days);
config.keep_alerts_for_days = env
.monitor_keep_alerts_for_days
.unwrap_or(config.keep_alerts_for_days);
config.github_webhook_secret = env
.monitor_github_webhook_secret
.unwrap_or(config.github_webhook_secret);
config.github_webhook_base_url = env
.monitor_github_webhook_base_url
.or(config.github_webhook_base_url);
config.docker_organizations = env
.monitor_docker_organizations
.unwrap_or(config.docker_organizations);
config.logging.level =
env.monitor_logging_level.unwrap_or(config.logging.level);
config.logging.stdio =
env.monitor_logging_stdio.unwrap_or(config.logging.stdio);
config.logging.otlp_endpoint = env
.monitor_logging_otlp_endpoint
.or(config.logging.otlp_endpoint);
config.logging.opentelemetry_service_name = env
.monitor_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name);
config.local_auth =
env.monitor_local_auth.unwrap_or(config.local_auth);
config.github_oauth.enabled = env
.monitor_github_oauth_enabled
.unwrap_or(config.github_oauth.enabled);
config.github_oauth.id = env
.monitor_github_oauth_id
.unwrap_or(config.github_oauth.id);
config.github_oauth.secret = env
.monitor_github_oauth_secret
.unwrap_or(config.github_oauth.secret);
config.google_oauth.enabled = env
.monitor_google_oauth_enabled
.unwrap_or(config.google_oauth.enabled);
config.google_oauth.id = env
.monitor_google_oauth_id
.unwrap_or(config.google_oauth.id);
config.google_oauth.secret = env
.monitor_google_oauth_secret
.unwrap_or(config.google_oauth.secret);
config.mongo.uri = env.monitor_mongo_uri.or(config.mongo.uri);
config.mongo.address =
env.monitor_mongo_address.or(config.mongo.address);
config.mongo.username =
env.monitor_mongo_username.or(config.mongo.username);
config.mongo.password =
env.monitor_mongo_password.or(config.mongo.password);
config.mongo.app_name =
env.monitor_mongo_app_name.unwrap_or(config.mongo.app_name);
config.mongo.db_name =
env.monitor_mongo_db_name.unwrap_or(config.mongo.db_name);
config.aws.access_key_id = env
.monitor_aws_access_key_id
.unwrap_or(config.aws.access_key_id);
config.aws.secret_access_key = env
.monitor_aws_secret_access_key
.unwrap_or(config.aws.secret_access_key);
config
// These can't be overridden on env
secrets: config.secrets,
git_providers: config.git_providers,
docker_registries: config.docker_registries,
aws_ecr_registries: config.aws_ecr_registries,
}
})
}

View File

@@ -1,5 +1,5 @@
use mongo_indexed::{create_index, create_unique_index, Indexed};
use monitor_client::entities::{
use mongo_indexed::{create_index, create_unique_index};
use komodo_client::entities::{
alert::Alert,
alerter::Alerter,
api_key::ApiKey,
@@ -9,13 +9,18 @@ use monitor_client::entities::{
deployment::Deployment,
permission::Permission,
procedure::Procedure,
provider::{DockerRegistryAccount, GitProviderAccount},
repo::Repo,
server::{stats::SystemStatsRecord, Server},
server::Server,
server_template::ServerTemplate,
stack::Stack,
stats::SystemStatsRecord,
sync::ResourceSync,
tag::Tag,
update::Update,
user::User,
user_group::UserGroup,
variable::Variable,
};
use mungos::{
init::MongoBuilder,
@@ -28,6 +33,9 @@ pub struct DbClient {
pub permissions: Collection<Permission>,
pub api_keys: Collection<ApiKey>,
pub tags: Collection<Tag>,
pub variables: Collection<Variable>,
pub git_accounts: Collection<GitProviderAccount>,
pub registry_accounts: Collection<DockerRegistryAccount>,
pub updates: Collection<Update>,
pub alerts: Collection<Alert>,
pub stats: Collection<SystemStatsRecord>,
@@ -40,6 +48,8 @@ pub struct DbClient {
pub procedures: Collection<Procedure>,
pub alerters: Collection<Alerter>,
pub server_templates: Collection<ServerTemplate>,
pub resource_syncs: Collection<ResourceSync>,
pub stacks: Collection<Stack>,
//
pub db: Database,
}
@@ -80,14 +90,18 @@ impl DbClient {
let db = client.database(db_name);
let client = DbClient {
users: User::collection(&db, true).await?,
user_groups: UserGroup::collection(&db, true).await?,
permissions: Permission::collection(&db, true).await?,
api_keys: ApiKey::collection(&db, true).await?,
tags: Tag::collection(&db, true).await?,
updates: Update::collection(&db, true).await?,
alerts: Alert::collection(&db, true).await?,
stats: SystemStatsRecord::collection(&db, true).await?,
users: mongo_indexed::collection(&db, true).await?,
user_groups: mongo_indexed::collection(&db, true).await?,
permissions: mongo_indexed::collection(&db, true).await?,
api_keys: mongo_indexed::collection(&db, true).await?,
tags: mongo_indexed::collection(&db, true).await?,
variables: mongo_indexed::collection(&db, true).await?,
git_accounts: mongo_indexed::collection(&db, true).await?,
registry_accounts: mongo_indexed::collection(&db, true).await?,
updates: mongo_indexed::collection(&db, true).await?,
alerts: mongo_indexed::collection(&db, true).await?,
stats: mongo_indexed::collection(&db, true).await?,
// RESOURCES
servers: resource_collection(&db, "Server").await?,
deployments: resource_collection(&db, "Deployment").await?,
builds: resource_collection(&db, "Build").await?,
@@ -97,13 +111,17 @@ impl DbClient {
procedures: resource_collection(&db, "Procedure").await?,
server_templates: resource_collection(&db, "ServerTemplate")
.await?,
resource_syncs: resource_collection(&db, "ResourceSync")
.await?,
stacks: resource_collection(&db, "Stack").await?,
//
db,
};
Ok(client)
}
}
async fn resource_collection<T>(
async fn resource_collection<T: Send + Sync>(
db: &Database,
collection_name: &str,
) -> anyhow::Result<Collection<T>> {

View File

@@ -1,12 +1,13 @@
use std::sync::{Arc, Mutex};
use anyhow::anyhow;
use monitor_client::{
use komodo_client::{
busy::Busy,
entities::{
build::BuildActionState, deployment::DeploymentActionState,
procedure::ProcedureActionState, repo::RepoActionState,
server::ServerActionState,
server::ServerActionState, stack::StackActionState,
sync::ResourceSyncActionState,
},
};
@@ -21,6 +22,9 @@ pub struct ActionStates {
pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
pub procedure:
Cache<String, Arc<ActionState<ProcedureActionState>>>,
pub resource_sync:
Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,
}
/// Need to be able to check "busy" with write lock acquired.

View File

@@ -1,16 +1,17 @@
use anyhow::{anyhow, Context};
use derive_variants::ExtractVariant;
use futures::future::join_all;
use monitor_client::entities::{
alert::{Alert, AlertData},
use komodo_client::entities::{
alert::{Alert, AlertData, SeverityLevel},
alerter::*,
deployment::DockerContainerState,
server::stats::SeverityLevel,
deployment::DeploymentState,
stack::StackState,
ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use reqwest::StatusCode;
use slack::types::Block;
use crate::state::db_client;
use crate::{config::core_config, state::db_client};
#[instrument]
pub async fn send_alerts(alerts: &[Alert]) {
@@ -18,21 +19,19 @@ pub async fn send_alerts(alerts: &[Alert]) {
return;
}
let alerters = find_collect(
let Ok(alerters) = find_collect(
&db_client().await.alerters,
doc! { "config.params.enabled": true },
doc! { "config.enabled": true },
None,
)
.await;
if let Err(e) = alerters {
.await
.inspect_err(|e| {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
);
)
}) else {
return;
}
let alerters = alerters.unwrap();
};
let handles =
alerts.iter().map(|alert| send_alert(&alerters, alert));
@@ -46,23 +45,49 @@ async fn send_alert(alerters: &[Alerter], alert: &Alert) {
return;
}
let alert_type = alert.data.extract_variant();
let handles = alerters.iter().map(|alerter| async {
match &alerter.config {
AlerterConfig::Slack(SlackAlerterConfig { url, enabled }) => {
if !enabled {
return Ok(());
}
send_slack_alert(url, alert)
.await
.context("failed to send slack alert")
// Don't send if not enabled
if !alerter.config.enabled {
return Ok(());
}
// Don't send if alert type not configured on the alerter
if !alerter.config.alert_types.is_empty()
&& !alerter.config.alert_types.contains(&alert_type)
{
return Ok(());
}
// Don't send if resource is in the blacklist
if alerter.config.except_resources.contains(&alert.target) {
return Ok(());
}
// Don't send if whitelist configured and target is not included
if !alerter.config.resources.is_empty()
&& !alerter.config.resources.contains(&alert.target)
{
return Ok(());
}
match &alerter.config.endpoint {
AlerterEndpoint::Slack(SlackAlerterEndpoint { url }) => {
send_slack_alert(url, alert).await.with_context(|| {
format!(
"failed to send alert to slack alerter {}",
alerter.name
)
})
}
AlerterConfig::Custom(CustomAlerterConfig { url, enabled }) => {
if !enabled {
return Ok(());
}
send_custom_alert(url, alert).await.context(format!(
"failed to send alert to custom alerter at {url}"
))
AlerterEndpoint::Custom(CustomAlerterEndpoint { url }) => {
send_custom_alert(url, alert).await.with_context(|| {
format!(
"failed to send alert to custom alerter {}",
alerter.name
)
})
}
}
});
@@ -86,7 +111,7 @@ async fn send_custom_alert(
.await
.context("failed at post request to alerter")?;
let status = res.status();
if status != StatusCode::OK {
if !status.is_success() {
let text = res
.text()
.await
@@ -105,7 +130,12 @@ async fn send_slack_alert(
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let (text, blocks): (_, Option<_>) = match &alert.data {
AlertData::ServerUnreachable { name, region, .. } => {
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
match alert.level {
SeverityLevel::Ok => {
@@ -122,10 +152,18 @@ async fn send_slack_alert(
SeverityLevel::Critical => {
let text =
format!("{level} | *{name}*{region} is *unreachable* ❌");
let err = err
.as_ref()
.map(|e| format!("\nerror: {e:#?}"))
.unwrap_or_default();
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} is *unreachable* ❌"
"*{name}*{region} is *unreachable* ❌{err}"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
@@ -134,89 +172,227 @@ async fn send_slack_alert(
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
..
} => {
let region = fmt_region(region);
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨");
let blocks = vec![
Block::header(format!("{level} 🚨")),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%*");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text =
format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿 🚨"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
}
}
AlertData::ContainerStateChange {
name,
server_name,
from,
to,
id,
..
} => {
let to = fmt_docker_container_state(to);
let text = format!("📦 container *{name}* is now {to}");
let text = format!("📦 Container *{name}* is now {to}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: {server_name}\nprevious: {from}"
"server: {server_name}\nprevious: {from}",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
)),
];
(text, blocks.into())
}
AlertData::AwsBuilderTerminationFailed { instance_id } => {
AlertData::StackStateChange {
name,
server_name,
from,
to,
id,
..
} => {
let to = fmt_stack_state(to);
let text = format!("🥞 Stack *{name}* is now {to}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: {server_name}\nprevious: {from}",
)),
Block::section(resource_link(
ResourceTargetVariant::Stack,
id,
)),
];
(text, blocks.into())
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
let text = format!(
"{level} | Failed to terminated AWS builder instance"
"{level} | Failed to terminated AWS builder instance "
);
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("instance id: {instance_id}")),
Block::section(format!(
"instance id: *{instance_id}*\n{message}"
)),
];
(text, blocks.into())
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let text =
format!("{level} | Pending resource sync updates on {name}");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"sync id: *{id}*\nsync name: *{name}*",
)),
Block::section(resource_link(
ResourceTargetVariant::ResourceSync,
id,
)),
];
(text, blocks.into())
}
AlertData::BuildFailed { id, name, version } => {
let text = format!("{level} | Build {name} has failed");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"build id: *{id}*\nbuild name: *{name}*\nversion: v{version}",
)),
Block::section(resource_link(ResourceTargetVariant::Build, id))
];
(text, blocks.into())
}
AlertData::RepoBuildFailed { id, name } => {
let text =
format!("{level} | Repo build for {name} has failed");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"repo id: *{id}*\nrepo name: *{name}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Repo,
id,
)),
];
(text, blocks.into())
}
@@ -236,14 +412,22 @@ fn fmt_region(region: &Option<String>) -> String {
}
}
fn fmt_docker_container_state(
state: &DockerContainerState,
) -> String {
fn fmt_docker_container_state(state: &DeploymentState) -> String {
match state {
DockerContainerState::Running => String::from("Running ▶️"),
DockerContainerState::Exited => String::from("Exited 🛑"),
DockerContainerState::Restarting => String::from("Restarting 🔄"),
DockerContainerState::NotDeployed => String::from("Not Deployed"),
DeploymentState::Running => String::from("Running ▶️"),
DeploymentState::Exited => String::from("Exited 🛑"),
DeploymentState::Restarting => String::from("Restarting 🔄"),
DeploymentState::NotDeployed => String::from("Not Deployed"),
_ => state.to_string(),
}
}
/// Human-readable label (with emoji) for a stack state, used in alert text.
fn fmt_stack_state(state: &StackState) -> String {
  // Give the common states a dedicated label; any other variant
  // falls back to the state's own Display implementation.
  let label = match state {
    StackState::Running => "Running ▶️",
    StackState::Stopped => "Stopped 🛑",
    StackState::Restarting => "Restarting 🔄",
    StackState::Down => "Down ⬇️",
    _ => return state.to_string(),
  };
  label.to_string()
}
@@ -251,7 +435,44 @@ fn fmt_docker_container_state(
fn fmt_level(level: SeverityLevel) -> &'static str {
match level {
SeverityLevel::Critical => "CRITICAL 🚨",
SeverityLevel::Warning => "WARNING 🚨",
SeverityLevel::Warning => "WARNING ‼️",
SeverityLevel::Ok => "OK ✅",
}
}
/// Build an absolute link to the given resource's page in the core UI,
/// by joining the configured host with the resource's route.
fn resource_link(
  resource_type: ResourceTargetVariant,
  id: &str,
) -> String {
  // Map the resource type to its frontend route segment.
  // `System` is not a linkable resource, so reaching it here is a bug.
  let segment = match resource_type {
    ResourceTargetVariant::System => unreachable!(),
    ResourceTargetVariant::Build => "builds",
    ResourceTargetVariant::Builder => "builders",
    ResourceTargetVariant::Deployment => "deployments",
    ResourceTargetVariant::Stack => "stacks",
    ResourceTargetVariant::Server => "servers",
    ResourceTargetVariant::Repo => "repos",
    ResourceTargetVariant::Alerter => "alerters",
    ResourceTargetVariant::Procedure => "procedures",
    ResourceTargetVariant::ServerTemplate => "server-templates",
    ResourceTargetVariant::ResourceSync => "resource-syncs",
  };
  format!("{}/{segment}/{id}", core_config().host)
}

View File

@@ -0,0 +1,49 @@
use async_timing_util::{wait_until_timelength, Timelength};
use komodo_client::{
api::write::RefreshBuildCache, entities::user::build_user,
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
config::core_config,
state::{db_client, State},
};
/// Spawn a background task that periodically refreshes the build cache.
///
/// # Panics
/// Panics at startup if the configured `build_poll_interval` cannot be
/// converted into a [`Timelength`].
pub fn spawn_build_refresh_loop() {
  let interval: Timelength = core_config()
    .build_poll_interval
    .try_into()
    .expect("Invalid build poll interval");
  tokio::spawn(async move {
    // Run once immediately on startup, then on every interval tick.
    refresh_builds().await;
    loop {
      // Second argument is 2000 — presumably an offset in ms; confirm
      // against `wait_until_timelength` docs.
      wait_until_timelength(interval, 2000).await;
      refresh_builds().await;
    }
  });
}
/// Fetch every build from the db and resolve `RefreshBuildCache` for each.
/// All failures are logged as warnings; this function never propagates errors.
async fn refresh_builds() {
  // If the db query fails, log and skip this refresh cycle entirely.
  let Ok(builds) =
    find_collect(&db_client().await.builds, None, None)
      .await
      .inspect_err(|e| {
        warn!("failed to get builds from db in refresh task | {e:#}")
      })
  else {
    return;
  };
  for build in builds {
    // Refresh each build's cache; a single failure is logged but does
    // not stop the remaining builds from being refreshed.
    State
      .resolve(
        RefreshBuildCache { build: build.id },
        build_user().clone(),
      )
      .await
      .inspect_err(|e| {
        warn!("failed to refresh build cache in refresh task | build: {} | {e:#}", build.name)
      })
      .ok();
  }
}

View File

@@ -0,0 +1,211 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use formatting::muted;
use komodo_client::entities::{
builder::{AwsBuilderConfig, Builder, BuilderConfig},
komodo_timestamp,
server::Server,
server_template::aws::AwsServerTemplateConfig,
update::{Log, Update},
Version,
};
use periphery_client::{
api::{self, GetVersionResponse},
PeripheryClient,
};
use crate::{
cloud::{
aws::ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
BuildCleanupData,
},
config::core_config,
helpers::update::update_update,
resource,
};
use super::periphery_client;
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
/// Get a [`PeripheryClient`] for the given builder, together with the
/// [`BuildCleanupData`] needed to tear it down after the build.
///
/// - `BuilderConfig::Server`: connects to an already-registered server.
/// - `BuilderConfig::Aws`: launches a fresh EC2 instance via [`get_aws_builder`].
#[instrument(skip_all, fields(builder_id = builder.id, update_id = update.id))]
pub async fn get_builder_periphery(
  // build: &Build,
  resource_name: String,
  version: Option<Version>,
  builder: Builder,
  update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
  match builder.config {
    BuilderConfig::Server(config) => {
      // A server builder must reference an existing server resource.
      if config.server_id.is_empty() {
        return Err(anyhow!("builder has not configured a server"));
      }
      let server = resource::get::<Server>(&config.server_id).await?;
      let periphery = periphery_client(&server)?;
      Ok((
        periphery,
        BuildCleanupData::Server {
          repo_name: resource_name,
        },
      ))
    }
    BuilderConfig::Aws(config) => {
      get_aws_builder(&resource_name, version, config, update).await
    }
  }
}
/// Launch an EC2 instance for the build and wait for its periphery agent
/// to become reachable.
///
/// Polls the instance up to `BUILDER_POLL_MAX_TRIES` times at
/// `BUILDER_POLL_RATE_SECS` intervals. On success, returns a connected
/// [`PeripheryClient`] plus the cleanup data (instance id + region).
/// On failure, queues termination of the instance and returns the last
/// connection error.
#[instrument(skip_all, fields(resource_name, update_id = update.id))]
async fn get_aws_builder(
  resource_name: &str,
  version: Option<Version>,
  config: AwsBuilderConfig,
  update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
  let start_create_ts = komodo_timestamp();
  // Instance name embeds the build version when one is provided,
  // eg "BUILDER-my-build-v1.2.3".
  let version = version.map(|v| format!("-v{v}")).unwrap_or_default();
  let instance_name = format!("BUILDER-{resource_name}{version}");
  let Ec2Instance { instance_id, ip } = launch_ec2_instance(
    &instance_name,
    AwsServerTemplateConfig::from_builder_config(&config),
  )
  .await?;
  info!("ec2 instance launched");
  // Record the launch parameters on the update so they show in the UI.
  let log = Log {
    stage: "start build instance".to_string(),
    success: true,
    stdout: start_aws_builder_log(&instance_id, &ip, &config),
    start_ts: start_create_ts,
    end_ts: komodo_timestamp(),
    ..Default::default()
  };
  update.logs.push(log);
  update_update(update.clone()).await?;
  let periphery_address = format!("http://{ip}:{}", config.port);
  let periphery =
    PeripheryClient::new(&periphery_address, &core_config().passkey);
  let start_connect_ts = komodo_timestamp();
  // Holds the latest connection attempt result. Initialized with a
  // placeholder Ok that is only ever observed if the loop body never runs.
  let mut res = Ok(GetVersionResponse {
    version: String::new(),
  });
  for _ in 0..BUILDER_POLL_MAX_TRIES {
    let version = periphery
      .request(api::GetVersion {})
      .await
      .context("failed to reach periphery client on builder");
    if let Ok(GetVersionResponse { version }) = &version {
      // Periphery responded: log the connection and hand back the client.
      let connect_log = Log {
        stage: "build instance connected".to_string(),
        success: true,
        stdout: format!(
          "established contact with periphery on builder\nperiphery version: v{}",
          version
        ),
        start_ts: start_connect_ts,
        end_ts: komodo_timestamp(),
        ..Default::default()
      };
      update.logs.push(connect_log);
      update_update(update.clone()).await?;
      return Ok((
        periphery,
        BuildCleanupData::Aws {
          instance_id,
          region: config.region,
        },
      ));
    }
    res = version;
    tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
      .await;
  }
  // Spawn terminate task in failure case (if loop is passed without return)
  tokio::spawn(async move {
    let _ =
      terminate_ec2_instance_with_retry(config.region, &instance_id)
        .await;
  });
  // Unwrap is safe, only way to get here is after check Ok / early return, so it must be err
  Err(
    res.err().unwrap().context(
      "failed to start usable builder. terminating instance.",
    ),
  )
}
/// Clean up after a build depending on the builder type.
///
/// - Server builder: ask the periphery to delete the cloned repo.
/// - Aws builder: queue termination of the EC2 instance and log it
///   on the update. Both paths are best-effort; errors are ignored.
#[instrument(skip(periphery, update))]
pub async fn cleanup_builder_instance(
  periphery: PeripheryClient,
  cleanup_data: BuildCleanupData,
  update: &mut Update,
) {
  match cleanup_data {
    BuildCleanupData::Server { repo_name } => {
      // Best effort: a failed delete is silently ignored.
      let _ = periphery
        .request(api::git::DeleteRepo { name: repo_name })
        .await;
    }
    BuildCleanupData::Aws {
      instance_id,
      region,
    } => {
      // Terminate in a background task so cleanup never blocks the
      // caller. Clone the id because the original is still needed for
      // the log message below.
      let _instance_id = instance_id.clone();
      tokio::spawn(async move {
        let _ =
          terminate_ec2_instance_with_retry(region, &_instance_id)
            .await;
      });
      update.push_simple_log(
        "terminate instance",
        format!("termination queued for instance id {instance_id}"),
      );
    }
  }
}
/// Render the launch parameters of an AWS builder instance as a
/// multi-line, human-readable log body ("label: value" per line).
pub fn start_aws_builder_log(
  instance_id: &str,
  ip: &str,
  config: &AwsBuilderConfig,
) -> String {
  // Destructure the config fields that appear in the log.
  let AwsBuilderConfig {
    ami_id,
    instance_type,
    volume_gb,
    subnet_id,
    assign_public_ip,
    security_group_ids,
    use_public_ip,
    ..
  } = config;
  let security_groups = security_group_ids.join(", ");
  let mut lines = Vec::with_capacity(9);
  lines.push(format!("{}: {instance_id}", muted("instance id")));
  lines.push(format!("{}: {ip}", muted("ip")));
  lines.push(format!("{}: {ami_id}", muted("ami id")));
  lines.push(format!("{}: {instance_type}", muted("instance type")));
  lines.push(format!("{}: {volume_gb} GB", muted("volume size")));
  lines.push(format!("{}: {subnet_id}", muted("subnet id")));
  lines.push(format!("{}: {security_groups}", muted("security groups")));
  lines.push(format!("{}: {assign_public_ip}", muted("assign public ip")));
  lines.push(format!("{}: {use_public_ip}", muted("use public ip")));
  lines.join("\n")
}

View File

@@ -1,6 +1,6 @@
use std::{collections::HashMap, hash::Hash};
use monitor_client::busy::Busy;
use komodo_client::busy::Busy;
use tokio::sync::RwLock;
#[derive(Default)]

View File

@@ -1,6 +1,6 @@
use std::sync::OnceLock;
use monitor_client::entities::update::{Update, UpdateListItem};
use komodo_client::entities::update::{Update, UpdateListItem};
use tokio::sync::{broadcast, Mutex};
/// A channel sending (build_id, update_id)
@@ -12,6 +12,15 @@ pub fn build_cancel_channel(
BUILD_CANCEL_CHANNEL.get_or_init(|| BroadcastChannel::new(100))
}
/// A channel sending (repo_id, update_id)
pub fn repo_cancel_channel(
) -> &'static BroadcastChannel<(String, Update)> {
  // Lazily-initialized process-wide channel, created on first access.
  static REPO_CANCEL_CHANNEL: OnceLock<
    BroadcastChannel<(String, Update)>,
  > = OnceLock::new();
  // Capacity 100 matches the other cancel channels in this module.
  REPO_CANCEL_CHANNEL.get_or_init(|| BroadcastChannel::new(100))
}
pub fn update_channel() -> &'static BroadcastChannel<UpdateListItem> {
static UPDATE_CHANNEL: OnceLock<BroadcastChannel<UpdateListItem>> =
OnceLock::new();

View File

@@ -0,0 +1,222 @@
use std::collections::HashSet;
use anyhow::Context;
use komodo_client::entities::{
update::Update, EnvironmentVar, SystemCommand,
};
use super::query::VariablesAndSecrets;
/// Interpolate global variables, then core secrets, into each environment
/// variable value in place, using `{{ ... }}` (double bracket) syntax.
///
/// Replacer pairs found in each pass are accumulated into
/// `global_replacers` / `secret_replacers` so callers can log what was
/// substituted (secrets only by name — see `add_interp_update_log`).
pub fn interpolate_variables_secrets_into_environment(
  VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
  environment: &mut Vec<EnvironmentVar>,
  global_replacers: &mut HashSet<(String, String)>,
  secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
  for env in environment {
    // Empty values have nothing to interpolate.
    if env.value.is_empty() {
      continue;
    }
    // first pass - global variables
    let (res, more_replacers) = svi::interpolate_variables(
      &env.value,
      variables,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .with_context(|| {
      format!(
        "failed to interpolate global variables into env var '{}'",
        env.variable
      )
    })?;
    global_replacers.extend(more_replacers);
    // second pass - core secrets
    let (res, more_replacers) = svi::interpolate_variables(
      &res,
      secrets,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .with_context(|| {
      format!(
        "failed to interpolate core secrets into env var '{}'",
        env.variable
      )
    })?;
    secret_replacers.extend(more_replacers);
    // set env value with the result
    env.value = res;
  }
  Ok(())
}
/// Interpolate global variables, then core secrets, into each extra arg
/// in place, using `{{ ... }}` (double bracket) syntax.
///
/// Replacer pairs found in each pass are accumulated into
/// `global_replacers` / `secret_replacers` for later logging.
pub fn interpolate_variables_secrets_into_extra_args(
  VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
  extra_args: &mut Vec<String>,
  global_replacers: &mut HashSet<(String, String)>,
  secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
  for arg in extra_args {
    // Empty args have nothing to interpolate.
    if arg.is_empty() {
      continue;
    }
    // first pass - global variables
    let (res, more_replacers) = svi::interpolate_variables(
      arg,
      variables,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .with_context(|| {
      format!(
        "failed to interpolate global variables into extra arg '{arg}'",
      )
    })?;
    global_replacers.extend(more_replacers);
    // second pass - core secrets
    let (res, more_replacers) = svi::interpolate_variables(
      &res,
      secrets,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .with_context(|| {
      format!(
        "failed to interpolate core secrets into extra arg '{arg}'",
      )
    })?;
    secret_replacers.extend(more_replacers);
    // set arg with the result
    *arg = res;
  }
  Ok(())
}
/// Interpolate global variables, then core secrets, into the container
/// command in place, using `{{ ... }}` (double bracket) syntax.
///
/// Replacer pairs found in each pass are accumulated into
/// `global_replacers` / `secret_replacers` for later logging.
pub fn interpolate_variables_secrets_into_container_command(
  VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
  command: &mut String,
  global_replacers: &mut HashSet<(String, String)>,
  secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
  // An empty command has nothing to interpolate.
  if command.is_empty() {
    return Ok(());
  }
  // Pass 1: substitute global variables.
  let (after_variables, variable_replacers) =
    svi::interpolate_variables(
      command,
      variables,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .with_context(|| {
      format!(
        "failed to interpolate global variables into command '{command}'",
      )
    })?;
  global_replacers.extend(variable_replacers);
  // Pass 2: substitute core secrets into the pass-1 output.
  let (after_secrets, found_secret_replacers) =
    svi::interpolate_variables(
      &after_variables,
      secrets,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .with_context(|| {
      format!(
        "failed to interpolate core secrets into command '{command}'",
      )
    })?;
  secret_replacers.extend(found_secret_replacers);
  // Write the fully interpolated command back in place.
  *command = after_secrets;
  Ok(())
}
/// Interpolate global variables, then core secrets, into the
/// `SystemCommand`'s command string in place, using `{{ ... }}`
/// (double bracket) syntax.
///
/// Replacer pairs found in each pass are accumulated into
/// `global_replacers` / `secret_replacers` for later logging.
pub fn interpolate_variables_secrets_into_system_command(
  VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
  command: &mut SystemCommand,
  global_replacers: &mut HashSet<(String, String)>,
  secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
  // An empty command has nothing to interpolate.
  if command.command.is_empty() {
    return Ok(());
  }
  // first pass - global variables
  let (res, more_replacers) = svi::interpolate_variables(
    &command.command,
    variables,
    svi::Interpolator::DoubleBrackets,
    false,
  )
  .with_context(|| {
    format!(
      "failed to interpolate global variables into command '{}'",
      command.command
    )
  })?;
  global_replacers.extend(more_replacers);
  // second pass - core secrets
  let (res, more_replacers) = svi::interpolate_variables(
    &res,
    secrets,
    svi::Interpolator::DoubleBrackets,
    false,
  )
  .with_context(|| {
    format!(
      "failed to interpolate core secrets into command '{}'",
      command.command
    )
  })?;
  secret_replacers.extend(more_replacers);
  // set command with the result
  command.command = res;
  Ok(())
}
/// Attach logs to the update describing what was interpolated.
///
/// Global variables are shown with their values; secrets are listed
/// by name only so their values never appear in logs.
pub fn add_interp_update_log(
  update: &mut Update,
  global_replacers: &HashSet<(String, String)>,
  secret_replacers: &HashSet<(String, String)>,
) {
  // Variables are safe to display alongside their interpolated values.
  if !global_replacers.is_empty() {
    let mut lines = Vec::with_capacity(global_replacers.len());
    for (value, variable) in global_replacers {
      lines.push(format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"));
    }
    update.push_simple_log(
      "interpolate global variables",
      lines.join("\n"),
    );
  }
  // Secrets must never leak their values; log only the names.
  if !secret_replacers.is_empty() {
    let mut lines = Vec::with_capacity(secret_replacers.len());
    for (_, variable) in secret_replacers {
      lines.push(format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"));
    }
    update.push_simple_log(
      "interpolate core secrets",
      lines.join("\n"),
    );
  }
}

View File

@@ -1,28 +1,51 @@
use std::time::Duration;
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use monitor_client::entities::{
permission::{Permission, PermissionLevel, UserTarget},
server::Server,
update::ResourceTarget,
user::User,
use futures::future::join_all;
use mongo_indexed::Document;
use komodo_client::{
api::write::CreateServer,
entities::{
komodo_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::{PartialServerConfig, Server},
sync::ResourceSync,
update::Log,
user::{system_user, User},
ResourceTarget,
},
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId, to_document, Bson},
};
use mungos::mongodb::bson::doc;
use periphery_client::PeripheryClient;
use rand::{thread_rng, Rng};
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use resolver_api::Resolve;
use crate::{config::core_config, state::db_client};
use crate::{
config::core_config,
resource,
state::{db_client, State},
};
pub mod action_state;
pub mod alert;
pub mod build;
pub mod builder;
pub mod cache;
pub mod channel;
pub mod interpolate;
pub mod procedure;
pub mod prune;
pub mod query;
pub mod resource;
pub mod repo;
pub mod stack;
pub mod sync;
pub mod update;
// pub mod resource;
pub fn empty_or_only_spaces(word: &str) -> bool {
if word.is_empty() {
return true;
@@ -39,16 +62,86 @@ pub fn random_duration(min_ms: u64, max_ms: u64) -> Duration {
Duration::from_millis(thread_rng().gen_range(min_ms..max_ms))
}
/// Generate a random alphanumeric (`[0-9a-zA-Z]`) string of the given length.
pub fn random_string(length: usize) -> String {
  let mut rng = thread_rng();
  (0..length)
    .map(|_| char::from(rng.sample(Alphanumeric)))
    .collect()
}
/// Look up the git token for `account_username` on `provider_domain`.
///
/// First checks the db for a matching account, then falls back to the
/// core config. Database-defined accounts take priority.
///
/// Returns `Ok(Some(token))` when a matching account is found, and
/// `Ok(None)` otherwise. The provider's `https` preference is reported
/// through the `on_https_found` callback (not in the return value).
/// Only errors if the db call errors.
pub async fn git_token(
  provider_domain: &str,
  account_username: &str,
  mut on_https_found: impl FnMut(bool),
) -> anyhow::Result<Option<String>> {
  let from_db = db_client()
    .await
    .git_accounts
    .find_one(doc! { "domain": provider_domain, "username": account_username })
    .await
    .context("failed to query db for git provider accounts")?;
  if let Some(account) = from_db {
    on_https_found(account.https);
    return Ok(Some(account.token));
  }
  // Fall back to the providers declared in the core config.
  let Some(provider) = core_config()
    .git_providers
    .iter()
    .find(|provider| provider.domain == provider_domain)
  else {
    return Ok(None);
  };
  // The https preference comes from the matched provider, even if no
  // matching account is found below (mirrors the original behavior).
  on_https_found(provider.https);
  let token = provider
    .accounts
    .iter()
    .find(|account| account.username == account_username)
    .map(|account| account.token.clone());
  Ok(token)
}
/// Look up the docker registry token for `account_username` on
/// `provider_domain`.
///
/// First checks db for token, then checks core config.
/// Database-defined accounts take priority.
/// Only errors if db call errors.
pub async fn registry_token(
  provider_domain: &str,
  account_username: &str,
) -> anyhow::Result<Option<String>> {
  let from_db = db_client()
    .await
    .registry_accounts
    .find_one(doc! { "domain": provider_domain, "username": account_username })
    .await
    .context("failed to query db for docker registry accounts")?;
  if let Some(account) = from_db {
    return Ok(Some(account.token));
  }
  // Fall back to the registries declared in the core config.
  let token = core_config()
    .docker_registries
    .iter()
    .find(|provider| provider.domain == provider_domain)
    .and_then(|provider| {
      provider
        .accounts
        .iter()
        .find(|account| account.username == account_username)
    })
    .map(|account| account.token.clone());
  Ok(token)
}
#[instrument]
pub async fn remove_from_recently_viewed<T>(
resource: T,
) -> anyhow::Result<()>
pub async fn remove_from_recently_viewed<T>(resource: T)
where
T: Into<ResourceTarget> + std::fmt::Debug,
{
let resource: ResourceTarget = resource.into();
let (ty, id) = resource.extract_variant_id();
db_client()
if let Err(e) = db_client()
.await
.users
.update_many(
@@ -61,13 +154,12 @@ where
}
}
},
None,
)
.await
.context(
"failed to remove resource from users recently viewed",
)?;
Ok(())
.context("failed to remove resource from users recently viewed")
{
warn!("{e:#}");
}
}
//
@@ -103,17 +195,161 @@ pub async fn create_permission<T>(
if let Err(e) = db_client()
.await
.permissions
.insert_one(
Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
},
None,
)
.insert_one(Permission {
id: Default::default(),
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
})
.await
{
error!("failed to create permission for {target:?} | {e:#}");
};
}
/// Flattens a document only one level deep.
///
/// eg `{ config: { label: "yes", thing: { field1: "ok", field2: "ok" } } }` ->
/// `{ "config.label": "yes", "config.thing": { field1: "ok", field2: "ok" } }`
pub fn flatten_document(doc: Document) -> Document {
  let mut flattened = Document::new();
  for (key, value) in doc {
    match value {
      // Expand top-level sub-documents into dotted keys.
      Bson::Document(inner) => {
        for (inner_key, inner_value) in inner {
          flattened.insert(format!("{key}.{inner_key}"), inner_value);
        }
      }
      // Anything else (including documents nested below the first
      // level) is carried over untouched.
      other => {
        flattened.insert(key, other);
      }
    }
  }
  flattened
}
/// One-time cleanup run at Core startup.
///
/// Runs both cleanup tasks concurrently; each task logs its own
/// failures and never propagates an error.
pub async fn startup_cleanup() {
  tokio::join!(
    startup_in_progress_update_cleanup(),
    startup_open_alert_cleanup(),
  );
}
/// Run on startup, as no updates should be in progress on startup
async fn startup_in_progress_update_cleanup() {
let log = Log::error(
"Komodo shutdown",
String::from("Komodo shutdown during execution. If this is a build, the builder may not have been terminated.")
);
// This static log won't fail to serialize, unwrap ok.
let log = to_document(&log).unwrap();
if let Err(e) = db_client()
.await
.updates
.update_many(
doc! { "status": "InProgress" },
doc! {
"$set": {
"status": "Complete",
"success": false,
},
"$push": {
"logs": log
}
},
)
.await
{
error!("failed to cleanup in progress updates on startup | {e:#}")
}
}
/// Run on startup, ensure open alerts pointing to invalid resources are closed.
async fn startup_open_alert_cleanup() {
let db = db_client().await;
let Ok(alerts) =
find_collect(&db.alerts, doc! { "resolved": false }, None)
.await
.inspect_err(|e| {
error!(
"failed to list all alerts for startup open alert cleanup | {e:?}"
)
})
else {
return;
};
let futures = alerts.into_iter().map(|alert| async move {
match alert.target {
ResourceTarget::Server(id) => {
resource::get::<Server>(&id)
.await
.is_err()
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
}
ResourceTarget::ResourceSync(id) => {
resource::get::<ResourceSync>(&id)
.await
.is_err()
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
}
// No other resources should have open alerts.
_ => ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok(),
}
});
let to_update_ids = join_all(futures)
.await
.into_iter()
.flatten()
.collect::<Vec<_>>();
if let Err(e) = db
.alerts
.update_many(
doc! { "_id": { "$in": to_update_ids } },
doc! { "$set": {
"resolved": true,
"resolved_ts": komodo_timestamp()
} },
)
.await
{
error!(
"failed to clean up invalid open alerts on startup | {e:#}"
)
}
}
/// Ensures a default server exists with the defined address
pub async fn ensure_server() {
let ensure_server = &core_config().ensure_server;
if ensure_server.is_empty() {
return;
}
let db = db_client().await;
let Ok(server) = db
.servers
.find_one(doc! { "config.address": ensure_server })
.await
.inspect_err(|e| error!("Failed to initialize 'ensure_server'. Failed to query db. {e:?}"))
else {
return;
};
if server.is_some() {
return;
}
if let Err(e) = State
.resolve(
CreateServer {
name: format!("server-{}", random_string(5)),
config: PartialServerConfig {
address: Some(ensure_server.to_string()),
enabled: Some(true),
..Default::default()
},
},
system_user().to_owned(),
)
.await
{
error!("Failed to initialize 'ensure_server'. Failed to CreateServer. {e:?}");
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,16 +1,24 @@
use anyhow::Context;
use async_timing_util::{
unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS,
};
use mungos::mongodb::bson::doc;
use futures::future::join_all;
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api::image::PruneImages;
use crate::{config::core_config, state::db_client};
use super::periphery_client;
pub fn spawn_prune_loop() {
tokio::spawn(async move {
loop {
wait_until_timelength(Timelength::OneDay, 5000).await;
let (stats_res, alerts_res) =
tokio::join!(prune_stats(), prune_alerts());
let (images_res, stats_res, alerts_res) =
tokio::join!(prune_images(), prune_stats(), prune_alerts());
if let Err(e) = images_res {
error!("error in pruning images | {e:#}");
}
if let Err(e) = stats_res {
error!("error in pruning stats | {e:#}");
}
@@ -21,6 +29,35 @@ pub fn spawn_prune_loop() {
});
}
/// Prune images on every server with `auto_prune` enabled.
///
/// Prune requests are issued to all matching servers concurrently;
/// per-server failures are logged and do not fail the overall run.
/// Only errors if the server list cannot be fetched from the db.
async fn prune_images() -> anyhow::Result<()> {
  let servers = find_collect(&db_client().await.servers, None, None)
    .await
    .context("failed to get servers from db")?;
  let prune_futures = servers
    .into_iter()
    // This could be done in the mongo query, but rather have rust type system guarantee this.
    .filter(|server| server.config.auto_prune)
    .map(|server| async move {
      let res = async {
        periphery_client(&server)?.request(PruneImages {}).await
      }
      .await;
      // Carry the server along so failures can be attributed below.
      (res, server)
    });
  for (res, server) in join_all(prune_futures).await {
    if let Err(e) = res {
      error!(
        "failed to prune images on server {} ({}) | {e:#}",
        server.name, server.id
      )
    }
  }
  Ok(())
}
async fn prune_stats() -> anyhow::Result<()> {
if core_config().keep_stats_for_days == 0 {
return Ok(());
@@ -31,12 +68,9 @@ async fn prune_stats() -> anyhow::Result<()> {
let res = db_client()
.await
.stats
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} stats from db", res.deleted_count);
Ok(())
@@ -52,12 +86,9 @@ async fn prune_alerts() -> anyhow::Result<()> {
let res = db_client()
.await
.alerts
.delete_many(
doc! {
"ts": { "$lt": delete_before_ts }
},
None,
)
.delete_many(doc! {
"ts": { "$lt": delete_before_ts }
})
.await?;
info!("deleted {} alerts from db", res.deleted_count);
Ok(())

Some files were not shown because too many files have changed in this diff Show More