Compare commits


285 Commits

Author SHA1 Message Date
mbecker20 9418a6d963 update client to 1.9.0 2024-06-23 02:30:50 -07:00
mbecker20 57646b750f clean up 2024-06-23 02:29:47 -07:00
mbecker20 0d57f9411c can deploy ecr 2024-06-23 02:27:19 -07:00
mbecker20 7d396dd539 clean up ecr 2024-06-23 02:22:14 -07:00
mbecker20 bfe762b71a install unzip 2024-06-23 01:37:12 -07:00
mbecker20 16ede84bac install aws cli core 2024-06-23 01:31:15 -07:00
mbecker20 4524db94db get ecr token using cli 2024-06-23 01:23:56 -07:00
mbecker20 580dab4acd improve error log formatting 2024-06-23 01:02:52 -07:00
mbecker20 645382856a update only flattens one level deep 2024-06-22 23:56:01 -07:00
mbecker20 5c4e6a6dbb select aws config 2024-06-22 23:33:35 -07:00
mbecker20 66810e1efb add method to get availabel aws ecr labels 2024-06-22 23:29:02 -07:00
mbecker20 69a84882f0 1.9.0 2024-06-22 23:06:53 -07:00
mbecker20 41648436a5 default periphery method fields 2024-06-22 22:59:51 -07:00
mbecker20 083a88aa7b implement aws ecr image registry 2024-06-22 22:57:26 -07:00
mbecker20 750f95c90d improve shortcut menu 2024-06-22 18:24:38 -07:00
mbecker20 129f3ecd82 add more kb shortcuts and shortcut menu 2024-06-22 02:56:57 -07:00
mbecker20 1b754f80ab fix double emojis 2024-06-22 01:54:45 -07:00
mbecker20 968a882012 fix alerter table 2024-06-22 01:29:31 -07:00
mbecker20 696ebdb26f label blacklist correctly 2024-06-22 01:25:38 -07:00
mbecker20 8fee04607d imporve slack alerting 2024-06-22 01:10:13 -07:00
mbecker20 6fe250244b add alerter blacklist 2024-06-22 00:30:43 -07:00
mbecker20 b530af0eec send_alerts for sync alert 2024-06-21 23:09:38 -07:00
mbecker20 21e9361079 remove unused 2024-06-21 02:28:35 -07:00
mbecker20 524d2d956b fix alerts usage 2024-06-21 02:23:42 -07:00
mbecker20 aca9633941 add links and errors to slack messages 2024-06-21 01:12:46 -07:00
mbecker20 32e1bd2dda add badges for tag filter shortcuts 2024-06-21 00:15:40 -07:00
mbecker20 cb363d1559 add shift + T and shift + C to manage tags 2024-06-20 23:51:12 -07:00
mbecker20 63eb74b9c8 Add and configure build alerts 2024-06-20 23:41:28 -07:00
mbecker20 bbcc27704f bump rust builder version 2024-06-16 16:00:57 -07:00
mbecker20 0aa9513dd0 1.8.0 2024-06-16 15:36:51 -07:00
mbecker20 26b216b478 add resources page 2024-06-16 15:33:31 -07:00
mbecker20 166299bb57 sync docs 2024-06-16 14:35:09 -07:00
mbecker20 03c47eb3dc remove cli sync 2024-06-16 01:41:54 -07:00
mbecker20 1fcb4ad085 move / update changelog 2024-06-16 01:41:15 -07:00
mbecker20 f51af8fbe1 docs 2024-06-16 01:34:08 -07:00
mbecker20 4a975e1b92 update resource sync docs 2024-06-16 01:33:05 -07:00
mbecker20 ba556e3284 fix doc link 2024-06-16 00:31:23 -07:00
mbecker20 299a326942 log build has new version 2024-06-16 00:20:22 -07:00
mbecker20 a5d4b9aefb add cached results reasons 2024-06-16 00:04:05 -07:00
mbecker20 40b820ae42 add reason to deploy logs 2024-06-15 22:01:14 -07:00
mbecker20 7028bf2996 remove termination_signal for tokio signal 2024-06-15 21:48:54 -07:00
mbecker20 75ebd0e6c0 fix fe cancel logic error 2024-06-15 21:36:26 -07:00
mbecker20 426153df66 try improve toml parse error message 2024-06-15 21:33:53 -07:00
mbecker20 5bd423a6a6 sync deploy new build 2024-06-15 21:15:17 -07:00
mbecker20 c24131d383 nested propogate read resources error 2024-06-15 20:37:29 -07:00
mbecker20 9f54b6c26a 1.8.0. improve env config UI, add sync deploy state management 2024-06-15 20:15:33 -07:00
mbecker20 ab8ae51ece slight more colors 2024-06-15 20:14:25 -07:00
mbecker20 ef2a83ff16 add colors to procedure logs 2024-06-15 20:06:34 -07:00
mbecker20 7872771aee clean up sync log 2024-06-15 19:45:53 -07:00
mbecker20 b12cf858d8 sync deploy logs need \n 2024-06-15 19:36:46 -07:00
mbecker20 38dba91c3a sync deploy accounts for any dependencies in 'after' need deploy 2024-06-15 19:20:45 -07:00
mbecker20 ea8136aa57 add sync deployment state log 2024-06-15 17:31:49 -07:00
mbecker20 f956e12e28 move formatting to shared lib 2024-06-15 17:15:05 -07:00
mbecker20 207ea52b95 add finished log 2024-06-15 17:12:02 -07:00
mbecker20 caf28d3a26 sync deploy 2024-06-15 17:03:16 -07:00
mbecker20 8fff45649d implement sync deployment get updates for view with deploy action 2024-06-15 15:50:10 -07:00
mbecker20 de5df70e11 invert search FE 2024-06-15 00:58:03 -07:00
mbecker20 3df010ac2a read req error debug 2024-06-15 00:54:11 -07:00
mbecker20 2d3beb708e invert logs 2024-06-15 00:28:04 -07:00
mbecker20 1dc22d01c4 improve execute instrumentation 2024-06-15 00:20:28 -07:00
mbecker20 eb029d0408 clone repo to specific directory on host 2024-06-14 23:43:47 -07:00
mbecker20 f926932181 build / deployment env variable / secret selectors 2024-06-14 23:28:08 -07:00
mbecker20 cc96d80c6a string deser filter empty lines 2024-06-14 22:20:39 -07:00
mbecker20 144b49495c string deser can handle empty string 2024-06-14 22:15:02 -07:00
mbecker20 de9354bdc7 frontend manage env with string 2024-06-14 22:10:07 -07:00
mbecker20 38bfee84d7 read resources propogate error 2024-06-14 21:53:13 -07:00
mbecker20 ec33d9fb9e trim incoming value env var string, conversion string, before deserialize 2024-06-14 21:42:59 -07:00
mbecker20 0a66937b1d fix unused liniting 2024-06-14 21:30:10 -07:00
mbecker20 43cc0c3bc1 remove @ in format date 2024-06-14 14:48:22 -07:00
mbecker20 c14b395c70 quick copy variable value 2024-06-12 12:15:29 -07:00
mbecker20 7b8529a7c6 tweak colors 2024-06-12 11:55:06 -07:00
mbecker20 547c089581 update colors 2024-06-12 11:53:39 -07:00
mbecker20 4fe5e461b3 use stroke for icons 2024-06-12 03:48:47 -07:00
mbecker20 edfb873f7c improve error logs 2024-06-12 03:22:51 -07:00
mbecker20 5ef5294c44 remove onkeydown causing redundant create 2024-06-12 03:15:07 -07:00
mbecker20 5d3c50e04f reorder procedure config table 2024-06-12 02:47:41 -07:00
mbecker20 f10efbb5ba add bg to body 2024-06-12 02:39:26 -07:00
mbecker20 39ce98161b add the colors, always plz 2024-06-12 02:21:49 -07:00
mbecker20 cff6e79eee fix omnibar all resource types 2024-06-12 01:46:30 -07:00
mbecker20 dedf22ede8 continue on disabled stage 2024-06-12 01:25:10 -07:00
mbecker20 6955b92a99 add same colors in update 2024-06-12 01:15:39 -07:00
mbecker20 5c63eeab02 better sync coloring 2024-06-12 01:13:33 -07:00
mbecker20 4c14a4ae20 create variable log skip description line if it's empty 2024-06-12 00:39:23 -07:00
mbecker20 29fd856a2d deal with deployment build version 2024-06-11 03:07:56 -07:00
mbecker20 195bdbd94a fix " to \" 2024-06-11 02:14:57 -07:00
mbecker20 298ccd945c improve export dialog sizing 2024-06-11 01:42:06 -07:00
mbecker20 436e4e79e9 toml include ResourceSync 2024-06-11 01:09:37 -07:00
mbecker20 8b8c89d976 1.7.3 procedure stage alias 2024-06-11 00:51:16 -07:00
mbecker20 25c8d25636 1.7.2 default resource config parsing 2024-06-11 00:44:41 -07:00
mbecker20 ea242de2e4 default the config if not exists 2024-06-11 00:34:11 -07:00
mbecker20 be03547407 reorder struct fields for improved toml 2024-06-11 00:04:20 -07:00
mbecker20 9c0d28b311 allow inline arrow up to max length 2024-06-10 23:53:23 -07:00
mbecker20 f269deb99c update toml_pretty 2024-06-10 23:30:17 -07:00
mbecker20 3df8163131 improve procedure toml 2024-06-10 23:14:04 -07:00
mbecker20 33a16a9bd2 need 2 \n 2024-06-10 22:36:17 -07:00
mbecker20 215e7d1bdc update toml_pretty 2024-06-10 22:11:40 -07:00
mbecker20 25e0905c0c fix deserializers 2024-06-10 21:31:17 -07:00
mbecker20 1c07ccea85 bump toml for multiline string 2024-06-10 19:26:01 -07:00
mbecker20 405ec1b8cc bump toml_pretty for fix 2024-06-10 18:58:33 -07:00
mbecker20 4f212bd06f update toml_pretty with skip empty strings 2024-06-10 18:43:53 -07:00
mbecker20 074f4ea2db fix toml 2024-06-10 18:07:05 -07:00
mbecker20 c9abccaf02 build use string serialized version 2024-06-10 17:59:03 -07:00
mbecker20 6428fa6de2 1.7.1 2024-06-10 17:37:22 -07:00
mbecker20 883f54431d custom to toml serializer for api 2024-06-10 17:34:56 -07:00
mbecker20 28dc030e2b custom Vec<EnvVar>, Vec<Conversion> deserializers to support config them as string 2024-06-10 14:39:51 -07:00
mbecker20 145d933e63 pt-2 2024-06-10 01:47:46 -07:00
mbecker20 9772ca1a1c add Resource Sync system user 2024-06-10 01:46:26 -07:00
mbecker20 4059b69201 core auto refreshes all syncs every 5 min 2024-06-09 23:49:02 -07:00
mbecker20 8e175ea5a1 add pending sync alert variant 2024-06-09 23:23:40 -07:00
mbecker20 d931b8b4e7 fix deployment when image_type None 2024-06-09 23:15:52 -07:00
mbecker20 0982800ad2 update client to 1.7.0 2024-06-09 22:47:49 -07:00
mbecker20 4382ad0b3b migrate 1.6 to 1.7 2024-06-09 22:46:21 -07:00
mbecker20 e7891f7870 update docs for ghcr 2024-06-09 21:56:01 -07:00
mbecker20 6bada46841 add export variables / user groups 2024-06-09 21:32:53 -07:00
mbecker20 eae6cbd228 label the image 2024-06-09 20:55:09 -07:00
mbecker20 a0ee6180b2 finish 1.7.0 2024-06-09 19:45:46 -07:00
mbecker20 3ce3de8768 configure registry 2024-06-09 19:34:49 -07:00
mbecker20 6c46993b61 New Monitor logo cr. George Weston 2024-06-09 18:38:58 -07:00
mbecker20 fbd9d14aaa change handler loggin 2024-06-09 15:11:18 -07:00
mbecker20 1011ec60ab rename to ghcr 2024-06-09 14:55:26 -07:00
mbecker20 48e17a7c87 update config example 2024-06-09 03:43:26 -07:00
mbecker20 a94baded55 1.7.0 2024-06-09 03:06:17 -07:00
mbecker20 e97c0873cf get types 2024-06-09 03:05:07 -07:00
mbecker20 43a0b76811 small 2024-06-09 03:04:05 -07:00
mbecker20 2d2577e5ee ghcr 2024-06-09 02:46:57 -07:00
mbecker20 202ac77de3 from on the new types 2024-06-09 02:18:40 -07:00
mbecker20 568c963419 core / periphery support ghcr 2024-06-09 02:01:51 -07:00
mbecker20 5c3294241d add 1.6 build schema for 1.7 migration 2024-06-08 15:35:31 -07:00
mbecker20 648a04be88 add sleep execution for procedure 2024-06-08 14:51:19 -07:00
mbecker20 1b5822f649 custom version deserializer. support string versions 2024-06-08 14:23:26 -07:00
mbecker20 c41a008603 fix variable update 2024-06-08 05:22:32 -07:00
mbecker20 603243b0eb need partial default on alerter enabled 2024-06-08 04:56:52 -07:00
mbecker20 d09ab36696 any sync error shows up in log 2024-06-08 04:34:09 -07:00
mbecker20 ad168c87f7 use approp dialog menus 2024-06-08 04:12:55 -07:00
mbecker20 914f4c6197 seems to work 2024-06-08 03:35:22 -07:00
mbecker20 c73d918e18 no unnecessary user group sync 2024-06-08 02:56:53 -07:00
mbecker20 9d116f56cb sort lists by name 2024-06-08 02:21:32 -07:00
mbecker20 8a8dede5db resource sync state 2024-06-08 02:12:04 -07:00
mbecker20 d2cecf316c add pending update alert 2024-06-08 01:39:18 -07:00
mbecker20 cad1ee123e improv the sync 2024-06-08 00:50:30 -07:00
mbecker20 6aa801b705 lock sync dir access 2024-06-07 22:02:58 -07:00
mbecker20 078ba59002 ensure sync directory exist 2024-06-07 21:02:28 -07:00
mbecker20 5eacb7191b fix the fe errors with most boilerplate 2024-06-07 20:00:01 -07:00
mbecker20 45eafd10b9 finish sync backend? 2024-06-07 19:00:03 -07:00
mbecker20 42c486807c implement resource sync cli 2024-06-07 17:11:58 -07:00
mbecker20 8c31fcff02 backend for resource sync 2024-06-07 03:52:07 -07:00
mbecker20 49f1d40ce8 implement RunSync 2024-06-07 02:43:45 -07:00
mbecker20 bf85e886bd abit more 2024-06-06 03:02:25 -07:00
mbecker20 eda0b233ca implement sync 2024-06-06 02:38:47 -07:00
mbecker20 5efb227851 update ts client response 2024-06-05 23:46:05 -07:00
mbecker20 1a45fffe75 move some libraries out 2024-06-05 23:44:06 -07:00
mbecker20 fa72f2e5ef update execute task handling 2024-06-05 22:50:03 -07:00
mbecker20 c9152db300 unneeded import 2024-06-05 22:42:28 -07:00
mbecker20 25fcca7246 should fix procedure 2024-06-05 22:42:04 -07:00
mbecker20 ac449e38d5 init boilerplate 2024-06-05 17:29:59 -07:00
mbecker20 d6c66948ba skip update in execute task instrument 2024-06-05 16:16:21 -07:00
mbecker20 b6af790aef sort resources in selector 2024-06-05 15:39:47 -07:00
mbecker20 36a49210a0 fix filter by split 2024-06-05 15:16:30 -07:00
mbecker20 d2b2aa0550 its not really distributed 2024-06-05 02:41:08 -07:00
mbecker20 7f4c883416 hide / show to toggle alert area 2024-06-05 01:59:16 -07:00
mbecker20 676fb3c732 common filtering method 2024-06-04 05:50:36 -07:00
mbecker20 17da4bd2fa procedure setconfig on update 2024-06-04 04:46:40 -07:00
mbecker20 b44e57bbf6 improve update date format 2024-06-04 04:36:55 -07:00
mbecker20 6aa5b5faae use same search alg for all command inputs 2024-06-04 04:21:24 -07:00
mbecker20 9565855477 fix fe error 2024-06-04 02:23:53 -07:00
mbecker20 3504c083b4 export all resources toml filter resources by tag 2024-06-04 02:11:07 -07:00
mbecker20 5fdaa9a808 make overflowing tags wrap 2024-06-04 01:45:20 -07:00
mbecker20 ec35b14077 further improve BuildState if cancel. 2024-06-04 01:22:41 -07:00
mbecker20 158f3ad89b fmt update operation with regex everywhere 2024-06-03 16:36:53 -07:00
mbecker20 7257ecbaed version link to docs 2024-06-03 16:12:09 -07:00
mbecker20 a2a94f23ee publish client + cli 1.6.2 2024-06-03 15:03:43 -07:00
mbecker20 03cad5b23b partial config from files first merged onto full config default before diff with remote 2024-06-03 15:01:14 -07:00
mbecker20 2460b5edf7 update log internal scroll 2024-06-03 03:10:35 -07:00
mbecker20 83fdb180aa avoid deployment state change alert involving status Unknown 2024-06-03 03:01:23 -07:00
mbecker20 9b1d32ebdf base64 encode aws user data before send 2024-06-03 00:44:45 -07:00
mbecker20 ea4ae7651c readme 2024-06-02 21:19:46 -07:00
mbecker20 5f6fabd925 1.6.1 pass creds as args cli 2024-06-02 21:17:23 -07:00
mbecker20 38d9495ab1 fix cli readme 2024-06-02 21:03:37 -07:00
mbecker20 46ad5b3953 1.6.0 Improve procedure with multiple stages 2024-06-02 21:00:06 -07:00
mbecker20 e60b817208 improve saving 2024-06-02 20:57:44 -07:00
mbecker20 0ce5248292 improve changes made visibility 2024-06-02 20:54:41 -07:00
mbecker20 050c29f4a3 show when changes made 2024-06-02 20:30:06 -07:00
mbecker20 8580728933 alert config working 2024-06-02 20:15:49 -07:00
mbecker20 3c5868d111 alert refactor 2024-06-02 19:15:13 -07:00
mbecker20 40e1b1ff88 improve build cancel disabled logic to prevent redundant cancels 2024-06-02 17:50:33 -07:00
mbecker20 99641b2e39 improve update toast title 2024-06-02 17:42:15 -07:00
mbecker20 f0e7757eb4 improve validate CancelBuild 2024-06-02 17:39:13 -07:00
mbecker20 f7283b1fc1 update alerter to support type filtering. 2024-06-02 17:16:35 -07:00
mbecker20 771af21eae migrator support migrate permissions 2024-06-02 15:35:36 -07:00
mbecker20 0dda791ec7 fix build not try add_update 2024-06-02 04:56:37 -07:00
mbecker20 bc76b1c07e only push recently viewed if exists 2024-06-02 04:43:01 -07:00
mbecker20 8b537924fb correct execution target passed by name 2024-06-02 04:38:32 -07:00
mbecker20 f5ce3570e4 execute api returns update immediately 2024-06-02 04:14:51 -07:00
mbecker20 f1e51d275c move stages up / down 2024-06-02 02:39:48 -07:00
mbecker20 eaa10d96b5 finish new procedure config 2024-06-02 02:06:01 -07:00
mbecker20 037364068d refresh caches on create / update 2024-06-02 01:07:53 -07:00
mbecker20 2441bc8cbf fix lint 2024-06-02 00:44:47 -07:00
mbecker20 92ac003910 backend for updated procedure schema 2024-06-02 00:36:39 -07:00
mbecker20 693f24763f new deployment / repo from server page 2024-06-01 20:33:38 -07:00
mbecker20 d9d44ceee1 update readme with manual 2024-06-01 19:58:20 -07:00
mbecker20 30ab8ed17b update cli with execute features. 2024-06-01 19:47:46 -07:00
mbecker20 2bf2be54cc bookworm base 2024-05-29 13:09:52 -07:00
mbecker20 b7ea680958 alert table rename Target to Resource 2024-05-29 01:48:32 -07:00
mbecker20 2a56d09f89 improve periphery start command docs 2024-05-29 01:40:45 -07:00
mbecker20 2612f742b2 remove trailing whitespace in error log 2024-05-29 00:22:04 -07:00
mbecker20 29bdf5c71d pretty clone fail message 2024-05-29 00:20:58 -07:00
mbecker20 873d9ea433 builder instance failed reachability adds log that instance will be terminated 2024-05-29 00:16:55 -07:00
mbecker20 717f3afa89 fix build config when not builder 2024-05-28 14:33:52 -07:00
mbecker20 ec31d1af01 fix 2024-05-28 05:35:35 -07:00
mbecker20 9e5c52b9a4 update client version 2024-05-28 05:32:19 -07:00
mbecker20 762873d5be implement ui_write_disabled 2024-05-28 05:30:37 -07:00
mbecker20 67fa512975 core version in topbar 2024-05-28 05:06:39 -07:00
mbecker20 502dd3a4a8 update client version 2024-05-28 04:58:31 -07:00
mbecker20 8c22bdd473 1.5.4 add variable support to monitor cli 2024-05-28 04:57:41 -07:00
mbecker20 ba6801da11 cli much faster 2024-05-28 04:02:34 -07:00
mbecker20 309802093c 1.5.3 add ListFull methods 2024-05-28 03:42:35 -07:00
mbecker20 3d1e3009b3 add ListFull methods 2024-05-28 03:25:50 -07:00
mbecker20 fdc23c2650 improve docs 2024-05-28 03:06:42 -07:00
mbecker20 072ee6834e update dashboard screenshots 2024-05-28 01:44:11 -07:00
mbecker20 bedbf76349 red 2024-05-28 01:40:44 -07:00
mbecker20 e26d1211cc Cloud 2024-05-28 01:38:35 -07:00
mbecker20 0342ee4dd9 Hetzner 2024-05-28 01:38:08 -07:00
mbecker20 669d5c81b4 read. me. 2024-05-28 01:35:49 -07:00
mbecker20 defbab5955 monitor cli 2024-05-28 01:30:57 -07:00
mbecker20 9405295e4a update changelog 2024-05-28 01:23:44 -07:00
mbecker20 28c077ed4c remove hetzner automount 2024-05-26 02:30:56 -07:00
mbecker20 61406c1b00 add back wait for volume 2024-05-26 02:10:34 -07:00
mbecker20 64638730b9 waiting for volumes makes no difference. dont seem to automount 2024-05-26 01:34:14 -07:00
mbecker20 c0942c6d1d remove execute fail message 2024-05-26 00:42:33 -07:00
mbecker20 ff964cd0fe fix updates 2024-05-26 00:38:59 -07:00
mbecker20 d56f632a11 improve unknown server styling 2024-05-26 00:21:43 -07:00
mbecker20 a7f22b6cfb instrument ServerTemplate write api 2024-05-26 00:09:38 -07:00
mbecker20 6053fc1d99 hetzner poll volumes for ready before launch server 2024-05-26 00:04:31 -07:00
mbecker20 573ff1863c 1.5.2 2024-05-25 23:34:14 -07:00
mbecker20 dd4a9b0cb5 add defaults to Hetzner volume 2024-05-25 23:32:16 -07:00
mbecker20 d243cf2da7 all resources search case insensitive 2024-05-25 23:07:54 -07:00
mbecker20 4e06e788ae PushRecentlyViewed and SetLastSeenUpdate should be debug instrument 2024-05-25 21:13:25 -07:00
mbecker20 a0f71f8af5 table search not case sensitive 2024-05-25 21:07:53 -07:00
mbecker20 fcbb75d0c0 update some tracing stuff 2024-05-25 20:46:56 -07:00
mbecker20 0a8419bb13 update client to 1.5.1 2024-05-25 20:38:35 -07:00
mbecker20 40fe76cf27 1.5.1 move routes to /user 2024-05-25 20:36:52 -07:00
mbecker20 5594d3c1d9 add server to repo table / info 2024-05-25 19:45:23 -07:00
mbecker20 b12aeb259f clean up 2024-05-25 18:38:17 -07:00
mbecker20 b121b0ac07 fix remove from recently viewed 2024-05-25 18:34:05 -07:00
mbecker20 a9f1d91b1b update deps 2024-05-25 18:06:49 -07:00
mbecker20 abf48d0243 1.5.0 doc update and add other_data 2024-05-25 17:57:31 -07:00
mbecker20 447690d8bf remove TextUpdateMenu update on enter 2024-05-25 16:58:34 -07:00
mbecker20 a70c0a2697 increase hetzner polling time 2024-05-25 15:49:10 -07:00
mbecker20 0758e6ff81 get ip after instance is running 2024-05-25 15:29:11 -07:00
mbecker20 ea0e059ee1 hetzner response optional parsing 2024-05-25 14:26:26 -07:00
mbecker20 c9e0524794 add repos tab to server page 2024-05-25 14:05:02 -07:00
mbecker20 81ceaf1eae move automount 2024-05-25 13:50:59 -07:00
mbecker20 37c07ff748 only actually add automount if volumes nonempty 2024-05-25 13:43:06 -07:00
mbecker20 62e8943ebe improve client error message 2024-05-25 13:40:05 -07:00
mbecker20 99ccffbc38 configure hetzner template working 2024-05-25 03:09:31 -07:00
mbecker20 84dc29b77f update ts types 2024-05-25 01:37:05 -07:00
mbecker20 81bab4aa50 clean up some unused stuff 2024-05-25 01:35:28 -07:00
mbecker20 9fa2fd0f58 implement hetzner server launch 2024-05-25 01:16:54 -07:00
mbecker20 3745967690 ensure env overrides fully applied 2024-05-24 16:40:15 -07:00
mbecker20 e8cfc13342 implement transparent mode 2024-05-23 02:19:42 -07:00
mbecker20 ec47bb11ee start on hetzner 2024-05-23 01:47:02 -07:00
mbecker20 d008c95853 no destructive update toasts 2024-05-22 04:27:33 -07:00
mbecker20 4986d70506 remove unneded build toasts 2024-05-22 04:23:53 -07:00
mbecker20 1372a5fb39 3s 2024-05-22 04:22:06 -07:00
mbecker20 f54224650f fix update 2024-05-22 04:20:48 -07:00
mbecker20 2eee1459e7 fix ws 2024-05-22 04:02:37 -07:00
mbecker20 5a3fd891c4 huh 2024-05-22 04:00:43 -07:00
mbecker20 ba3f288c2d improve toasts 2024-05-22 03:55:26 -07:00
mbecker20 6d5fd7dc5d improve update table 2024-05-22 03:25:49 -07:00
mbecker20 df3fd7c4e9 update builder ami 2024-05-22 03:15:30 -07:00
mbecker20 395f032ee2 fix update details name 2024-05-22 03:15:23 -07:00
mbecker20 de2bd800c4 update client to 1.4.1 2024-05-22 02:01:40 -07:00
mbecker20 75352a91ff 1.4.1 fix cli - shouldn't send update if no change 2024-05-22 01:59:25 -07:00
mbecker20 9b12270d04 update client published version 2024-05-22 00:29:34 -07:00
mbecker20 7fc378798f fix cli toml patch 2024-05-22 00:28:21 -07:00
mbecker20 3db2c93303 expande resource table status column 2024-05-22 00:25:54 -07:00
mbecker20 150d6562bf improve table with better row sizing 2024-05-21 01:31:46 -07:00
mbecker20 c3b549b051 resource to_list_item should be infallible 2024-05-21 00:46:13 -07:00
mbecker20 931f2bd92d log auto bottom and increase height 2024-05-20 22:56:32 -07:00
mbecker20 6b6324d79c theme toggle indicator 2024-05-20 22:52:25 -07:00
mbecker20 2c65d924f9 dashboard recents 2 cols unless 2xl 2024-05-20 22:46:21 -07:00
mbecker20 dd1fecf190 a little smaller 2024-05-20 22:36:01 -07:00
mbecker20 aa96a37db4 decrease sidebar vertical size 2024-05-20 21:40:14 -07:00
mbecker20 ec9e9638f5 more gap between resources on dashboard 2024-05-20 03:55:58 -07:00
mbecker20 e33019cab8 add prune images to prune loop 2024-05-20 03:55:01 -07:00
360 changed files with 20626 additions and 7660 deletions

465
Cargo.lock generated
View File

@@ -32,16 +32,15 @@ dependencies = [
[[package]]
name = "alerter"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"axum 0.7.5",
"dotenv",
"envy",
"logger",
"monitor_client 1.4.0",
"monitor_client",
"serde",
"termination_signal",
"tokio",
"tracing",
]
@@ -112,9 +111,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.85"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27a4bd113ab6da4cd0f521068a6e2ee1065eab54107266a11835d02c8ec86a37"
checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "async-stream"
@@ -176,9 +175,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "aws-config"
version = "1.4.0"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40ddbfb5db93d62521f47b3f223da0884a2f02741ff54cb9cda192a0e73ba08b"
checksum = "2368fb843e9eec932f7789d64d0e05850f4a79067188c657e572f1f5a7589df0"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -219,9 +218,9 @@ dependencies = [
[[package]]
name = "aws-runtime"
version = "1.2.2"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75588e7ee5e8496eed939adac2035a6dbab9f7eb2acdd9ab2d31856dab6f3955"
checksum = "9a4a5e448145999d7de17bf44a886900ecb834953408dae8aaf90465ce91c1dd"
dependencies = [
"aws-credential-types",
"aws-sigv4",
@@ -242,9 +241,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ec2"
version = "1.42.0"
version = "1.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c68f8f8eae987440aac23ea70cf69c811d998f1ebce70276da754eb776451433"
checksum = "db7385f189e99cae1fbadbfa7e0ed58cd89f655e3b3884b8386fecd9d988b4d5"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -265,10 +264,32 @@ dependencies = [
]
[[package]]
name = "aws-sdk-sso"
version = "1.25.0"
name = "aws-sdk-ecr"
version = "1.33.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fef2d9ca2b43051224ed326ed9960a85e277b7d554a2cd0397e57c0553d86e64"
checksum = "23c7734480f3e434de256544f534cda7ac08742808dd485f64ab86782cbb5c48"
dependencies = [
"aws-credential-types",
"aws-runtime",
"aws-smithy-async",
"aws-smithy-http",
"aws-smithy-json",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"http 0.2.12",
"once_cell",
"regex-lite",
"tracing",
]
[[package]]
name = "aws-sdk-sso"
version = "1.33.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8aee358b755b2738b3ffb8a5b54ee991b28c8a07483a0ff7d49a58305cc2609"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -288,9 +309,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ssooidc"
version = "1.26.0"
version = "1.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c869d1f5c4ee7437b79c3c1664ddbf7a60231e893960cf82b2b299a5ccf2cc5d"
checksum = "1d5ce026f0ae73e06b20be5932150dd0e9b063417fd7c3acf5ca97018b9cbd64"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -310,9 +331,9 @@ dependencies = [
[[package]]
name = "aws-sdk-sts"
version = "1.25.0"
version = "1.33.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e2b4a632a59e4fab7abf1db0d94a3136ad7871aba46bebd1fdb95c7054afcdb"
checksum = "c820248cb02e4ea83630ad2e43d0721cdbccedba5ac902cd0b6fb84d7271f205"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -333,9 +354,9 @@ dependencies = [
[[package]]
name = "aws-sigv4"
version = "1.2.1"
version = "1.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58b56f1cbe6fd4d0c2573df72868f20ab1c125ca9c9dbce17927a463433a2e57"
checksum = "31eed8d45759b2c5fe7fd304dd70739060e9e0de509209036eabea14d0720cce"
dependencies = [
"aws-credential-types",
"aws-smithy-http",
@@ -406,9 +427,9 @@ dependencies = [
[[package]]
name = "aws-smithy-runtime"
version = "1.5.0"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9ac79e9f3a4d576f3cd4a470a0275b138d9e7b11b1cd514a6858ae0a79dd5bb"
checksum = "db83b08939838d18e33b5dbaf1a0f048f28c10bd28071ab7ce6f245451855414"
dependencies = [
"aws-smithy-async",
"aws-smithy-http",
@@ -420,6 +441,7 @@ dependencies = [
"http 0.2.12",
"http-body 0.4.6",
"http-body 1.0.0",
"httparse",
"hyper 0.14.28",
"hyper-rustls",
"once_cell",
@@ -432,9 +454,9 @@ dependencies = [
[[package]]
name = "aws-smithy-runtime-api"
version = "1.6.0"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04ec42c2f5c0e7796a2848dde4d9f3bf8ce12ccbb3d5aa40c52fa0cdd61a1c47"
checksum = "1b570ea39eb95bd32543f6e4032bce172cb6209b9bc8c83c770d08169e875afc"
dependencies = [
"aws-smithy-async",
"aws-smithy-types",
@@ -449,9 +471,9 @@ dependencies = [
[[package]]
name = "aws-smithy-types"
version = "1.1.9"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf98d97bba6ddaba180f1b1147e202d8fe04940403a95a3f826c790f931bbd1"
checksum = "cfe321a6b21f5d8eabd0ade9c55d3d0335f3c3157fc2b3e87f05f34b539e4df5"
dependencies = [
"base64-simd",
"bytes",
@@ -484,15 +506,14 @@ dependencies = [
[[package]]
name = "aws-types"
version = "1.2.1"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a807d90cd50a969b3d95e4e7ad1491fcae13c6e83948d8728363ecc09d66343a"
checksum = "2009a9733865d0ebf428a314440bbe357cc10d0c16d86a8e15d32e9b47c1e80e"
dependencies = [
"aws-credential-types",
"aws-smithy-async",
"aws-smithy-runtime-api",
"aws-smithy-types",
"http 0.2.12",
"rustc_version 0.4.0",
"tracing",
]
@@ -556,7 +577,7 @@ dependencies = [
"sha1",
"sync_wrapper 1.0.1",
"tokio",
"tokio-tungstenite",
"tokio-tungstenite 0.21.0",
"tower",
"tower-layer",
"tower-service",
@@ -781,9 +802,9 @@ dependencies = [
[[package]]
name = "bson"
version = "2.10.0"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4d43b38e074cc0de2957f10947e376a1d88b9c4dbab340b590800cc1b2e066b2"
checksum = "d8a88e82b9106923b5c4d6edfca9e7db958d4e98a478ec115022e81b9b38e2c8"
dependencies = [
"ahash",
"base64 0.13.1",
@@ -867,9 +888,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.4"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0"
checksum = "5db83dced34638ad474f39f250d7fea9598bdd239eaced1bdf45d597da0f433f"
dependencies = [
"clap_builder",
"clap_derive",
@@ -877,9 +898,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.2"
version = "4.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4"
checksum = "f7e204572485eb3fbf28f871612191521df159bc3e15a9f5064c66dba3a8c05f"
dependencies = [
"anstream",
"anstyle",
@@ -889,9 +910,9 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.5.4"
version = "4.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64"
checksum = "c780290ccf4fb26629baa7a1081e68ced113f1d3ec302fa5948f1c381ebf06c6"
dependencies = [
"heck 0.5.0",
"proc-macro2",
@@ -921,6 +942,14 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "command"
version = "1.9.0"
dependencies = [
"monitor_client",
"run_command",
]
[[package]]
name = "convert_case"
version = "0.4.0"
@@ -952,15 +981,6 @@ dependencies = [
"libc",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
@@ -1170,9 +1190,19 @@ dependencies = [
[[package]]
name = "derive_variants"
version = "0.1.3"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4140db2d06b71f45b6c3ab1a30db4c024a930b5b87c207ff8e8c8bfc2f13e461"
checksum = "7197273f187b1adc848e4db0487f6bb2d49e97d8d892e2d23a2fa05d07deee1e"
dependencies = [
"derive_variants_derive",
"thiserror",
]
[[package]]
name = "derive_variants_derive"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bceb8b4ad480f8cf02ae4efb42c95add230544b4239d543cbd9f9141d838581"
dependencies = [
"proc-macro2",
"quote",
@@ -1290,6 +1320,13 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "formatting"
version = "1.9.0"
dependencies = [
"serror",
]
[[package]]
name = "funty"
version = "2.0.0"
@@ -1412,6 +1449,18 @@ version = "0.28.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
[[package]]
name = "git"
version = "1.9.0"
dependencies = [
"anyhow",
"command",
"formatting",
"monitor_client",
"run_command",
"tracing",
]
[[package]]
name = "glob"
version = "0.3.1"
@@ -1462,6 +1511,15 @@ version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hashbrown"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
dependencies = [
"ahash",
]
[[package]]
name = "hashbrown"
version = "0.14.5"
@@ -1948,10 +2006,10 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "logger"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"monitor_client 1.4.0",
"monitor_client",
"opentelemetry",
"opentelemetry-otlp",
"opentelemetry_sdk",
@@ -2017,18 +2075,16 @@ dependencies = [
[[package]]
name = "migrator"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"chrono",
"dotenv",
"envy",
"logger",
"mongo_indexed 0.3.0",
"monitor_client 1.4.0",
"monitor_client",
"mungos",
"serde",
"termination_signal",
"tokio",
"tracing",
]
@@ -2069,19 +2125,6 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "mongo_indexed"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0cd571ac04a8ac4751e6899f404e5d1370ec79f17609ea5a3caa823619b7513"
dependencies = [
"anyhow",
"async-trait",
"mongo_indexed_derive 0.2.2",
"mongodb",
"serde",
]
[[package]]
name = "mongo_indexed"
version = "0.3.0"
@@ -2089,22 +2132,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e837961dc668ca0d6e298066f22c1c9242bdfe7cc473b18f5e3d960f27e6adb"
dependencies = [
"anyhow",
"mongo_indexed_derive 0.3.0",
"mongo_indexed_derive",
"mongodb",
"serde",
]
[[package]]
name = "mongo_indexed_derive"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da28743409e01e8a63877563786b227285423a0f36c3c16e83f6b8226de9990e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "mongo_indexed_derive"
version = "0.3.0"
@@ -2165,13 +2197,14 @@ dependencies = [
[[package]]
name = "monitor_cli"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"clap",
"colored",
"futures",
"monitor_client 1.3.0",
"merge_config_files",
"monitor_client",
"partial_derive2",
"serde",
"serde_json",
@@ -2184,41 +2217,7 @@ dependencies = [
[[package]]
name = "monitor_client"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6349d2caa7828bf9561bf61e2612bfa6541d6ede5c90049756095df74358c3a0"
dependencies = [
"anyhow",
"async_timing_util",
"bollard",
"clap",
"derive_builder",
"derive_default_builder",
"derive_empty_traits",
"derive_variants",
"envy",
"futures",
"mongo_indexed 0.2.2",
"mungos",
"partial_derive2",
"reqwest 0.12.4",
"resolver_api 0.1.9",
"serde",
"serde_json",
"serror",
"strum 0.26.2",
"thiserror",
"tokio",
"tokio-tungstenite",
"tokio-util",
"tracing",
"typeshare",
"uuid",
]
[[package]]
name = "monitor_client"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2231,17 +2230,17 @@ dependencies = [
"derive_variants",
"envy",
"futures",
"mongo_indexed 0.3.0",
"mongo_indexed",
"partial_derive2",
"reqwest 0.12.4",
"resolver_api 1.1.0",
"resolver_api",
"serde",
"serde_json",
"serror",
"strum 0.26.2",
"thiserror",
"tokio",
"tokio-tungstenite",
"tokio-tungstenite 0.23.1",
"tokio-util",
"tracing",
"typeshare",
@@ -2250,42 +2249,50 @@ dependencies = [
[[package]]
name = "monitor_core"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"async_timing_util",
"aws-config",
"aws-sdk-ec2",
"aws-sdk-ecr",
"axum 0.7.5",
"axum-extra",
"base64 0.22.1",
"bcrypt",
"derive_variants",
"dotenv",
"envy",
"formatting",
"futures",
"git",
"hex",
"hmac",
"jwt",
"logger",
"merge_config_files",
"mongo_indexed 0.3.0",
"monitor_client 1.4.0",
"mongo_indexed",
"monitor_client",
"mungos",
"ordered_hash_map",
"parse_csl",
"partial_derive2",
"periphery_client",
"rand",
"reqwest 0.12.4",
"resolver_api 1.1.0",
"resolver_api",
"run_command",
"serde",
"serde_json",
"serror",
"sha2",
"slack_client_rs",
"strum 0.26.2",
"svi",
"termination_signal",
"tokio",
"tokio-util",
"toml",
"toml_pretty",
"tower",
"tower-http",
"tracing",
@@ -2296,7 +2303,7 @@ dependencies = [
[[package]]
name = "monitor_periphery"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"async_timing_util",
@@ -2304,21 +2311,23 @@ dependencies = [
"axum-extra",
"bollard",
"clap",
"command",
"dotenv",
"envy",
"formatting",
"git",
"logger",
"merge_config_files",
"monitor_client 1.4.0",
"monitor_client",
"parse_csl",
"periphery_client",
"resolver_api 1.1.0",
"resolver_api",
"run_command",
"serde",
"serde_json",
"serror",
"svi",
"sysinfo",
"termination_signal",
"tokio",
"tracing",
"uuid",
@@ -2469,9 +2478,9 @@ dependencies = [
[[package]]
name = "opentelemetry"
version = "0.22.0"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "900d57987be3f2aeb70d385fff9b27fb74c5723cc9a52d904d4f9c807a0667bf"
checksum = "1b69a91d4893e713e06f724597ad630f1fa76057a5e1026c0ca67054a9032a76"
dependencies = [
"futures-core",
"futures-sink",
@@ -2479,21 +2488,19 @@ dependencies = [
"once_cell",
"pin-project-lite",
"thiserror",
"urlencoding",
]
[[package]]
name = "opentelemetry-otlp"
version = "0.15.0"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a016b8d9495c639af2145ac22387dcb88e44118e45320d9238fbf4e7889abcb"
checksum = "a94c69209c05319cdf7460c6d4c055ed102be242a0a6245835d7bc42c6ec7f54"
dependencies = [
"async-trait",
"futures-core",
"http 0.2.12",
"opentelemetry",
"opentelemetry-proto",
"opentelemetry-semantic-conventions",
"opentelemetry_sdk",
"prost",
"thiserror",
@@ -2503,9 +2510,9 @@ dependencies = [
[[package]]
name = "opentelemetry-proto"
version = "0.5.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a8fddc9b68f5b80dae9d6f510b88e02396f006ad48cac349411fbecc80caae4"
checksum = "984806e6cf27f2b49282e2a05e288f30594f3dbc74eb7a6e99422bc48ed78162"
dependencies = [
"opentelemetry",
"opentelemetry_sdk",
@@ -2513,24 +2520,18 @@ dependencies = [
"tonic",
]
[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9ab5bd6c42fb9349dcf28af2ba9a0667f697f9bdcca045d39f2cec5543e2910"
[[package]]
name = "opentelemetry_sdk"
version = "0.22.1"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e90c7113be649e31e9a0f8b5ee24ed7a16923b322c3c5ab6367469c049d6b7e"
checksum = "ae312d58eaa90a82d2e627fd86e075cf5230b3f11794e2ed74199ebbe572d4fd"
dependencies = [
"async-trait",
"crossbeam-channel",
"futures-channel",
"futures-executor",
"futures-util",
"glob",
"lazy_static",
"once_cell",
"opentelemetry",
"ordered-float",
@@ -2550,6 +2551,16 @@ dependencies = [
"num-traits",
]
[[package]]
name = "ordered_hash_map"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab0e5f22bf6dd04abd854a8874247813a8fa2c8c1260eba6fbb150270ce7c176"
dependencies = [
"hashbrown 0.13.2",
"serde",
]
[[package]]
name = "outref"
version = "0.5.1"
@@ -2593,18 +2604,18 @@ checksum = "ffa94c2e5674923c67d7f3dfce1279507b191e10eb064881b46ed3e1256e5ca6"
[[package]]
name = "partial_derive2"
version = "0.4.2"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a7b915bd76bc306b7ae6f1b5d99b0434e498ab979ecbd5df119db8a00dab972"
checksum = "6b2bd06fda40521c285c8dfc5f6b90208d586328a295e83332b6166c2b2a4241"
dependencies = [
"partial_derive2_derive",
]
[[package]]
name = "partial_derive2_derive"
version = "0.4.2"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddaac49c0e65bcb207999d2514b10b43d5f2ec2d0fb47b9d875c9a10536a294e"
checksum = "3a506f66d52e40b2385d7b9f776fd5243d6cff16ba79147f859aa4e27d2d27cc"
dependencies = [
"proc-macro2",
"quote",
@@ -2628,12 +2639,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
[[package]]
name = "periphery_client"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"monitor_client 1.4.0",
"monitor_client",
"reqwest 0.12.4",
"resolver_api 1.1.0",
"resolver_api",
"serde",
"serde_json",
"serror",
@@ -2900,20 +2911,6 @@ dependencies = [
"quick-error",
]
[[package]]
name = "resolver_api"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43981e6bc9a85f1072ffd38bcaca5823e3cd7f24bb675fa5e79736a318f1f998"
dependencies = [
"anyhow",
"async-trait",
"resolver_api_derive 0.1.8",
"serde",
"serde_json",
"thiserror",
]
[[package]]
name = "resolver_api"
version = "1.1.0"
@@ -2921,23 +2918,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4e8831e742662de68dbce8ca6243d1f0a1a5a295e1b3fa9ad996f215d1645b5"
dependencies = [
"anyhow",
"resolver_api_derive 1.1.0",
"resolver_api_derive",
"serde",
"serde_json",
"thiserror",
]
[[package]]
name = "resolver_api_derive"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e0af2b29987c0bff183294b58b5b084f16c8f1b77951c7c72b7c70d66cf91ac"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "resolver_api_derive"
version = "1.1.0"
@@ -3162,9 +3148,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "serde"
version = "1.0.202"
version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395"
checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
dependencies = [
"serde_derive",
]
@@ -3180,9 +3166,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.202"
version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838"
checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
dependencies = [
"proc-macro2",
"quote",
@@ -3284,13 +3270,12 @@ dependencies = [
[[package]]
name = "serror"
version = "0.3.4"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6c430d274ef4964c27e7b338fdfed6d400b18ceac80cba827ddd84055f9eca8"
checksum = "a58297fe0d139a2950d2f474c3f5d0614eecc2b90f3bfe3ee28dca36dfcfd78d"
dependencies = [
"anyhow",
"axum 0.7.5",
"axum-extra",
"serde",
"serde_json",
]
@@ -3337,16 +3322,6 @@ dependencies = [
"lazy_static",
]
[[package]]
name = "signal-hook"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801"
dependencies = [
"libc",
"signal-hook-registry",
]
[[package]]
name = "signal-hook-registry"
version = "1.4.2"
@@ -3356,18 +3331,6 @@ dependencies = [
"libc",
]
[[package]]
name = "signal-hook-tokio"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213241f76fb1e37e27de3b6aa1b068a2c333233b59cca6634f634b80a27ecf1e"
dependencies = [
"futures-core",
"libc",
"signal-hook",
"tokio",
]
[[package]]
name = "slab"
version = "0.4.9"
@@ -3616,28 +3579,15 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "termination_signal"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16e2993a3469a2732b1c07df31a0227f2354500779c175f42af4c21693f1ad8f"
dependencies = [
"anyhow",
"futures",
"signal-hook",
"signal-hook-tokio",
"tokio",
]
[[package]]
name = "tests"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"dotenv",
"envy",
"logger",
"monitor_client 1.4.0",
"monitor_client",
"mungos",
"partial_derive2",
"rand",
@@ -3725,9 +3675,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.37.0"
version = "1.38.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787"
checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a"
dependencies = [
"backtrace",
"bytes",
@@ -3754,9 +3704,9 @@ dependencies = [
[[package]]
name = "tokio-macros"
version = "2.2.0"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b"
checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a"
dependencies = [
"proc-macro2",
"quote",
@@ -3803,7 +3753,19 @@ dependencies = [
"futures-util",
"log",
"tokio",
"tungstenite",
"tungstenite 0.21.0",
]
[[package]]
name = "tokio-tungstenite"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd"
dependencies = [
"futures-util",
"log",
"tokio",
"tungstenite 0.23.0",
]
[[package]]
@@ -3822,9 +3784,9 @@ dependencies = [
[[package]]
name = "toml"
version = "0.8.13"
version = "0.8.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba"
checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335"
dependencies = [
"serde",
"serde_spanned",
@@ -3843,9 +3805,9 @@ dependencies = [
[[package]]
name = "toml_edit"
version = "0.22.13"
version = "0.22.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c"
checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38"
dependencies = [
"indexmap 2.2.6",
"serde",
@@ -3854,6 +3816,18 @@ dependencies = [
"winnow",
]
[[package]]
name = "toml_pretty"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd412f79539d72f97ac0ec9905b0178be1be4f255ddc2ae6f1ff3f7b234bb870"
dependencies = [
"ordered_hash_map",
"serde",
"serde_json",
"thiserror",
]
[[package]]
name = "tonic"
version = "0.11.0"
@@ -3984,9 +3958,9 @@ dependencies = [
[[package]]
name = "tracing-opentelemetry"
version = "0.23.0"
version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9be14ba1bbe4ab79e9229f7f89fab8d120b865859f10527f31c033e599d2284"
checksum = "f68803492bf28ab40aeccaecc7021096bd256baf7ca77c3d425d89b35a7be4e4"
dependencies = [
"js-sys",
"once_cell",
@@ -4097,6 +4071,24 @@ dependencies = [
"utf-8",
]
[[package]]
name = "tungstenite"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8"
dependencies = [
"byteorder",
"bytes",
"data-encoding",
"http 1.1.0",
"httparse",
"log",
"rand",
"sha1",
"thiserror",
"utf-8",
]
[[package]]
name = "typed-builder"
version = "0.10.0"
@@ -4174,12 +4166,11 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "update_logger"
version = "1.4.0"
version = "1.9.0"
dependencies = [
"anyhow",
"logger",
"monitor_client 1.4.0",
"termination_signal",
"monitor_client",
"tokio",
"tracing",
]

View File

@@ -3,38 +3,44 @@ resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.4.0"
version = "1.9.0"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/mbecker20/monitor"
homepage = "https://docs.monitor.mogh.tech"
[patch.crates-io]
monitor_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_client = { path = "client/core/rs" }
monitor_client = "1.9.0"
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
logger = { path = "lib/logger" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.3.4", default-features = false }
serror = { version = "0.4.3", default-features = false }
slack = { version = "0.1.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
termination_signal = "0.1.3"
async_timing_util = "0.1.14"
partial_derive2 = "0.4.2"
derive_variants = "0.1.3"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "0.3.0"
resolver_api = "1.1.0"
toml_pretty = "1.1.2"
parse_csl = "0.1.0"
mungos = "0.5.6"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.37.0", features = ["full"] }
tokio = { version = "1.38.0", features = ["full"] }
reqwest = { version = "0.12.4", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
@@ -45,39 +51,41 @@ axum = { version = "0.7.5", features = ["ws", "json"] }
axum-extra = { version = "0.9.3", features = ["typed-header"] }
tower = { version = "0.4.13", features = ["timeout"] }
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
tokio-tungstenite = "0.21.0"
tokio-tungstenite = "0.23.1"
# SER/DE
serde = { version = "1.0.201", features = ["derive"] }
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.203", features = ["derive"] }
strum = { version = "0.26.2", features = ["derive"] }
serde_json = "1.0.116"
toml = "0.8.12"
serde_json = "1.0.117"
toml = "0.8.14"
# ERROR
anyhow = "1.0.83"
thiserror = "1.0.60"
anyhow = "1.0.86"
thiserror = "1.0.61"
# LOGGING
opentelemetry_sdk = { version = "0.22.1", features = ["rt-tokio"] }
opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.18", features = ["json"] }
tracing-opentelemetry = "0.23.0"
opentelemetry-otlp = "0.15.0"
opentelemetry = "0.22.0"
tracing-opentelemetry = "0.24.0"
opentelemetry-otlp = "0.16.0"
opentelemetry = "0.23.0"
tracing = "0.1.40"
# CONFIG
clap = { version = "4.5.4", features = ["derive"] }
clap = { version = "4.5.7", features = ["derive"] }
dotenv = "0.15.0"
envy = "0.4.2"
# CRYPTO
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
urlencoding = "2.1.3"
rand = "0.8.5"
jwt = "0.16.0"
bcrypt = "0.15.1"
base64 = "0.22.1"
hmac = "0.12.1"
sha2 = "0.10.8"
bcrypt = "0.15.1"
rand = "0.8.5"
jwt = "0.16.0"
hex = "0.4.3"
# SYSTEM
@@ -85,11 +93,12 @@ bollard = "0.16.1"
sysinfo = "0.30.12"
# CLOUD
aws-config = "1.3.0"
aws-sdk-ec2 = "1.40.0"
aws-config = "1.5.1"
aws-sdk-ec2 = "1.51.1"
aws-sdk-ecr = "1.33.0"
# MISC
derive_builder = "0.20.0"
typeshare = "1.0.3"
colored = "2.1.0"
bson = "2.10.0"
bson = "2.11.0"

View File

@@ -13,8 +13,6 @@ repository.workspace = true
# local
monitor_client.workspace = true
logger.workspace = true
# mogh
termination_signal.workspace = true
# external
tokio.workspace = true
tracing.workspace = true

View File

@@ -9,7 +9,6 @@ use monitor_client::entities::{
alert::Alert, server::stats::SeverityLevel,
};
use serde::Deserialize;
use termination_signal::tokio::immediate_term_handle;
#[derive(Deserialize)]
struct Env {
@@ -57,13 +56,15 @@ async fn app() -> anyhow::Result<()> {
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let term_signal = immediate_term_handle()?;
let mut term_signal = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::terminate(),
)?;
let app = tokio::spawn(app());
tokio::select! {
res = app => return res?,
_ = term_signal => {},
res = app => return res?,
_ = term_signal.recv() => {},
}
Ok(())
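
For reference, a minimal sketch of what the alerter's entrypoint looks like after this change, assuming the existing `app()` function shown above and tokio's `signal` feature on a Unix target; the old `termination_signal::immediate_term_handle` helper is no longer required:

```rust
use tokio::signal::unix::{signal, SignalKind};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Listen for SIGTERM directly via tokio instead of the
    // termination_signal crate, which this diff removes.
    let mut term_signal = signal(SignalKind::terminate())?;
    let app = tokio::spawn(app());
    tokio::select! {
        res = app => return res?,
        _ = term_signal.recv() => {},
    }
    Ok(())
}
```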

View File

@@ -14,16 +14,14 @@ path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[patch.crates-io]
monitor_client.workspace = true
[dependencies]
# local client
monitor_client = "1.3.0"
# local
monitor_client.workspace = true
# mogh
partial_derive2.workspace = true
# external
tracing-subscriber.workspace = true
merge_config_files.workspace = true
serde_json.workspace = true
futures.workspace = true
tracing.workspace = true

View File

@@ -1,6 +1,6 @@
# Monitor CLI
Monitor CLI is a tool to sync monitor resources and execute file defined procedures.
Monitor CLI is a tool to sync monitor resources and execute operations.
## Install
@@ -10,6 +10,8 @@ cargo install monitor_cli
## Usage
### Credentials
Configure a file `~/.config/monitor/creds.toml` file with contents:
```toml
url = "https://your.monitor.address"
@@ -18,8 +20,13 @@ secret = "YOUR-API-SECRET"
```
Note. You can specify a different creds file by using `--creds ./other/path.toml`.
You can also bypass using any file and pass the information using `--url`, `--key`, `--secret`:
With your creds in place, you can run syncs:
```sh
monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
```
### Run Syncs
```sh
## Sync resources in a single file
@@ -32,9 +39,54 @@ monitor sync ./resources
monitor sync
```
And executions:
#### Manual
```md
Runs syncs on resource files
Usage: monitor sync [OPTIONS] [PATH]
Arguments:
[PATH] The path of the resource folder / file Folder paths will recursively incorporate all the resources it finds under the folder [default: ./resources]
Options:
--delete Will delete any resources that aren't included in the resource files
-h, --help Print help
```
### Run Executions
```sh
## Execute a TOML defined procedure
monitor exec ./execution/execution.toml
```
# Triggers an example build
monitor execute run-build test_build
```
#### Manual
```md
Runs an execution
Usage: monitor execute <COMMAND>
Commands:
none The "null" execution. Does nothing
run-procedure Runs the target procedure. Response: [Update]
run-build Runs the target build. Response: [Update]
deploy Deploys the container for the target deployment. Response: [Update]
start-container Starts the container for the target deployment. Response: [Update]
stop-container Stops the container for the target deployment. Response: [Update]
stop-all-containers Stops all deployments on the target server. Response: [Update]
remove-container Stops and removes the container for the target deployment. Reponse: [Update]
clone-repo Clones the target repo. Response: [Update]
pull-repo Pulls the target repo. Response: [Update]
prune-networks Prunes the docker networks on the target server. Response: [Update]
prune-images Prunes the docker images on the target server. Response: [Update]
prune-containers Prunes the docker containers on the target server. Response: [Update]
help Print this message or the help of the given subcommand(s)
Options:
-h, --help Print help
```
### --yes
You can use `--yes` to avoid any human prompt to continue, for use in automated environments.

View File

@@ -1,83 +0,0 @@
[[build]]
name = "monitor_core"
description = "Public monitor core build"
tags = ["monitor"]
[build.config]
builder_id = "mogh-builder"
repo = "mbecker20/monitor"
branch = "main"
docker_account = "mbecker2020"
build_path = "."
dockerfile_path = "bin/core/Dockerfile"
[[build]]
name = "monitor_core_dev"
description = ""
tags = ["monitor", "dev"]
[build.config]
builder_id = "mogh-builder"
repo = "mbecker20/monitor"
branch = "main"
docker_account = "mbecker2020"
build_path = "."
dockerfile_path = "bin/core/Dockerfile"
[[build]]
name = "monitor_frontend"
description = "standalone hosted frontend for monitor.mogh.tech"
tags = ["monitor", "frontend"]
[build.config]
builder_id = "mogh-builder"
repo = "mbecker20/monitor"
branch = "main"
docker_account = "mbecker2020"
build_path = "."
dockerfile_path = "frontend/Dockerfile"
[[build.config.build_args]]
variable = "VITE_MONITOR_HOST"
value = "https://monitor.api.mogh.tech"
[[build]]
name = "monitor_frontend_dev"
description = "standalone hosted frontend for monitor-dev.mogh.tech"
tags = ["monitor", "frontend"]
[build.config]
builder_id = "mogh-builder"
repo = "mbecker20/monitor"
branch = "main"
docker_account = "mbecker2020"
build_path = "."
dockerfile_path = "frontend/Dockerfile"
[[build.config.build_args]]
variable = "VITE_MONITOR_HOST"
value = "https://monitor-dev.api.mogh.tech"
## BUILDER
[[builder]]
name = "mogh-builder"
description = ""
tags = []
[builder.config]
type = "Aws"
[builder.config.params]
region = "us-east-2"
instance_type = "c5.2xlarge"
volume_gb = 20
port = 8120
ami_id = "ami-0005a05fa63a080ab"
subnet_id = "subnet-02ae5ad480eacc4bc"
security_group_ids = ["sg-049d98c819f9ace58", "sg-006c0ca638af8eb44"]
key_pair_name = "mogh-key"
assign_public_ip = true
use_public_ip = false
github_accounts = []
docker_accounts = []

View File

@@ -1,213 +0,0 @@
## MONITOR PROXY
[[deployment]]
name = "monitor-proxy"
description = "An NGINX proxy for mogh.tech"
tags = ["monitor"]
config.server_id = "monitor-01"
config.network = "host"
config.restart = "on-failure"
config.image.type = "Image"
config.image.params.image = "jc21/nginx-proxy-manager"
[[deployment.config.volumes]]
local = "/data/nginx/data"
container = "/data"
[[deployment.config.volumes]]
local = "/data/nginx/letsencrypt"
container = "/etc/letsencrypt"
## MONITOR MONGO
[[deployment]]
name = "monitor-mongo"
description = ""
tags = ["monitor"]
[deployment.config]
server_id = "monitor-01"
network = "host"
restart = "no"
[deployment.config.image]
type = "Image"
params.image = "mongo"
## MONITOR CORE
[[deployment]]
name = "monitor-core"
description = ""
tags = ["monitor"]
[deployment.config]
server_id = "monitor-01"
network = "host"
restart = "no"
[deployment.config.image]
type = "Image"
params.image = "mbecker2020/monitor_core"
## GRAFANA
[[deployment]]
name = "grafana"
description = ""
tags = ["logging"]
[deployment.config]
server_id = "monitor-01"
network = "host"
restart = "unless-stopped"
extra_args = ["--user root"]
[deployment.config.image]
type = "Image"
params.image = "grafana/grafana"
[[deployment.config.volumes]]
local = "/data/grafana"
container = "/var/lib/grafana"
[[deployment.config.environment]]
variable = "GF_SERVER_HTTP_PORT"
value = "3080"
[[deployment.config.labels]]
variable = "vector"
value = "key-value"
## LOKI
[[deployment]]
name = "loki"
description = ""
tags = ["logging"]
[deployment.config]
server_id = "monitor-01"
network = "host"
restart = "unless-stopped"
extra_args = ["--user root"]
[deployment.config.image]
type = "Image"
params.image = "grafana/loki"
[[deployment.config.volumes]]
local = "/data/loki"
container = "/loki"
[[deployment]]
name = "tempo"
description = ""
tags = ["logging"]
[deployment.config]
server_id = "monitor-01"
network = "host"
restart = "unless-stopped"
command = "-server.http-listen-port=3200 -server.grpc-listen-port=9096 --storage.trace.backend=local --storage.trace.local.path=/tmp/tempo/traces --storage.trace.wal.path=/tmp/tempo/wal"
extra_args = ["--user root"]
[deployment.config.image]
type = "Image"
params.image = "grafana/tempo"
[[deployment.config.volumes]]
local = "/data/tempo"
container = "/tmp/tempo"
[[deployment.config.labels]]
variable = "vector"
value = "key-value"
## VECTOR
[[deployment]]
name = "vector"
description = ""
tags = ["logging"]
[deployment.config]
server_id = "monitor-01"
network = "host"
restart = "unless-stopped"
command = "--config /etc/vector/*.toml"
extra_args = ["--user root"]
[deployment.config.image]
type = "Image"
params.image = "timberio/vector:latest-debian"
[[deployment.config.volumes]]
local = "/home/ubuntu/.config/vector"
container = "/etc/vector"
[[deployment.config.volumes]]
local = "/data/vector"
container = "/var/lib/vector"
[[deployment.config.volumes]]
local = "/var/run/docker.sock"
container = "/var/run/docker.sock"
[[deployment.config.labels]]
variable = "vector"
value = "key-value"
## MONITOR CORE DEV
[[deployment]]
name = "monitor-core-dev"
description = ""
tags = ["monitor", "dev"]
[deployment.config]
server_id = "monitor-01"
redeploy_on_build = true
network = "host"
restart = "no"
[deployment.config.image]
type = "Build"
params.build_id = "monitor_core"
[[deployment.config.volumes]]
local = "/home/ubuntu/.config/monitor/dev.core.config.toml"
container = "/config/config.toml"
[[deployment.config.volumes]]
local = "/data/repos/monitor-dev-frontend/frontend/dist"
container = "/frontend"
[[deployment.config.labels]]
variable = "vector"
value = "rust"
## MONITOR FRONTEND
[[deployment]]
name = "monitor-frontend"
description = ""
tags = ["monitor", "frontend"]
[deployment.config]
server_id = "monitor-01"
redeploy_on_build = true
network = "host"
restart = "unless-stopped"
image.type = "Build"
image.params.build = "monitor_frontend"
## MONITOR DEV FRONTEND
[[deployment]]
name = "monitor-dev-frontend"
description = ""
tags = ["monitor", "dev", "frontend"]
[deployment.config]
server_id = "monitor-01"
redeploy_on_build = true
network = "host"
restart = "unless-stopped"
image.type = "Build"
image.params.build = "monitor_frontend_dev"
[[deployment.config.environment]]
variable = "PORT"
value = "4175"


@@ -1,8 +0,0 @@
[[procedure]]
name = "test-procedure"
description = ""
tags = []
[procedure.config]
procedure_type = "Sequence"
executions = []


@@ -1,37 +0,0 @@
# [[repo]]
# name = "monitor-dev-frontend"
# description = "Used as frontend for monitor-core-dev"
# tags = ["monitor", "dev"]
# [repo.config]
# server_id = "monitor-01"
# repo = "mbecker20/monitor"
# branch = "main"
# github_account = ""
# [repo.config.on_clone]
# path = ""
# command = ""
# [repo.config.on_pull]
# path = "frontend"
# command = "sh on_pull.sh"
[[repo]]
name = "monitor-periphery"
description = ""
tags = ["monitor"]
[repo.config]
server_id = "monitor-01"
repo = "mbecker20/monitor"
branch = "main"
github_account = ""
[repo.config.on_clone]
path = ""
command = ""
[repo.config.on_pull]
path = "."
command = "/root/.cargo/bin/cargo build -p monitor_periphery --release && cp ./target/release/periphery /home/ubuntu/periphery"


@@ -1,51 +0,0 @@
[[server]]
name = "monitor-01"
description = ""
tags = ["monitor"]
[server.config]
address = "http://localhost:8120"
enabled = true
stats_monitoring = true
auto_prune = true
send_unreachable_alerts = true
send_cpu_alerts = true
send_mem_alerts = true
send_disk_alerts = true
region = "us-east-2"
## TEMPLATE
[[server_template]]
name = "mogh-template"
description = ""
tags = []
[server_template.config]
type = "Aws"
[server_template.config.params]
region = "us-east-2"
instance_type = "t3.medium"
ami_id = "ami-0005a05fa63a080ab"
subnet_id = "subnet-02ae5ad480eacc4bc"
key_pair_name = "mogh-key"
assign_public_ip = true
use_public_ip = false
port = 8120
user_data = ""
security_group_ids = ["sg-049d98c819f9ace58", "sg-006c0ca638af8eb44"]
[[server_template.config.params.volumes]]
device_name = "/dev/sda1"
size_gb = 20
volume_type = "gp2"
iops = 0
throughput = 0
[[server_template.config.params.volumes]]
device_name = "/dev/sdb"
size_gb = 10
volume_type = "gp3"
iops = 0
throughput = 0

bin/cli/src/args.rs (new file, +66 lines)

@@ -0,0 +1,66 @@
use clap::{Parser, Subcommand};
use monitor_client::api::execute::Execution;
use serde::Deserialize;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
pub struct CliArgs {
/// Sync or Exec
#[command(subcommand)]
pub command: Command,
/// The path to a creds file.
///
/// Note: If each of `url`, `key` and `secret` are passed,
/// no file is required at this path.
#[arg(long, default_value_t = default_creds())]
pub creds: String,
/// Pass url in args instead of creds file
#[arg(long)]
pub url: Option<String>,
/// Pass api key in args instead of creds file
#[arg(long)]
pub key: Option<String>,
/// Pass api secret in args instead of creds file
#[arg(long)]
pub secret: Option<String>,
/// Always continue on user confirmation prompts.
#[arg(long, short, default_value_t = false)]
pub yes: bool,
}
fn default_creds() -> String {
let home = std::env::var("HOME")
.expect("no HOME env var. cannot get default config path.");
format!("{home}/.config/monitor/creds.toml")
}
#[derive(Debug, Clone, Subcommand)]
pub enum Command {
/// Runs syncs on resource files
Sync {
/// The path of the resource folder / file
/// Folder paths will recursively incorporate all the resources it finds under the folder
#[arg(default_value_t = String::from("./resources"))]
path: String,
/// Will delete any resources that aren't included in the resource files.
#[arg(long, default_value_t = false)]
delete: bool,
},
/// Runs an execution
Execute {
#[command(subcommand)]
execution: Execution,
},
}
#[derive(Debug, Deserialize)]
pub struct CredsFile {
pub url: String,
pub key: String,
pub secret: String,
}
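A minimal sketch of the creds file this struct deserializes, assuming the TOML keys mirror the CredsFile field names and the default path returned by default_creds(); all values are placeholders:

# ~/.config/monitor/creds.toml (placeholder values)
url = "https://monitor.example.com"
key = "YOUR-API-KEY"
secret = "YOUR-API-SECRET"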

bin/cli/src/exec.rs (new file, +130 lines)

@@ -0,0 +1,130 @@
use std::time::Duration;
use colored::Colorize;
use monitor_client::api::execute::Execution;
use crate::{
helpers::wait_for_enter,
state::{cli_args, monitor_client},
};
pub async fn run(execution: Execution) -> anyhow::Result<()> {
if matches!(execution, Execution::None(_)) {
println!("Got 'none' execution. Doing nothing...");
tokio::time::sleep(Duration::from_secs(3)).await;
println!("Finished doing nothing. Exiting...");
std::process::exit(0);
}
println!("\n{}: Execution", "Mode".dimmed());
match &execution {
Execution::None(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunProcedure(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Deploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RemoveContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneNetworks(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneImages(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunSync(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Sleep(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
}
if !cli_args().yes {
wait_for_enter("run execution")?;
}
info!("Running Execution...");
let res = match execution {
Execution::RunProcedure(request) => {
monitor_client().execute(request).await
}
Execution::RunBuild(request) => {
monitor_client().execute(request).await
}
Execution::Deploy(request) => {
monitor_client().execute(request).await
}
Execution::StartContainer(request) => {
monitor_client().execute(request).await
}
Execution::StopContainer(request) => {
monitor_client().execute(request).await
}
Execution::StopAllContainers(request) => {
monitor_client().execute(request).await
}
Execution::RemoveContainer(request) => {
monitor_client().execute(request).await
}
Execution::CloneRepo(request) => {
monitor_client().execute(request).await
}
Execution::PullRepo(request) => {
monitor_client().execute(request).await
}
Execution::PruneNetworks(request) => {
monitor_client().execute(request).await
}
Execution::PruneImages(request) => {
monitor_client().execute(request).await
}
Execution::PruneContainers(request) => {
monitor_client().execute(request).await
}
Execution::RunSync(request) => {
monitor_client().execute(request).await
}
Execution::Sleep(request) => {
let duration =
Duration::from_millis(request.duration_ms as u64);
tokio::time::sleep(duration).await;
println!("Finished sleeping!");
std::process::exit(0)
}
Execution::None(_) => unreachable!(),
};
match res {
Ok(update) => println!("\n{}: {update:#?}", "SUCCESS".green()),
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
}
Ok(())
}


@@ -1,189 +0,0 @@
use std::path::Path;
use anyhow::{anyhow, Context};
use futures::future::join_all;
use monitor_client::api::execute;
use serde::Deserialize;
use strum::Display;
use crate::monitor_client;
pub async fn run_execution(path: &Path) -> anyhow::Result<()> {
let ExecutionFile { name, stages } = crate::parse_toml_file(path)?;
info!("EXECUTION: {name}");
info!("path: {path:?}");
println!("{stages:#?}");
crate::wait_for_enter("EXECUTE")?;
run_stages(stages)
.await
.context("failed during a stage. terminating run.")?;
info!("finished successfully ✅");
Ok(())
}
/// Specifies sequence of stages (build / deploy) on resources
#[derive(Debug, Clone, Deserialize)]
pub struct ExecutionFile {
pub name: String,
#[serde(rename = "stage")]
pub stages: Vec<Stage>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct Stage {
pub name: String,
pub action: ExecutionType,
/// resource names
pub targets: Vec<String>,
}
#[derive(Debug, Clone, Copy, Deserialize, Display)]
#[serde(rename_all = "snake_case")]
#[strum(serialize_all = "snake_case")]
pub enum ExecutionType {
Build,
Deploy,
StartContainer,
StopContainer,
DestroyContainer,
}
pub async fn run_stages(stages: Vec<Stage>) -> anyhow::Result<()> {
for Stage {
name,
action,
targets,
} in stages
{
info!("running {action} stage: {name}... ⏳");
match action {
ExecutionType::Build => {
trigger_builds_in_parallel(&targets).await?;
}
ExecutionType::Deploy => {
redeploy_deployments_in_parallel(&targets).await?;
}
ExecutionType::StartContainer => {
start_containers_in_parallel(&targets).await?
}
ExecutionType::StopContainer => {
stop_containers_in_parallel(&targets).await?
}
ExecutionType::DestroyContainer => {
destroy_containers_in_parallel(&targets).await?;
}
}
info!("finished {action} stage: {name} ✅");
}
Ok(())
}
async fn redeploy_deployments_in_parallel(
deployments: &[String],
) -> anyhow::Result<()> {
let futes = deployments.iter().map(|deployment| async move {
monitor_client()
.execute(execute::Deploy { deployment: deployment.to_string(), stop_signal: None, stop_time: None })
.await
.with_context(|| format!("failed to deploy {deployment}"))
.and_then(|update| {
if update.success {
Ok(())
} else {
Err(anyhow!(
"failed to deploy {deployment}. operation unsuccessful, see monitor update"
))
}
})
});
join_all(futes).await.into_iter().collect()
}
async fn start_containers_in_parallel(
deployments: &[String],
) -> anyhow::Result<()> {
let futes = deployments.iter().map(|deployment| async move {
monitor_client()
.execute(execute::StartContainer { deployment: deployment.to_string() })
.await
.with_context(|| format!("failed to start container {deployment}"))
.and_then(|update| {
if update.success {
Ok(())
} else {
Err(anyhow!(
"failed to start container {deployment}. operation unsuccessful, see monitor update"
))
}
})
});
join_all(futes).await.into_iter().collect()
}
async fn stop_containers_in_parallel(
deployments: &[String],
) -> anyhow::Result<()> {
let futes = deployments.iter().map(|deployment| async move {
monitor_client()
.execute(execute::StopContainer { deployment: deployment.to_string(), signal: None, time: None })
.await
.with_context(|| format!("failed to stop container {deployment}"))
.and_then(|update| {
if update.success {
Ok(())
} else {
Err(anyhow!(
"failed to stop container {deployment}. operation unsuccessful, see monitor update"
))
}
})
});
join_all(futes).await.into_iter().collect()
}
async fn destroy_containers_in_parallel(
deployments: &[String],
) -> anyhow::Result<()> {
let futes = deployments.iter().map(|deployment| async move {
monitor_client()
.execute(execute::RemoveContainer { deployment: deployment.to_string(), signal: None, time: None })
.await
.with_context(|| format!("failed to destroy container {deployment}"))
.and_then(|update| {
if update.success {
Ok(())
} else {
Err(anyhow!(
"failed to destroy container {deployment}. operation unsuccessful, see monitor update"
))
}
})
});
join_all(futes).await.into_iter().collect()
}
async fn trigger_builds_in_parallel(
builds: &[String],
) -> anyhow::Result<()> {
let futes = builds.iter().map(|build| async move {
monitor_client()
.execute(execute::RunBuild { build: build.to_string() })
.await
.with_context(|| format!("failed to build {build}"))
.and_then(|update| {
if update.success {
Ok(())
} else {
Err(anyhow!(
"failed to build {build}. operation unsuccessful, see monitor update"
))
}
})
});
join_all(futes).await.into_iter().collect()
}

bin/cli/src/helpers.rs (new file, +17 lines)

@@ -0,0 +1,17 @@
use std::io::Read;
use anyhow::Context;
use colored::Colorize;
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
println!(
"\nPress {} to {}\n",
"ENTER".green(),
press_enter_to.bold()
);
let buffer = &mut [0u8];
std::io::stdin()
.read_exact(buffer)
.context("failed to read ENTER")?;
Ok(())
}


@@ -1,113 +1,32 @@
#[macro_use]
extern crate tracing;
use std::{io::Read, path::PathBuf, str::FromStr, sync::OnceLock};
use anyhow::Context;
use clap::{Parser, Subcommand};
use colored::Colorize;
use monitor_client::{api::read, MonitorClient};
use serde::{de::DeserializeOwned, Deserialize};
use monitor_client::api::read::GetVersion;
mod execution;
mod args;
mod exec;
mod helpers;
mod maps;
mod state;
mod sync;
fn cli_args() -> &'static CliArgs {
static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
CLI_ARGS.get_or_init(CliArgs::parse)
}
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct CliArgs {
/// Sync or Exec
#[command(subcommand)]
command: Command,
/// The path to a creds file.
#[arg(long, default_value_t = default_creds())]
creds: String,
/// Log less (just resource names).
#[arg(long, default_value_t = false)]
quiet: bool,
}
fn default_creds() -> String {
let home = std::env::var("HOME")
.expect("no HOME env var. cannot get default config path.");
format!("{home}/.config/monitor/creds.toml")
}
#[derive(Debug, Clone, Subcommand)]
enum Command {
/// Runs syncs on resource files
Sync {
/// The path of the resource folder / file
/// Folder paths will recursively incorporate all the resources it finds under the folder
#[arg(default_value_t = String::from("./resources"))]
path: String,
},
/// Runs execution files
Exec {
/// The path of the exec file
path: PathBuf,
},
}
#[derive(Debug, Deserialize)]
struct CredsFile {
url: String,
key: String,
secret: String,
}
fn monitor_client() -> &'static MonitorClient {
static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
MONITOR_CLIENT.get_or_init(|| {
let CredsFile { url, key, secret } =
parse_toml_file(&cli_args().creds)
.expect("failed to parse monitor credentials");
futures::executor::block_on(MonitorClient::new(url, key, secret))
.expect("failed to initialize monitor client")
})
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt().with_target(false).init();
let version =
monitor_client().read(read::GetVersion {}).await?.version;
state::monitor_client().read(GetVersion {}).await?.version;
info!("monitor version: {}", version.to_string().blue().bold());
match &cli_args().command {
Command::Exec { path } => execution::run_execution(path).await?,
Command::Sync { path } => {
sync::run_sync(&PathBuf::from_str(path)?).await?
match &state::cli_args().command {
args::Command::Sync { path, delete } => {
sync::run(path, *delete).await?
}
args::Command::Execute { execution } => {
exec::run(execution.to_owned()).await?
}
}
Ok(())
}
fn parse_toml_file<T: DeserializeOwned>(
path: impl AsRef<std::path::Path>,
) -> anyhow::Result<T> {
let contents = std::fs::read_to_string(path)
.context("failed to read file contents")?;
toml::from_str(&contents).context("failed to parse toml contents")
}
fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
println!(
"\nPress {} to {}\n",
"ENTER".green(),
press_enter_to.bold()
);
let buffer = &mut [0u8];
std::io::stdin()
.read_exact(buffer)
.context("failed to read ENTER")?;
Ok(())
}


@@ -3,22 +3,22 @@ use std::{collections::HashMap, sync::OnceLock};
use monitor_client::{
api::read,
entities::{
alerter::AlerterListItem, build::BuildListItem,
builder::BuilderListItem, deployment::DeploymentListItem,
procedure::ProcedureListItem, repo::RepoListItem,
server::ServerListItem, server_template::ServerTemplateListItem,
tag::Tag, user::User, user_group::UserGroup,
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
sync::ResourceSync, tag::Tag, user::User, user_group::UserGroup,
variable::Variable,
},
};
use crate::monitor_client;
use crate::state::monitor_client;
pub fn name_to_build() -> &'static HashMap<String, BuildListItem> {
static NAME_TO_BUILD: OnceLock<HashMap<String, BuildListItem>> =
pub fn name_to_build() -> &'static HashMap<String, Build> {
static NAME_TO_BUILD: OnceLock<HashMap<String, Build>> =
OnceLock::new();
NAME_TO_BUILD.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListBuilds::default()),
monitor_client().read(read::ListFullBuilds::default()),
)
.expect("failed to get builds from monitor")
.into_iter()
@@ -27,12 +27,12 @@ pub fn name_to_build() -> &'static HashMap<String, BuildListItem> {
})
}
pub fn id_to_build() -> &'static HashMap<String, BuildListItem> {
static ID_TO_BUILD: OnceLock<HashMap<String, BuildListItem>> =
pub fn id_to_build() -> &'static HashMap<String, Build> {
static ID_TO_BUILD: OnceLock<HashMap<String, Build>> =
OnceLock::new();
ID_TO_BUILD.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListBuilds::default()),
monitor_client().read(read::ListFullBuilds::default()),
)
.expect("failed to get builds from monitor")
.into_iter()
@@ -41,14 +41,12 @@ pub fn id_to_build() -> &'static HashMap<String, BuildListItem> {
})
}
pub fn name_to_deployment(
) -> &'static HashMap<String, DeploymentListItem> {
static NAME_TO_DEPLOYMENT: OnceLock<
HashMap<String, DeploymentListItem>,
> = OnceLock::new();
pub fn name_to_deployment() -> &'static HashMap<String, Deployment> {
static NAME_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
OnceLock::new();
NAME_TO_DEPLOYMENT.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListDeployments::default()),
monitor_client().read(read::ListFullDeployments::default()),
)
.expect("failed to get deployments from monitor")
.into_iter()
@@ -57,14 +55,12 @@ pub fn name_to_deployment(
})
}
pub fn id_to_deployment(
) -> &'static HashMap<String, DeploymentListItem> {
static ID_TO_DEPLOYMENT: OnceLock<
HashMap<String, DeploymentListItem>,
> = OnceLock::new();
pub fn id_to_deployment() -> &'static HashMap<String, Deployment> {
static ID_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
OnceLock::new();
ID_TO_DEPLOYMENT.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListDeployments::default()),
monitor_client().read(read::ListFullDeployments::default()),
)
.expect("failed to get deployments from monitor")
.into_iter()
@@ -73,12 +69,12 @@ pub fn id_to_deployment(
})
}
pub fn name_to_server() -> &'static HashMap<String, ServerListItem> {
static NAME_TO_SERVER: OnceLock<HashMap<String, ServerListItem>> =
pub fn name_to_server() -> &'static HashMap<String, Server> {
static NAME_TO_SERVER: OnceLock<HashMap<String, Server>> =
OnceLock::new();
NAME_TO_SERVER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListServers::default()),
monitor_client().read(read::ListFullServers::default()),
)
.expect("failed to get servers from monitor")
.into_iter()
@@ -87,12 +83,12 @@ pub fn name_to_server() -> &'static HashMap<String, ServerListItem> {
})
}
pub fn id_to_server() -> &'static HashMap<String, ServerListItem> {
static ID_TO_SERVER: OnceLock<HashMap<String, ServerListItem>> =
pub fn id_to_server() -> &'static HashMap<String, Server> {
static ID_TO_SERVER: OnceLock<HashMap<String, Server>> =
OnceLock::new();
ID_TO_SERVER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListServers::default()),
monitor_client().read(read::ListFullServers::default()),
)
.expect("failed to get servers from monitor")
.into_iter()
@@ -101,13 +97,12 @@ pub fn id_to_server() -> &'static HashMap<String, ServerListItem> {
})
}
pub fn name_to_builder() -> &'static HashMap<String, BuilderListItem>
{
static NAME_TO_BUILDER: OnceLock<HashMap<String, BuilderListItem>> =
pub fn name_to_builder() -> &'static HashMap<String, Builder> {
static NAME_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
OnceLock::new();
NAME_TO_BUILDER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListBuilders::default()),
monitor_client().read(read::ListFullBuilders::default()),
)
.expect("failed to get builders from monitor")
.into_iter()
@@ -116,12 +111,12 @@ pub fn name_to_builder() -> &'static HashMap<String, BuilderListItem>
})
}
pub fn id_to_builder() -> &'static HashMap<String, BuilderListItem> {
static ID_TO_BUILDER: OnceLock<HashMap<String, BuilderListItem>> =
pub fn id_to_builder() -> &'static HashMap<String, Builder> {
static ID_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
OnceLock::new();
ID_TO_BUILDER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListBuilders::default()),
monitor_client().read(read::ListFullBuilders::default()),
)
.expect("failed to get builders from monitor")
.into_iter()
@@ -130,13 +125,12 @@ pub fn id_to_builder() -> &'static HashMap<String, BuilderListItem> {
})
}
pub fn name_to_alerter() -> &'static HashMap<String, AlerterListItem>
{
static NAME_TO_ALERTER: OnceLock<HashMap<String, AlerterListItem>> =
pub fn name_to_alerter() -> &'static HashMap<String, Alerter> {
static NAME_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
OnceLock::new();
NAME_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListAlerters::default()),
monitor_client().read(read::ListFullAlerters::default()),
)
.expect("failed to get alerters from monitor")
.into_iter()
@@ -145,12 +139,12 @@ pub fn name_to_alerter() -> &'static HashMap<String, AlerterListItem>
})
}
pub fn id_to_alerter() -> &'static HashMap<String, AlerterListItem> {
static ID_TO_ALERTER: OnceLock<HashMap<String, AlerterListItem>> =
pub fn id_to_alerter() -> &'static HashMap<String, Alerter> {
static ID_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
OnceLock::new();
ID_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListAlerters::default()),
monitor_client().read(read::ListFullAlerters::default()),
)
.expect("failed to get alerters from monitor")
.into_iter()
@@ -159,12 +153,12 @@ pub fn id_to_alerter() -> &'static HashMap<String, AlerterListItem> {
})
}
pub fn name_to_repo() -> &'static HashMap<String, RepoListItem> {
static NAME_TO_ALERTER: OnceLock<HashMap<String, RepoListItem>> =
pub fn name_to_repo() -> &'static HashMap<String, Repo> {
static NAME_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
OnceLock::new();
NAME_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListRepos::default()),
monitor_client().read(read::ListFullRepos::default()),
)
.expect("failed to get repos from monitor")
.into_iter()
@@ -173,12 +167,12 @@ pub fn name_to_repo() -> &'static HashMap<String, RepoListItem> {
})
}
pub fn id_to_repo() -> &'static HashMap<String, RepoListItem> {
static ID_TO_ALERTER: OnceLock<HashMap<String, RepoListItem>> =
pub fn id_to_repo() -> &'static HashMap<String, Repo> {
static ID_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
OnceLock::new();
ID_TO_ALERTER.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListRepos::default()),
monitor_client().read(read::ListFullRepos::default()),
)
.expect("failed to get repos from monitor")
.into_iter()
@@ -187,14 +181,12 @@ pub fn id_to_repo() -> &'static HashMap<String, RepoListItem> {
})
}
pub fn name_to_procedure(
) -> &'static HashMap<String, ProcedureListItem> {
static NAME_TO_PROCEDURE: OnceLock<
HashMap<String, ProcedureListItem>,
> = OnceLock::new();
pub fn name_to_procedure() -> &'static HashMap<String, Procedure> {
static NAME_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
OnceLock::new();
NAME_TO_PROCEDURE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListProcedures::default()),
monitor_client().read(read::ListFullProcedures::default()),
)
.expect("failed to get procedures from monitor")
.into_iter()
@@ -203,14 +195,12 @@ pub fn name_to_procedure(
})
}
pub fn id_to_procedure() -> &'static HashMap<String, ProcedureListItem>
{
static ID_TO_PROCEDURE: OnceLock<
HashMap<String, ProcedureListItem>,
> = OnceLock::new();
pub fn id_to_procedure() -> &'static HashMap<String, Procedure> {
static ID_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
OnceLock::new();
ID_TO_PROCEDURE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListProcedures::default()),
monitor_client().read(read::ListFullProcedures::default()),
)
.expect("failed to get procedures from monitor")
.into_iter()
@@ -220,13 +210,13 @@ pub fn id_to_procedure() -> &'static HashMap<String, ProcedureListItem>
}
pub fn name_to_server_template(
) -> &'static HashMap<String, ServerTemplateListItem> {
) -> &'static HashMap<String, ServerTemplate> {
static NAME_TO_SERVER_TEMPLATE: OnceLock<
HashMap<String, ServerTemplateListItem>,
HashMap<String, ServerTemplate>,
> = OnceLock::new();
NAME_TO_SERVER_TEMPLATE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListServerTemplates::default()),
monitor_client().read(read::ListFullServerTemplates::default()),
)
.expect("failed to get server templates from monitor")
.into_iter()
@@ -236,13 +226,13 @@ pub fn name_to_server_template(
}
pub fn id_to_server_template(
) -> &'static HashMap<String, ServerTemplateListItem> {
) -> &'static HashMap<String, ServerTemplate> {
static ID_TO_SERVER_TEMPLATE: OnceLock<
HashMap<String, ServerTemplateListItem>,
HashMap<String, ServerTemplate>,
> = OnceLock::new();
ID_TO_SERVER_TEMPLATE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListServerTemplates::default()),
monitor_client().read(read::ListFullServerTemplates::default()),
)
.expect("failed to get server templates from monitor")
.into_iter()
@@ -251,6 +241,36 @@ pub fn id_to_server_template(
})
}
pub fn name_to_resource_sync(
) -> &'static HashMap<String, ResourceSync> {
static NAME_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
OnceLock::new();
NAME_TO_SYNC.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullResourceSyncs::default()),
)
.expect("failed to get syncs from monitor")
.into_iter()
.map(|sync| (sync.name.clone(), sync))
.collect()
})
}
pub fn id_to_resource_sync() -> &'static HashMap<String, ResourceSync>
{
static ID_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
OnceLock::new();
ID_TO_SYNC.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListFullResourceSyncs::default()),
)
.expect("failed to get syncs from monitor")
.into_iter()
.map(|sync| (sync.id.clone(), sync))
.collect()
})
}
pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
static NAME_TO_USER_GROUP: OnceLock<HashMap<String, UserGroup>> =
OnceLock::new();
@@ -265,6 +285,21 @@ pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
})
}
pub fn name_to_variable() -> &'static HashMap<String, Variable> {
static NAME_TO_VARIABLE: OnceLock<HashMap<String, Variable>> =
OnceLock::new();
NAME_TO_VARIABLE.get_or_init(|| {
futures::executor::block_on(
monitor_client().read(read::ListVariables::default()),
)
.expect("failed to get user groups from monitor")
.variables
.into_iter()
.map(|variable| (variable.name.clone(), variable))
.collect()
})
}
pub fn id_to_user() -> &'static HashMap<String, User> {
static ID_TO_USER: OnceLock<HashMap<String, User>> =
OnceLock::new();

bin/cli/src/state.rs (new file, +46 lines)

@@ -0,0 +1,46 @@
use std::sync::OnceLock;
use clap::Parser;
use merge_config_files::parse_config_file;
use monitor_client::MonitorClient;
pub fn cli_args() -> &'static crate::args::CliArgs {
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
}
pub fn monitor_client() -> &'static MonitorClient {
static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
MONITOR_CLIENT.get_or_init(|| {
let args = cli_args();
let crate::args::CredsFile { url, key, secret } =
match (&args.url, &args.key, &args.secret) {
(Some(url), Some(key), Some(secret)) => {
crate::args::CredsFile {
url: url.clone(),
key: key.clone(),
secret: secret.clone(),
}
}
(url, key, secret) => {
let mut creds: crate::args::CredsFile =
parse_config_file(cli_args().creds.as_str())
.expect("failed to parse monitor credentials");
if let Some(url) = url {
creds.url.clone_from(url);
}
if let Some(key) = key {
creds.key.clone_from(key);
}
if let Some(secret) = secret {
creds.secret.clone_from(secret);
}
creds
}
};
futures::executor::block_on(MonitorClient::new(url, key, secret))
.expect("failed to initialize monitor client")
})
}
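A hedged usage sketch of the precedence implemented above: if --url, --key and --secret are all passed, no creds file is read; otherwise the file at --creds is parsed and any flag that is present overrides the corresponding field. The binary name monitor-cli is an assumption; the flags come from CliArgs in args.rs.

# all three flags passed: the creds file is never read
monitor-cli --url https://monitor.example.com --key YOUR-KEY --secret YOUR-SECRET sync ./resources

# only --url passed: key and secret still come from ~/.config/monitor/creds.toml
monitor-cli --url https://monitor.example.com sync ./resources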


@@ -1,12 +1,19 @@
use std::{fs, path::Path};
use std::{
fs,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{anyhow, Context};
use colored::Colorize;
use monitor_client::entities::toml::ResourcesToml;
use serde::de::DeserializeOwned;
pub fn read_resources(path: &Path) -> anyhow::Result<ResourcesToml> {
pub fn read_resources(path: &str) -> anyhow::Result<ResourcesToml> {
let mut res = ResourcesToml::default();
read_resources_recursive(path, &mut res)?;
let path =
PathBuf::from_str(path).context("invalid resources path")?;
read_resources_recursive(&path, &mut res)?;
Ok(res)
}
@@ -24,7 +31,7 @@ fn read_resources_recursive(
{
return Ok(());
}
let more = match crate::parse_toml_file::<ResourcesToml>(path) {
let more = match parse_toml_file::<ResourcesToml>(path) {
Ok(res) => res,
Err(e) => {
warn!("failed to parse {:?}. skipping file | {e:#}", path);
@@ -36,15 +43,17 @@ fn read_resources_recursive(
"adding resources".green().bold(),
path.display().to_string().blue().bold()
);
resources.server_templates.extend(more.server_templates);
resources.servers.extend(more.servers);
resources.builds.extend(more.builds);
resources.deployments.extend(more.deployments);
resources.builders.extend(more.builders);
resources.builds.extend(more.builds);
resources.repos.extend(more.repos);
resources.alerters.extend(more.alerters);
resources.procedures.extend(more.procedures);
resources.builders.extend(more.builders);
resources.alerters.extend(more.alerters);
resources.server_templates.extend(more.server_templates);
resources.resource_syncs.extend(more.resource_syncs);
resources.user_groups.extend(more.user_groups);
resources.variables.extend(more.variables);
Ok(())
} else if res.is_dir() {
let directory = fs::read_dir(path)
@@ -61,3 +70,11 @@ fn read_resources_recursive(
Err(anyhow!("resources path is neither file nor directory"))
}
}
fn parse_toml_file<T: DeserializeOwned>(
path: impl AsRef<std::path::Path>,
) -> anyhow::Result<T> {
let contents = std::fs::read_to_string(path)
.context("failed to read file contents")?;
toml::from_str(&contents).context("failed to parse toml contents")
}
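For reference, a sketch of a minimal resource file that read_resources would pick up when pointed at a folder such as ./resources; the shape follows the [[server]] entries in the deleted TOML above, and the file name and values are placeholders:

# ./resources/servers.toml (hypothetical example)
[[server]]
name = "my-server"
description = ""
tags = []
[server.config]
address = "http://localhost:8120"
enabled = true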


@@ -1,96 +1,174 @@
use std::path::Path;
use colored::Colorize;
use monitor_client::entities::{
alerter::Alerter, build::Build, builder::Builder,
self, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
};
use crate::{sync::resources::ResourceSync, wait_for_enter};
use crate::{helpers::wait_for_enter, state::cli_args};
mod file;
mod resource;
mod resources;
mod user_group;
mod variables;
pub async fn run_sync(path: &Path) -> anyhow::Result<()> {
info!(
"resources path: {}",
path.display().to_string().blue().bold()
);
use resource::ResourceSync;
pub async fn run(path: &str, delete: bool) -> anyhow::Result<()> {
info!("resources path: {}", path.blue().bold());
if delete {
warn!("Delete mode {}", "enabled".bold());
}
let resources = file::read_resources(path)?;
info!("computing sync actions...");
let (server_template_creates, server_template_updates) =
ServerTemplate::get_updates(resources.server_templates).await?;
let (server_creates, server_updates) =
Server::get_updates(resources.servers).await?;
let (deployment_creates, deployment_updates) =
Deployment::get_updates(resources.deployments).await?;
let (build_creates, build_updates) =
Build::get_updates(resources.builds).await?;
let (builder_creates, builder_updates) =
Builder::get_updates(resources.builders).await?;
let (alerter_creates, alerter_updates) =
Alerter::get_updates(resources.alerters).await?;
let (repo_creates, repo_updates) =
Repo::get_updates(resources.repos).await?;
let (procedure_creates, procedure_updates) =
Procedure::get_updates(resources.procedures).await?;
let (user_group_creates, user_group_updates) =
user_group::get_updates(resources.user_groups).await?;
let (server_creates, server_updates, server_deletes) =
resource::get_updates::<Server>(resources.servers, delete)?;
let (deployment_creates, deployment_updates, deployment_deletes) =
resource::get_updates::<Deployment>(
resources.deployments,
delete,
)?;
let (build_creates, build_updates, build_deletes) =
resource::get_updates::<Build>(resources.builds, delete)?;
let (repo_creates, repo_updates, repo_deletes) =
resource::get_updates::<Repo>(resources.repos, delete)?;
let (procedure_creates, procedure_updates, procedure_deletes) =
resource::get_updates::<Procedure>(resources.procedures, delete)?;
let (builder_creates, builder_updates, builder_deletes) =
resource::get_updates::<Builder>(resources.builders, delete)?;
let (alerter_creates, alerter_updates, alerter_deletes) =
resource::get_updates::<Alerter>(resources.alerters, delete)?;
let (
server_template_creates,
server_template_updates,
server_template_deletes,
) = resource::get_updates::<ServerTemplate>(
resources.server_templates,
delete,
)?;
let (
resource_sync_creates,
resource_sync_updates,
resource_sync_deletes,
) = resource::get_updates::<entities::sync::ResourceSync>(
resources.resource_syncs,
delete,
)?;
if server_template_creates.is_empty()
let (variable_creates, variable_updates, variable_deletes) =
variables::get_updates(resources.variables, delete)?;
let (user_group_creates, user_group_updates, user_group_deletes) =
user_group::get_updates(resources.user_groups, delete).await?;
if resource_sync_creates.is_empty()
&& resource_sync_updates.is_empty()
&& resource_sync_deletes.is_empty()
&& server_template_creates.is_empty()
&& server_template_updates.is_empty()
&& server_template_deletes.is_empty()
&& server_creates.is_empty()
&& server_updates.is_empty()
&& server_deletes.is_empty()
&& deployment_creates.is_empty()
&& deployment_updates.is_empty()
&& deployment_deletes.is_empty()
&& build_creates.is_empty()
&& build_updates.is_empty()
&& build_deletes.is_empty()
&& builder_creates.is_empty()
&& builder_updates.is_empty()
&& builder_deletes.is_empty()
&& alerter_creates.is_empty()
&& alerter_updates.is_empty()
&& alerter_deletes.is_empty()
&& repo_creates.is_empty()
&& repo_updates.is_empty()
&& repo_deletes.is_empty()
&& procedure_creates.is_empty()
&& procedure_updates.is_empty()
&& procedure_deletes.is_empty()
&& user_group_creates.is_empty()
&& user_group_updates.is_empty()
&& user_group_deletes.is_empty()
&& variable_creates.is_empty()
&& variable_updates.is_empty()
&& variable_deletes.is_empty()
{
info!("{}. exiting.", "nothing to do".green().bold());
return Ok(());
}
wait_for_enter("run sync")?;
if !cli_args().yes {
wait_for_enter("run sync")?;
}
// No deps
entities::sync::ResourceSync::run_updates(
resource_sync_creates,
resource_sync_updates,
resource_sync_deletes,
)
.await;
ServerTemplate::run_updates(
server_template_creates,
server_template_updates,
server_template_deletes,
)
.await;
Server::run_updates(server_creates, server_updates, server_deletes)
.await;
Alerter::run_updates(
alerter_creates,
alerter_updates,
alerter_deletes,
)
.await;
Server::run_updates(server_creates, server_updates).await;
Alerter::run_updates(alerter_creates, alerter_updates).await;
// Dependant on server
Builder::run_updates(builder_creates, builder_updates).await;
Repo::run_updates(repo_creates, repo_updates).await;
Builder::run_updates(
builder_creates,
builder_updates,
builder_deletes,
)
.await;
Repo::run_updates(repo_creates, repo_updates, repo_deletes).await;
// Dependant on builder
Build::run_updates(build_creates, build_updates).await;
// Dependant on server / builder
Deployment::run_updates(deployment_creates, deployment_updates)
Build::run_updates(build_creates, build_updates, build_deletes)
.await;
// Dependant on server / build
Deployment::run_updates(
deployment_creates,
deployment_updates,
deployment_deletes,
)
.await;
// Dependant on everything
Procedure::run_updates(procedure_creates, procedure_updates).await;
user_group::run_updates(user_group_creates, user_group_updates)
.await;
Procedure::run_updates(
procedure_creates,
procedure_updates,
procedure_deletes,
)
.await;
variables::run_updates(
variable_creates,
variable_updates,
variable_deletes,
)
.await;
user_group::run_updates(
user_group_creates,
user_group_updates,
user_group_deletes,
)
.await;
Ok(())
}
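A hedged invocation sketch for this sync flow, assuming the binary is installed as monitor-cli and that parent-level flags such as --yes are given before the subcommand (clap's default when global is not set):

# run the sync in delete mode, skipping the confirmation prompt
monitor-cli --yes sync ./resources --delete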


@@ -0,0 +1,358 @@
use std::collections::HashMap;
use colored::Colorize;
use monitor_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
resource::Resource, toml::ResourceToml, update::ResourceTarget,
},
};
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use serde::Serialize;
use crate::maps::id_to_tag;
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
pub type ToCreate<T> = Vec<ResourceToml<T>>;
/// Vec of resource names
pub type ToDelete = Vec<String>;
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>, ToDelete);
pub struct ToUpdateItem<T: Default> {
pub id: String,
pub resource: ResourceToml<T>,
pub update_description: bool,
pub update_tags: bool,
}
pub trait ResourceSync: Sized {
type Config: Clone
+ Default
+ Send
+ From<Self::PartialConfig>
+ PartialDiff<Self::PartialConfig, Self::ConfigDiff>
+ 'static;
type Info: Default + 'static;
type PartialConfig: std::fmt::Debug
+ Clone
+ Send
+ Default
+ From<Self::Config>
+ From<Self::ConfigDiff>
+ Serialize
+ MaybeNone
+ 'static;
type ConfigDiff: Diff + MaybeNone;
fn display() -> &'static str;
fn resource_target(id: String) -> ResourceTarget;
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>;
/// Creates the resource and returns created id.
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String>;
/// Updates the resource at id with the partial config.
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()>;
/// Apply any changes to incoming toml partial config
/// before it is diffed against existing config
fn validate_partial_config(_config: &mut Self::PartialConfig) {}
/// Diffs the declared toml (partial) against the full existing config.
/// Removes all fields from toml (partial) that haven't changed.
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff>;
/// Apply any changes to computed config diff
/// before logging
fn validate_diff(_diff: &mut Self::ConfigDiff) {}
/// Deletes the target resource
async fn delete(id_or_name: String) -> anyhow::Result<()>;
async fn run_updates(
to_create: ToCreate<Self::PartialConfig>,
to_update: ToUpdate<Self::PartialConfig>,
to_delete: ToDelete,
) {
for resource in to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match Self::create(resource).await {
Ok(id) => id,
Err(e) => {
warn!(
"failed to create {} {name} | {e:#}",
Self::display(),
);
continue;
}
};
run_update_tags::<Self>(id.clone(), &name, tags).await;
run_update_description::<Self>(id, &name, description).await;
info!(
"{} {} '{}'",
"created".green().bold(),
Self::display(),
name.bold(),
);
}
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
} in to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if update_description {
run_update_description::<Self>(
id.clone(),
&name,
description,
)
.await;
}
if update_tags {
run_update_tags::<Self>(id.clone(), &name, tags).await;
}
if !resource.config.is_none() {
if let Err(e) = Self::update(id, resource).await {
warn!(
"failed to update config on {} {name} | {e:#}",
Self::display()
);
} else {
info!(
"{} {} '{}' configuration",
"updated".blue().bold(),
Self::display(),
name.bold(),
);
}
}
}
for resource in to_delete {
if let Err(e) = Self::delete(resource.clone()).await {
warn!(
"failed to delete {} {resource} | {e:#}",
Self::display()
);
} else {
info!(
"{} {} '{}'",
"deleted".red().bold(),
Self::display(),
resource.bold(),
);
}
}
}
}
/// Gets all the resources to update, logging along the way.
pub fn get_updates<Resource: ResourceSync>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
let map = Resource::name_to_resource();
let mut to_create = ToCreate::<Resource::PartialConfig>::new();
let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
let mut to_delete = ToDelete::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
to_delete.push(resource.name.clone());
}
}
}
for mut resource in resources {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: Resource::Config = resource.config.into();
resource.config = config.into();
Resource::validate_partial_config(&mut resource.config);
let mut diff = Resource::get_diff(
original.config.clone(),
resource.config,
)?;
Resource::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| {
id_to_tag().get(id).map(|t| t.name.clone())
})
.collect::<Vec<_>>();
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& resource.description == original.description
&& resource.tags == original_tags
{
continue;
}
println!(
"\n{}: {}: '{}'\n-------------------",
"UPDATE".blue(),
Resource::display(),
resource.name.bold(),
);
let mut lines = Vec::<String>::new();
if resource.description != original.description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
original.description.red(),
"to".dimmed(),
resource.description.green()
))
}
if resource.tags != original_tags {
let from = format!("{:?}", original_tags).red();
let to = format!("{:?}", resource.tags).green();
lines.push(format!(
"{}: 'tags'\n{}: {from}\n{}: {to}",
"field".dimmed(),
"from".dimmed(),
"to".dimmed(),
));
}
lines.extend(diff.iter_field_diffs().map(
|FieldDiff { field, from, to }| {
format!(
"{}: '{field}'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
from.red(),
"to".dimmed(),
to.green()
)
},
));
println!("{}", lines.join("\n-------------------\n"));
// Minimizes updates through diffing.
resource.config = diff.into();
let update = ToUpdateItem {
id: original.id.clone(),
update_description: resource.description
!= original.description,
update_tags: resource.tags != original_tags,
resource,
};
to_update.push(update);
}
None => {
println!(
"\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
"CREATE".green(),
Resource::display(),
resource.name.bold().green(),
"description".dimmed(),
resource.description,
"tags".dimmed(),
resource.tags,
"config".dimmed(),
serde_json::to_string_pretty(&resource.config)?
);
to_create.push(resource);
}
}
}
for name in &to_delete {
println!(
"\n{}: {}: '{}'\n-------------------",
"DELETE".red(),
Resource::display(),
name.bold(),
);
}
Ok((to_create, to_update, to_delete))
}
pub async fn run_update_tags<Resource: ResourceSync>(
id: String,
name: &str,
tags: Vec<String>,
) {
// Update tags
if let Err(e) = crate::state::monitor_client()
.write(UpdateTagsOnResource {
target: Resource::resource_target(id),
tags,
})
.await
{
warn!(
"failed to update tags on {} {name} | {e:#}",
Resource::display(),
);
} else {
info!(
"{} {} '{}' tags",
"updated".blue().bold(),
Resource::display(),
name.bold(),
);
}
}
pub async fn run_update_description<Resource: ResourceSync>(
id: String,
name: &str,
description: String,
) {
if let Err(e) = crate::state::monitor_client()
.write(UpdateDescription {
target: Resource::resource_target(id.clone()),
description,
})
.await
{
warn!("failed to update resource {id} description | {e:#}");
} else {
info!(
"{} {} '{}' description",
"updated".blue().bold(),
Resource::display(),
name.bold(),
);
}
}


@@ -1,31 +1,28 @@
use partial_derive2::PartialDiff;
use std::collections::HashMap;
use monitor_client::{
api::{
read::GetAlerter,
write::{CreateAlerter, UpdateAlerter},
},
api::write::{CreateAlerter, DeleteAlerter, UpdateAlerter},
entities::{
alerter::{
Alerter, AlerterConfig, AlerterConfigDiff, AlerterInfo, AlerterListItemInfo, PartialAlerterConfig
Alerter, AlerterConfig, AlerterConfigDiff, PartialAlerterConfig,
},
resource::{Resource, ResourceListItem},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{maps::name_to_alerter, monitor_client};
use super::ResourceSync;
use crate::{
maps::name_to_alerter, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Alerter {
type Config = AlerterConfig;
type Info = AlerterInfo;
type Info = ();
type PartialConfig = PartialAlerterConfig;
type ConfigDiff = AlerterConfigDiff;
type ListItemInfo = AlerterListItemInfo;
fn display() -> &'static str {
"alerter"
@@ -36,7 +33,7 @@ impl ResourceSync for Alerter {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_alerter()
}
@@ -66,16 +63,15 @@ impl ResourceSync for Alerter {
Ok(())
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client().read(GetAlerter { alerter: id }).await
}
async fn get_diff(
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteAlerter { id }).await?;
Ok(())
}
}


@@ -1,16 +1,13 @@
use std::collections::HashMap;
use monitor_client::{
api::{
read::GetBuild,
write::{CreateBuild, UpdateBuild},
},
api::write::{CreateBuild, DeleteBuild, UpdateBuild},
entities::{
build::{
Build, BuildConfig, BuildConfigDiff, BuildInfo,
BuildListItemInfo, PartialBuildConfig,
PartialBuildConfig,
},
resource::{Resource, ResourceListItem},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
@@ -19,17 +16,15 @@ use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_builder, name_to_build},
monitor_client,
state::monitor_client,
sync::resource::ResourceSync,
};
use super::ResourceSync;
impl ResourceSync for Build {
type Config = BuildConfig;
type Info = BuildInfo;
type PartialConfig = PartialBuildConfig;
type ConfigDiff = BuildConfigDiff;
type ListItemInfo = BuildListItemInfo;
fn display() -> &'static str {
"build"
@@ -40,7 +35,7 @@ impl ResourceSync for Build {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_build()
}
@@ -70,13 +65,7 @@ impl ResourceSync for Build {
Ok(())
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client().read(GetBuild { build: id }).await
}
async fn get_diff(
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
@@ -88,4 +77,17 @@ impl ResourceSync for Build {
Ok(original.partial_diff(update))
}
fn validate_diff(diff: &mut Self::ConfigDiff) {
if let Some((_, to)) = &diff.version {
if to.is_none() {
diff.version = None;
}
}
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteBuild { id }).await?;
Ok(())
}
}


@@ -1,16 +1,12 @@
use std::collections::HashMap;
use monitor_client::{
api::{
read::GetBuilder,
write::{CreateBuilder, UpdateBuilder},
},
api::write::{CreateBuilder, DeleteBuilder, UpdateBuilder},
entities::{
builder::{
Builder, BuilderConfig, BuilderConfigDiff, BuilderListItemInfo,
PartialBuilderConfig,
Builder, BuilderConfig, BuilderConfigDiff, PartialBuilderConfig,
},
resource::{Resource, ResourceListItem},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
@@ -19,17 +15,15 @@ use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_server, name_to_builder},
monitor_client,
state::monitor_client,
sync::resource::ResourceSync,
};
use super::ResourceSync;
impl ResourceSync for Builder {
type Config = BuilderConfig;
type Info = ();
type PartialConfig = PartialBuilderConfig;
type ConfigDiff = BuilderConfigDiff;
type ListItemInfo = BuilderListItemInfo;
fn display() -> &'static str {
"builder"
@@ -40,7 +34,7 @@ impl ResourceSync for Builder {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_builder()
}
@@ -70,13 +64,7 @@ impl ResourceSync for Builder {
Ok(())
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client().read(GetBuilder { builder: id }).await
}
async fn get_diff(
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
@@ -90,4 +78,9 @@ impl ResourceSync for Builder {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteBuilder { id }).await?;
Ok(())
}
}


@@ -1,14 +1,13 @@
use std::collections::HashMap;
use monitor_client::{
api::{read::GetDeployment, write},
api::write::{self, DeleteDeployment},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentConfigDiff,
DeploymentImage, DeploymentListItemInfo,
PartialDeploymentConfig,
DeploymentImage, PartialDeploymentConfig,
},
resource::{Resource, ResourceListItem},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
@@ -17,17 +16,15 @@ use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_build, id_to_server, name_to_deployment},
monitor_client,
state::monitor_client,
sync::resource::ResourceSync,
};
use super::ResourceSync;
impl ResourceSync for Deployment {
type Config = DeploymentConfig;
type Info = ();
type PartialConfig = PartialDeploymentConfig;
type ConfigDiff = DeploymentConfigDiff;
type ListItemInfo = DeploymentListItemInfo;
fn display() -> &'static str {
"deployment"
@@ -38,7 +35,7 @@ impl ResourceSync for Deployment {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_deployment()
}
@@ -68,15 +65,7 @@ impl ResourceSync for Deployment {
Ok(())
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client()
.read(GetDeployment { deployment: id })
.await
}
async fn get_diff(
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
@@ -95,10 +84,15 @@ impl ResourceSync for Deployment {
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: version.clone(),
version: *version,
};
}
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteDeployment { id }).await?;
Ok(())
}
}


@@ -1,327 +1,9 @@
use std::collections::HashMap;
use colored::Colorize;
use monitor_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
resource::{Resource, ResourceListItem},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use serde::Serialize;
use crate::{cli_args, maps::id_to_tag, monitor_client};
pub mod alerter;
pub mod build;
pub mod builder;
pub mod deployment;
pub mod procedure;
pub mod repo;
pub mod server;
pub mod server_template;
type ToUpdate<T> = Vec<ToUpdateItem<T>>;
type ToCreate<T> = Vec<ResourceToml<T>>;
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>);
pub struct ToUpdateItem<T> {
pub id: String,
pub resource: ResourceToml<T>,
pub update_description: bool,
pub update_tags: bool,
}
pub trait ResourceSync {
type Config: Clone
+ Send
+ PartialDiff<Self::PartialConfig, Self::ConfigDiff>
+ 'static;
type Info: Default;
type PartialConfig: std::fmt::Debug
+ Clone
+ Send
+ From<Self::ConfigDiff>
+ Serialize
+ 'static;
type ConfigDiff: Diff + MaybeNone;
type ListItemInfo: 'static;
fn display() -> &'static str;
fn resource_target(id: String) -> ResourceTarget;
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>;
/// Creates the resource and returns created id.
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String>;
/// Updates the resource at id with the partial config.
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()>;
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>>;
/// Diffs the declared toml (partial) against the full existing config.
/// Removes all fields from toml (partial) that haven't changed.
async fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff>;
async fn get_updates(
resources: Vec<ResourceToml<Self::PartialConfig>>,
) -> anyhow::Result<UpdatesResult<Self::PartialConfig>> {
let map = Self::name_to_resource();
let mut to_create = ToCreate::<Self::PartialConfig>::new();
let mut to_update = ToUpdate::<Self::PartialConfig>::new();
let quiet = cli_args().quiet;
for mut resource in resources {
match map.get(&resource.name).map(|s| s.id.clone()) {
Some(id) => {
// Get the full original config for the resource.
let original = Self::get(id.clone()).await?;
let diff =
Self::get_diff(original.config, resource.config).await?;
let original_tags = original
.tags
.iter()
.filter_map(|id| {
id_to_tag().get(id).map(|t| t.name.clone())
})
.collect::<Vec<_>>();
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& resource.description == original.description
&& resource.tags == original_tags
{
continue;
}
if !quiet {
println!(
"\n{}: {}: '{}'\n-------------------",
"UPDATE".blue(),
Self::display(),
resource.name.bold(),
);
let mut lines = Vec::<String>::new();
if resource.description != original.description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
original.description.red(),
"to".dimmed(),
resource.description.green()
))
}
if resource.tags != original_tags {
let from = format!("{:?}", original_tags).red();
let to = format!("{:?}", resource.tags).green();
lines.push(format!(
"{}: 'tags'\n{}: {from}\n{}: {to}",
"field".dimmed(),
"from".dimmed(),
"to".dimmed(),
));
}
lines.extend(diff.iter_field_diffs().map(
|FieldDiff { field, from, to }| {
format!(
"{}: '{field}'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
from.red(),
"to".dimmed(),
to.green()
)
},
));
println!("{}", lines.join("\n-------------------\n"));
}
// Minimizes updates through diffing.
resource.config = diff.into();
let update = ToUpdateItem {
id,
update_description: resource.description
!= original.description,
update_tags: resource.tags != original_tags,
resource,
};
to_update.push(update);
}
None => {
if !quiet {
println!(
"\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
"CREATE".green(),
Self::display(),
resource.name.bold().green(),
"description".dimmed(),
resource.description,
"tags".dimmed(),
resource.tags,
"config".dimmed(),
serde_json::to_string_pretty(&resource.config)?
)
}
to_create.push(resource);
}
}
}
if quiet && !to_create.is_empty() {
println!(
"\n{}s {}: {:#?}",
Self::display(),
"TO CREATE".green(),
to_create
  .iter()
  .map(|item| item.name.as_str())
  .collect::<Vec<_>>()
);
}
if quiet && !to_update.is_empty() {
println!(
"\n{}s {}: {:#?}",
Self::display(),
"TO UPDATE".blue(),
to_update
.iter()
.map(|update| update.resource.name.as_str())
.collect::<Vec<_>>()
);
}
Ok((to_create, to_update))
}
async fn run_updates(
to_create: ToCreate<Self::PartialConfig>,
to_update: ToUpdate<Self::PartialConfig>,
) {
for resource in to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match Self::create(resource).await {
Ok(id) => id,
Err(e) => {
warn!(
"failed to create {} {name} | {e:#}",
Self::display(),
);
continue;
}
};
Self::update_tags(id.clone(), &name, tags).await;
Self::update_description(id, &name, description).await;
info!(
"{} {} '{}'",
"created".green().bold(),
Self::display(),
name.bold(),
);
}
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
} in to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if update_description {
Self::update_description(id.clone(), &name, description)
.await;
}
if update_tags {
Self::update_tags(id.clone(), &name, tags).await;
}
if let Err(e) = Self::update(id, resource).await {
warn!(
"failed to update config on {} {name} | {e:#}",
Self::display()
);
} else {
info!(
"{} {} '{}' configuration",
"updated".blue().bold(),
Self::display(),
name.bold(),
);
}
}
}
async fn update_tags(id: String, name: &str, tags: Vec<String>) {
// Update tags
if let Err(e) = monitor_client()
.write(UpdateTagsOnResource {
target: Self::resource_target(id),
tags,
})
.await
{
warn!(
"failed to update tags on {} {name} | {e:#}",
Self::display(),
);
} else {
info!(
"{} {} '{}' tags",
"updated".blue().bold(),
Self::display(),
name.bold(),
);
}
}
async fn update_description(
id: String,
name: &str,
description: String,
) {
if let Err(e) = monitor_client()
.write(UpdateDescription {
target: Self::resource_target(id.clone()),
description,
})
.await
{
warn!("failed to update resource {id} description | {e:#}");
} else {
info!(
"{} {} '{}' description",
"updated".blue().bold(),
Self::display(),
name.bold(),
);
}
}
}
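// A minimal sketch of driving one full sync pass through the trait above;
// `sync_one_type_example` and its caller-supplied `resources` vec are
// illustrative assumptions, not the crate's actual entrypoint.
async fn sync_one_type_example<R: ResourceSync>(
  resources: Vec<ResourceToml<R::PartialConfig>>,
) -> anyhow::Result<()> {
  // Phase 1: diff the declared toml against the existing resources,
  // printing the planned creates / updates along the way.
  let (to_create, to_update) = R::get_updates(resources).await?;
  // Phase 2: apply the creates, then the minimized updates.
  R::run_updates(to_create, to_update).await;
  Ok(())
}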
mod alerter;
mod build;
mod builder;
mod deployment;
mod procedure;
mod repo;
mod server;
mod server_template;
mod sync;

View File

@@ -1,16 +1,17 @@
use std::collections::HashMap;
use colored::Colorize;
use monitor_client::{
api::{
execute::Execution,
read::GetProcedure,
write::{CreateProcedure, UpdateProcedure},
write::{CreateProcedure, DeleteProcedure, UpdateProcedure},
},
entities::{
procedure::{
PartialProcedureConfig, Procedure, ProcedureConfig, ProcedureConfigDiff, ProcedureListItemInfo
PartialProcedureConfig, Procedure, ProcedureConfig,
ProcedureConfigDiff,
},
resource::{Resource, ResourceListItem},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
@@ -20,20 +21,20 @@ use partial_derive2::{MaybeNone, PartialDiff};
use crate::{
maps::{
id_to_build, id_to_deployment, id_to_procedure, id_to_repo,
id_to_server, name_to_procedure,
id_to_resource_sync, id_to_server, name_to_procedure,
},
state::monitor_client,
sync::resource::{
run_update_description, run_update_tags, ResourceSync, ToCreate,
ToDelete, ToUpdate, ToUpdateItem,
},
monitor_client,
sync::resources::ToUpdateItem,
};
use super::{ResourceSync, ToCreate, ToUpdate};
impl ResourceSync for Procedure {
type Config = ProcedureConfig;
type Info = ();
type PartialConfig = PartialProcedureConfig;
type ConfigDiff = ProcedureConfigDiff;
type ListItemInfo = ProcedureListItemInfo;
fn display() -> &'static str {
"procedure"
@@ -44,7 +45,7 @@ impl ResourceSync for Procedure {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_procedure()
}
@@ -77,7 +78,23 @@ impl ResourceSync for Procedure {
async fn run_updates(
mut to_create: ToCreate<Self::PartialConfig>,
mut to_update: ToUpdate<Self::PartialConfig>,
to_delete: ToDelete,
) {
for name in to_delete {
if let Err(e) = crate::state::monitor_client()
.write(DeleteProcedure { id: name.clone() })
.await
{
warn!("failed to delete procedure {name} | {e:#}",);
} else {
info!(
"{} procedure '{}'",
"deleted".red().bold(),
name.bold(),
);
}
}
if to_update.is_empty() && to_create.is_empty() {
return;
}
@@ -96,11 +113,15 @@ impl ResourceSync for Procedure {
let tags = resource.tags.clone();
let description = resource.description.clone();
if *update_description {
Self::update_description(id.clone(), &name, description)
.await;
run_update_description::<Procedure>(
id.clone(),
&name,
description,
)
.await;
}
if *update_tags {
Self::update_tags(id.clone(), &name, tags).await;
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
}
if !resource.config.is_none() {
if let Err(e) =
@@ -112,6 +133,7 @@ impl ResourceSync for Procedure {
Self::display()
);
}
continue;
}
}
@@ -139,112 +161,115 @@ impl ResourceSync for Procedure {
continue;
}
};
Self::update_tags(id.clone(), &name, tags).await;
Self::update_description(id, &name, description).await;
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
run_update_description::<Procedure>(id, &name, description)
.await;
info!("{} {name} created", Self::display());
to_pull.push(name);
}
to_create.retain(|resource| !to_pull.contains(&resource.name));
if to_update.is_empty() && to_create.is_empty() {
info!(
"============ {}s synced ✅ ============",
Self::display()
);
// info!("all procedures synced");
return;
}
}
warn!("procedure sync loop exited after max iterations");
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client().read(GetProcedure { procedure: id }).await
}
async fn get_diff(
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
for execution in &mut original.executions {
match &mut execution.execution {
Execution::None(_) => {}
Execution::RunProcedure(config) => {
config.procedure = id_to_procedure()
.get(&config.procedure)
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::RunBuild(config) => {
config.build = id_to_build()
.get(&config.build)
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::Deploy(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RemoveContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::CloneRepo(config) => {
config.repo = id_to_repo()
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PullRepo(config) => {
config.repo = id_to_repo()
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopAllContainers(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneDockerNetworks(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneDockerImages(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneDockerContainers(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
for stage in &mut original.stages {
for execution in &mut stage.executions {
match &mut execution.execution {
Execution::None(_) | Execution::Sleep(_) => {}
Execution::RunProcedure(config) => {
config.procedure = id_to_procedure()
.get(&config.procedure)
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::RunBuild(config) => {
config.build = id_to_build()
.get(&config.build)
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::Deploy(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RemoveContainer(config) => {
config.deployment = id_to_deployment()
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::CloneRepo(config) => {
config.repo = id_to_repo()
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PullRepo(config) => {
config.repo = id_to_repo()
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopAllContainers(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneNetworks(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneImages(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneContainers(config) => {
config.server = id_to_server()
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RunSync(config) => {
config.sync = id_to_resource_sync()
.get(&config.sync)
.map(|s| s.name.clone())
.unwrap_or_default();
}
}
}
}
Ok(original.partial_diff(update))
}
async fn delete(_: String) -> anyhow::Result<()> {
unreachable!()
}
}
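// The execution match in get_diff above repeats the same id-to-name
// substitution for every variant. A helper along these lines (hypothetical,
// not part of this diff) could collapse each arm to a single call; it only
// assumes the id_to_* maps hand back items exposing a cloneable name, as
// they do here.
fn id_to_name<T>(
  map: &std::collections::HashMap<String, T>,
  id: &str,
  name: impl Fn(&T) -> String,
) -> String {
  map.get(id).map(name).unwrap_or_default()
}
// e.g. config.repo = id_to_name(id_to_repo(), &config.repo, |r| r.name.clone());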

View File

@@ -1,16 +1,12 @@
use std::collections::HashMap;
use monitor_client::{
api::{
read::GetRepo,
write::{CreateRepo, UpdateRepo},
},
api::write::{CreateRepo, DeleteRepo, UpdateRepo},
entities::{
repo::{
PartialRepoConfig, Repo, RepoConfig, RepoConfigDiff, RepoInfo,
RepoListItemInfo,
},
resource::{Resource, ResourceListItem},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
@@ -19,17 +15,15 @@ use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_server, name_to_repo},
monitor_client,
state::monitor_client,
sync::resource::ResourceSync,
};
use super::ResourceSync;
impl ResourceSync for Repo {
type Config = RepoConfig;
type Info = RepoInfo;
type PartialConfig = PartialRepoConfig;
type ConfigDiff = RepoConfigDiff;
type ListItemInfo = RepoListItemInfo;
fn display() -> &'static str {
"repo"
@@ -40,7 +34,7 @@ impl ResourceSync for Repo {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_repo()
}
@@ -70,13 +64,7 @@ impl ResourceSync for Repo {
Ok(())
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client().read(GetRepo { repo: id }).await
}
async fn get_diff(
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
@@ -88,4 +76,9 @@ impl ResourceSync for Repo {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteRepo { id }).await?;
Ok(())
}
}

View File

@@ -1,15 +1,11 @@
use std::collections::HashMap;
use monitor_client::{
api::{
read::GetServer,
write::{CreateServer, UpdateServer},
},
api::write::{CreateServer, DeleteServer, UpdateServer},
entities::{
resource::{Resource, ResourceListItem},
resource::Resource,
server::{
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
ServerListItemInfo,
},
toml::ResourceToml,
update::ResourceTarget,
@@ -17,16 +13,16 @@ use monitor_client::{
};
use partial_derive2::PartialDiff;
use crate::{maps::name_to_server, monitor_client};
use super::ResourceSync;
use crate::{
maps::name_to_server, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Server {
type Config = ServerConfig;
type Info = ();
type PartialConfig = PartialServerConfig;
type ConfigDiff = ServerConfigDiff;
type ListItemInfo = ServerListItemInfo;
fn display() -> &'static str {
"server"
@@ -37,7 +33,7 @@ impl ResourceSync for Server {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_server()
}
@@ -67,16 +63,15 @@ impl ResourceSync for Server {
Ok(())
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client().read(GetServer { server: id }).await
}
async fn get_diff(
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteServer { id }).await?;
Ok(())
}
}

View File

@@ -1,14 +1,14 @@
use std::collections::HashMap;
use monitor_client::{
api::{
read::GetServerTemplate,
write::{CreateServerTemplate, UpdateServerTemplate},
api::write::{
CreateServerTemplate, DeleteServerTemplate, UpdateServerTemplate,
},
entities::{
resource::{Resource, ResourceListItem},
resource::Resource,
server_template::{
PartialServerTemplateConfig, ServerTemplate, ServerTemplateConfig, ServerTemplateConfigDiff, ServerTemplateListItemInfo
PartialServerTemplateConfig, ServerTemplate,
ServerTemplateConfig, ServerTemplateConfigDiff,
},
toml::ResourceToml,
update::ResourceTarget,
@@ -16,16 +16,16 @@ use monitor_client::{
};
use partial_derive2::PartialDiff;
use crate::{maps::name_to_server_template, monitor_client};
use super::ResourceSync;
use crate::{
maps::name_to_server_template, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for ServerTemplate {
type Config = ServerTemplateConfig;
type Info = ();
type PartialConfig = PartialServerTemplateConfig;
type ConfigDiff = ServerTemplateConfigDiff;
type ListItemInfo = ServerTemplateListItemInfo;
fn display() -> &'static str {
"server template"
@@ -36,7 +36,7 @@ impl ResourceSync for ServerTemplate {
}
fn name_to_resource(
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_server_template()
}
@@ -66,20 +66,15 @@ impl ResourceSync for ServerTemplate {
Ok(())
}
async fn get(
id: String,
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
monitor_client()
.read(GetServerTemplate {
server_template: id,
})
.await
}
async fn get_diff(
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteServerTemplate { id }).await?;
Ok(())
}
}

View File

@@ -0,0 +1,81 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{
CreateResourceSync, DeleteResourceSync, UpdateResourceSync,
},
entities::{
self,
resource::Resource,
sync::{
PartialResourceSyncConfig, ResourceSyncConfig,
ResourceSyncConfigDiff, ResourceSyncInfo,
},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::name_to_resource_sync, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for entities::sync::ResourceSync {
type Config = ResourceSyncConfig;
type Info = ResourceSyncInfo;
type PartialConfig = PartialResourceSyncConfig;
type ConfigDiff = ResourceSyncConfigDiff;
fn display() -> &'static str {
"resource sync"
}
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::ResourceSync(id)
}
fn name_to_resource(
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
{
name_to_resource_sync()
}
async fn create(
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<String> {
monitor_client()
.write(CreateResourceSync {
name: resource.name,
config: resource.config,
})
.await
.map(|res| res.id)
}
async fn update(
id: String,
resource: ResourceToml<Self::PartialConfig>,
) -> anyhow::Result<()> {
monitor_client()
.write(UpdateResourceSync {
id,
config: resource.config,
})
.await?;
Ok(())
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
async fn delete(id: String) -> anyhow::Result<()> {
monitor_client().write(DeleteResourceSync { id }).await?;
Ok(())
}
}

View File

@@ -1,11 +1,13 @@
use std::cmp::Ordering;
use anyhow::Context;
use colored::Colorize;
use monitor_client::{
api::{
read::ListUserTargetPermissions,
write::{
CreateUserGroup, SetUsersInUserGroup, UpdatePermissionOnTarget,
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
UpdatePermissionOnTarget,
},
},
entities::{
@@ -15,143 +17,247 @@ use monitor_client::{
},
};
use crate::{
maps::{
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
id_to_procedure, id_to_repo, id_to_server, id_to_server_template,
id_to_user, name_to_user_group,
},
monitor_client,
use crate::maps::{
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
id_to_procedure, id_to_repo, id_to_resource_sync, id_to_server,
id_to_server_template, id_to_user, name_to_user_group,
};
pub struct UpdateItem {
user_group: UserGroupToml,
update_users: bool,
update_permissions: bool,
}
pub struct DeleteItem {
id: String,
name: String,
}
pub async fn get_updates(
user_groups: Vec<UserGroupToml>,
) -> anyhow::Result<(Vec<UserGroupToml>, Vec<UserGroupToml>)> {
delete: bool,
) -> anyhow::Result<(
Vec<UserGroupToml>,
Vec<UpdateItem>,
Vec<DeleteItem>,
)> {
let map = name_to_user_group();
let mut to_create = Vec::<UserGroupToml>::new();
let mut to_update = Vec::<UserGroupToml>::new();
let mut to_update = Vec::<UpdateItem>::new();
let mut to_delete = Vec::<DeleteItem>::new();
for mut user_group in user_groups {
match map.get(&user_group.name).cloned() {
Some(original) => {
// replace the user ids with usernames
let mut users = original
.users
.into_iter()
.filter_map(|user_id| {
id_to_user().get(&user_id).map(|u| u.username.clone())
})
.collect::<Vec<_>>();
let mut permissions = monitor_client()
.read(ListUserTargetPermissions {
user_target: UserTarget::UserGroup(original.id),
})
.await
.context("failed to query for UserGroup permissions")?
.into_iter()
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
ResourceTarget::System(_) => {}
ResourceTarget::Build(id) => {
*id = id_to_build()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = id_to_builder()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id = id_to_deployment()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = id_to_server()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = id_to_repo()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = id_to_alerter()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id = id_to_procedure()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = id_to_server_template()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
level: p.level,
}
})
.collect::<Vec<_>>();
users.sort();
user_group.users.sort();
user_group.permissions.sort_by(sort_permissions);
permissions.sort_by(sort_permissions);
// only push update after failed diff
if user_group.users != users
|| user_group.permissions != permissions
{
// no update from users
to_update.push(user_group);
}
if delete {
for user_group in map.values() {
if !user_groups.iter().any(|ug| ug.name == user_group.name) {
to_delete.push(DeleteItem {
id: user_group.id.clone(),
name: user_group.name.clone(),
});
}
None => to_create.push(user_group),
}
}
if !to_create.is_empty() {
let id_to_user = id_to_user();
for mut user_group in user_groups {
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
println!(
"\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
"CREATE".green(),
user_group.name.bold().green(),
"users".dimmed(),
user_group.users,
"permissions".dimmed(),
user_group.permissions,
);
to_create.push(user_group);
continue;
}
};
let mut original_users = original
.users
.into_iter()
.filter_map(|user_id| {
id_to_user.get(&user_id).map(|u| u.username.clone())
})
.collect::<Vec<_>>();
let mut original_permissions = crate::state::monitor_client()
.read(ListUserTargetPermissions {
user_target: UserTarget::UserGroup(original.id),
})
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
ResourceTarget::System(_) => {}
ResourceTarget::Build(id) => {
*id = id_to_build()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = id_to_builder()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id = id_to_deployment()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = id_to_server()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = id_to_repo()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = id_to_alerter()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id = id_to_procedure()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = id_to_server_template()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = id_to_resource_sync()
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
level: p.level,
}
})
.collect::<Vec<_>>();
original_users.sort();
user_group.users.sort();
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_permissions =
user_group.permissions != original_permissions;
// only push an update if the diff detected changes
if update_users || update_permissions {
println!(
"\n{}: user group: '{}'\n-------------------",
"UPDATE".blue(),
user_group.name.bold(),
);
let mut lines = Vec::<String>::new();
if update_users {
let adding = user_group
.users
.iter()
.filter(|user| !original_users.contains(user))
.map(|user| user.as_str())
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None").into()
} else {
adding.join(", ").green()
};
let removing = original_users
.iter()
.filter(|user| !user_group.users.contains(user))
.map(|user| user.as_str())
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None").into()
} else {
removing.join(", ").red()
};
lines.push(format!(
"{}: 'users'\n{}: {removing}\n{}: {adding}",
"field".dimmed(),
"removing".dimmed(),
"adding".dimmed(),
))
}
if update_permissions {
let adding = user_group
.permissions
.iter()
.filter(|permission| {
!original_permissions.contains(permission)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None").into()
} else {
adding.join(", ").green()
};
let removing = original_permissions
.iter()
.filter(|permission| {
!user_group.permissions.contains(permission)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None").into()
} else {
removing.join(", ").red()
};
lines.push(format!(
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
"field".dimmed(),
"removing".dimmed(),
"adding".dimmed()
))
}
println!("{}", lines.join("\n-------------------\n"));
to_update.push(UpdateItem {
user_group,
update_users,
update_permissions,
});
}
}
for d in &to_delete {
println!(
"\nUSER GROUPS TO CREATE: {}",
to_create
.iter()
.map(|item| item.name.as_str())
.collect::<Vec<_>>()
.join(", ")
"\n{}: user group: '{}'\n-------------------",
"DELETE".red(),
d.name.bold(),
);
}
if !to_update.is_empty() {
println!(
"\nUSER GROUPS TO UPDATE: {}",
to_update
.iter()
.map(|item| item.name.as_str())
.collect::<Vec<_>>()
.join(", ")
);
}
Ok((to_create, to_update))
Ok((to_create, to_update, to_delete))
}
/// order permissions in deterministic way
@@ -172,14 +278,13 @@ fn sort_permissions(
pub async fn run_updates(
to_create: Vec<UserGroupToml>,
to_update: Vec<UserGroupToml>,
to_update: Vec<UpdateItem>,
to_delete: Vec<DeleteItem>,
) {
let log_after = !to_update.is_empty() || !to_create.is_empty();
// Create the non-existent user groups
for user_group in to_create {
// Create the user group
if let Err(e) = monitor_client()
if let Err(e) = crate::state::monitor_client()
.write(CreateUserGroup {
name: user_group.name.clone(),
})
@@ -190,43 +295,78 @@ pub async fn run_updates(
user_group.name
);
continue;
} else {
info!(
"{} user group '{}'",
"created".green().bold(),
user_group.name.bold(),
);
};
set_users(user_group.name.clone(), user_group.users).await;
update_permissions(user_group.name, user_group.permissions).await;
run_update_permissions(user_group.name, user_group.permissions)
.await;
}
// Update the existing user groups
for user_group in to_update {
set_users(user_group.name.clone(), user_group.users).await;
update_permissions(user_group.name, user_group.permissions).await;
for UpdateItem {
user_group,
update_users,
update_permissions,
} in to_update
{
if update_users {
set_users(user_group.name.clone(), user_group.users).await;
}
if update_permissions {
run_update_permissions(user_group.name, user_group.permissions)
.await;
}
}
if log_after {
info!("============ user groups synced ✅ ============");
}
}
async fn set_users(user_group: String, users: Vec<String>) {
if !users.is_empty() {
if let Err(e) = monitor_client()
.write(SetUsersInUserGroup {
user_group: user_group.clone(),
users,
})
for user_group in to_delete {
if let Err(e) = crate::state::monitor_client()
.write(DeleteUserGroup { id: user_group.id })
.await
{
warn!("failed to set users in group {user_group} | {e:#}");
warn!(
"failed to delete user group {} | {e:#}",
user_group.name
);
} else {
info!(
"{} user group '{}'",
"deleted".red().bold(),
user_group.name.bold(),
);
}
}
}
async fn update_permissions(
async fn set_users(user_group: String, users: Vec<String>) {
if let Err(e) = crate::state::monitor_client()
.write(SetUsersInUserGroup {
user_group: user_group.clone(),
users,
})
.await
{
warn!("failed to set users in group {user_group} | {e:#}");
} else {
info!(
"{} user group '{}' users",
"updated".blue().bold(),
user_group.bold(),
);
}
}
async fn run_update_permissions(
user_group: String,
permissions: Vec<PermissionToml>,
) {
for PermissionToml { target, level } in permissions {
if let Err(e) = monitor_client()
if let Err(e) = crate::state::monitor_client()
.write(UpdatePermissionOnTarget {
user_target: UserTarget::UserGroup(user_group.clone()),
resource_target: target.clone(),
@@ -237,6 +377,12 @@ async fn update_permissions(
warn!(
  "failed to set permission in group {user_group} | target: {target:?} | {e:#}",
);
} else {
info!(
"{} user group '{}' permissions",
"updated".blue().bold(),
user_group.bold(),
);
}
}
}
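// A minimal sketch of how the pieces above compose into one user group sync
// pass; the source of `user_groups` (parsed toml) and the `delete` flag are
// assumptions about the caller, not something this file defines.
async fn sync_user_groups_example(
  user_groups: Vec<UserGroupToml>,
  delete: bool,
) -> anyhow::Result<()> {
  // Diff the declared user groups against existing ones, logging the plan.
  let (to_create, to_update, to_delete) =
    get_updates(user_groups, delete).await?;
  // Apply creates, targeted updates, then deletes.
  run_updates(to_create, to_update, to_delete).await;
  Ok(())
}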

View File

@@ -0,0 +1,206 @@
use colored::Colorize;
use monitor_client::{
api::write::{
CreateVariable, DeleteVariable, UpdateVariableDescription,
UpdateVariableValue,
},
entities::variable::Variable,
};
use crate::{maps::name_to_variable, state::monitor_client};
pub struct ToUpdateItem {
pub variable: Variable,
pub update_value: bool,
pub update_description: bool,
}
pub fn get_updates(
variables: Vec<Variable>,
delete: bool,
) -> anyhow::Result<(Vec<Variable>, Vec<ToUpdateItem>, Vec<String>)> {
let map = name_to_variable();
let mut to_create = Vec::<Variable>::new();
let mut to_update = Vec::<ToUpdateItem>::new();
let mut to_delete = Vec::<String>::new();
if delete {
for variable in map.values() {
if !variables.iter().any(|v| v.name == variable.name) {
to_delete.push(variable.name.clone());
}
}
}
for variable in variables {
match map.get(&variable.name) {
Some(original) => {
let item = ToUpdateItem {
update_value: original.value != variable.value,
update_description: original.description
!= variable.description,
variable,
};
if !item.update_value && !item.update_description {
continue;
}
println!(
"\n{}: variable: '{}'\n-------------------",
"UPDATE".blue(),
item.variable.name.bold(),
);
let mut lines = Vec::<String>::new();
if item.update_value {
lines.push(format!(
"{}: 'value'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
original.value.red(),
"to".dimmed(),
item.variable.value.green()
))
}
if item.update_description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
"field".dimmed(),
"from".dimmed(),
original.description.red(),
"to".dimmed(),
item.variable.description.green()
))
}
println!("{}", lines.join("\n-------------------\n"));
to_update.push(item);
}
None => {
if variable.description.is_empty() {
println!(
"\n{}: variable: {}\n{}: {}",
"CREATE".green(),
variable.name.bold().green(),
"value".dimmed(),
variable.value,
);
} else {
println!(
"\n{}: variable: {}\n{}: {}\n{}: {}",
"CREATE".green(),
variable.name.bold().green(),
"description".dimmed(),
variable.description,
"value".dimmed(),
variable.value,
);
}
to_create.push(variable)
}
}
}
for name in &to_delete {
println!(
"\n{}: variable: '{}'\n-------------------",
"DELETE".red(),
name.bold(),
);
}
Ok((to_create, to_update, to_delete))
}
pub async fn run_updates(
to_create: Vec<Variable>,
to_update: Vec<ToUpdateItem>,
to_delete: Vec<String>,
) {
for variable in to_create {
if let Err(e) = monitor_client()
.write(CreateVariable {
name: variable.name.clone(),
value: variable.value,
description: variable.description,
})
.await
{
warn!("failed to create variable {} | {e:#}", variable.name);
} else {
info!(
"{} variable '{}'",
"created".green().bold(),
variable.name.bold(),
);
};
}
for ToUpdateItem {
variable,
update_value,
update_description,
} in to_update
{
if update_value {
if let Err(e) = monitor_client()
.write(UpdateVariableValue {
name: variable.name.clone(),
value: variable.value,
})
.await
{
warn!(
"failed to update variable value for {} | {e:#}",
variable.name
);
} else {
info!(
"{} variable '{}' value",
"updated".blue().bold(),
variable.name.bold(),
);
};
}
if update_description {
if let Err(e) = monitor_client()
.write(UpdateVariableDescription {
name: variable.name.clone(),
description: variable.description,
})
.await
{
warn!(
"failed to update variable description for {} | {e:#}",
variable.name
);
} else {
info!(
"{} variable '{}' description",
"updated".blue().bold(),
variable.name.bold(),
);
};
}
}
for variable in to_delete {
if let Err(e) = crate::state::monitor_client()
.write(DeleteVariable {
name: variable.clone(),
})
.await
{
warn!("failed to delete variable {variable} | {e:#}",);
} else {
info!(
"{} variable '{}'",
"deleted".red().bold(),
variable.bold(),
);
}
}
}
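// A minimal sketch of the variable sync flow defined above; where the
// `variables` vec comes from (e.g. a parsed resource toml) is an assumption
// of this example.
async fn sync_variables_example(
  variables: Vec<Variable>,
  delete: bool,
) -> anyhow::Result<()> {
  // get_updates is synchronous: it only diffs against the in-memory name map.
  let (to_create, to_update, to_delete) = get_updates(variables, delete)?;
  run_updates(to_create, to_update, to_delete).await;
  Ok(())
}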

View File

@@ -17,22 +17,28 @@ path = "src/main.rs"
# local
monitor_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
formatting.workspace = true
logger.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
merge_config_files.workspace = true
termination_signal.workspace = true
async_timing_util.workspace = true
partial_derive2.workspace = true
derive_variants.workspace = true
mongo_indexed.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
run_command.workspace = true
parse_csl.workspace = true
mungos.workspace = true
slack.workspace = true
svi.workspace = true
# external
ordered_hash_map.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-sdk-ecr.workspace = true
aws-config.workspace = true
tokio-util.workspace = true
axum-extra.workspace = true
@@ -45,9 +51,11 @@ futures.workspace = true
anyhow.workspace = true
dotenv.workspace = true
bcrypt.workspace = true
base64.workspace = true
tokio.workspace = true
tower.workspace = true
serde.workspace = true
strum.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true

View File

@@ -1,5 +1,5 @@
# Build Core
FROM rust:1.78.0-bullseye as core-builder
FROM rust:1.79.0-bookworm as core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_core --release
@@ -13,11 +13,25 @@ RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @monitor/client && yarn && yarn build
# Final Image
# FROM gcr.io/distroless/cc
FROM debian:bullseye-slim
RUN apt update && apt install -y ca-certificates
FROM debian:bookworm-slim
# Install Deps
RUN apt update && apt install -y git curl unzip ca-certificates && \
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
unzip awscliv2.zip && \
./aws/install
# Copy
COPY ./config_example/core.config.example.toml /config/config.toml
COPY --from=core-builder /builder/target/release/core /
COPY --from=frontend-builder /builder/frontend/dist /frontend
# Hint at the port
EXPOSE 9000
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
LABEL org.opencontainers.image.description="A tool to build and deploy software across many servers"
LABEL org.opencontainers.image.licenses=GPL-3.0
CMD ["./core"]

View File

@@ -1,63 +1,76 @@
use std::{collections::HashSet, time::Duration};
use anyhow::{anyhow, Context};
use formatting::{format_serror, muted};
use futures::future::join_all;
use monitor_client::{
api::execute::{
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
},
entities::{
alert::{Alert, AlertData},
all_logs_success,
build::Build,
build::{Build, CloudRegistryConfig, ImageRegistry},
builder::{AwsBuilderConfig, Builder, BuilderConfig},
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
deployment::DeploymentState,
monitor_timestamp,
permission::PermissionLevel,
server::Server,
server_template::AwsServerTemplateConfig,
server::{stats::SeverityLevel, Server},
server_template::aws::AwsServerTemplateConfig,
to_monitor_name,
update::{Log, Update},
user::{auto_redeploy_user, User},
Operation,
},
};
use mungos::{
by_id::update_one_by_id,
find::find_collect,
mongodb::bson::{doc, to_bson, to_document},
mongodb::{
bson::{doc, to_bson, to_document},
options::FindOneOptions,
},
};
use periphery_client::{
api::{self, GetVersionResponse},
PeripheryClient,
};
use resolver_api::Resolve;
use serror::{serialize_error, serialize_error_pretty};
use tokio_util::sync::CancellationToken;
use crate::{
cloud::{
aws::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
ecr,
},
BuildCleanupData,
},
config::core_config,
helpers::{
alert::send_alerts,
channel::build_cancel_channel,
periphery_client,
query::{get_deployment_state, get_global_variables},
update::{add_update, make_update, update_update},
update::update_update,
},
resource::{self, refresh_build_state_cache},
state::{action_states, db_client, State},
};
impl Resolve<RunBuild, User> for State {
#[instrument(name = "RunBuild", skip(self, user))]
use crate::helpers::update::init_execution_update;
use super::ExecuteRequest;
impl Resolve<RunBuild, (User, Update)> for State {
#[instrument(name = "RunBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunBuild { build }: RunBuild,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let mut build = resource::get_check_permissions::<Build>(
&build,
@@ -66,6 +79,9 @@ impl Resolve<RunBuild, User> for State {
)
.await?;
let (registry_token, aws_ecr) =
validate_account_extract_registry_token_aws_ecr(&build).await?;
// get the action state for the build (or insert default).
let action_state =
action_states().build.get_or_insert_default(&build.id).await;
@@ -76,10 +92,8 @@ impl Resolve<RunBuild, User> for State {
action_state.update(|state| state.building = true)?;
build.config.version.increment();
let mut update = make_update(&build, Operation::RunBuild, &user);
update.in_progress();
update.version = build.config.version.clone();
update.version = build.config.version;
update_update(update.clone()).await?;
let cancel = CancellationToken::new();
let cancel_clone = cancel.clone();
@@ -95,10 +109,9 @@ impl Resolve<RunBuild, User> for State {
id = cancel_recv.recv() => id?
};
if incoming_build_id == build_id {
info!("build cancel acknowledged");
update.push_simple_log(
"cancel acknowledged",
"the build cancellation has been queud, it may still take some time",
"the build cancellation has been queued, it may still take some time",
);
update.finalize();
let id = update.id.clone();
@@ -113,13 +126,11 @@ impl Resolve<RunBuild, User> for State {
anyhow::Ok(())
};
tokio::select! {
_ = cancel_clone.cancelled() => {}
_ = poll => {}
_ = cancel_clone.cancelled() => {}
_ = poll => {}
}
});
update.id = add_update(update.clone()).await?;
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) =
@@ -132,9 +143,10 @@ impl Resolve<RunBuild, User> for State {
warn!("failed to get builder | {e:#}");
update.logs.push(Log::error(
"get builder",
serialize_error_pretty(&e),
format_serror(&e.context("failed to get builder").into()),
));
return handle_early_return(update).await;
return handle_early_return(update, build.id, build.name)
.await;
}
};
@@ -160,7 +172,7 @@ impl Resolve<RunBuild, User> for State {
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update).await
return handle_early_return(update, build.id, build.name).await
},
};
@@ -171,18 +183,16 @@ impl Resolve<RunBuild, User> for State {
}
Err(e) => {
warn!("failed build at clone repo | {e:#}");
update.push_error_log("clone repo", serialize_error(&e));
update.push_error_log(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
}
}
update_update(update.clone()).await?;
if all_logs_success(&update.logs) {
let docker_token = core_config
.docker_accounts
.get(&build.config.docker_account)
.cloned();
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
@@ -234,7 +244,8 @@ impl Resolve<RunBuild, User> for State {
res = periphery
.request(api::build::Build {
build: build.clone(),
docker_token,
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
}) => res.context("failed at call to periphery to build"),
_ = cancel.cancelled() => {
@@ -242,7 +253,7 @@ impl Resolve<RunBuild, User> for State {
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update).await
return handle_early_return(update, build.id, build.name).await
},
};
@@ -253,7 +264,10 @@ impl Resolve<RunBuild, User> for State {
}
Err(e) => {
warn!("error in build | {e:#}");
update.push_error_log("build", serialize_error(&e))
update.push_error_log(
"build",
format_serror(&e.context("failed to build").into()),
)
}
};
}
@@ -279,6 +293,7 @@ impl Resolve<RunBuild, User> for State {
.await;
}
// stop the cancel listening task from going forever
cancel.cancel();
cleanup_builder_instance(periphery, cleanup_data, &mut update)
@@ -307,14 +322,38 @@ impl Resolve<RunBuild, User> for State {
handle_post_build_redeploy(&build.id).await;
info!("post build redeploy handled");
});
} else {
let target = update.target.clone();
let version = update.version;
let err = update.logs.iter().find(|l| !l.success).cloned();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build.id,
name: build.name,
err,
version,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
}
#[instrument(skip(update))]
async fn handle_early_return(
mut update: Update,
build_id: String,
build_name: String,
) -> anyhow::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
@@ -332,15 +371,80 @@ async fn handle_early_return(
refresh_build_state_cache().await;
}
update_update(update.clone()).await?;
if !update.success {
let target = update.target.clone();
let version = update.version;
let err = update.logs.iter().find(|l| !l.success).cloned();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
id: build_id,
name: build_name,
version,
err,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
impl Resolve<CancelBuild, User> for State {
#[instrument(name = "CancelBuild", skip(self, user))]
#[instrument(skip_all)]
pub async fn validate_cancel_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
if let ExecuteRequest::CancelBuild(req) = request {
let build = resource::get::<Build>(&req.build).await?;
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates.find_one(
doc! {
"operation": "RunBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
),
db.updates.find_one(
doc! {
"operation": "CancelBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
)
)?;
match (latest_build, latest_cancel) {
(Some(build), Some(cancel)) => {
if cancel.start_ts > build.start_ts {
return Err(anyhow!("Build has already been cancelled"));
}
}
(None, _) => return Err(anyhow!("No build in progress")),
_ => {}
};
}
Ok(())
}
impl Resolve<CancelBuild, (User, Update)> for State {
#[instrument(name = "CancelBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
CancelBuild { build }: CancelBuild,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<CancelBuildResponse> {
let build = resource::get_check_permissions::<Build>(
&build,
@@ -349,37 +453,24 @@ impl Resolve<CancelBuild, User> for State {
)
.await?;
// check if there's already an open cancel build update
if db_client()
// make sure the build is building
if !action_states()
.build
.get(&build.id)
.await
.updates
.find_one(
doc! {
"operation": "CancelBuild",
"status": "InProgress",
"target.id": &build.id,
},
None,
)
.await
.context("failed to query updates")?
.is_some()
.and_then(|s| s.get().ok().map(|s| s.building))
.unwrap_or_default()
{
return Err(anyhow!("Build cancel is already in progress"));
return Err(anyhow!("Build is not building."));
}
let mut update =
make_update(&build, Operation::CancelBuild, &user);
update.push_simple_log(
"cancel triggered",
"the build cancel has been triggered",
);
update.in_progress();
update_update(update.clone()).await?;
update.id =
add_update(make_update(&build, Operation::CancelBuild, &user))
.await?;
let update_id = update.id.clone();
build_cancel_channel()
.sender
@@ -387,6 +478,22 @@ impl Resolve<CancelBuild, User> for State {
.await
.send((build.id, update))?;
// Make sure cancel is set to complete after some time in case
// no receiver is there to do it. Prevents the update getting stuck in InProgress.
tokio::spawn(async move {
tokio::time::sleep(Duration::from_secs(60)).await;
if let Err(e) = update_one_by_id(
&db_client().await.updates,
&update_id,
doc! { "$set": { "status": "Complete" } },
None,
)
.await
{
warn!("failed to set BuildCancel Update status Complete after timeout | {e:#}")
}
});
Ok(CancelBuildResponse {})
}
}
@@ -394,7 +501,7 @@ impl Resolve<CancelBuild, User> for State {
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
#[instrument]
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
async fn get_build_builder(
build: &Build,
update: &mut Update,
@@ -424,7 +531,7 @@ async fn get_build_builder(
}
}
#[instrument]
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
async fn get_aws_builder(
build: &Build,
config: AwsBuilderConfig,
@@ -494,6 +601,8 @@ async fn get_aws_builder(
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
.await;
}
// Spawn terminate task in the failure case (loop completed without an early return)
tokio::spawn(async move {
let _ =
terminate_ec2_instance_with_retry(config.region, &instance_id)
@@ -501,10 +610,14 @@ async fn get_aws_builder(
});
// Unwrap is safe, only way to get here is after check Ok / early return, so it must be err
Err(res.err().unwrap())
Err(
res.err().unwrap().context(
"failed to start usable builder. terminating instance.",
),
)
}
#[instrument(skip(periphery))]
#[instrument(skip(periphery, update))]
async fn cleanup_builder_instance(
periphery: PeripheryClient,
cleanup_data: BuildCleanupData,
@@ -556,35 +669,38 @@ async fn handle_post_build_redeploy(build_id: &str) {
let state =
get_deployment_state(&deployment).await.unwrap_or_default();
if state == DeploymentState::Running {
let res = State
.resolve(
Deploy {
deployment: deployment.id.clone(),
stop_signal: None,
stop_time: None,
},
auto_redeploy_user().to_owned(),
)
.await;
let req = super::ExecuteRequest::Deploy(Deploy {
deployment: deployment.id.clone(),
stop_signal: None,
stop_time: None,
});
let user = auto_redeploy_user().to_owned();
let res = async {
let update = init_execution_update(&req, &user).await?;
State
.resolve(
Deploy {
deployment: deployment.id.clone(),
stop_signal: None,
stop_time: None,
},
(user, update),
)
.await
}
.await;
Some((deployment.id.clone(), res))
} else {
None
}
});
let redeploy_results = join_all(futures).await;
let mut redeploys = Vec::<String>::new();
let mut redeploy_failures = Vec::<String>::new();
for res in redeploy_results {
if res.is_none() {
for res in join_all(futures).await {
let Some((id, res)) = res else {
continue;
}
let (id, res) = res.unwrap();
match res {
Ok(_) => redeploys.push(id),
Err(e) => redeploy_failures.push(format!("{id}: {e:#?}")),
};
if let Err(e) = res {
warn!("failed post build redeploy for deployment {id}: {e:#}");
}
}
}
@@ -607,5 +723,78 @@ fn start_aws_builder_log(
let readable_sec_group_ids = security_group_ids.join(", ");
format!("instance id: {instance_id}\nip: {ip}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}\nassign public ip: {assign_public_ip}\nuse public ip: {use_public_ip}")
[
format!("{}: {instance_id}", muted("instance id")),
format!("{}: {ip}", muted("ip")),
format!("{}: {ami_id}", muted("ami id")),
format!("{}: {instance_type}", muted("instance type")),
format!("{}: {volume_gb} GB", muted("volume size")),
format!("{}: {subnet_id}", muted("subnet id")),
format!("{}: {readable_sec_group_ids}", muted("security groups")),
format!("{}: {assign_public_ip}", muted("assign public ip")),
format!("{}: {use_public_ip}", muted("use public ip")),
]
.join("\n")
}
/// This will make sure that a build with a non-none image registry has an account attached,
/// and will check the core config for a token / aws ecr config matching requirements.
/// Otherwise it is left to periphery.
async fn validate_account_extract_registry_token_aws_ecr(
build: &Build,
) -> anyhow::Result<(Option<String>, Option<AwsEcrConfig>)> {
match &build.config.image_registry {
ImageRegistry::None(_) => Ok((None, None)),
ImageRegistry::DockerHub(CloudRegistryConfig {
account, ..
}) => {
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use DockerHub image registry"
));
}
Ok((core_config().docker_accounts.get(account).cloned(), None))
}
ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
if account.is_empty() {
return Err(anyhow!(
"Must attach account to use GithubContainerRegistry"
));
}
Ok((core_config().github_accounts.get(account).cloned(), None))
}
ImageRegistry::AwsEcr(label) => {
let config = core_config().aws_ecr_registries.get(label);
let token = match config {
Some(AwsEcrConfigWithCredentials {
region,
access_key_id,
secret_access_key,
..
}) => {
let token = ecr::get_ecr_token(
region,
access_key_id,
secret_access_key,
)
.await
.context("failed to get aws ecr token")?;
ecr::maybe_create_repo(
&to_monitor_name(&build.name),
region.to_string(),
access_key_id,
secret_access_key,
)
.await
.context("failed to create aws ecr repo")?;
Some(token)
}
None => None,
};
Ok((token, config.map(AwsEcrConfig::from)))
}
ImageRegistry::Custom(_) => {
Err(anyhow!("Custom image registry is not implemented"))
}
}
}
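// A reduced sketch of how the tuple returned above is consumed, mirroring the
// RunBuild handler earlier in this diff: the optional registry token and
// optional AwsEcrConfig are forwarded to periphery as-is, and periphery falls
// back to its own account config when they are None. The standalone function
// name is illustrative.
async fn build_on_periphery_example(
  periphery: &PeripheryClient,
  build: &Build,
) -> anyhow::Result<()> {
  let (registry_token, aws_ecr) =
    validate_account_extract_registry_token_aws_ecr(build).await?;
  periphery
    .request(api::build::Build {
      build: build.clone(),
      registry_token,
      aws_ecr,
      replacers: Default::default(),
    })
    .await?;
  Ok(())
}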

View File

@@ -1,39 +1,43 @@
use std::collections::HashSet;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::execute::*,
entities::{
build::Build,
build::{Build, ImageRegistry},
config::core::AwsEcrConfig,
deployment::{Deployment, DeploymentImage},
get_image_name, monitor_timestamp,
get_image_name,
permission::PermissionLevel,
server::ServerState,
update::{Log, ResourceTarget, Update, UpdateStatus},
update::{Log, Update},
user::User,
Operation, Version,
Version,
},
};
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
cloud::aws::ecr,
config::core_config,
helpers::{
periphery_client,
query::{get_global_variables, get_server_with_status},
update::{add_update, make_update, update_update},
update::update_update,
},
monitor::update_cache_for_server,
resource,
state::{action_states, db_client, State},
};
impl Resolve<Deploy, User> for State {
#[instrument(name = "Deploy", skip(self, user))]
use crate::helpers::update::init_execution_update;
impl Resolve<Deploy, (User, Update)> for State {
#[instrument(name = "Deploy", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
Deploy {
@@ -41,7 +45,7 @@ impl Resolve<Deploy, User> for State {
stop_signal,
stop_time,
}: Deploy,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let mut deployment =
resource::get_check_permissions::<Deployment>(
@@ -76,10 +80,18 @@ impl Resolve<Deploy, User> for State {
let periphery = periphery_client(&server)?;
// This block gets the version of the image to deploy in the Build case.
// It also gets the name of the image from the build and attaches it directly.
let version = match deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(&build_id).await?;
let image_name = get_image_name(&build);
let image_name = get_image_name(&build, |label| {
core_config()
.aws_ecr_registries
.get(label)
.map(AwsEcrConfig::from)
})
.context("failed to create image name")?;
let version = if version.is_none() {
build.config.version
} else {
@@ -89,10 +101,13 @@ impl Resolve<Deploy, User> for State {
deployment.config.image = DeploymentImage::Image {
image: format!("{image_name}:{version}"),
};
// set docker account to match build docker account if it's not overridden by deployment
if deployment.config.docker_account.is_empty() {
deployment.config.docker_account =
build.config.docker_account;
// set image registry to match the build's image registry if it's not overridden by deployment
if matches!(
&deployment.config.image_registry,
ImageRegistry::None(_)
) {
deployment.config.image_registry =
build.config.image_registry;
}
version
}
@@ -129,11 +144,6 @@ impl Resolve<Deploy, User> for State {
env.value = res;
}
let mut update =
make_update(&deployment, Operation::DeployContainer, &user);
update.in_progress();
update.version = version;
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
@@ -156,19 +166,56 @@ impl Resolve<Deploy, User> for State {
);
}
update.id = add_update(update.clone()).await?;
update.version = version;
update_update(update.clone()).await?;
let docker_token = core_config
.docker_accounts
.get(&deployment.config.docker_account)
.cloned();
let (registry_token, aws_ecr) = match &deployment
.config
.image_registry
{
ImageRegistry::None(_) => (None, None),
ImageRegistry::DockerHub(params) => (
core_config.docker_accounts.get(&params.account).cloned(),
None,
),
ImageRegistry::Ghcr(params) => (
core_config.github_accounts.get(&params.account).cloned(),
None,
),
ImageRegistry::AwsEcr(label) => {
let config = core_config
.aws_ecr_registries
.get(label)
.with_context(|| {
format!(
"did not find config for aws ecr registry {label}"
)
})?;
(
Some(
ecr::get_ecr_token(
&config.region,
&config.access_key_id,
&config.secret_access_key,
)
.await
.context("failed to create aws ecr login token")?,
),
Some(AwsEcrConfig::from(config)),
)
}
ImageRegistry::Custom(_) => {
return Err(anyhow!("Custom ImageRegistry not yet supported"))
}
};
match periphery
.request(api::container::Deploy {
deployment,
stop_signal,
stop_time,
docker_token,
registry_token,
aws_ecr,
replacers: secret_replacers.into_iter().collect(),
})
.await
@@ -177,7 +224,9 @@ impl Resolve<Deploy, User> for State {
Err(e) => {
update.push_error_log(
"deploy container",
serialize_error_pretty(&e),
format_serror(
&e.context("failed to deploy container").into(),
),
);
}
};
@@ -191,12 +240,12 @@ impl Resolve<Deploy, User> for State {
}
}
impl Resolve<StartContainer, User> for State {
#[instrument(name = "StartContainer", skip(self, user))]
impl Resolve<StartContainer, (User, Update)> for State {
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartContainer { deployment }: StartContainer,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
@@ -230,20 +279,6 @@ impl Resolve<StartContainer, User> for State {
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: ResourceTarget::Deployment(deployment.id.clone()),
operation: Operation::StartContainer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::container::StartContainer {
name: deployment.name.clone(),
@@ -251,22 +286,23 @@ impl Resolve<StartContainer, User> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("start container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"start container",
format_serror(&e.context("failed to start container").into()),
),
};
update.logs.push(log);
update.finalize();
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StopContainer, User> for State {
#[instrument(name = "StopContainer", skip(self, user))]
impl Resolve<StopContainer, (User, Update)> for State {
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopContainer {
@@ -274,7 +310,7 @@ impl Resolve<StopContainer, User> for State {
signal,
time,
}: StopContainer,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
@@ -308,11 +344,6 @@ impl Resolve<StopContainer, User> for State {
let periphery = periphery_client(&server)?;
let mut update =
make_update(&deployment, Operation::StopContainer, &user);
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::container::StopContainer {
name: deployment.name.clone(),
@@ -326,26 +357,27 @@ impl Resolve<StopContainer, User> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
update.finalize();
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StopAllContainers, User> for State {
#[instrument(name = "StopAllContainers", skip(self, user))]
impl Resolve<StopAllContainers, (User, Update)> for State {
#[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopAllContainers { server }: StopAllContainers,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (server, status) = get_server_with_status(&server).await?;
if status != ServerState::Ok {
@@ -375,23 +407,27 @@ impl Resolve<StopAllContainers, User> for State {
.await
.context("failed to find deployments on server")?;
let mut update =
make_update(&server, Operation::StopAllContainers, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
let futures = deployments.iter().map(|deployment| async {
let req = super::ExecuteRequest::StopContainer(StopContainer {
deployment: deployment.id.clone(),
signal: None,
time: None,
});
(
self
.resolve(
StopContainer {
deployment: deployment.id.clone(),
signal: None,
time: None,
},
user.clone(),
)
.await,
async {
let update = init_execution_update(&req, &user).await?;
State
.resolve(
StopContainer {
deployment: deployment.id.clone(),
signal: None,
time: None,
},
(user.clone(), update),
)
.await
}
.await,
deployment.name.clone(),
deployment.id.clone(),
)
@@ -407,9 +443,11 @@ impl Resolve<StopAllContainers, User> for State {
if let Err(e) = res {
update.push_error_log(
"stop container failure",
format!(
"failed to stop container {name} ({id})\n\n{}",
serialize_error_pretty(&e)
format_serror(
&e.context(format!(
"failed to stop container {name} ({id})"
))
.into(),
),
);
}
@@ -422,8 +460,8 @@ impl Resolve<StopAllContainers, User> for State {
}
}
impl Resolve<RemoveContainer, User> for State {
#[instrument(name = "RemoveContainer", skip(self, user))]
impl Resolve<RemoveContainer, (User, Update)> for State {
#[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RemoveContainer {
@@ -431,7 +469,7 @@ impl Resolve<RemoveContainer, User> for State {
signal,
time,
}: RemoveContainer,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
@@ -465,20 +503,6 @@ impl Resolve<RemoveContainer, User> for State {
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: ResourceTarget::Deployment(deployment.id.clone()),
operation: Operation::RemoveContainer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::container::RemoveContainer {
name: deployment.name.clone(),
@@ -492,9 +516,10 @@ impl Resolve<RemoveContainer, User> for State {
.await
{
Ok(log) => log,
Err(e) => {
Log::error("stop container", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
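
The hunks above all follow the same refactor: execute resolvers now receive a pre-created `Update` alongside the `User` (the `Resolve<_, (User, Update)>` bound), so the per-handler boilerplate that built an `Update { .. }` and called `add_update` is deleted, and periphery errors get an `anyhow` context layer before being rendered with `format_serror`. Below is a minimal, self-contained sketch of that shape; the types are simplified stand-ins, not the real `monitor_client` / `resolver_api` items.

```rust
// Sketch only: simplified stand-ins for Update/User and a fake periphery call.
use anyhow::Context;

#[derive(Clone, Default)]
struct User {
    id: String,
}

#[derive(Clone, Default)]
struct Update {
    id: String,
    logs: Vec<(String, String, bool)>, // (stage, message, success)
}

impl Update {
    fn push_log(&mut self, stage: &str, msg: String, success: bool) {
        self.logs.push((stage.to_string(), msg, success));
    }
    fn finalize(&mut self) {
        // the real Update also sets end_ts / overall status here
    }
}

struct StartContainer {
    deployment: String,
}

// stand-in for the periphery request
async fn periphery_start_container(name: &str) -> anyhow::Result<String> {
    Ok(format!("started {name}"))
}

// The resolver no longer creates or inserts the Update; it only appends
// logs to the one handed in and finalizes it before returning.
async fn resolve_start_container(
    StartContainer { deployment }: StartContainer,
    (_user, mut update): (User, Update),
) -> anyhow::Result<Update> {
    match periphery_start_container(&deployment)
        .await
        .context("failed to start container")
    {
        Ok(log) => update.push_log("start container", log, true),
        Err(e) => update.push_log("start container", format!("{e:#}"), false),
    }
    update.finalize();
    Ok(update)
}
```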

View File

@@ -2,15 +2,26 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::execute::*, entities::user::User};
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
update::{Log, Update},
user::User,
},
};
use mungos::by_id::find_one_by_id;
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{auth::auth_request, state::State};
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
state::{db_client, State},
};
mod build;
mod deployment;
@@ -18,17 +29,18 @@ mod procedure;
mod repo;
mod server;
mod server_template;
mod sync;
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[resolver_args((User, Update))]
#[serde(tag = "type", content = "params")]
enum ExecuteRequest {
pub enum ExecuteRequest {
// ==== SERVER ====
PruneContainers(PruneDockerContainers),
PruneImages(PruneDockerImages),
PruneNetworks(PruneDockerNetworks),
PruneContainers(PruneContainers),
PruneImages(PruneImages),
PruneNetworks(PruneNetworks),
// ==== DEPLOYMENT ====
Deploy(Deploy),
@@ -50,6 +62,9 @@ enum ExecuteRequest {
// ==== SERVER TEMPLATE ====
LaunchServer(LaunchServer),
// ==== SYNC ====
RunSync(RunSync),
}
pub fn router() -> Router {
@@ -61,25 +76,58 @@ pub fn router() -> Router {
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ExecuteRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
) -> serror::Result<Json<Update>> {
let req_id = Uuid::new_v4();
let res = tokio::spawn(task(req_id, request, user))
.await
.context("failure in spawned execute task");
// need to validate no cancel is active before any update is created.
build::validate_cancel_build(&request).await?;
if let Err(e) = &res {
warn!("/execute request {req_id} spawn error: {e:#}",);
}
let update = init_execution_update(&request, &user).await?;
Ok((TypedHeader(ContentType::json()), res??))
let handle =
tokio::spawn(task(req_id, request, user, update.clone()));
tokio::spawn({
let update_id = update.id.clone();
async move {
let log = match handle.await {
Ok(Err(e)) => {
warn!("/execute request {req_id} task error: {e:#}",);
Log::error("task error", format_serror(&e.into()))
}
Err(e) => {
warn!("/execute request {req_id} spawn error: {e:?}",);
Log::error("spawn error", format!("{e:#?}"))
}
_ => return,
};
let res = async {
let mut update =
find_one_by_id(&db_client().await.updates, &update_id)
.await
.context("failed to query to db")?
.context("no update exists with given id")?;
update.logs.push(log);
update.finalize();
update_update(update).await
}
.await;
if let Err(e) = res {
warn!("failed to update update with task error log | {e:#}");
}
}
});
Ok(Json(update))
}
#[instrument(name = "ExecuteRequest", skip(user))]
#[instrument(name = "ExecuteRequest", skip(user, update), fields(user_id = user.id, update_id = update.id))]
async fn task(
req_id: Uuid,
request: ExecuteRequest,
user: User,
update: Update,
) -> anyhow::Result<String> {
info!(
"/execute request {req_id} | user: {} ({})",
@@ -87,23 +135,22 @@ async fn task(
);
let timer = Instant::now();
let res =
State
.resolve_request(request, user)
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
let res = State
.resolve_request(request, (user, update))
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/execute request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
info!("/execute request {req_id} | resolve time: {elapsed:?}");
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
res
}
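
The new `/execute` handler responds with the freshly created `Update` right away and moves the work into `tokio::spawn`; a second spawned block waits on the join handle and, if the task errored or panicked, loads the update back from the database, appends an error log, and finalizes it. A rough sketch of that respond-early, log-errors-later shape is below, with an in-memory map standing in for the Mongo updates collection (the store and log types are assumptions, not the repo's).

```rust
// Sketch: respond with the update immediately, finish (or fail) it in the background.
use std::{collections::HashMap, sync::Arc};
use tokio::sync::Mutex;

type Store = Arc<Mutex<HashMap<String, Vec<String>>>>; // update_id -> error logs

async fn run_execution(update_id: String) -> anyhow::Result<()> {
    // the actual resolver call would go here
    println!("executing for update {update_id}");
    Ok(())
}

async fn handle_execute(store: Store, update_id: String) -> String {
    // spawn the real work
    let handle = tokio::spawn(run_execution(update_id.clone()));

    // watcher: if the task failed or panicked, record that on the stored update
    tokio::spawn({
        let store = store.clone();
        let update_id = update_id.clone();
        async move {
            let err = match handle.await {
                Ok(Ok(())) => return,                       // success: the task finalized the update itself
                Ok(Err(e)) => format!("task error: {e:#}"), // resolver returned Err
                Err(e) => format!("spawn error: {e:?}"),    // task panicked or was cancelled
            };
            store.lock().await.entry(update_id).or_default().push(err);
        }
    });

    // the caller gets the update id here (the Update itself in the real handler) right away
    update_id
}
```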

View File

@@ -1,40 +1,38 @@
use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use monitor_client::{
api::execute::RunProcedure,
entities::{
permission::PermissionLevel, procedure::Procedure,
update::Update, user::User, Operation,
update::Update, user::User,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use tokio::sync::Mutex;
use crate::{
helpers::{
procedure::execute_procedure,
update::{add_update, make_update, update_update},
},
helpers::{procedure::execute_procedure, update::update_update},
resource::{self, refresh_procedure_state_cache},
state::{action_states, db_client, State},
};
impl Resolve<RunProcedure, User> for State {
#[instrument(name = "RunProcedure", skip(self, user))]
impl Resolve<RunProcedure, (User, Update)> for State {
#[instrument(name = "RunProcedure", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunProcedure { procedure }: RunProcedure,
user: User,
(user, update): (User, Update),
) -> anyhow::Result<Update> {
resolve_inner(procedure, user).await
resolve_inner(procedure, user, update).await
}
}
fn resolve_inner(
procedure: String,
user: User,
mut update: Update,
) -> Pin<
Box<
dyn std::future::Future<Output = anyhow::Result<Update>> + Send,
@@ -48,6 +46,18 @@ fn resolve_inner(
)
.await?;
// Need to push the initial log, as execute_procedure
// assumes first log is already created
// and will panic otherwise.
update.push_simple_log(
"execute_procedure",
format!(
"{}: executing procedure '{}'",
muted("INFO"),
bold(&procedure.name)
),
);
// get the action state for the procedure (or insert default).
let action_state = action_states()
.procedure
@@ -59,16 +69,6 @@ fn resolve_inner(
let _action_guard =
action_state.update(|state| state.running = true)?;
let mut update =
make_update(&procedure, Operation::RunProcedure, &user);
update.in_progress();
update.push_simple_log(
"execute procedure",
format!("Executing procedure: {}", procedure.name),
);
update.id = add_update(update.clone()).await?;
let update = Mutex::new(update);
let res = execute_procedure(&procedure, &update).await;
@@ -79,13 +79,15 @@ fn resolve_inner(
Ok(_) => {
update.push_simple_log(
"execution ok",
"the procedure has completed with no errors",
format!(
"{}: the procedure has {} with no errors",
muted("INFO"),
colored("completed", Color::Green)
),
);
}
Err(e) => update.push_error_log(
"execution error",
serialize_error_pretty(&e),
),
Err(e) => update
.push_error_log("execution error", format_serror(&e.into())),
}
update.finalize();
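
RunProcedure now receives its Update like the rest, with one wrinkle noted in the added comment: `execute_procedure` assumes the first log already exists and panics otherwise, so the resolver pushes an initial "executing procedure" log before wrapping the update in a `Mutex` and handing it over. A tiny sketch of that invariant, using simplified types rather than the real ones:

```rust
// Sketch: the caller seeds the first log; the executor only appends after it.
use tokio::sync::Mutex;

#[derive(Default)]
struct Update {
    logs: Vec<String>,
}

async fn execute_procedure(update: &Mutex<Update>) {
    let mut update = update.lock().await;
    // relies on the caller having pushed the initial log already
    let first = update
        .logs
        .first()
        .expect("first log must be created by the caller")
        .clone();
    update.logs.push(format!("continuing after: {first}"));
}

async fn run_procedure(name: &str) -> Update {
    let mut update = Update::default();
    // seed the initial log before execute_procedure, as the diff comments require
    update.logs.push(format!("INFO: executing procedure '{name}'"));
    let update = Mutex::new(update);
    execute_procedure(&update).await;
    update.into_inner()
}
```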

View File

@@ -1,4 +1,5 @@
use anyhow::anyhow;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -6,9 +7,8 @@ use monitor_client::{
permission::PermissionLevel,
repo::Repo,
server::Server,
update::{Log, ResourceTarget, Update, UpdateStatus},
update::{Log, Update},
user::User,
Operation,
},
};
use mungos::{
@@ -17,24 +17,20 @@ use mungos::{
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
config::core_config,
helpers::{
periphery_client,
update::{add_update, update_update},
},
helpers::{periphery_client, update::update_update},
resource::{self, refresh_repo_state_cache},
state::{action_states, db_client, State},
};
impl Resolve<CloneRepo, User> for State {
#[instrument(name = "CloneRepo", skip(self, user))]
impl Resolve<CloneRepo, (User, Update)> for State {
#[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
CloneRepo { repo }: CloneRepo,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
&repo,
@@ -61,20 +57,6 @@ impl Resolve<CloneRepo, User> for State {
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
operation: Operation::CloneRepo,
target: ResourceTarget::Repo(repo.id.clone()),
start_ts,
status: UpdateStatus::InProgress,
operator: user.id.clone(),
success: true,
..Default::default()
};
update.id = add_update(update.clone()).await?;
let github_token = core_config()
.github_accounts
.get(&repo.config.github_account)
@@ -89,7 +71,10 @@ impl Resolve<CloneRepo, User> for State {
{
Ok(logs) => logs,
Err(e) => {
vec![Log::error("clone repo", serialize_error_pretty(&e))]
vec![Log::error(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
)]
}
};
@@ -97,19 +82,19 @@ impl Resolve<CloneRepo, User> for State {
update.finalize();
if update.success {
update_last_pulled(&repo.name).await;
update_last_pulled_time(&repo.name).await;
}
handle_update_return(update).await
}
}
impl Resolve<PullRepo, User> for State {
#[instrument(name = "PullRepo", skip(self, user))]
impl Resolve<PullRepo, (User, Update)> for State {
#[instrument(name = "PullRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PullRepo { repo }: PullRepo,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
&repo,
@@ -136,20 +121,6 @@ impl Resolve<PullRepo, User> for State {
let periphery = periphery_client(&server)?;
let start_ts = monitor_timestamp();
let mut update = Update {
operation: Operation::PullRepo,
target: ResourceTarget::Repo(repo.id.clone()),
start_ts,
status: UpdateStatus::InProgress,
operator: user.id.clone(),
success: true,
..Default::default()
};
update.id = add_update(update.clone()).await?;
let logs = match periphery
.request(api::git::PullRepo {
name: repo.name.clone(),
@@ -161,7 +132,10 @@ impl Resolve<PullRepo, User> for State {
{
Ok(logs) => logs,
Err(e) => {
vec![Log::error("pull repo", serialize_error_pretty(&e))]
vec![Log::error(
"pull repo",
format_serror(&e.context("failed to pull repo").into()),
)]
}
};
@@ -170,13 +144,14 @@ impl Resolve<PullRepo, User> for State {
update.finalize();
if update.success {
update_last_pulled(&repo.name).await;
update_last_pulled_time(&repo.name).await;
}
handle_update_return(update).await
}
}
#[instrument(skip_all, fields(update_id = update.id))]
async fn handle_update_return(
update: Update,
) -> anyhow::Result<Update> {
@@ -198,7 +173,8 @@ async fn handle_update_return(
Ok(update)
}
async fn update_last_pulled(repo_name: &str) {
#[instrument]
async fn update_last_pulled_time(repo_name: &str) {
let res = db_client()
.await
.repos
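
Across these hunks, bare `serialize_error_pretty(&e)` calls are replaced by `format_serror(&e.context("...").into())`: the error gets an extra `anyhow` context layer naming the failed operation before it is rendered into the log. `format_serror` lives in the repo's `formatting` crate; the sketch below only approximates the idea with a hand-rolled walk over the error chain, so the exact output format is an assumption.

```rust
// Sketch: add context to an error, then render the whole chain for a log entry.
use anyhow::anyhow;

// hypothetical stand-in for formatting::format_serror
fn render_error_chain(e: &anyhow::Error) -> String {
    e.chain()
        .enumerate()
        .map(|(i, cause)| format!("{i}: {cause}"))
        .collect::<Vec<_>>()
        .join("\n")
}

fn main() {
    let periphery_err: anyhow::Result<()> = Err(anyhow!("connection refused"));

    // wrap with the operation that failed before logging, as the diff does
    let log_body = match periphery_err {
        Ok(()) => String::from("ok"),
        Err(e) => render_error_chain(&e.context("failed to pull repo")),
    };

    println!("{log_body}");
    // prints:
    // 0: failed to pull repo
    // 1: connection refused
}
```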

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
@@ -7,28 +8,23 @@ use monitor_client::{
server::Server,
update::{Log, Update, UpdateStatus},
user::User,
Operation,
},
};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
periphery_client,
update::{add_update, make_update, update_update},
},
helpers::{periphery_client, update::update_update},
resource,
state::{action_states, State},
};
impl Resolve<PruneDockerContainers, User> for State {
#[instrument(name = "PruneDockerContainers", skip(self, user))]
impl Resolve<PruneContainers, (User, Update)> for State {
#[instrument(name = "PruneContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneDockerContainers { server }: PruneDockerContainers,
user: User,
PruneContainers { server }: PruneContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
@@ -50,11 +46,6 @@ impl Resolve<PruneDockerContainers, User> for State {
let periphery = periphery_client(&server)?;
let mut update =
make_update(&server, Operation::PruneContainersServer, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::container::PruneContainers {})
.await
@@ -63,9 +54,12 @@ impl Resolve<PruneDockerContainers, User> for State {
server.name
)) {
Ok(log) => log,
Err(e) => {
Log::error("prune containers", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"prune containers",
format_serror(
&e.context("failed to prune containers").into(),
),
),
};
update.success = log.success;
@@ -79,12 +73,12 @@ impl Resolve<PruneDockerContainers, User> for State {
}
}
impl Resolve<PruneDockerNetworks, User> for State {
#[instrument(name = "PruneDockerNetworks", skip(self, user))]
impl Resolve<PruneNetworks, (User, Update)> for State {
#[instrument(name = "PruneNetworks", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneDockerNetworks { server }: PruneDockerNetworks,
user: User,
PruneNetworks { server }: PruneNetworks,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
@@ -106,11 +100,6 @@ impl Resolve<PruneDockerNetworks, User> for State {
let periphery = periphery_client(&server)?;
let mut update =
make_update(&server, Operation::PruneNetworksServer, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
let log = match periphery
.request(api::network::PruneNetworks {})
.await
@@ -119,9 +108,10 @@ impl Resolve<PruneDockerNetworks, User> for State {
server.name
)) {
Ok(log) => log,
Err(e) => {
Log::error("prune networks", serialize_error_pretty(&e))
}
Err(e) => Log::error(
"prune networks",
format_serror(&e.context("failed to prune networks").into()),
),
};
update.success = log.success;
@@ -135,12 +125,12 @@ impl Resolve<PruneDockerNetworks, User> for State {
}
}
impl Resolve<PruneDockerImages, User> for State {
#[instrument(name = "PruneDockerImages", skip(self, user))]
impl Resolve<PruneImages, (User, Update)> for State {
#[instrument(name = "PruneImages", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneDockerImages { server }: PruneDockerImages,
user: User,
PruneImages { server }: PruneImages,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
@@ -162,11 +152,6 @@ impl Resolve<PruneDockerImages, User> for State {
let periphery = periphery_client(&server)?;
let mut update =
make_update(&server, Operation::PruneImagesServer, &user);
update.in_progress();
update.id = add_update(update.clone()).await?;
let log =
match periphery.request(api::build::PruneImages {}).await {
Ok(log) => log,

View File

@@ -1,4 +1,5 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::{execute::LaunchServer, write::CreateServer},
entities::{
@@ -7,26 +8,29 @@ use monitor_client::{
server_template::{ServerTemplate, ServerTemplateConfig},
update::Update,
user::User,
Operation,
},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
cloud::aws::launch_ec2_instance, helpers::update::{add_update, make_update, update_update}, resource, state::{db_client, State}
cloud::{
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
},
helpers::update::update_update,
resource,
state::{db_client, State},
};
impl Resolve<LaunchServer, User> for State {
#[instrument(name = "LaunchServer", skip(self, user))]
impl Resolve<LaunchServer, (User, Update)> for State {
#[instrument(name = "LaunchServer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
LaunchServer {
name,
server_template,
}: LaunchServer,
user: User,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
// validate name isn't already taken by another server
if db_client()
@@ -52,29 +56,28 @@ impl Resolve<LaunchServer, User> for State {
)
.await?;
let mut update =
make_update(&template, Operation::LaunchServer, &user);
update.in_progress();
update.push_simple_log(
"launching server",
format!("{:#?}", template.config),
);
update.id = add_update(update.clone()).await?;
update_update(update.clone()).await?;
let config = match template.config {
ServerTemplateConfig::Aws(config) => {
let region = config.region.clone();
let instance = launch_ec2_instance(&name, config).await;
if let Err(e) = &instance {
update.push_error_log(
"launch server",
format!("failed to launch aws instance\n\n{e:#?}"),
);
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
let instance = instance.unwrap();
let instance = match launch_ec2_instance(&name, config).await
{
Ok(instance) => instance,
Err(e) => {
update.push_error_log(
"launch server",
format!("failed to launch aws instance\n\n{e:#?}"),
);
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
};
update.push_simple_log(
"launch server",
format!(
@@ -88,6 +91,34 @@ impl Resolve<LaunchServer, User> for State {
..Default::default()
}
}
ServerTemplateConfig::Hetzner(config) => {
let datacenter = config.datacenter;
let server = match launch_hetzner_server(&name, config).await
{
Ok(server) => server,
Err(e) => {
update.push_error_log(
"launch server",
format!("failed to launch hetzner server\n\n{e:#?}"),
);
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
};
update.push_simple_log(
"launch server",
format!(
"successfully launched server {name} on ip {}",
server.ip
),
);
PartialServerConfig {
address: format!("http://{}:8120", server.ip).into(),
region: datacenter.as_ref().to_string().into(),
..Default::default()
}
}
};
match self.resolve(CreateServer { name, config }, user).await {
@@ -96,14 +127,12 @@ impl Resolve<LaunchServer, User> for State {
"create server",
format!("created server {} ({})", server.name, server.id),
);
update.other_data = server.id;
}
Err(e) => {
update.push_error_log(
"create server",
format!(
"failed to create server\n\n{}",
serialize_error_pretty(&e)
),
format_serror(&e.context("failed to create server").into()),
);
}
};
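
The LaunchServer change swaps the earlier `if let Err(..) { ...; return } ... instance.unwrap()` sequence for a single `match`, and reuses the same shape for the new Hetzner branch: on failure, push an error log, finalize the update, and return it as a normal response rather than an `Err`. A small sketch of that early-return match, with stand-in types rather than the real cloud clients:

```rust
// Sketch: fail "softly" by recording the error on the update and returning it.
struct Update {
    logs: Vec<String>,
    success: bool,
}

struct Instance {
    ip: String,
}

async fn launch_instance(_name: &str) -> anyhow::Result<Instance> {
    Ok(Instance { ip: "10.0.0.5".into() })
}

async fn launch(name: &str, mut update: Update) -> anyhow::Result<Update> {
    // match instead of `if let Err` + `unwrap`: one place decides success vs early return
    let instance = match launch_instance(name).await {
        Ok(instance) => instance,
        Err(e) => {
            update.logs.push(format!("failed to launch instance\n\n{e:#?}"));
            update.success = false;
            // the real code also persists the update here before returning
            return Ok(update);
        }
    };
    update.logs.push(format!("launched {name} at {}", instance.ip));
    Ok(update)
}
```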

View File

@@ -0,0 +1,394 @@
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use mongo_indexed::doc;
use monitor_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{
self,
alerter::Alerter,
build::Build,
builder::Builder,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
update::{Log, Update},
user::{sync_user, User},
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use crate::{
helpers::{
query::get_id_to_tags,
sync::{
deployment,
resource::{
get_updates_for_execution, AllResourcesById, ResourceSync,
},
},
update::update_update,
},
resource::{self, refresh_resource_sync_state_cache},
state::{db_client, State},
};
impl Resolve<RunSync, (User, Update)> for State {
#[instrument(name = "RunSync", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunSync { sync }: RunSync,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&sync, &user, PermissionLevel::Execute)
.await?;
if sync.config.repo.is_empty() {
return Err(anyhow!("resource sync repo not configured"));
}
let (res, logs, hash, message) =
crate::helpers::sync::remote::get_remote_resources(&sync)
.await
.context("failed to get remote resources")?;
update.logs.extend(logs);
update_update(update.clone()).await?;
let resources = res?;
let all_resources = AllResourcesById::load().await?;
let id_to_tags = get_id_to_tags(None).await?;
let (servers_to_create, servers_to_update, servers_to_delete) =
get_updates_for_execution::<Server>(
resources.servers,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (
deployments_to_create,
deployments_to_update,
deployments_to_delete,
) = deployment::get_updates_for_execution(
resources.deployments,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (builds_to_create, builds_to_update, builds_to_delete) =
get_updates_for_execution::<Build>(
resources.builds,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (repos_to_create, repos_to_update, repos_to_delete) =
get_updates_for_execution::<Repo>(
resources.repos,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (
procedures_to_create,
procedures_to_update,
procedures_to_delete,
) = get_updates_for_execution::<Procedure>(
resources.procedures,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (builders_to_create, builders_to_update, builders_to_delete) =
get_updates_for_execution::<Builder>(
resources.builders,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (alerters_to_create, alerters_to_update, alerters_to_delete) =
get_updates_for_execution::<Alerter>(
resources.alerters,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (
server_templates_to_create,
server_templates_to_update,
server_templates_to_delete,
) = get_updates_for_execution::<ServerTemplate>(
resources.server_templates,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (
resource_syncs_to_create,
resource_syncs_to_update,
resource_syncs_to_delete,
) = get_updates_for_execution::<entities::sync::ResourceSync>(
resources.resource_syncs,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await?;
let (
variables_to_create,
variables_to_update,
variables_to_delete,
) = crate::helpers::sync::variables::get_updates_for_execution(
resources.variables,
sync.config.delete,
)
.await?;
let (
user_groups_to_create,
user_groups_to_update,
user_groups_to_delete,
) = crate::helpers::sync::user_groups::get_updates_for_execution(
resources.user_groups,
sync.config.delete,
&all_resources,
)
.await?;
if resource_syncs_to_create.is_empty()
&& resource_syncs_to_update.is_empty()
&& resource_syncs_to_delete.is_empty()
&& server_templates_to_create.is_empty()
&& server_templates_to_update.is_empty()
&& server_templates_to_delete.is_empty()
&& servers_to_create.is_empty()
&& servers_to_update.is_empty()
&& servers_to_delete.is_empty()
&& deployments_to_create.is_empty()
&& deployments_to_update.is_empty()
&& deployments_to_delete.is_empty()
&& builds_to_create.is_empty()
&& builds_to_update.is_empty()
&& builds_to_delete.is_empty()
&& builders_to_create.is_empty()
&& builders_to_update.is_empty()
&& builders_to_delete.is_empty()
&& alerters_to_create.is_empty()
&& alerters_to_update.is_empty()
&& alerters_to_delete.is_empty()
&& repos_to_create.is_empty()
&& repos_to_update.is_empty()
&& repos_to_delete.is_empty()
&& procedures_to_create.is_empty()
&& procedures_to_update.is_empty()
&& procedures_to_delete.is_empty()
&& user_groups_to_create.is_empty()
&& user_groups_to_update.is_empty()
&& user_groups_to_delete.is_empty()
&& variables_to_create.is_empty()
&& variables_to_update.is_empty()
&& variables_to_delete.is_empty()
{
update.push_simple_log(
"No Changes",
format!(
"{}. exiting.",
colored("nothing to do", Color::Green)
),
);
update.finalize();
update_update(update.clone()).await?;
return Ok(update);
}
// =================
// No deps
maybe_extend(
&mut update.logs,
crate::helpers::sync::variables::run_updates(
variables_to_create,
variables_to_update,
variables_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
crate::helpers::sync::user_groups::run_updates(
user_groups_to_create,
user_groups_to_update,
user_groups_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
entities::sync::ResourceSync::run_updates(
resource_syncs_to_create,
resource_syncs_to_update,
resource_syncs_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
ServerTemplate::run_updates(
server_templates_to_create,
server_templates_to_update,
server_templates_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
Server::run_updates(
servers_to_create,
servers_to_update,
servers_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
Alerter::run_updates(
alerters_to_create,
alerters_to_update,
alerters_to_delete,
)
.await,
);
// Dependent on server
maybe_extend(
&mut update.logs,
Builder::run_updates(
builders_to_create,
builders_to_update,
builders_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
Repo::run_updates(
repos_to_create,
repos_to_update,
repos_to_delete,
)
.await,
);
// Dependant on builder
maybe_extend(
&mut update.logs,
Build::run_updates(
builds_to_create,
builds_to_update,
builds_to_delete,
)
.await,
);
// Dependant on server / build
if let Some(res) = deployment::run_updates(
deployments_to_create,
deployments_to_update,
deployments_to_delete,
)
.await
{
update.logs.extend(res);
}
// Dependant on everything
maybe_extend(
&mut update.logs,
Procedure::run_updates(
procedures_to_create,
procedures_to_update,
procedures_to_delete,
)
.await,
);
let db = db_client().await;
if let Err(e) = update_one_by_id(
&db.resource_syncs,
&sync.id,
doc! {
"$set": {
"info.last_sync_ts": monitor_timestamp(),
"info.last_sync_hash": hash,
"info.last_sync_message": message,
}
},
None,
)
.await
{
warn!(
"failed to update resource sync {} info after sync | {e:#}",
sync.name
)
}
if let Err(e) = State
.resolve(
RefreshResourceSyncPending { sync: sync.id },
sync_user().to_owned(),
)
.await
{
warn!("failed to refresh sync {} after run | {e:#}", sync.name);
update.push_error_log(
"refresh sync",
format_serror(
&e.context("failed to refresh sync pending after run")
.into(),
),
);
}
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_resource_sync_state_cache().await;
}
update_update(update.clone()).await?;
Ok(update)
}
}
fn maybe_extend(logs: &mut Vec<Log>, log: Option<Log>) {
if let Some(log) = log {
logs.push(log);
}
}
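
Two details of the new RunSync resolver are worth calling out: `maybe_extend` folds the optional per-resource-type log into the update, and the resource types are applied in dependency order (variables and user groups first, then syncs, server templates, servers and alerters, then builders and repos, then builds, then deployments, then procedures). A compact sketch of that ordering with the same helper; the per-type runners are stubbed, not the repo's `run_updates` implementations.

```rust
// Sketch: apply sync stages in dependency order, collecting optional logs.
struct Log(String);

fn maybe_extend(logs: &mut Vec<Log>, log: Option<Log>) {
    if let Some(log) = log {
        logs.push(log);
    }
}

// stand-ins for the per-resource-type run_updates calls; None means "nothing to do"
async fn run_servers() -> Option<Log> {
    Some(Log("servers: 1 created".into()))
}
async fn run_builds() -> Option<Log> {
    None
}
async fn run_deployments() -> Option<Log> {
    Some(Log("deployments: 2 updated".into()))
}

async fn run_sync() -> Vec<Log> {
    let mut logs = Vec::new();
    // no dependencies
    maybe_extend(&mut logs, run_servers().await);
    // dependent on servers / builders
    maybe_extend(&mut logs, run_builds().await);
    // dependent on servers and builds
    maybe_extend(&mut logs, run_deployments().await);
    logs
}
```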

View File

@@ -1,4 +1,5 @@
pub mod auth;
pub mod execute;
pub mod read;
pub mod user;
pub mod write;

View File

@@ -13,6 +13,7 @@ use mungos::{
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
state::{db_client, State},
};
@@ -26,7 +27,7 @@ impl Resolve<ListAlerts, User> for State {
user: User,
) -> anyhow::Result<ListAlertsResponse> {
let mut query = query.unwrap_or_default();
if !user.admin {
if !user.admin && !core_config().transparent_mode {
let server_ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Server,

View File

@@ -14,6 +14,7 @@ use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
@@ -44,13 +45,23 @@ impl Resolve<ListAlerters, User> for State {
}
}
impl Resolve<ListFullAlerters, User> for State {
async fn resolve(
&self,
ListFullAlerters { query }: ListFullAlerters,
user: User,
) -> anyhow::Result<ListFullAlertersResponse> {
resource::list_full_for_user::<Alerter>(query, &user).await
}
}
impl Resolve<GetAlertersSummary, User> for State {
async fn resolve(
&self,
GetAlertersSummary {}: GetAlertersSummary,
user: User,
) -> anyhow::Result<GetAlertersSummaryResponse> {
let query = if user.admin {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(

View File

@@ -1,6 +1,5 @@
use std::{
collections::{HashMap, HashSet},
str::FromStr,
sync::OnceLock,
};
@@ -12,23 +11,19 @@ use monitor_client::{
entities::{
build::{Build, BuildActionState, BuildListItem, BuildState},
permission::PermissionLevel,
update::{ResourceTargetVariant, UpdateStatus},
update::UpdateStatus,
user::User,
Operation,
},
};
use mungos::{
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId},
options::FindOptions,
},
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::{Resolve, ResolveToString};
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{action_states, build_state_cache, db_client, State},
};
@@ -58,6 +53,16 @@ impl Resolve<ListBuilds, User> for State {
}
}
impl Resolve<ListFullBuilds, User> for State {
async fn resolve(
&self,
ListFullBuilds { query }: ListFullBuilds,
user: User,
) -> anyhow::Result<ListFullBuildsResponse> {
resource::list_full_for_user::<Build>(query, &user).await
}
}
impl Resolve<GetBuildActionState, User> for State {
async fn resolve(
&self,
@@ -86,31 +91,18 @@ impl Resolve<GetBuildsSummary, User> for State {
GetBuildsSummary {}: GetBuildsSummary,
user: User,
) -> anyhow::Result<GetBuildsSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Build,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let builds = resource::list_full_for_user::<Build>(
Default::default(),
&user,
)
.await
.context("failed to get all builds")?;
let builds = find_collect(&db_client().await.builds, query, None)
.await
.context("failed to find all build documents")?;
let mut res = GetBuildsSummaryResponse::default();
let cache = build_state_cache();
let action_states = action_states();
for build in builds {
res.total += 1;
@@ -209,6 +201,7 @@ impl Resolve<GetBuildVersions, User> for State {
major,
minor,
patch,
limit,
}: GetBuildVersions,
user: User,
) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
@@ -243,6 +236,7 @@ impl Resolve<GetBuildVersions, User> for State {
filter,
FindOptions::builder()
.sort(doc! { "_id": -1 })
.limit(limit)
.build(),
)
.await
@@ -256,6 +250,24 @@ impl Resolve<GetBuildVersions, User> for State {
}
}
fn github_organizations() -> &'static String {
static GITHUB_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
GITHUB_ORGANIZATIONS.get_or_init(|| {
serde_json::to_string(&core_config().github_organizations)
.expect("failed to serialize github organizations")
})
}
impl ResolveToString<ListGithubOrganizations, User> for State {
async fn resolve_to_string(
&self,
ListGithubOrganizations {}: ListGithubOrganizations,
_: User,
) -> anyhow::Result<String> {
Ok(github_organizations().clone())
}
}
fn docker_organizations() -> &'static String {
static DOCKER_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
DOCKER_ORGANIZATIONS.get_or_init(|| {
@@ -293,6 +305,8 @@ impl Resolve<ListCommonBuildExtraArgs, User> for State {
}
}
Ok(res.into_iter().collect())
let mut res = res.into_iter().collect::<Vec<_>>();
res.sort();
Ok(res)
}
}
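
The `ListGithubOrganizations` and `ListDockerOrganizations` read handlers serve a pre-serialized JSON string cached in a `OnceLock`, since the underlying config never changes at runtime; the `ResolveToString` impl then returns the cached string without re-serializing on every request. A minimal sketch of that caching pattern (the config contents here are made up):

```rust
// Sketch: serialize a static config list once and hand out the cached string.
use std::sync::OnceLock;

fn github_organizations_config() -> Vec<String> {
    // stand-in for core_config().github_organizations
    vec!["my-org".into(), "another-org".into()]
}

fn github_organizations() -> &'static String {
    static GITHUB_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
    GITHUB_ORGANIZATIONS.get_or_init(|| {
        serde_json::to_string(&github_organizations_config())
            .expect("failed to serialize github organizations")
    })
}

fn main() {
    // every call after the first reuses the cached serialization
    println!("{}", github_organizations());
}
```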

View File

@@ -45,13 +45,23 @@ impl Resolve<ListBuilders, User> for State {
}
}
impl Resolve<ListFullBuilders, User> for State {
async fn resolve(
&self,
ListFullBuilders { query }: ListFullBuilders,
user: User,
) -> anyhow::Result<ListFullBuildersResponse> {
resource::list_full_for_user::<Builder>(query, &user).await
}
}
impl Resolve<GetBuildersSummary, User> for State {
async fn resolve(
&self,
GetBuildersSummary {}: GetBuildersSummary,
user: User,
) -> anyhow::Result<GetBuildersSummaryResponse> {
let query = if user.admin {
let query = if user.admin || core_config().transparent_mode {
None
} else {
let ids = get_resource_ids_for_non_admin(
@@ -100,7 +110,7 @@ impl Resolve<GetBuilderAvailableAccounts, User> for State {
let res = self
.resolve(
read::GetAvailableAccounts {
server: config.server_id,
server: Some(config.server_id),
},
user,
)

View File

@@ -1,4 +1,4 @@
use std::{cmp, collections::HashSet, str::FromStr};
use std::{cmp, collections::HashSet};
use anyhow::{anyhow, Context};
use monitor_client::{
@@ -10,23 +10,17 @@ use monitor_client::{
},
permission::PermissionLevel,
server::Server,
update::{Log, ResourceTargetVariant},
update::Log,
user::User,
},
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId},
};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client, query::get_resource_ids_for_non_admin,
},
helpers::periphery_client,
resource,
state::{action_states, db_client, deployment_status_cache, State},
state::{action_states, deployment_status_cache, State},
};
impl Resolve<GetDeployment, User> for State {
@@ -54,6 +48,16 @@ impl Resolve<ListDeployments, User> for State {
}
}
impl Resolve<ListFullDeployments, User> for State {
async fn resolve(
&self,
ListFullDeployments { query }: ListFullDeployments,
user: User,
) -> anyhow::Result<ListFullDeploymentsResponse> {
resource::list_full_for_user::<Deployment>(query, &user).await
}
}
impl Resolve<GetDeploymentContainer, User> for State {
async fn resolve(
&self,
@@ -117,6 +121,7 @@ impl Resolve<SearchLog, User> for State {
deployment,
terms,
combinator,
invert,
}: SearchLog,
user: User,
) -> anyhow::Result<Log> {
@@ -139,6 +144,7 @@ impl Resolve<SearchLog, User> for State {
name,
terms,
combinator,
invert,
})
.await
.context("failed at call to periphery")
@@ -200,27 +206,12 @@ impl Resolve<GetDeploymentsSummary, User> for State {
GetDeploymentsSummary {}: GetDeploymentsSummary,
user: User,
) -> anyhow::Result<GetDeploymentsSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Deployment,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let deployments =
find_collect(&db_client().await.deployments, query, None)
.await
.context("failed to find all deployment documents")?;
let deployments = resource::list_full_for_user::<Deployment>(
Default::default(),
&user,
)
.await
.context("failed to get deployments from db")?;
let mut res = GetDeploymentsSummaryResponse::default();
let status_cache = deployment_status_cache();
for deployment in deployments {
@@ -266,6 +257,8 @@ impl Resolve<ListCommonDeploymentExtraArgs, User> for State {
}
}
Ok(res.into_iter().collect())
let mut res = res.into_iter().collect::<Vec<_>>();
res.sort();
Ok(res)
}
}
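
The summary endpoints (builds, deployments, procedures, repos) drop their hand-rolled non-admin `_id: { $in: ids }` queries and instead call `resource::list_full_for_user` with a default query, letting the shared permission filtering decide what the user can see before the counts are computed. Roughly, the shape becomes the sketch below; the types are stubs and `list_full_for_user` is the repo's helper, not reproduced here.

```rust
// Sketch: compute a summary over the permission-filtered resource list
// instead of re-implementing the non-admin id filter per endpoint.
#[derive(Clone, Copy)]
enum DeploymentState {
    Running,
    Stopped,
    Unknown,
}

struct Deployment {
    state: DeploymentState,
}

#[derive(Default)]
struct Summary {
    total: u32,
    running: u32,
    stopped: u32,
    unknown: u32,
}

// stand-in for resource::list_full_for_user::<Deployment>(Default::default(), &user)
async fn list_full_for_user() -> anyhow::Result<Vec<Deployment>> {
    Ok(vec![
        Deployment { state: DeploymentState::Running },
        Deployment { state: DeploymentState::Stopped },
    ])
}

async fn get_deployments_summary() -> anyhow::Result<Summary> {
    let deployments = list_full_for_user().await?;
    let mut res = Summary::default();
    for deployment in deployments {
        res.total += 1;
        match deployment.state {
            DeploymentState::Running => res.running += 1,
            DeploymentState::Stopped => res.stopped += 1,
            DeploymentState::Unknown => res.unknown += 1,
        }
    }
    Ok(res)
}
```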

View File

@@ -23,6 +23,7 @@ mod repo;
mod search;
mod server;
mod server_template;
mod sync;
mod tag;
mod toml;
mod update;
@@ -38,6 +39,7 @@ mod variable;
enum ReadRequest {
GetVersion(GetVersion),
GetCoreInfo(GetCoreInfo),
GetAvailableAwsEcrLabels(GetAvailableAwsEcrLabels),
// ==== USER ====
ListUsers(ListUsers),
@@ -60,16 +62,19 @@ enum ReadRequest {
GetProcedure(GetProcedure),
GetProcedureActionState(GetProcedureActionState),
ListProcedures(ListProcedures),
ListFullProcedures(ListFullProcedures),
// ==== SERVER TEMPLATE ====
GetServerTemplate(GetServerTemplate),
ListServerTemplates(ListServerTemplates),
ListFullServerTemplates(ListFullServerTemplates),
GetServerTemplatesSummary(GetServerTemplatesSummary),
// ==== SERVER ====
GetServersSummary(GetServersSummary),
GetServer(GetServer),
ListServers(ListServers),
ListFullServers(ListFullServers),
GetServerState(GetServerState),
GetPeripheryVersion(GetPeripheryVersion),
GetDockerContainers(GetDockerContainers),
@@ -84,6 +89,7 @@ enum ReadRequest {
GetDeploymentsSummary(GetDeploymentsSummary),
GetDeployment(GetDeployment),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
GetDeploymentContainer(GetDeploymentContainer),
GetDeploymentActionState(GetDeploymentActionState),
GetDeploymentStats(GetDeploymentStats),
@@ -95,29 +101,42 @@ enum ReadRequest {
GetBuildsSummary(GetBuildsSummary),
GetBuild(GetBuild),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
GetBuildActionState(GetBuildActionState),
GetBuildMonthlyStats(GetBuildMonthlyStats),
GetBuildVersions(GetBuildVersions),
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
#[to_string_resolver]
ListGithubOrganizations(ListGithubOrganizations),
#[to_string_resolver]
ListDockerOrganizations(ListDockerOrganizations),
// ==== REPO ====
GetReposSummary(GetReposSummary),
GetRepo(GetRepo),
ListRepos(ListRepos),
ListFullRepos(ListFullRepos),
GetRepoActionState(GetRepoActionState),
// ==== SYNC ====
GetResourceSyncsSummary(GetResourceSyncsSummary),
GetResourceSync(GetResourceSync),
ListResourceSyncs(ListResourceSyncs),
ListFullResourceSyncs(ListFullResourceSyncs),
GetResourceSyncActionState(GetResourceSyncActionState),
// ==== BUILDER ====
GetBuildersSummary(GetBuildersSummary),
GetBuilder(GetBuilder),
ListBuilders(ListBuilders),
ListFullBuilders(ListFullBuilders),
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
// ==== ALERTER ====
GetAlertersSummary(GetAlertersSummary),
GetAlerter(GetAlerter),
ListAlerters(ListAlerters),
ListFullAlerters(ListFullAlerters),
// ==== TOML ====
ExportAllResourcesToToml(ExportAllResourcesToToml),
@@ -154,17 +173,14 @@ pub fn router() -> Router {
.layer(middleware::from_fn(auth_request))
}
#[instrument(name = "ReadHandler", level = "debug", skip(user))]
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ReadRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
debug!(
"/read request {req_id} | user: {} ({})",
user.username, user.id
);
debug!("/read request | user: {}", user.username);
let res =
State
.resolve_request(request, user)
@@ -176,7 +192,7 @@ async fn handler(
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/read request {req_id} error: {e:#}");
debug!("/read request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/read request {req_id} | resolve time: {elapsed:?}");
@@ -211,6 +227,18 @@ impl Resolve<GetCoreInfo, User> for State {
.github_webhook_base_url
.clone()
.unwrap_or_else(|| config.host.clone()),
transparent_mode: config.transparent_mode,
ui_write_disabled: config.ui_write_disabled,
})
}
}
impl Resolve<GetAvailableAwsEcrLabels, User> for State {
async fn resolve(
&self,
GetAvailableAwsEcrLabels {}: GetAvailableAwsEcrLabels,
_: User,
) -> anyhow::Result<GetAvailableAwsEcrLabelsResponse> {
Ok(core_config().aws_ecr_registries.keys().cloned().collect())
}
}

View File

@@ -1,30 +1,17 @@
use std::str::FromStr;
use anyhow::Context;
use monitor_client::{
api::read::{
GetProcedure, GetProcedureActionState,
GetProcedureActionStateResponse, GetProcedureResponse,
GetProceduresSummary, GetProceduresSummaryResponse,
ListProcedures, ListProceduresResponse,
},
api::read::*,
entities::{
permission::PermissionLevel,
procedure::{Procedure, ProcedureState},
update::ResourceTargetVariant,
user::User,
},
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::{
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{action_states, db_client, procedure_state_cache, State},
state::{action_states, procedure_state_cache, State},
};
impl Resolve<GetProcedure, User> for State {
@@ -52,33 +39,28 @@ impl Resolve<ListProcedures, User> for State {
}
}
impl Resolve<ListFullProcedures, User> for State {
async fn resolve(
&self,
ListFullProcedures { query }: ListFullProcedures,
user: User,
) -> anyhow::Result<ListFullProceduresResponse> {
resource::list_full_for_user::<Procedure>(query, &user).await
}
}
impl Resolve<GetProceduresSummary, User> for State {
async fn resolve(
&self,
GetProceduresSummary {}: GetProceduresSummary,
user: User,
) -> anyhow::Result<GetProceduresSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Procedure,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let procedures =
find_collect(&db_client().await.procedures, query, None)
.await
.context("failed to find all procedure documents")?;
let procedures = resource::list_full_for_user::<Procedure>(
Default::default(),
&user,
)
.await
.context("failed to get procedures from db")?;
let mut res = GetProceduresSummaryResponse::default();

View File

@@ -1,25 +1,17 @@
use std::str::FromStr;
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
permission::PermissionLevel,
repo::{Repo, RepoActionState, RepoListItem, RepoState},
update::ResourceTargetVariant,
user::User,
},
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::{
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{action_states, db_client, repo_state_cache, State},
state::{action_states, repo_state_cache, State},
};
impl Resolve<GetRepo, User> for State {
@@ -47,6 +39,16 @@ impl Resolve<ListRepos, User> for State {
}
}
impl Resolve<ListFullRepos, User> for State {
async fn resolve(
&self,
ListFullRepos { query }: ListFullRepos,
user: User,
) -> anyhow::Result<ListFullReposResponse> {
resource::list_full_for_user::<Repo>(query, &user).await
}
}
impl Resolve<GetRepoActionState, User> for State {
async fn resolve(
&self,
@@ -75,26 +77,11 @@ impl Resolve<GetReposSummary, User> for State {
GetReposSummary {}: GetReposSummary,
user: User,
) -> anyhow::Result<GetReposSummaryResponse> {
let query = if user.admin {
None
} else {
let ids = get_resource_ids_for_non_admin(
&user.id,
ResourceTargetVariant::Alerter,
)
.await?
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<Vec<_>>();
let query = doc! {
"_id": { "$in": ids }
};
Some(query)
};
let repos =
resource::list_full_for_user::<Repo>(Default::default(), &user)
.await
.context("failed to get repos from db")?;
let repos = find_collect(&db_client().await.repos, query, None)
.await
.context("failed to find all repo documents")?;
let mut res = GetReposSummaryResponse::default();
let cache = repo_state_cache();

View File

@@ -108,6 +108,16 @@ impl Resolve<ListServers, User> for State {
}
}
impl Resolve<ListFullServers, User> for State {
async fn resolve(
&self,
ListFullServers { query }: ListFullServers,
user: User,
) -> anyhow::Result<ListFullServersResponse> {
resource::list_full_for_user::<Server>(query, &user).await
}
}
impl Resolve<GetServerState, User> for State {
async fn resolve(
&self,
@@ -379,18 +389,24 @@ impl Resolve<GetAvailableAccounts, User> for State {
GetAvailableAccounts { server }: GetAvailableAccounts,
user: User,
) -> anyhow::Result<GetAvailableAccountsResponse> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let (github, docker) = match server {
Some(server) => {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let GetAccountsResponse { github, docker } =
periphery_client(&server)?
.request(api::GetAccounts {})
.await
.context("failed to get accounts from periphery")?;
let GetAccountsResponse { github, docker } =
periphery_client(&server)?
.request(api::GetAccounts {})
.await
.context("failed to get accounts from periphery")?;
(github, docker)
}
None => Default::default(),
};
let mut github_set = HashSet::<String>::new();
@@ -425,10 +441,11 @@ impl Resolve<GetAvailableSecrets, User> for State {
PermissionLevel::Read,
)
.await?;
let secrets = periphery_client(&server)?
let mut secrets = periphery_client(&server)?
.request(api::GetSecrets {})
.await
.context("failed to get accounts from periphery")?;
secrets.sort();
Ok(secrets)
}
}
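
`GetAvailableAccounts` now takes an optional server: when none is given, the periphery call is skipped and empty account lists are used, and `GetAvailableSecrets` additionally sorts the returned secrets. A sketch of the optional-server branch, with a stubbed periphery call standing in for the real request:

```rust
// Sketch: only hit the periphery when a server was actually provided.
#[derive(Default)]
struct Accounts {
    github: Vec<String>,
    docker: Vec<String>,
}

// stand-in for the periphery GetAccounts request
async fn periphery_get_accounts(_server: &str) -> anyhow::Result<Accounts> {
    Ok(Accounts {
        github: vec!["gh-account".into()],
        docker: vec!["docker-account".into()],
    })
}

async fn get_available_accounts(
    server: Option<String>,
) -> anyhow::Result<Accounts> {
    let accounts = match server {
        Some(server) => periphery_get_accounts(&server).await?,
        // no server selected: nothing to query, fall back to empty lists
        None => Accounts::default(),
    };
    Ok(accounts)
}
```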

View File

@@ -2,11 +2,7 @@ use std::str::FromStr;
use anyhow::Context;
use monitor_client::{
api::read::{
GetServerTemplate, GetServerTemplateResponse,
GetServerTemplatesSummary, GetServerTemplatesSummaryResponse,
ListServerTemplates, ListServerTemplatesResponse,
},
api::read::*,
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
update::ResourceTargetVariant, user::User,
@@ -16,7 +12,9 @@ use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
helpers::query::get_resource_ids_for_non_admin, resource, state::{db_client, State}
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
impl Resolve<GetServerTemplate, User> for State {
@@ -44,6 +42,16 @@ impl Resolve<ListServerTemplates, User> for State {
}
}
impl Resolve<ListFullServerTemplates, User> for State {
async fn resolve(
&self,
ListFullServerTemplates { query }: ListFullServerTemplates,
user: User,
) -> anyhow::Result<ListFullServerTemplatesResponse> {
resource::list_full_for_user::<ServerTemplate>(query, &user).await
}
}
impl Resolve<GetServerTemplatesSummary, User> for State {
async fn resolve(
&self,
@@ -68,10 +76,10 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
};
let total = db_client()
.await
.builders
.server_templates
.count_documents(query, None)
.await
.context("failed to count all builder documents")?;
.context("failed to count all server template documents")?;
let res = GetServerTemplatesSummaryResponse {
total: total as u32,
};

View File

@@ -0,0 +1,139 @@
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
permission::PermissionLevel,
sync::{
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
ResourceSyncListItem, ResourceSyncState,
},
user::User,
},
};
use resolver_api::Resolve;
use crate::{
resource,
state::{action_states, resource_sync_state_cache, State},
};
impl Resolve<GetResourceSync, User> for State {
async fn resolve(
&self,
GetResourceSync { sync }: GetResourceSync,
user: User,
) -> anyhow::Result<ResourceSync> {
resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Read,
)
.await
}
}
impl Resolve<ListResourceSyncs, User> for State {
async fn resolve(
&self,
ListResourceSyncs { query }: ListResourceSyncs,
user: User,
) -> anyhow::Result<Vec<ResourceSyncListItem>> {
resource::list_for_user::<ResourceSync>(query, &user).await
}
}
impl Resolve<ListFullResourceSyncs, User> for State {
async fn resolve(
&self,
ListFullResourceSyncs { query }: ListFullResourceSyncs,
user: User,
) -> anyhow::Result<ListFullResourceSyncsResponse> {
resource::list_full_for_user::<ResourceSync>(query, &user).await
}
}
impl Resolve<GetResourceSyncActionState, User> for State {
async fn resolve(
&self,
GetResourceSyncActionState { sync }: GetResourceSyncActionState,
user: User,
) -> anyhow::Result<ResourceSyncActionState> {
let sync = resource::get_check_permissions::<ResourceSync>(
&sync,
&user,
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
.resource_sync
.get(&sync.id)
.await
.unwrap_or_default()
.get()?;
Ok(action_state)
}
}
impl Resolve<GetResourceSyncsSummary, User> for State {
async fn resolve(
&self,
GetResourceSyncsSummary {}: GetResourceSyncsSummary,
user: User,
) -> anyhow::Result<GetResourceSyncsSummaryResponse> {
let resource_syncs =
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user,
)
.await
.context("failed to get resource_syncs from db")?;
let mut res = GetResourceSyncsSummaryResponse::default();
let cache = resource_sync_state_cache();
let action_states = action_states();
for resource_sync in resource_syncs {
res.total += 1;
match resource_sync.info.pending.data {
PendingSyncUpdatesData::Ok(data) => {
if !data.no_updates() {
res.pending += 1;
continue;
}
}
PendingSyncUpdatesData::Err(_) => {
res.failed += 1;
continue;
}
}
match (
cache.get(&resource_sync.id).await.unwrap_or_default(),
action_states
.resource_sync
.get(&resource_sync.id)
.await
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.syncing => {
res.syncing += 1;
}
(ResourceSyncState::Ok, _) => res.ok += 1,
(ResourceSyncState::Failed, _) => res.failed += 1,
(ResourceSyncState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the building state, since that comes from action states
(ResourceSyncState::Syncing, _) => {
unreachable!()
}
(ResourceSyncState::Pending, _) => {
unreachable!()
}
}
}
Ok(res)
}
}

View File

@@ -1,9 +1,10 @@
use anyhow::Context;
use mongo_indexed::doc;
use monitor_client::{
api::read::{GetTag, ListTags},
entities::{tag::Tag, user::User},
};
use mungos::find::find_collect;
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
@@ -27,8 +28,12 @@ impl Resolve<ListTags, User> for State {
ListTags { query }: ListTags,
_: User,
) -> anyhow::Result<Vec<Tag>> {
find_collect(&db_client().await.tags, query, None)
.await
.context("failed to get tags from db")
find_collect(
&db_client().await.tags,
query,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to get tags from db")
}
}

View File

@@ -14,13 +14,18 @@ use monitor_client::{
alerter::Alerter,
build::Build,
builder::{Builder, BuilderConfig},
deployment::{Deployment, DeploymentImage},
deployment::{
conversions_to_string, term_signal_labels_to_string,
Deployment, DeploymentImage,
},
environment_vars_to_string,
permission::{PermissionLevel, UserTarget},
procedure::Procedure,
repo::Repo,
resource::Resource,
resource::{Resource, ResourceQuery},
server::Server,
server_template::ServerTemplate,
sync::ResourceSync,
toml::{
PermissionToml, ResourceToml, ResourcesToml, UserGroupToml,
},
@@ -29,43 +34,55 @@ use monitor_client::{
},
};
use mungos::find::find_collect;
use ordered_hash_map::OrderedHashMap;
use partial_derive2::PartialDiff;
use resolver_api::Resolve;
use serde_json::Value;
use crate::{
helpers::query::get_user_user_group_ids,
resource,
resource::{self, MonitorResource},
state::{db_client, State},
};
impl Resolve<ExportAllResourcesToToml, User> for State {
async fn resolve(
&self,
ExportAllResourcesToToml {}: ExportAllResourcesToToml,
ExportAllResourcesToToml { tags }: ExportAllResourcesToToml,
user: User,
) -> anyhow::Result<ExportAllResourcesToTomlResponse> {
let mut targets = Vec::<ResourceTarget>::new();
targets.extend(
resource::list_for_user::<Alerter>(Default::default(), &user)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
);
targets.extend(
resource::list_for_user::<Builder>(Default::default(), &user)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
);
targets.extend(
resource::list_for_user::<Server>(Default::default(), &user)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
);
targets.extend(
resource::list_for_user::<Deployment>(
Default::default(),
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
@@ -73,32 +90,50 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_for_user::<Build>(Default::default(), &user)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
);
targets.extend(
resource::list_for_user::<Repo>(Default::default(), &user)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
);
targets.extend(
resource::list_for_user::<Procedure>(Default::default(), &user)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
Default::default(),
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
let user_groups = if user.admin {
find_collect(&db_client().await.user_groups, None, None)
@@ -116,6 +151,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
ExportResourcesToToml {
targets,
user_groups,
include_variables: true,
},
user,
)
@@ -129,6 +165,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
ExportResourcesToToml {
targets,
user_groups,
include_variables,
}: ExportResourcesToToml,
user: User,
) -> anyhow::Result<ExportResourcesToTomlResponse> {
@@ -145,7 +182,20 @@ impl Resolve<ExportResourcesToToml, User> for State {
PermissionLevel::Read,
)
.await?;
res.alerters.push(convert_resource(alerter, &names.tags))
res
.alerters
.push(convert_resource::<Alerter>(alerter, &names.tags))
}
ResourceTarget::ResourceSync(id) => {
let sync = resource::get_check_permissions::<ResourceSync>(
&id,
&user,
PermissionLevel::Read,
)
.await?;
res
.resource_syncs
.push(convert_resource::<ResourceSync>(sync, &names.tags))
}
ResourceTarget::ServerTemplate(id) => {
let template = resource::get_check_permissions::<
@@ -154,9 +204,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
&id, &user, PermissionLevel::Read
)
.await?;
res
.server_templates
.push(convert_resource(template, &names.tags))
res.server_templates.push(
convert_resource::<ServerTemplate>(template, &names.tags),
)
}
ResourceTarget::Server(id) => {
let server = resource::get_check_permissions::<Server>(
@@ -165,7 +215,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
PermissionLevel::Read,
)
.await?;
res.servers.push(convert_resource(server, &names.tags))
res
.servers
.push(convert_resource::<Server>(server, &names.tags))
}
ResourceTarget::Builder(id) => {
let mut builder =
@@ -181,7 +233,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
names.servers.get(&id).unwrap_or(&String::new()),
)
}
res.builders.push(convert_resource(builder, &names.tags))
res
.builders
.push(convert_resource::<Builder>(builder, &names.tags))
}
ResourceTarget::Build(id) => {
let mut build = resource::get_check_permissions::<Build>(
@@ -197,7 +251,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
.get(&build.config.builder_id)
.unwrap_or(&String::new()),
);
res.builds.push(convert_resource(build, &names.tags))
res
.builds
.push(convert_resource::<Build>(build, &names.tags))
}
ResourceTarget::Deployment(id) => {
let mut deployment = resource::get_check_permissions::<
@@ -221,9 +277,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
names.builds.get(build_id).unwrap_or(&String::new()),
);
}
res
.deployments
.push(convert_resource(deployment, &names.tags))
res.deployments.push(convert_resource::<Deployment>(
deployment,
&names.tags,
))
}
ResourceTarget::Repo(id) => {
let mut repo = resource::get_check_permissions::<Repo>(
@@ -239,7 +296,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
.get(&repo.config.server_id)
.unwrap_or(&String::new()),
);
res.repos.push(convert_resource(repo, &names.tags))
res.repos.push(convert_resource::<Repo>(repo, &names.tags))
}
ResourceTarget::Procedure(id) => {
add_procedure(&id, &mut res, &user, &names)
@@ -256,7 +313,14 @@ impl Resolve<ExportResourcesToToml, User> for State {
.await
.context("failed to add user groups")?;
let toml = toml::to_string(&res)
if include_variables {
res.variables =
find_collect(&db_client().await.variables, None, None)
.await
.context("failed to get variables from db")?;
}
let toml = serialize_resources_toml(&res)
.context("failed to serialize resources to toml")?;
Ok(ExportResourcesToTomlResponse { toml })
@@ -275,67 +339,76 @@ async fn add_procedure(
PermissionLevel::Read,
)
.await?;
for execution in &mut procedure.config.executions {
match &mut execution.execution {
Execution::RunProcedure(exec) => exec.procedure.clone_from(
names
.procedures
.get(&exec.procedure)
.unwrap_or(&String::new()),
),
Execution::RunBuild(exec) => exec.build.clone_from(
names.builds.get(&exec.build).unwrap_or(&String::new()),
),
Execution::Deploy(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::StartContainer(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::StopContainer(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::RemoveContainer(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::CloneRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::PullRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::StopAllContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneDockerNetworks(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneDockerImages(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneDockerContainers(exec) => {
exec.server.clone_from(
for stage in &mut procedure.config.stages {
for execution in &mut stage.executions {
match &mut execution.execution {
Execution::RunProcedure(exec) => exec.procedure.clone_from(
names
.procedures
.get(&exec.procedure)
.unwrap_or(&String::new()),
),
Execution::RunBuild(exec) => exec.build.clone_from(
names.builds.get(&exec.build).unwrap_or(&String::new()),
),
Execution::Deploy(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::StartContainer(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::StopContainer(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::RemoveContainer(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::CloneRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::PullRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::StopAllContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
)
),
Execution::PruneNetworks(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneImages(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::RunSync(exec) => exec.sync.clone_from(
names.syncs.get(&exec.sync).unwrap_or(&String::new()),
),
Execution::Sleep(_) | Execution::None(_) => {}
}
Execution::None(_) => continue,
}
}
res
.procedures
.push(convert_resource(procedure, &names.tags));
.push(convert_resource::<Procedure>(procedure, &names.tags));
Ok(())
}
@@ -347,6 +420,7 @@ struct ResourceNames {
repos: HashMap<String, String>,
deployments: HashMap<String, String>,
procedures: HashMap<String, String>,
syncs: HashMap<String, String>,
}
impl ResourceNames {
@@ -395,6 +469,12 @@ impl ResourceNames {
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
syncs: find_collect(&db.resource_syncs, None, None)
.await
.context("failed to get all resource syncs")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
})
}
}
@@ -447,13 +527,13 @@ async fn add_user_groups(
Ok(())
}
fn convert_resource<Config, Info: Default, PartialConfig>(
resource: Resource<Config, Info>,
fn convert_resource<R: MonitorResource>(
resource: Resource<R::Config, R::Info>,
tag_names: &HashMap<String, String>,
) -> ResourceToml<PartialConfig>
where
Config: Into<PartialConfig>,
{
) -> ResourceToml<R::PartialConfig> {
  // This makes sure all non-necessary (defaulted) fields don't make it into the final toml
let partial: R::PartialConfig = resource.config.into();
let config = R::Config::default().minimize_partial(partial);
ResourceToml {
name: resource.name,
tags: resource
@@ -462,6 +542,248 @@ where
.filter_map(|t| tag_names.get(t).cloned())
.collect(),
description: resource.description,
config: resource.config.into(),
deploy: false,
after: Default::default(),
config,
}
}
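
A rough sketch of the minimize-partial idea used in convert_resource above, with made-up ExampleConfig / ExamplePartial types standing in for the real resource configs: converting the full config into a partial and diffing it against Default drops every field the user never changed, so only meaningful values reach the exported toml.

// Sketch only: ExampleConfig / ExamplePartial are hypothetical types illustrating
// the Config -> PartialConfig -> minimize-against-default flow used above.
#[derive(Default)]
struct ExampleConfig {
  cpu: u32,
  image: String,
}

struct ExamplePartial {
  cpu: Option<u32>,
  image: Option<String>,
}

impl From<ExampleConfig> for ExamplePartial {
  fn from(config: ExampleConfig) -> Self {
    ExamplePartial {
      cpu: Some(config.cpu),
      image: Some(config.image),
    }
  }
}

impl ExampleConfig {
  // Hypothetical stand-in for `minimize_partial`: drop fields equal to the default.
  fn minimize_partial(&self, partial: ExamplePartial) -> ExamplePartial {
    ExamplePartial {
      cpu: partial.cpu.filter(|cpu| *cpu != self.cpu),
      image: partial.image.filter(|image| *image != self.image),
    }
  }
}

fn main() {
  let config = ExampleConfig { cpu: 0, image: "nginx".to_string() };
  let minimized = ExampleConfig::default().minimize_partial(config.into());
  assert!(minimized.cpu.is_none()); // defaulted field dropped from the export
  assert_eq!(minimized.image.as_deref(), Some("nginx")); // changed field kept
}
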
fn serialize_resources_toml(
resources: &ResourcesToml,
) -> anyhow::Result<String> {
let mut res = String::new();
let options = toml_pretty::Options::default()
.tab(" ")
.skip_empty_string(true)
.max_inline_array_length(30);
for server in &resources.servers {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[server]]\n");
res.push_str(
&toml_pretty::to_string(&server, options)
.context("failed to serialize servers to toml")?,
);
}
for deployment in &resources.deployments {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[deployment]]\n");
let mut parsed: OrderedHashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&deployment)?)?;
let config = parsed
.get_mut("config")
.context("deployment has no config?")?
.as_object_mut()
.context("config is not object?")?;
if let Some(DeploymentImage::Build { version, .. }) =
&deployment.config.image
{
let image = config
.get_mut("image")
.context("deployment has no image")?
.get_mut("params")
.context("deployment image has no params")?
.as_object_mut()
.context("deployment image params is not object")?;
if version.is_none() {
image.remove("version");
} else {
image.insert(
"version".to_string(),
Value::String(version.to_string()),
);
}
}
if let Some(term_signal_labels) =
&deployment.config.term_signal_labels
{
config.insert(
"term_signal_labels".to_string(),
Value::String(term_signal_labels_to_string(
term_signal_labels,
)),
);
}
if let Some(ports) = &deployment.config.ports {
config.insert(
"ports".to_string(),
Value::String(conversions_to_string(ports)),
);
}
if let Some(volumes) = &deployment.config.volumes {
config.insert(
"volumes".to_string(),
Value::String(conversions_to_string(volumes)),
);
}
if let Some(environment) = &deployment.config.environment {
config.insert(
"environment".to_string(),
Value::String(environment_vars_to_string(environment)),
);
}
if let Some(labels) = &deployment.config.labels {
config.insert(
"labels".to_string(),
Value::String(environment_vars_to_string(labels)),
);
}
res.push_str(
&toml_pretty::to_string(&parsed, options)
.context("failed to serialize deployments to toml")?,
);
}
for build in &resources.builds {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
let mut parsed: OrderedHashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&build)?)?;
let config = parsed
.get_mut("config")
.context("build has no config?")?
.as_object_mut()
.context("config is not object?")?;
if let Some(version) = &build.config.version {
config.insert(
"version".to_string(),
Value::String(version.to_string()),
);
}
if let Some(build_args) = &build.config.build_args {
config.insert(
"build_args".to_string(),
Value::String(environment_vars_to_string(build_args)),
);
}
if let Some(labels) = &build.config.labels {
config.insert(
"labels".to_string(),
Value::String(environment_vars_to_string(labels)),
);
}
res.push_str("[[build]]\n");
res.push_str(
&toml_pretty::to_string(&parsed, options)
.context("failed to serialize builds to toml")?,
);
}
for repo in &resources.repos {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[repo]]\n");
res.push_str(
&toml_pretty::to_string(&repo, options)
.context("failed to serialize repos to toml")?,
);
}
for procedure in &resources.procedures {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
let mut parsed: OrderedHashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&procedure)?)?;
let config = parsed
.get_mut("config")
.context("procedure has no config?")?
.as_object_mut()
.context("config is not object?")?;
let stages = config
.remove("stages")
.context("procedure config has no stages")?;
let stages = stages.as_array().context("stages is not array")?;
res.push_str("[[procedure]]\n");
res.push_str(
&toml_pretty::to_string(&parsed, options)
.context("failed to serialize procedures to toml")?,
);
for stage in stages {
res.push_str("\n\n[[procedure.config.stage]]\n");
res.push_str(
&toml_pretty::to_string(stage, options)
.context("failed to serialize procedures to toml")?,
);
}
}
for alerter in &resources.alerters {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[alerter]]\n");
res.push_str(
&toml_pretty::to_string(&alerter, options)
.context("failed to serialize alerters to toml")?,
);
}
for builder in &resources.builders {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[builder]]\n");
res.push_str(
&toml_pretty::to_string(&builder, options)
.context("failed to serialize builders to toml")?,
);
}
for server_template in &resources.server_templates {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[server_template]]\n");
res.push_str(
&toml_pretty::to_string(&server_template, options)
.context("failed to serialize server_templates to toml")?,
);
}
for resource_sync in &resources.resource_syncs {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[resource_sync]]\n");
res.push_str(
&toml_pretty::to_string(&resource_sync, options)
.context("failed to serialize resource_syncs to toml")?,
);
}
for variable in &resources.variables {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[variable]]\n");
res.push_str(
&toml_pretty::to_string(&variable, options)
.context("failed to serialize variables to toml")?,
);
}
for user_group in &resources.user_groups {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[user_group]]\n");
res.push_str(
&toml_pretty::to_string(&user_group, options)
.context("failed to serialize user_groups to toml")?,
);
}
Ok(res)
}
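
For reference, a minimal sketch (not part of this diff) of what the custom serializer above emits for a single resource, reusing only the toml_pretty calls shown in the function; MinimalServer is a made-up stand-in for the real server type. Each resource lands under its own [[server]] / [[deployment]]-style array-of-tables header, and entries are separated by the "##" comment dividers pushed above.

use anyhow::Context;
use serde::Serialize;

#[derive(Serialize)]
struct MinimalServer {
  name: String,
  description: String,
  tags: Vec<String>,
}

fn main() -> anyhow::Result<()> {
  // Same toml_pretty options the exporter configures above.
  let options = toml_pretty::Options::default()
    .tab(" ")
    .skip_empty_string(true)
    .max_inline_array_length(30);
  let server = MinimalServer {
    name: "my-server".to_string(),
    description: String::new(), // dropped by skip_empty_string
    tags: vec!["prod".to_string()],
  };
  // Each resource gets its own array-of-tables header, e.g. [[server]].
  let mut out = String::from("[[server]]\n");
  out.push_str(
    &toml_pretty::to_string(&server, options)
      .context("failed to serialize server to toml")?,
  );
  println!("{out}");
  Ok(())
}
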


@@ -13,6 +13,7 @@ use monitor_client::{
repo::Repo,
server::Server,
server_template::ServerTemplate,
sync::ResourceSync,
update::{
ResourceTarget, ResourceTargetVariant, Update, UpdateListItem,
},
@@ -27,6 +28,7 @@ use mungos::{
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
@@ -40,7 +42,7 @@ impl Resolve<ListUpdates, User> for State {
ListUpdates { query, page }: ListUpdates,
user: User,
) -> anyhow::Result<ListUpdatesResponse> {
let query = if user.admin {
let query = if user.admin || core_config().transparent_mode {
query
} else {
let server_ids = get_resource_ids_for_non_admin(
@@ -139,6 +141,7 @@ impl Resolve<ListUpdates, User> for State {
target: u.target,
status: u.status,
version: u.version,
other_data: u.other_data,
}
})
.collect::<Vec<_>>();
@@ -163,7 +166,7 @@ impl Resolve<GetUpdate, User> for State {
.await
.context("failed to query to db")?
.context("no update exists with given id")?;
if user.admin {
if user.admin || core_config().transparent_mode {
return Ok(update);
}
match &update.target {
@@ -236,6 +239,14 @@ impl Resolve<GetUpdate, User> for State {
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
resource::get_check_permissions::<ResourceSync>(
id,
&user,
PermissionLevel::Read,
)
.await?;
}
}
Ok(update)
}


@@ -8,7 +8,9 @@ use monitor_client::{
entities::user::{User, UserConfig},
};
use mungos::{
by_id::find_one_by_id, find::find_collect, mongodb::bson::doc,
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
@@ -47,10 +49,13 @@ impl Resolve<ListUsers, User> for State {
if !user.admin {
return Err(anyhow!("this route is only accessable by admins"));
}
let mut users =
find_collect(&db_client().await.users, None, None)
.await
.context("failed to pull users from db")?;
let mut users = find_collect(
&db_client().await.users,
None,
FindOptions::builder().sort(doc! { "username": 1 }).build(),
)
.await
.context("failed to pull users from db")?;
users.iter_mut().for_each(|user| user.sanitize());
Ok(users)
}
@@ -65,7 +70,7 @@ impl Resolve<ListApiKeys, User> for State {
let api_keys = find_collect(
&db_client().await.api_keys,
doc! { "user_id": &user.id },
None,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for api keys")?


@@ -10,7 +10,10 @@ use monitor_client::{
};
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId, Document},
mongodb::{
bson::{doc, oid::ObjectId, Document},
options::FindOptions,
},
};
use resolver_api::Resolve;
@@ -51,8 +54,12 @@ impl Resolve<ListUserGroups, User> for State {
if !user.admin {
filter.insert("users", &user.id);
}
find_collect(&db_client().await.user_groups, filter, None)
.await
.context("failed to query db for UserGroups")
find_collect(
&db_client().await.user_groups,
filter,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for UserGroups")
}
}


@@ -1,4 +1,5 @@
use anyhow::Context;
use mongo_indexed::doc;
use monitor_client::{
api::read::{
GetVariable, GetVariableResponse, ListVariables,
@@ -6,7 +7,7 @@ use monitor_client::{
},
entities::user::User,
};
use mungos::find::find_collect;
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
@@ -31,10 +32,13 @@ impl Resolve<ListVariables, User> for State {
ListVariables {}: ListVariables,
_: User,
) -> anyhow::Result<ListVariablesResponse> {
let variables =
find_collect(&db_client().await.variables, None, None)
.await
.context("failed to query db for variables")?;
let variables = find_collect(
&db_client().await.variables,
None,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for variables")?;
Ok(ListVariablesResponse {
variables,
secrets: core_config().secrets.keys().cloned().collect(),

225
bin/core/src/api/user.rs Normal file

@@ -0,0 +1,225 @@
use std::{collections::VecDeque, time::Instant};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Json, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use mongo_indexed::doc;
use monitor_client::{
api::user::{
CreateApiKey, CreateApiKeyResponse, DeleteApiKey,
DeleteApiKeyResponse, PushRecentlyViewed,
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse,
},
entities::{
api_key::ApiKey, monitor_timestamp, update::ResourceTarget,
user::User,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::{auth_request, random_string},
helpers::query::get_user,
state::{db_client, State},
};
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum UserRequest {
PushRecentlyViewed(PushRecentlyViewed),
SetLastSeenUpdate(SetLastSeenUpdate),
CreateApiKey(CreateApiKey),
DeleteApiKey(DeleteApiKey),
}
pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.layer(middleware::from_fn(auth_request))
}
#[instrument(name = "UserHandler", level = "debug", skip(user))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<UserRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
debug!(
"/user request {req_id} | user: {} ({})",
user.username, user.id
);
let res =
State
.resolve_request(request, user)
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/user request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/user request {req_id} | resolve time: {elapsed:?}");
Ok((TypedHeader(ContentType::json()), res?))
}
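
Since UserRequest is tagged with #[serde(tag = "type", content = "params")], a client calls this endpoint by POSTing a JSON body of that shape to the /user route (with its usual api key auth). A small sketch of building such a body; the server id is a placeholder, and the type/id layout of the resource target is inferred from how targets are queried elsewhere in this diff (target.type / target.id).

use serde_json::json;

fn main() {
  // Body shape implied by #[serde(tag = "type", content = "params")] on UserRequest.
  let body = json!({
    "type": "PushRecentlyViewed",
    "params": {
      // ResourceTarget appears to serialize as a type/id pair (placeholder id).
      "resource": { "type": "Server", "id": "<server-id>" }
    }
  });
  println!("{}", serde_json::to_string_pretty(&body).unwrap());
}
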
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<PushRecentlyViewed, User> for State {
#[instrument(
name = "PushRecentlyViewed",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
PushRecentlyViewed { resource }: PushRecentlyViewed,
user: User,
) -> anyhow::Result<PushRecentlyViewedResponse> {
let user = get_user(&user.id).await?;
let (recents, id, field) = match resource {
ResourceTarget::Server(id) => {
(user.recent_servers, id, "recent_servers")
}
ResourceTarget::Deployment(id) => {
(user.recent_deployments, id, "recent_deployments")
}
ResourceTarget::Build(id) => {
(user.recent_builds, id, "recent_builds")
}
ResourceTarget::Repo(id) => {
(user.recent_repos, id, "recent_repos")
}
ResourceTarget::Procedure(id) => {
(user.recent_procedures, id, "recent_procedures")
}
_ => return Ok(PushRecentlyViewedResponse {}),
};
let mut recents = recents
.into_iter()
.filter(|_id| !id.eq(_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
let update = doc! { field: to_bson(&recents)? };
update_one_by_id(
&db_client().await.users,
&user.id,
mungos::update::Update::Set(update),
None,
)
.await
.with_context(|| format!("failed to update {field}"))?;
Ok(PushRecentlyViewedResponse {})
}
}
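
The recents bookkeeping above is easy to check in isolation; a standalone sketch of the same dedupe-and-cap logic (same constant, hypothetical helper name):

use std::collections::VecDeque;

const RECENTLY_VIEWED_MAX: usize = 10;

// Hypothetical standalone helper mirroring the update above: drop any existing
// copy of `id`, keep at most MAX - 1 of the rest, then push the new id in front.
fn push_recent(recents: Vec<String>, id: String) -> VecDeque<String> {
  let mut recents = recents
    .into_iter()
    .filter(|existing| existing != &id)
    .take(RECENTLY_VIEWED_MAX - 1)
    .collect::<VecDeque<_>>();
  recents.push_front(id);
  recents
}

fn main() {
  let updated =
    push_recent(vec!["b".to_string(), "a".to_string()], "a".to_string());
  assert_eq!(
    updated,
    VecDeque::from(vec!["a".to_string(), "b".to_string()])
  );
}
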
impl Resolve<SetLastSeenUpdate, User> for State {
#[instrument(
name = "SetLastSeenUpdate",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
SetLastSeenUpdate {}: SetLastSeenUpdate,
user: User,
) -> anyhow::Result<SetLastSeenUpdateResponse> {
update_one_by_id(
&db_client().await.users,
&user.id,
mungos::update::Update::Set(doc! {
"last_update_view": monitor_timestamp()
}),
None,
)
.await
.context("failed to update user last_update_view")?;
Ok(SetLastSeenUpdateResponse {})
}
}
const SECRET_LENGTH: usize = 40;
const BCRYPT_COST: u32 = 10;
impl Resolve<CreateApiKey, User> for State {
#[instrument(
name = "CreateApiKey",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
CreateApiKey { name, expires }: CreateApiKey,
user: User,
) -> anyhow::Result<CreateApiKeyResponse> {
let user = get_user(&user.id).await?;
let key = format!("K-{}", random_string(SECRET_LENGTH));
let secret = format!("S-{}", random_string(SECRET_LENGTH));
let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
.context("failed at hashing secret string")?;
let api_key = ApiKey {
name,
key: key.clone(),
secret: secret_hash,
user_id: user.id.clone(),
created_at: monitor_timestamp(),
expires,
};
db_client()
.await
.api_keys
.insert_one(api_key, None)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
}
}
impl Resolve<DeleteApiKey, User> for State {
#[instrument(
name = "DeleteApiKey",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
DeleteApiKey { key }: DeleteApiKey,
user: User,
) -> anyhow::Result<DeleteApiKeyResponse> {
let client = db_client().await;
let key = client
.api_keys
.find_one(doc! { "key": &key }, None)
.await
.context("failed at db query")?
.context("no api key with key found")?;
if user.id != key.user_id {
return Err(anyhow!("api key does not belong to user"));
}
client
.api_keys
.delete_one(doc! { "key": key.key }, None)
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})
}
}


@@ -28,13 +28,10 @@ impl Resolve<CopyAlerter, User> for State {
CopyAlerter { name, id }: CopyAlerter,
user: User,
) -> anyhow::Result<Alerter> {
let Alerter {
config,
..
} = resource::get_check_permissions::<Alerter>(
&id,
&user,
PermissionLevel::Write,
let Alerter { config, .. } = resource::get_check_permissions::<
Alerter,
>(
&id, &user, PermissionLevel::Write
)
.await?;
resource::create::<Alerter>(&name, config.into(), &user).await


@@ -1,147 +0,0 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::*,
entities::{
api_key::ApiKey,
monitor_timestamp,
user::{User, UserConfig},
},
};
use mungos::{by_id::find_one_by_id, mongodb::bson::doc};
use resolver_api::Resolve;
use crate::{
auth::random_string,
helpers::query::get_user,
state::{db_client, State},
};
const SECRET_LENGTH: usize = 40;
const BCRYPT_COST: u32 = 10;
impl Resolve<CreateApiKey, User> for State {
#[instrument(
name = "CreateApiKey",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
CreateApiKey { name, expires }: CreateApiKey,
user: User,
) -> anyhow::Result<CreateApiKeyResponse> {
let user = get_user(&user.id).await?;
let key = format!("K-{}", random_string(SECRET_LENGTH));
let secret = format!("S-{}", random_string(SECRET_LENGTH));
let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
.context("failed at hashing secret string")?;
let api_key = ApiKey {
name,
key: key.clone(),
secret: secret_hash,
user_id: user.id.clone(),
created_at: monitor_timestamp(),
expires,
};
db_client()
.await
.api_keys
.insert_one(api_key, None)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
}
}
impl Resolve<DeleteApiKey, User> for State {
#[instrument(
name = "DeleteApiKey",
level = "debug",
skip(self, user)
)]
async fn resolve(
&self,
DeleteApiKey { key }: DeleteApiKey,
user: User,
) -> anyhow::Result<DeleteApiKeyResponse> {
let client = db_client().await;
let key = client
.api_keys
.find_one(doc! { "key": &key }, None)
.await
.context("failed at db query")?
.context("no api key with key found")?;
if user.id != key.user_id {
return Err(anyhow!("api key does not belong to user"));
}
client
.api_keys
.delete_one(doc! { "key": key.key }, None)
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})
}
}
impl Resolve<CreateApiKeyForServiceUser, User> for State {
#[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
async fn resolve(
&self,
CreateApiKeyForServiceUser {
user_id,
name,
expires,
}: CreateApiKeyForServiceUser,
user: User,
) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
let service_user =
find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(anyhow!("user is not service user"));
};
self
.resolve(CreateApiKey { name, expires }, service_user)
.await
}
}
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
#[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
async fn resolve(
&self,
DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
user: User,
) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
let db = db_client().await;
let api_key = db
.api_keys
.find_one(doc! { "key": &key }, None)
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
let service_user =
find_one_by_id(&db_client().await.users, &api_key.user_id)
.await
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(anyhow!("user is not service user"));
};
db.api_keys
.delete_one(doc! { "key": key }, None)
.await
.context("failed to delete api key on db")?;
Ok(DeleteApiKeyForServiceUserResponse {})
}
}


@@ -24,15 +24,13 @@ impl Resolve<CopyBuild, User> for State {
CopyBuild { name, id }: CopyBuild,
user: User,
) -> anyhow::Result<Build> {
let Build {
config,
..
} = resource::get_check_permissions::<Build>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let Build { config, .. } =
resource::get_check_permissions::<Build>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::create::<Build>(&name, config.into(), &user).await
}
}


@@ -5,7 +5,7 @@ use monitor_client::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
update::ResourceTarget, user::User,
sync::ResourceSync, update::ResourceTarget, user::User,
},
};
use resolver_api::Resolve;
@@ -92,6 +92,14 @@ impl Resolve<UpdateDescription, User> for State {
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
resource::update_description::<ResourceSync>(
&id,
&description,
&user,
)
.await?;
}
}
Ok(UpdateDescriptionResponse {})
}


@@ -13,7 +13,6 @@ use uuid::Uuid;
use crate::{auth::auth_request, state::State};
mod alerter;
mod api_key;
mod build;
mod builder;
mod deployment;
@@ -23,8 +22,9 @@ mod procedure;
mod repo;
mod server;
mod server_template;
mod service_user;
mod sync;
mod tag;
mod user;
mod user_group;
mod variable;
@@ -33,18 +33,12 @@ mod variable;
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum WriteRequest {
// ==== API KEY ====
CreateApiKey(CreateApiKey),
DeleteApiKey(DeleteApiKey),
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
// ==== USER ====
PushRecentlyViewed(PushRecentlyViewed),
SetLastSeenUpdate(SetLastSeenUpdate),
pub enum WriteRequest {
// ==== SERVICE USER ====
CreateServiceUser(CreateServiceUser),
UpdateServiceUserDescription(UpdateServiceUserDescription),
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
// ==== USER GROUP ====
CreateUserGroup(CreateUserGroup),
@@ -112,6 +106,13 @@ enum WriteRequest {
DeleteProcedure(DeleteProcedure),
UpdateProcedure(UpdateProcedure),
// ==== SYNC ====
CreateResourceSync(CreateResourceSync),
CopyResourceSync(CopyResourceSync),
DeleteResourceSync(DeleteResourceSync),
UpdateResourceSync(UpdateResourceSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
// ==== TAG ====
CreateTag(CreateTag),
DeleteTag(DeleteTag),
@@ -148,16 +149,13 @@ async fn handler(
Ok((TypedHeader(ContentType::json()), res??))
}
#[instrument(name = "WriteRequest", skip(user))]
#[instrument(name = "WriteRequest", skip(user), fields(user_id = user.id))]
async fn task(
req_id: Uuid,
request: WriteRequest,
user: User,
) -> anyhow::Result<String> {
info!(
"/write request {req_id} | user: {} ({})",
user.username, user.id
);
info!("/write request | user: {}", user.username);
let timer = Instant::now();
@@ -177,7 +175,7 @@ async fn task(
}
let elapsed = timer.elapsed();
info!("/write request {req_id} | resolve time: {elapsed:?}");
debug!("/write request {req_id} | resolve time: {elapsed:?}");
res
}


@@ -304,5 +304,20 @@ async fn extract_resource_target_with_validation(
.id;
Ok((ResourceTargetVariant::ServerTemplate, id))
}
ResourceTarget::ResourceSync(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.resource_syncs
.find_one(filter, None)
.await
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?
.id;
Ok((ResourceTargetVariant::ResourceSync, id))
}
}
}


@@ -1,4 +1,5 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
@@ -13,7 +14,6 @@ use monitor_client::{
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use serror::serialize_error_pretty;
use crate::{
helpers::{
@@ -112,8 +112,10 @@ impl Resolve<CreateNetwork, User> for State {
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("create network", serialize_error_pretty(&e)),
Err(e) => update.push_error_log(
"create network",
format_serror(&e.context("failed to create network").into()),
),
};
update.finalize();
@@ -149,8 +151,10 @@ impl Resolve<DeleteNetwork, User> for State {
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update
.push_error_log("delete network", serialize_error_pretty(&e)),
Err(e) => update.push_error_log(
"delete network",
format_serror(&e.context("failed to delete network").into()),
),
};
update.finalize();


@@ -13,6 +13,7 @@ use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<CreateServerTemplate, User> for State {
#[instrument(name = "CreateServerTemplate", skip(self, user))]
async fn resolve(
&self,
CreateServerTemplate { name, config }: CreateServerTemplate,
@@ -23,6 +24,7 @@ impl Resolve<CreateServerTemplate, User> for State {
}
impl Resolve<CopyServerTemplate, User> for State {
#[instrument(name = "CopyServerTemplate", skip(self, user))]
async fn resolve(
&self,
CopyServerTemplate { name, id }: CopyServerTemplate,
@@ -41,6 +43,7 @@ impl Resolve<CopyServerTemplate, User> for State {
}
impl Resolve<DeleteServerTemplate, User> for State {
#[instrument(name = "DeleteServerTemplate", skip(self, user))]
async fn resolve(
&self,
DeleteServerTemplate { id }: DeleteServerTemplate,
@@ -51,6 +54,7 @@ impl Resolve<DeleteServerTemplate, User> for State {
}
impl Resolve<UpdateServerTemplate, User> for State {
#[instrument(name = "UpdateServerTemplate", skip(self, user))]
async fn resolve(
&self,
UpdateServerTemplate { id, config }: UpdateServerTemplate,


@@ -1,101 +1,29 @@
use std::{collections::VecDeque, str::FromStr};
use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
CreateServiceUser, CreateServiceUserResponse, PushRecentlyViewed,
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse, UpdateServiceUserDescription,
UpdateServiceUserDescriptionResponse,
api::{
user::CreateApiKey,
write::{
CreateApiKeyForServiceUser, CreateApiKeyForServiceUserResponse,
CreateServiceUser, CreateServiceUserResponse,
DeleteApiKeyForServiceUser, DeleteApiKeyForServiceUserResponse,
UpdateServiceUserDescription,
UpdateServiceUserDescriptionResponse,
},
},
entities::{
monitor_timestamp,
update::ResourceTarget,
user::{User, UserConfig},
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, oid::ObjectId, to_bson},
by_id::find_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::{
helpers::query::get_user,
state::{db_client, State},
};
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<PushRecentlyViewed, User> for State {
#[instrument(name = "PushRecentlyViewed", skip(self, user))]
async fn resolve(
&self,
PushRecentlyViewed { resource }: PushRecentlyViewed,
user: User,
) -> anyhow::Result<PushRecentlyViewedResponse> {
let user = get_user(&user.id).await?;
let (recents, id, field) = match resource {
ResourceTarget::Server(id) => {
(user.recent_servers, id, "recent_servers")
}
ResourceTarget::Deployment(id) => {
(user.recent_deployments, id, "recent_deployments")
}
ResourceTarget::Build(id) => {
(user.recent_builds, id, "recent_builds")
}
ResourceTarget::Repo(id) => {
(user.recent_repos, id, "recent_repos")
}
ResourceTarget::Procedure(id) => {
(user.recent_procedures, id, "recent_procedures")
}
_ => return Ok(PushRecentlyViewedResponse {}),
};
let mut recents = recents
.into_iter()
.filter(|_id| !id.eq(_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
let update = doc! { field: to_bson(&recents)? };
update_one_by_id(
&db_client().await.users,
&user.id,
mungos::update::Update::Set(update),
None,
)
.await
.with_context(|| format!("failed to update {field}"))?;
Ok(PushRecentlyViewedResponse {})
}
}
impl Resolve<SetLastSeenUpdate, User> for State {
#[instrument(name = "SetLastSeenUpdate", skip(self, user))]
async fn resolve(
&self,
SetLastSeenUpdate {}: SetLastSeenUpdate,
user: User,
) -> anyhow::Result<SetLastSeenUpdateResponse> {
update_one_by_id(
&db_client().await.users,
&user.id,
mungos::update::Update::Set(doc! {
"last_update_view": monitor_timestamp()
}),
None,
)
.await
.context("failed to update user last_update_view")?;
Ok(SetLastSeenUpdateResponse {})
}
}
use crate::state::{db_client, State};
impl Resolve<CreateServiceUser, User> for State {
#[instrument(name = "CreateServiceUser", skip(self, user))]
@@ -185,3 +113,64 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
.context("user with username not found")
}
}
impl Resolve<CreateApiKeyForServiceUser, User> for State {
#[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
async fn resolve(
&self,
CreateApiKeyForServiceUser {
user_id,
name,
expires,
}: CreateApiKeyForServiceUser,
user: User,
) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
let service_user =
find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(anyhow!("user is not service user"));
};
self
.resolve(CreateApiKey { name, expires }, service_user)
.await
}
}
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
#[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
async fn resolve(
&self,
DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
user: User,
) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin"));
}
let db = db_client().await;
let api_key = db
.api_keys
.find_one(doc! { "key": &key }, None)
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
let service_user =
find_one_by_id(&db_client().await.users, &api_key.user_id)
.await
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(anyhow!("user is not service user"));
};
db.api_keys
.delete_one(doc! { "key": key }, None)
.await
.context("failed to delete api key on db")?;
Ok(DeleteApiKeyForServiceUserResponse {})
}
}


@@ -0,0 +1,325 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
self,
alert::{Alert, AlertData},
alerter::Alerter,
build::Build,
builder::Builder,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::{stats::SeverityLevel, Server},
server_template::ServerTemplate,
sync::{
PendingSyncUpdates, PendingSyncUpdatesData,
PendingSyncUpdatesDataErr, PendingSyncUpdatesDataOk,
ResourceSync,
},
update::ResourceTarget,
user::User,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::{
alert::send_alerts,
query::get_id_to_tags,
sync::{
deployment,
resource::{get_updates_for_view, AllResourcesById},
},
},
resource,
state::{db_client, State},
};
impl Resolve<CreateResourceSync, User> for State {
#[instrument(name = "CreateResourceSync", skip(self, user))]
async fn resolve(
&self,
CreateResourceSync { name, config }: CreateResourceSync,
user: User,
) -> anyhow::Result<ResourceSync> {
resource::create::<ResourceSync>(&name, config, &user).await
}
}
impl Resolve<CopyResourceSync, User> for State {
#[instrument(name = "CopyResourceSync", skip(self, user))]
async fn resolve(
&self,
CopyResourceSync { name, id }: CopyResourceSync,
user: User,
) -> anyhow::Result<ResourceSync> {
let ResourceSync { config, .. } =
resource::get_check_permissions::<ResourceSync>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::create::<ResourceSync>(&name, config.into(), &user)
.await
}
}
impl Resolve<DeleteResourceSync, User> for State {
#[instrument(name = "DeleteResourceSync", skip(self, user))]
async fn resolve(
&self,
DeleteResourceSync { id }: DeleteResourceSync,
user: User,
) -> anyhow::Result<ResourceSync> {
resource::delete::<ResourceSync>(&id, &user).await
}
}
impl Resolve<UpdateResourceSync, User> for State {
#[instrument(name = "UpdateResourceSync", skip(self, user))]
async fn resolve(
&self,
UpdateResourceSync { id, config }: UpdateResourceSync,
user: User,
) -> anyhow::Result<ResourceSync> {
resource::update::<ResourceSync>(&id, config, &user).await
}
}
impl Resolve<RefreshResourceSyncPending, User> for State {
async fn resolve(
&self,
RefreshResourceSyncPending { sync }: RefreshResourceSyncPending,
user: User,
) -> anyhow::Result<ResourceSync> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// sync should be able to do this.
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&sync, &user, PermissionLevel::Execute)
.await?;
if sync.config.repo.is_empty() {
return Err(anyhow!("resource sync repo not configured"));
}
let res = async {
let (res, _, hash, message) =
crate::helpers::sync::remote::get_remote_resources(&sync)
.await
.context("failed to get remote resources")?;
let resources = res?;
let all_resources = AllResourcesById::load().await?;
let id_to_tags = get_id_to_tags(None).await?;
let data = PendingSyncUpdatesDataOk {
server_updates: get_updates_for_view::<Server>(
resources.servers,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get server updates")?,
deployment_updates: deployment::get_updates_for_view(
resources.deployments,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get deployment updates")?,
build_updates: get_updates_for_view::<Build>(
resources.builds,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get build updates")?,
repo_updates: get_updates_for_view::<Repo>(
resources.repos,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get repo updates")?,
procedure_updates: get_updates_for_view::<Procedure>(
resources.procedures,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get procedure updates")?,
alerter_updates: get_updates_for_view::<Alerter>(
resources.alerters,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get alerter updates")?,
builder_updates: get_updates_for_view::<Builder>(
resources.builders,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get builder updates")?,
server_template_updates:
get_updates_for_view::<ServerTemplate>(
resources.server_templates,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get server template updates")?,
resource_sync_updates: get_updates_for_view::<
entities::sync::ResourceSync,
>(
resources.resource_syncs,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get resource sync updates")?,
variable_updates:
crate::helpers::sync::variables::get_updates_for_view(
resources.variables,
sync.config.delete,
)
.await
.context("failed to get variable updates")?,
user_group_updates:
crate::helpers::sync::user_groups::get_updates_for_view(
resources.user_groups,
sync.config.delete,
&all_resources,
)
.await
.context("failed to get user group updates")?,
};
anyhow::Ok((hash, message, data))
}
.await;
let (pending, has_updates) = match res {
Ok((hash, message, data)) => {
let has_updates = !data.no_updates();
(
PendingSyncUpdates {
hash: Some(hash),
message: Some(message),
data: PendingSyncUpdatesData::Ok(data),
},
has_updates,
)
}
Err(e) => (
PendingSyncUpdates {
hash: None,
message: None,
data: PendingSyncUpdatesData::Err(
PendingSyncUpdatesDataErr {
message: format_serror(&e.into()),
},
),
},
false,
),
};
let pending = to_document(&pending)
.context("failed to serialize pending to document")?;
update_one_by_id(
&db_client().await.resource_syncs,
&sync.id,
doc! { "$set": { "info.pending": pending } },
None,
)
.await?;
// check to update alert
let id = sync.id.clone();
let name = sync.name.clone();
tokio::task::spawn(async move {
let db = db_client().await;
let Some(existing) = db_client()
.await
.alerts
.find_one(
doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
},
None,
)
.await
.context("failed to query db for alert")
.inspect_err(|e| warn!("{e:#}"))
.ok()
else {
return;
};
match (existing, has_updates) {
// OPEN A NEW ALERT
(None, true) => {
let alert = Alert {
id: Default::default(),
ts: monitor_timestamp(),
resolved: false,
level: SeverityLevel::Ok,
target: ResourceTarget::ResourceSync(id.clone()),
data: AlertData::ResourceSyncPendingUpdates { id, name },
resolved_ts: None,
};
db.alerts
.insert_one(&alert, None)
.await
.context("failed to open existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
.ok();
send_alerts(&[alert]).await;
}
// CLOSE ALERT
(Some(existing), false) => {
update_one_by_id(
&db.alerts,
&existing.id,
doc! {
"$set": {
"resolved": true,
"resolved_ts": monitor_timestamp()
}
},
None,
)
.await
.context("failed to close existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
.ok();
}
// NOTHING TO DO
_ => {}
}
});
crate::resource::get::<ResourceSync>(&sync.id).await
}
}


@@ -10,7 +10,7 @@ use monitor_client::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, tag::Tag,
server_template::ServerTemplate, sync::ResourceSync, tag::Tag,
update::ResourceTarget, user::User,
},
};
@@ -191,6 +191,15 @@ impl Resolve<UpdateTagsOnResource, User> for State {
resource::update_tags::<ServerTemplate>(&id, tags, user)
.await?
}
ResourceTarget::ResourceSync(id) => {
resource::get_check_permissions::<ResourceSync>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::update_tags::<ResourceSync>(&id, tags, user).await?
}
};
Ok(UpdateTagsOnResourceResponse {})
}


@@ -216,8 +216,8 @@ impl GithubOauthClient {
#[derive(Deserialize)]
pub struct AccessTokenResponse {
pub access_token: String,
pub scope: String,
pub token_type: String,
// pub scope: String,
// pub token_type: String,
}
#[derive(Deserialize)]
@@ -225,5 +225,5 @@ pub struct GithubUserResponse {
pub login: String,
pub id: u128,
pub avatar_url: String,
pub email: Option<String>,
// pub email: Option<String>,
}


@@ -2,7 +2,9 @@ use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use jwt::Token;
use monitor_client::entities::config::core::{CoreConfig, OauthCredentials};
use monitor_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{de::DeserializeOwned, Deserialize};
use serde_json::Value;
@@ -183,10 +185,10 @@ impl GoogleOauthClient {
#[derive(Deserialize)]
pub struct AccessTokenResponse {
pub access_token: String,
// pub access_token: String,
pub id_token: String,
pub scope: String,
pub token_type: String,
// pub scope: String,
// pub token_type: String,
}
#[derive(Deserialize, Clone)]


@@ -1,9 +1,8 @@
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use aws_config::BehaviorVersion;
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ec2::{
config::Region,
types::{
BlockDeviceMapping, EbsBlockDevice,
InstanceNetworkInterfaceSpecification, InstanceStateChange,
@@ -12,11 +11,12 @@ use aws_sdk_ec2::{
},
Client,
};
use base64::Engine;
use monitor_client::entities::{
alert::{Alert, AlertData, AlertDataVariant},
alert::{Alert, AlertData},
monitor_timestamp,
server::stats::SeverityLevel,
server_template::AwsServerTemplateConfig,
server_template::aws::AwsServerTemplateConfig,
update::ResourceTarget,
};
@@ -92,7 +92,10 @@ pub async fn launch_ec2_instance(
)
.min_count(1)
.max_count(1)
.user_data(user_data);
.user_data(
base64::engine::general_purpose::STANDARD_NO_PAD
.encode(user_data),
);
for volume in volumes {
let ebs = EbsBlockDevice::builder()
@@ -165,16 +168,16 @@ pub async fn terminate_ec2_instance_with_retry(
}
Err(e) => {
if i == MAX_TERMINATION_TRIES - 1 {
error!("failed to terminate instance {instance_id}.");
error!("failed to terminate aws instance {instance_id}.");
let alert = Alert {
id: Default::default(),
ts: monitor_timestamp(),
resolved: false,
level: SeverityLevel::Critical,
target: ResourceTarget::system(),
variant: AlertDataVariant::AwsBuilderTerminationFailed,
data: AlertData::AwsBuilderTerminationFailed {
instance_id: instance_id.to_string(),
message: format!("{e:#}"),
},
resolved_ts: None,
};
@@ -191,7 +194,7 @@ pub async fn terminate_ec2_instance_with_retry(
unreachable!()
}
#[instrument]
#[instrument(skip(client))]
async fn terminate_ec2_instance_inner(
client: &Client,
instance_id: &str,


@@ -0,0 +1,82 @@
use anyhow::{anyhow, Context};
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ecr::Client as EcrClient;
use run_command::async_run_command;
#[tracing::instrument(skip(access_key_id, secret_access_key))]
async fn make_ecr_client(
region: String,
access_key_id: &str,
secret_access_key: &str,
) -> EcrClient {
std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
let region = Region::new(region);
let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
.region(region)
.load()
.await;
EcrClient::new(&config)
}
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn maybe_create_repo(
repo: &str,
region: String,
access_key_id: &str,
secret_access_key: &str,
) -> anyhow::Result<()> {
let client =
make_ecr_client(region, access_key_id, secret_access_key).await;
let existing = client
.describe_repositories()
.send()
.await
.context("failed to describe existing repositories")?
.repositories
.unwrap_or_default();
if existing.iter().any(|r| {
if let Some(name) = r.repository_name() {
name == repo
} else {
false
}
}) {
return Ok(());
};
client
.create_repository()
.repository_name(repo)
.send()
.await
.context("failed to create repository")?;
Ok(())
}
/// Gets a token for docker login.
///
/// Requires the aws cli to be installed on the host.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn get_ecr_token(
region: &str,
access_key_id: &str,
secret_access_key: &str,
) -> anyhow::Result<String> {
let log = async_run_command(&format!(
"AWS_ACCESS_KEY_ID={access_key_id} AWS_SECRET_ACCESS_KEY={secret_access_key} aws ecr get-login-password --region {region}"
))
.await;
if log.success() {
Ok(log.stdout)
} else {
Err(
anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr)
.context("failed to get aws ecr login token"),
)
}
}
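
The password returned here is normally piped straight into docker login for the account's registry. A hedged sketch of that step (not part of this diff): docker_login_ecr is a hypothetical helper, and the registry host follows the standard <account-id>.dkr.ecr.<region>.amazonaws.com naming, filled with placeholder values.

/// Sketch: log docker in to an ECR registry using the password from `get_ecr_token`.
#[tracing::instrument(skip(token))]
pub async fn docker_login_ecr(
  // e.g. "123456789012.dkr.ecr.us-east-1.amazonaws.com" (placeholder account id)
  registry: &str,
  token: &str,
) -> anyhow::Result<()> {
  let log = async_run_command(&format!(
    "echo {token} | docker login --username AWS --password-stdin {registry}"
  ))
  .await;
  if log.success() {
    Ok(())
  } else {
    Err(
      anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr)
        .context("failed to log docker in to aws ecr"),
    )
  }
}
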


@@ -0,0 +1,2 @@
pub mod ec2;
pub mod ecr;


@@ -0,0 +1,157 @@
use anyhow::{anyhow, Context};
use axum::http::{HeaderName, HeaderValue};
use reqwest::{RequestBuilder, StatusCode};
use serde::{de::DeserializeOwned, Serialize};
use super::{
common::{
HetznerActionResponse, HetznerDatacenterResponse,
HetznerServerResponse, HetznerVolumeResponse,
},
create_server::{CreateServerBody, CreateServerResponse},
create_volume::{CreateVolumeBody, CreateVolumeResponse},
};
const BASE_URL: &str = "https://api.hetzner.cloud/v1";
pub struct HetznerClient(reqwest::Client);
impl HetznerClient {
pub fn new(token: &str) -> HetznerClient {
HetznerClient(
reqwest::ClientBuilder::new()
.default_headers(
[(
HeaderName::from_static("authorization"),
HeaderValue::from_str(&format!("Bearer {token}"))
.unwrap(),
)]
.into_iter()
.collect(),
)
.build()
.context("failed to build Hetzner request client")
.unwrap(),
)
}
pub async fn get_server(
&self,
id: i64,
) -> anyhow::Result<HetznerServerResponse> {
self.get(&format!("/servers/{id}")).await
}
pub async fn create_server(
&self,
body: &CreateServerBody,
) -> anyhow::Result<CreateServerResponse> {
self.post("/servers", body).await
}
#[allow(unused)]
pub async fn delete_server(
&self,
id: i64,
) -> anyhow::Result<HetznerActionResponse> {
self.delete(&format!("/servers/{id}")).await
}
pub async fn get_volume(
&self,
id: i64,
) -> anyhow::Result<HetznerVolumeResponse> {
self.get(&format!("/volumes/{id}")).await
}
pub async fn create_volume(
&self,
body: &CreateVolumeBody,
) -> anyhow::Result<CreateVolumeResponse> {
self.post("/volumes", body).await
}
#[allow(unused)]
pub async fn delete_volume(&self, id: i64) -> anyhow::Result<()> {
let res = self
.0
.delete(format!("{BASE_URL}/volumes/{id}"))
.send()
.await
.context("failed at request to delete volume")?;
let status = res.status();
if status == StatusCode::NO_CONTENT {
Ok(())
} else {
let text = res
.text()
.await
.context("failed to get response body as text")?;
Err(anyhow!("{status} | {text}"))
}
}
#[allow(unused)]
pub async fn list_datacenters(
&self,
) -> anyhow::Result<HetznerDatacenterResponse> {
self.get("/datacenters").await
}
async fn get<Res: DeserializeOwned>(
&self,
path: &str,
) -> anyhow::Result<Res> {
let req = self.0.get(format!("{BASE_URL}{path}"));
handle_req(req).await.with_context(|| {
format!("failed at GET request to Hetzner | path: {path}")
})
}
async fn post<Body: Serialize, Res: DeserializeOwned>(
&self,
path: &str,
body: &Body,
) -> anyhow::Result<Res> {
let req = self.0.post(format!("{BASE_URL}{path}")).json(&body);
handle_req(req).await.with_context(|| {
format!("failed at POST request to Hetzner | path: {path}")
})
}
#[allow(unused)]
async fn delete<Res: DeserializeOwned>(
&self,
path: &str,
) -> anyhow::Result<Res> {
let req = self.0.delete(format!("{BASE_URL}{path}"));
handle_req(req).await.with_context(|| {
format!("failed at DELETE request to Hetzner | path: {path}")
})
}
}
async fn handle_req<Res: DeserializeOwned>(
req: RequestBuilder,
) -> anyhow::Result<Res> {
let res = req.send().await?;
let status = res.status();
if status.is_success() {
res.json().await.context("failed to parse response to json")
} else {
let text = res
.text()
.await
.context("failed to get response body as text")?;
if let Ok(json_error) =
serde_json::from_str::<serde_json::Value>(&text)
{
return Err(anyhow!("{status} | {json_error:?}"));
}
Err(anyhow!("{status} | {text}"))
}
}
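
A short usage sketch of the client defined above (not part of this diff); it calls only methods shown here, assumes the Hetzner API token is supplied via an HCLOUD_TOKEN environment variable, and uses a placeholder server id.

// Usage sketch, assuming HCLOUD_TOKEN is set in the environment.
#[allow(unused)]
async fn example_usage() -> anyhow::Result<()> {
  let token = std::env::var("HCLOUD_TOKEN")
    .context("HCLOUD_TOKEN not set in environment")?;
  let client = HetznerClient::new(&token);
  let datacenters = client.list_datacenters().await?;
  println!("found {} datacenters", datacenters.datacenters.len());
  // Placeholder server id.
  let server = client.get_server(12345678).await?;
  println!("server status: {:?}", server.server.status);
  Ok(())
}
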


@@ -0,0 +1,277 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerResponse {
pub server: HetznerServer,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServer {
pub id: i64,
pub name: String,
pub primary_disk_size: f64,
pub image: Option<HetznerImage>,
pub private_net: Vec<HetznerPrivateNet>,
pub public_net: HetznerPublicNet,
pub server_type: HetznerServerTypeDetails,
pub status: HetznerServerStatus,
#[serde(default)]
pub volumes: Vec<i64>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerTypeDetails {
pub architecture: String,
pub cores: i64,
pub cpu_type: String,
pub description: String,
pub disk: f64,
pub id: i64,
pub memory: f64,
pub name: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPrivateNet {
pub alias_ips: Vec<String>,
pub ip: String,
pub mac_address: String,
pub network: i64,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPublicNet {
#[serde(default)]
pub firewalls: Vec<HetznerFirewall>,
pub floating_ips: Vec<i64>,
pub ipv4: Option<HetznerIpv4>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerFirewall {
pub id: i64,
pub status: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerIpv4 {
pub id: Option<i64>,
pub blocked: bool,
pub dns_ptr: String,
pub ip: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerImage {
pub id: i64,
pub description: String,
pub name: Option<String>,
pub os_flavor: String,
pub os_version: Option<String>,
pub rapid_deploy: Option<bool>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerActionResponse {
pub action: HetznerAction,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerAction {
pub command: String,
pub error: Option<HetznerError>,
pub finished: Option<String>,
pub id: i64,
pub progress: i32,
pub resources: Vec<HetznerResource>,
pub started: String,
pub status: HetznerActionStatus,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerError {
pub code: String,
pub message: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerResource {
pub id: i64,
#[serde(rename = "type")]
pub ty: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolumeResponse {
pub volume: HetznerVolume,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolume {
/// Name of the Resource. Must be unique per Project.
pub name: String,
/// Point in time when the Resource was created (in ISO-8601 format).
pub created: String,
/// Filesystem of the Volume if formatted on creation, null if not formatted on creation
pub format: Option<HetznerVolumeFormat>,
/// ID of the Volume.
pub id: i64,
  /// User-defined labels (key/value pairs) for the Resource
pub labels: HashMap<String, String>,
/// Device path on the file system for the Volume
pub linux_device: String,
/// Protection configuration for the Resource.
pub protection: HetznerProtection,
/// ID of the Server the Volume is attached to, null if it is not attached at all
pub server: Option<i64>,
/// Size in GB of the Volume
pub size: i64,
/// Current status of the Volume. Allowed: `creating`, `available`
pub status: HetznerVolumeStatus,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerProtection {
/// Prevent the Resource from being deleted.
pub delete: bool,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterResponse {
pub datacenters: Vec<HetznerDatacenterDetails>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterDetails {
pub id: i64,
pub name: String,
pub location: serde_json::Map<String, serde_json::Value>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum HetznerLocation {
#[serde(rename = "nbg1")]
Nuremberg1,
#[serde(rename = "hel1")]
Helsinki1,
#[serde(rename = "fsn1")]
Falkenstein1,
#[serde(rename = "ash")]
Ashburn,
#[serde(rename = "hil")]
Hillsboro,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum HetznerDatacenter {
#[serde(rename = "nbg1-dc3")]
Nuremberg1Dc3,
#[serde(rename = "hel1-dc2")]
Helsinki1Dc2,
#[serde(rename = "fsn1-dc14")]
Falkenstein1Dc14,
#[serde(rename = "ash-dc1")]
AshburnDc1,
#[serde(rename = "hil-dc1")]
HillsboroDc1,
}
impl From<HetznerDatacenter> for HetznerLocation {
fn from(value: HetznerDatacenter) -> Self {
match value {
HetznerDatacenter::Nuremberg1Dc3 => HetznerLocation::Nuremberg1,
HetznerDatacenter::Helsinki1Dc2 => HetznerLocation::Helsinki1,
HetznerDatacenter::Falkenstein1Dc14 => {
HetznerLocation::Falkenstein1
}
HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn,
HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeFormat {
Xfs,
Ext4,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeStatus {
Creating,
Available,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerServerStatus {
Running,
Initializing,
Starting,
Stopping,
Off,
Deleting,
Migrating,
Rebuilding,
Unknown,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerActionStatus {
Running,
Success,
Error,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
#[allow(clippy::enum_variant_names)]
pub enum HetznerServerType {
// Shared
#[serde(rename = "cx11")]
SharedIntel1Core2Ram20Disk,
#[serde(rename = "cpx11")]
SharedAmd2Core2Ram40Disk,
#[serde(rename = "cax11")]
SharedArm2Core4Ram40Disk,
#[serde(rename = "cx21")]
SharedIntel2Core4Ram40Disk,
#[serde(rename = "cpx21")]
SharedAmd3Core4Ram80Disk,
#[serde(rename = "cax21")]
SharedArm4Core8Ram80Disk,
#[serde(rename = "cx31")]
SharedIntel2Core8Ram80Disk,
#[serde(rename = "cpx31")]
SharedAmd4Core8Ram160Disk,
#[serde(rename = "cax31")]
SharedArm8Core16Ram160Disk,
#[serde(rename = "cx41")]
SharedIntel4Core16Ram160Disk,
#[serde(rename = "cpx41")]
SharedAmd8Core16Ram240Disk,
#[serde(rename = "cax41")]
SharedArm16Core32Ram320Disk,
#[serde(rename = "cx51")]
SharedIntel8Core32Ram240Disk,
#[serde(rename = "cpx51")]
SharedAmd16Core32Ram360Disk,
// Dedicated
#[serde(rename = "ccx13")]
DedicatedAmd2Core8Ram80Disk,
#[serde(rename = "ccx23")]
DedicatedAmd4Core16Ram160Disk,
#[serde(rename = "ccx33")]
DedicatedAmd8Core32Ram240Disk,
#[serde(rename = "ccx43")]
DedicatedAmd16Core64Ram360Disk,
#[serde(rename = "ccx53")]
DedicatedAmd32Core128Ram600Disk,
#[serde(rename = "ccx63")]
DedicatedAmd48Core192Ram960Disk,
}

View File

@@ -0,0 +1,76 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerDatacenter, HetznerLocation, HetznerServer,
HetznerServerType,
};
#[derive(Debug, Clone, Serialize)]
pub struct CreateServerBody {
/// Name of the Server to create (must be unique per Project and a valid hostname as per RFC 1123)
pub name: String,
/// Auto-mount Volumes after attach
#[serde(skip_serializing_if = "Option::is_none")]
pub automount: Option<bool>,
/// ID or name of Datacenter to create Server in (must not be used together with location)
#[serde(skip_serializing_if = "Option::is_none")]
pub datacenter: Option<HetznerDatacenter>,
/// ID or name of Location to create Server in (must not be used together with datacenter)
#[serde(skip_serializing_if = "Option::is_none")]
pub location: Option<HetznerLocation>,
/// Firewalls which should be applied on the Server's public network interface at creation time
pub firewalls: Vec<Firewall>,
/// ID or name of the Image the Server is created from
pub image: String,
/// User-defined labels (key-value pairs) for the Resource
pub labels: HashMap<String, String>,
/// Network IDs which should be attached to the Server's private network interface at creation time
pub networks: Vec<i64>,
/// ID of the Placement Group the server should be in
#[serde(skip_serializing_if = "Option::is_none")]
pub placement_group: Option<i64>,
/// Public Network options
#[serde(skip_serializing_if = "Option::is_none")]
pub public_net: Option<PublicNet>,
/// ID or name of the Server type this Server should be created with
pub server_type: HetznerServerType,
/// SSH key IDs (integer) or names (string) which should be injected into the Server at creation time
pub ssh_keys: Vec<String>,
/// This automatically triggers a Power on a Server Action after the creation is finished and is returned in the next_actions response object.
pub start_after_create: bool,
/// Cloud-Init user data to use during Server creation. This field is limited to 32KiB.
#[serde(skip_serializing_if = "Option::is_none")]
pub user_data: Option<String>,
/// Volume IDs which should be attached to the Server at creation time. Volumes must be in the same Location.
pub volumes: Vec<i64>,
}
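A hypothetical construction of this request body (the name, image, and server type are illustrative; `serde_json` is assumed to be available as a dependency):
let body = CreateServerBody {
    name: "builder-1".to_string(),
    automount: None,
    datacenter: Some(HetznerDatacenter::Nuremberg1Dc3),
    location: None,
    firewalls: Vec::new(),
    image: "ubuntu-22.04".to_string(),
    labels: HashMap::new(),
    networks: Vec::new(),
    placement_group: None,
    public_net: None,
    server_type: HetznerServerType::SharedArm2Core4Ram40Disk,
    ssh_keys: Vec::new(),
    start_after_create: true,
    user_data: None,
    volumes: Vec::new(),
};
// All Option fields left as None are dropped from the request JSON by
// `skip_serializing_if = "Option::is_none"`, keeping the body minimal.
let json = serde_json::to_string(&body).unwrap();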
#[derive(Debug, Clone, Copy, Serialize)]
pub struct Firewall {
/// ID of the Firewall
pub firewall: i64,
}
#[derive(Debug, Clone, Copy, Serialize)]
pub struct PublicNet {
/// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached.
pub enable_ipv4: bool,
/// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached.
pub enable_ipv6: bool,
/// ID of the ipv4 Primary IP to use. If omitted and enable_ipv4 is true, a new ipv4 Primary IP will automatically be created.
#[serde(skip_serializing_if = "Option::is_none")]
pub ipv4: Option<i64>,
/// ID of the ipv6 Primary IP to use. If omitted and enable_ipv6 is true, a new ipv6 Primary IP will automatically be created.
#[serde(skip_serializing_if = "Option::is_none")]
pub ipv6: Option<i64>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct CreateServerResponse {
pub action: HetznerAction,
pub next_actions: Vec<HetznerAction>,
pub root_password: Option<String>,
pub server: HetznerServer,
}

View File

@@ -0,0 +1,36 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerLocation, HetznerVolume, HetznerVolumeFormat,
};
#[derive(Debug, Clone, Serialize)]
pub struct CreateVolumeBody {
/// Name of the volume
pub name: String,
/// Auto-mount Volume after attach. The `server` field must be provided.
#[serde(skip_serializing_if = "Option::is_none")]
pub automount: Option<bool>,
/// Format Volume after creation. One of: xfs, ext4
#[serde(skip_serializing_if = "Option::is_none")]
pub format: Option<HetznerVolumeFormat>,
/// User-defined labels (key-value pairs) for the Resource
pub labels: HashMap<String, String>,
/// Location to create the Volume in (can be omitted if Server is specified)
#[serde(skip_serializing_if = "Option::is_none")]
pub location: Option<HetznerLocation>,
/// Server to which to attach the Volume once it's created (Volume will be created in the same Location as the server)
#[serde(skip_serializing_if = "Option::is_none")]
pub server: Option<i64>,
/// Size of the Volume in GB
pub size: i64,
}
#[derive(Debug, Clone, Deserialize)]
pub struct CreateVolumeResponse {
pub action: HetznerAction,
pub next_actions: Vec<HetznerAction>,
pub volume: HetznerVolume,
}

View File

@@ -0,0 +1,282 @@
use std::{
sync::{Arc, Mutex, OnceLock},
time::Duration,
};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use monitor_client::entities::server_template::hetzner::{
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,
HetznerVolumeFormat,
};
use crate::{
cloud::hetzner::{
common::HetznerServerStatus, create_server::CreateServerBody,
create_volume::CreateVolumeBody,
},
config::core_config,
};
use self::{client::HetznerClient, common::HetznerVolumeStatus};
mod client;
mod common;
mod create_server;
mod create_volume;
fn hetzner() -> Option<&'static HetznerClient> {
static HETZNER_CLIENT: OnceLock<Option<HetznerClient>> =
OnceLock::new();
HETZNER_CLIENT
.get_or_init(|| {
let token = &core_config().hetzner.token;
(!token.is_empty()).then(|| HetznerClient::new(token))
})
.as_ref()
}
pub struct HetznerServerMinimal {
pub id: i64,
pub ip: String,
}
const POLL_RATE_SECS: u64 = 3;
const MAX_POLL_TRIES: usize = 100;
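// With a 3 second poll rate and up to 100 tries, readiness is polled for at most ~5 minutes.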
#[instrument]
pub async fn launch_hetzner_server(
name: &str,
config: HetznerServerTemplateConfig,
) -> anyhow::Result<HetznerServerMinimal> {
let hetzner =
*hetzner().as_ref().context("Hetzner token not configured")?;
let HetznerServerTemplateConfig {
image,
datacenter,
private_network_ids,
placement_group,
enable_public_ipv4,
enable_public_ipv6,
firewall_ids,
server_type,
ssh_keys,
user_data,
use_public_ip,
labels,
volumes,
port: _,
} = config;
let datacenter = hetzner_datacenter(datacenter);
// Create volumes and get their ids
let mut volume_ids = Vec::new();
for volume in volumes {
let body = CreateVolumeBody {
name: volume.name,
format: Some(hetzner_format(volume.format)),
location: Some(datacenter.into()),
labels: volume.labels,
size: volume.size_gb,
automount: None,
server: None,
};
let id = hetzner
.create_volume(&body)
.await
.context("failed to create hetzner volume")?
.volume
.id;
volume_ids.push(id);
}
// Make sure volumes are available before continuing
let vol_ids_poll = Arc::new(Mutex::new(volume_ids.clone()));
for _ in 0..MAX_POLL_TRIES {
if vol_ids_poll.lock().unwrap().is_empty() {
break;
}
tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
let ids = vol_ids_poll.lock().unwrap().clone();
let futures = ids.into_iter().map(|id| {
let vol_ids = vol_ids_poll.clone();
async move {
let Ok(res) = hetzner.get_volume(id).await else {
return;
};
if matches!(res.volume.status, HetznerVolumeStatus::Available)
{
vol_ids.lock().unwrap().retain(|_id| *_id != id);
}
}
});
join_all(futures).await;
}
if !vol_ids_poll.lock().unwrap().is_empty() {
return Err(anyhow!("Volumes not ready after poll"));
}
let body = CreateServerBody {
name: name.to_string(),
automount: None,
datacenter: Some(datacenter),
location: None,
firewalls: firewall_ids
.into_iter()
.map(|firewall| create_server::Firewall { firewall })
.collect(),
image,
labels,
networks: private_network_ids,
placement_group: (placement_group > 0).then_some(placement_group),
public_net: (enable_public_ipv4 || enable_public_ipv6).then_some(
create_server::PublicNet {
enable_ipv4: enable_public_ipv4,
enable_ipv6: enable_public_ipv6,
ipv4: None,
ipv6: None,
},
),
server_type: hetzner_server_type(server_type),
ssh_keys,
start_after_create: true,
user_data: (!user_data.is_empty()).then_some(user_data),
volumes: volume_ids,
};
let server_id = hetzner
.create_server(&body)
.await
.context("failed to create hetnzer server")?
.server
.id;
for _ in 0..MAX_POLL_TRIES {
tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
let Ok(res) = hetzner.get_server(server_id).await else {
continue;
};
if matches!(res.server.status, HetznerServerStatus::Running) {
let ip = if use_public_ip {
res
.server
.public_net
.ipv4
.context("instance does not have public ipv4 attached")?
.ip
} else {
res
.server
.private_net
.first()
.context("no private networks attached")?
.ip
.to_string()
};
let server = HetznerServerMinimal { id: server_id, ip };
return Ok(server);
}
}
Err(anyhow!(
"failed to verify server running after polling status"
))
}
fn hetzner_format(
format: HetznerVolumeFormat,
) -> common::HetznerVolumeFormat {
match format {
HetznerVolumeFormat::Xfs => common::HetznerVolumeFormat::Xfs,
HetznerVolumeFormat::Ext4 => common::HetznerVolumeFormat::Ext4,
}
}
fn hetzner_datacenter(
datacenter: HetznerDatacenter,
) -> common::HetznerDatacenter {
match datacenter {
HetznerDatacenter::Nuremberg1Dc3 => {
common::HetznerDatacenter::Nuremberg1Dc3
}
HetznerDatacenter::Helsinki1Dc2 => {
common::HetznerDatacenter::Helsinki1Dc2
}
HetznerDatacenter::Falkenstein1Dc14 => {
common::HetznerDatacenter::Falkenstein1Dc14
}
HetznerDatacenter::AshburnDc1 => {
common::HetznerDatacenter::AshburnDc1
}
HetznerDatacenter::HillsboroDc1 => {
common::HetznerDatacenter::HillsboroDc1
}
}
}
fn hetzner_server_type(
server_type: HetznerServerType,
) -> common::HetznerServerType {
match server_type {
HetznerServerType::SharedIntel1Core2Ram20Disk => {
common::HetznerServerType::SharedIntel1Core2Ram20Disk
}
HetznerServerType::SharedAmd2Core2Ram40Disk => {
common::HetznerServerType::SharedAmd2Core2Ram40Disk
}
HetznerServerType::SharedArm2Core4Ram40Disk => {
common::HetznerServerType::SharedArm2Core4Ram40Disk
}
HetznerServerType::SharedIntel2Core4Ram40Disk => {
common::HetznerServerType::SharedIntel2Core4Ram40Disk
}
HetznerServerType::SharedAmd3Core4Ram80Disk => {
common::HetznerServerType::SharedAmd3Core4Ram80Disk
}
HetznerServerType::SharedArm4Core8Ram80Disk => {
common::HetznerServerType::SharedArm4Core8Ram80Disk
}
HetznerServerType::SharedIntel2Core8Ram80Disk => {
common::HetznerServerType::SharedIntel2Core8Ram80Disk
}
HetznerServerType::SharedAmd4Core8Ram160Disk => {
common::HetznerServerType::SharedAmd4Core8Ram160Disk
}
HetznerServerType::SharedArm8Core16Ram160Disk => {
common::HetznerServerType::SharedArm8Core16Ram160Disk
}
HetznerServerType::SharedIntel4Core16Ram160Disk => {
common::HetznerServerType::SharedIntel4Core16Ram160Disk
}
HetznerServerType::SharedAmd8Core16Ram240Disk => {
common::HetznerServerType::SharedAmd8Core16Ram240Disk
}
HetznerServerType::SharedArm16Core32Ram320Disk => {
common::HetznerServerType::SharedArm16Core32Ram320Disk
}
HetznerServerType::SharedIntel8Core32Ram240Disk => {
common::HetznerServerType::SharedIntel8Core32Ram240Disk
}
HetznerServerType::SharedAmd16Core32Ram360Disk => {
common::HetznerServerType::SharedAmd16Core32Ram360Disk
}
HetznerServerType::DedicatedAmd2Core8Ram80Disk => {
common::HetznerServerType::DedicatedAmd2Core8Ram80Disk
}
HetznerServerType::DedicatedAmd4Core16Ram160Disk => {
common::HetznerServerType::DedicatedAmd4Core16Ram160Disk
}
HetznerServerType::DedicatedAmd8Core32Ram240Disk => {
common::HetznerServerType::DedicatedAmd8Core32Ram240Disk
}
HetznerServerType::DedicatedAmd16Core64Ram360Disk => {
common::HetznerServerType::DedicatedAmd16Core64Ram360Disk
}
HetznerServerType::DedicatedAmd32Core128Ram600Disk => {
common::HetznerServerType::DedicatedAmd32Core128Ram600Disk
}
HetznerServerType::DedicatedAmd48Core192Ram960Disk => {
common::HetznerServerType::DedicatedAmd48Core192Ram960Disk
}
}
}

View File

@@ -1,5 +1,8 @@
pub mod aws;
#[allow(unused)]
pub mod hetzner;
#[derive(Debug)]
pub enum BuildCleanupData {
Server { repo_name: String },

View File

@@ -2,7 +2,13 @@ use std::sync::OnceLock;
use anyhow::Context;
use merge_config_files::parse_config_file;
use monitor_client::entities::config::core::{CoreConfig, Env};
use monitor_client::entities::{
config::core::{
AwsCredentials, CoreConfig, Env, HetznerCredentials, MongoConfig,
OauthCredentials,
},
logger::LogConfig,
};
use serde::Deserialize;
pub fn frontend_path() -> &'static String {
@@ -34,91 +40,124 @@ pub fn core_config() -> &'static CoreConfig {
.context("failed to parse core Env")
.unwrap();
let config_path = &env.monitor_config_path;
let mut config =
let config =
parse_config_file::<CoreConfig>(config_path.as_str())
.unwrap_or_else(|e| {
panic!("failed at parsing config at {config_path} | {e:#}")
});
// recreating CoreConfig here makes sure we apply all env overrides.
CoreConfig {
title: env.monitor_title.unwrap_or(config.title),
host: env.monitor_host.unwrap_or(config.host),
port: env.monitor_port.unwrap_or(config.port),
passkey: env.monitor_passkey.unwrap_or(config.passkey),
jwt_valid_for: env
.monitor_jwt_valid_for
.unwrap_or(config.jwt_valid_for),
sync_directory: env
.monitor_sync_directory
.map(|dir|
dir.parse()
.context("failed to parse env MONITOR_SYNC_DIRECTORY as valid path").unwrap())
.unwrap_or(config.sync_directory),
monitoring_interval: env
.monitor_monitoring_interval
.unwrap_or(config.monitoring_interval),
keep_stats_for_days: env
.monitor_keep_stats_for_days
.unwrap_or(config.keep_stats_for_days),
keep_alerts_for_days: env
.monitor_keep_alerts_for_days
.unwrap_or(config.keep_alerts_for_days),
github_webhook_secret: env
.monitor_github_webhook_secret
.unwrap_or(config.github_webhook_secret),
github_webhook_base_url: env
.monitor_github_webhook_base_url
.or(config.github_webhook_base_url),
github_organizations: env.monitor_github_organizations
.unwrap_or(config.github_organizations),
docker_organizations: env
.monitor_docker_organizations
.unwrap_or(config.docker_organizations),
transparent_mode: env
.monitor_transparent_mode
.unwrap_or(config.transparent_mode),
ui_write_disabled: env
.monitor_ui_write_disabled
.unwrap_or(config.ui_write_disabled),
local_auth: env.monitor_local_auth.unwrap_or(config.local_auth),
google_oauth: OauthCredentials {
enabled: env
.monitor_google_oauth_enabled
.unwrap_or(config.google_oauth.enabled),
id: env
.monitor_google_oauth_id
.unwrap_or(config.google_oauth.id),
secret: env
.monitor_google_oauth_secret
.unwrap_or(config.google_oauth.secret),
},
github_oauth: OauthCredentials {
enabled: env
.monitor_github_oauth_enabled
.unwrap_or(config.github_oauth.enabled),
id: env
.monitor_github_oauth_id
.unwrap_or(config.github_oauth.id),
secret: env
.monitor_github_oauth_secret
.unwrap_or(config.github_oauth.secret),
},
aws: AwsCredentials {
access_key_id: env
.monitor_aws_access_key_id
.unwrap_or(config.aws.access_key_id),
secret_access_key: env
.monitor_aws_secret_access_key
.unwrap_or(config.aws.secret_access_key),
},
hetzner: HetznerCredentials {
token: env
.monitor_hetzner_token
.unwrap_or(config.hetzner.token),
},
mongo: MongoConfig {
uri: env.monitor_mongo_uri.or(config.mongo.uri),
address: env.monitor_mongo_address.or(config.mongo.address),
username: env
.monitor_mongo_username
.or(config.mongo.username),
password: env
.monitor_mongo_password
.or(config.mongo.password),
app_name: env
.monitor_mongo_app_name
.unwrap_or(config.mongo.app_name),
db_name: env
.monitor_mongo_db_name
.unwrap_or(config.mongo.db_name),
},
logging: LogConfig {
level: env
.monitor_logging_level
.unwrap_or(config.logging.level),
stdio: env
.monitor_logging_stdio
.unwrap_or(config.logging.stdio),
otlp_endpoint: env
.monitor_logging_otlp_endpoint
.or(config.logging.otlp_endpoint),
opentelemetry_service_name: env
.monitor_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name),
},
// Overrides
config.title = env.monitor_title.unwrap_or(config.title);
config.host = env.monitor_host.unwrap_or(config.host);
config.port = env.monitor_port.unwrap_or(config.port);
config.passkey = env.monitor_passkey.unwrap_or(config.passkey);
config.jwt_valid_for =
env.monitor_jwt_valid_for.unwrap_or(config.jwt_valid_for);
config.monitoring_interval = env
.monitor_monitoring_interval
.unwrap_or(config.monitoring_interval);
config.keep_stats_for_days = env
.monitor_keep_stats_for_days
.unwrap_or(config.keep_stats_for_days);
config.keep_alerts_for_days = env
.monitor_keep_alerts_for_days
.unwrap_or(config.keep_alerts_for_days);
config.github_webhook_secret = env
.monitor_github_webhook_secret
.unwrap_or(config.github_webhook_secret);
config.github_webhook_base_url = env
.monitor_github_webhook_base_url
.or(config.github_webhook_base_url);
config.docker_organizations = env
.monitor_docker_organizations
.unwrap_or(config.docker_organizations);
config.logging.level =
env.monitor_logging_level.unwrap_or(config.logging.level);
config.logging.stdio =
env.monitor_logging_stdio.unwrap_or(config.logging.stdio);
config.logging.otlp_endpoint = env
.monitor_logging_otlp_endpoint
.or(config.logging.otlp_endpoint);
config.logging.opentelemetry_service_name = env
.monitor_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name);
config.local_auth =
env.monitor_local_auth.unwrap_or(config.local_auth);
config.github_oauth.enabled = env
.monitor_github_oauth_enabled
.unwrap_or(config.github_oauth.enabled);
config.github_oauth.id = env
.monitor_github_oauth_id
.unwrap_or(config.github_oauth.id);
config.github_oauth.secret = env
.monitor_github_oauth_secret
.unwrap_or(config.github_oauth.secret);
config.google_oauth.enabled = env
.monitor_google_oauth_enabled
.unwrap_or(config.google_oauth.enabled);
config.google_oauth.id = env
.monitor_google_oauth_id
.unwrap_or(config.google_oauth.id);
config.google_oauth.secret = env
.monitor_google_oauth_secret
.unwrap_or(config.google_oauth.secret);
config.mongo.uri = env.monitor_mongo_uri.or(config.mongo.uri);
config.mongo.address =
env.monitor_mongo_address.or(config.mongo.address);
config.mongo.username =
env.monitor_mongo_username.or(config.mongo.username);
config.mongo.password =
env.monitor_mongo_password.or(config.mongo.password);
config.mongo.app_name =
env.monitor_mongo_app_name.unwrap_or(config.mongo.app_name);
config.mongo.db_name =
env.monitor_mongo_db_name.unwrap_or(config.mongo.db_name);
config.aws.access_key_id = env
.monitor_aws_access_key_id
.unwrap_or(config.aws.access_key_id);
config.aws.secret_access_key = env
.monitor_aws_secret_access_key
.unwrap_or(config.aws.secret_access_key);
config
// These can't be overridden via env
secrets: config.secrets,
github_accounts: config.github_accounts,
docker_accounts: config.docker_accounts,
aws_ecr_registries: config.aws_ecr_registries,
}
})
}
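A sketch of the override precedence implemented above (MONITOR_PORT is the presumed env var behind `env.monitor_port`; the numbers are illustrative):
// MONITOR_PORT=9121 set in the environment, port = 9120 in the config file:
let port = env.monitor_port.unwrap_or(config.port); // -> 9121
// With MONITOR_PORT unset, `env.monitor_port` is None and the file value (9120) wins.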

View File

@@ -12,6 +12,7 @@ use monitor_client::entities::{
repo::Repo,
server::{stats::SystemStatsRecord, Server},
server_template::ServerTemplate,
sync::ResourceSync,
tag::Tag,
update::Update,
user::User,
@@ -42,6 +43,7 @@ pub struct DbClient {
pub procedures: Collection<Procedure>,
pub alerters: Collection<Alerter>,
pub server_templates: Collection<ServerTemplate>,
pub resource_syncs: Collection<ResourceSync>,
//
pub db: Database,
}
@@ -101,6 +103,8 @@ impl DbClient {
procedures: resource_collection(&db, "Procedure").await?,
server_templates: resource_collection(&db, "ServerTemplate")
.await?,
resource_syncs: resource_collection(&db, "ResourceSync")
.await?,
//
db,
};

View File

@@ -6,7 +6,7 @@ use monitor_client::{
entities::{
build::BuildActionState, deployment::DeploymentActionState,
procedure::ProcedureActionState, repo::RepoActionState,
server::ServerActionState,
server::ServerActionState, sync::ResourceSyncActionState,
},
};
@@ -21,6 +21,8 @@ pub struct ActionStates {
pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
pub procedure:
Cache<String, Arc<ActionState<ProcedureActionState>>>,
pub resource_sync:
Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
}
/// Need to be able to check "busy" with write lock acquired.

View File

@@ -1,16 +1,17 @@
use anyhow::{anyhow, Context};
use derive_variants::ExtractVariant;
use futures::future::join_all;
use monitor_client::entities::{
alert::{Alert, AlertData},
alerter::*,
deployment::DeploymentState,
server::stats::SeverityLevel,
update::ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use reqwest::StatusCode;
use slack::types::Block;
use crate::state::db_client;
use crate::{config::core_config, state::db_client};
#[instrument]
pub async fn send_alerts(alerts: &[Alert]) {
@@ -18,21 +19,21 @@ pub async fn send_alerts(alerts: &[Alert]) {
return;
}
let alerters = find_collect(
let alerters = match find_collect(
&db_client().await.alerters,
doc! { "config.params.enabled": true },
doc! { "config.enabled": true },
None,
)
.await;
if let Err(e) = alerters {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
);
return;
}
let alerters = alerters.unwrap();
.await
{
Ok(alerters) => alerters,
Err(e) => {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
);
return;
}
};
let handles =
alerts.iter().map(|alert| send_alert(&alerters, alert));
@@ -46,23 +47,49 @@ async fn send_alert(alerters: &[Alerter], alert: &Alert) {
return;
}
let alert_type = alert.data.extract_variant();
let handles = alerters.iter().map(|alerter| async {
match &alerter.config {
AlerterConfig::Slack(SlackAlerterConfig { url, enabled }) => {
if !enabled {
return Ok(());
}
send_slack_alert(url, alert)
.await
.context("failed to send slack alert")
// Don't send if not enabled
if !alerter.config.enabled {
return Ok(());
}
// Don't send if alert type not configured on the alerter
if !alerter.config.alert_types.is_empty()
&& !alerter.config.alert_types.contains(&alert_type)
{
return Ok(());
}
// Don't send if resource is in the blacklist
if alerter.config.except_resources.contains(&alert.target) {
return Ok(());
}
// Don't send if whitelist configured and target is not included
if !alerter.config.resources.is_empty()
&& !alerter.config.resources.contains(&alert.target)
{
return Ok(());
}
match &alerter.config.endpoint {
AlerterEndpoint::Slack(SlackAlerterEndpoint { url }) => {
send_slack_alert(url, alert).await.with_context(|| {
format!(
"failed to send alert to slack alerter {}",
alerter.name
)
})
}
AlerterConfig::Custom(CustomAlerterConfig { url, enabled }) => {
if !enabled {
return Ok(());
}
send_custom_alert(url, alert).await.context(format!(
"failed to send alert to custom alerter at {url}"
))
AlerterEndpoint::Custom(CustomAlerterEndpoint { url }) => {
send_custom_alert(url, alert).await.with_context(|| {
format!(
"failed to send alert to custom alerter {}",
alerter.name
)
})
}
}
});
@@ -86,7 +113,7 @@ async fn send_custom_alert(
.await
.context("failed at post request to alerter")?;
let status = res.status();
if status != StatusCode::OK {
if !status.is_success() {
let text = res
.text()
.await
@@ -105,7 +132,12 @@ async fn send_slack_alert(
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let (text, blocks): (_, Option<_>) = match &alert.data {
AlertData::ServerUnreachable { name, region, .. } => {
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
match alert.level {
SeverityLevel::Ok => {
@@ -122,10 +154,18 @@ async fn send_slack_alert(
SeverityLevel::Critical => {
let text =
format!("{level} | *{name}*{region} is *unreachable* ❌");
let err = err
.as_ref()
.map(|e| format!("\nerror: {e:#?}"))
.unwrap_or_default();
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} is *unreachable* ❌"
"*{name}*{region} is *unreachable* ❌{err}"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
@@ -134,70 +174,136 @@ async fn send_slack_alert(
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
..
} => {
let region = fmt_region(region);
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨");
let blocks = vec![
Block::header(format!("{level} 🚨")),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%*");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text =
format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} memory usage at *{percentage:.1}%* 💾"
)),
Block::section(format!(
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
}
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
..
} => {
let region = fmt_region(region);
let percentage = 100.0 * used_gb / total_gb;
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿 🚨");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿 🚨"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
];
(text, blocks.into())
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} disk usage at *{percentage:.1}%* 💿"
)),
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
}
}
AlertData::ContainerStateChange {
name,
server_name,
from,
to,
id,
..
} => {
let to = fmt_docker_container_state(to);
@@ -205,18 +311,70 @@ async fn send_slack_alert(
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"server: {server_name}\nprevious: {from}"
"server: {server_name}\nprevious: {from}",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
)),
];
(text, blocks.into())
}
AlertData::AwsBuilderTerminationFailed { instance_id } => {
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
let text = format!(
"{level} | Failed to terminated AWS builder instance"
"{level} | Failed to terminated AWS builder instance "
);
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("instance id: {instance_id}")),
Block::section(format!(
"instance id: *{instance_id}*\n{message}"
)),
];
(text, blocks.into())
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let text =
format!("{level} | There are pending resource sync updates");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"sync id: *{id}*\nsync name: *{name}*",
)),
Block::section(resource_link(
ResourceTargetVariant::ResourceSync,
id,
)),
];
(text, blocks.into())
}
AlertData::BuildFailed {
id,
name,
version,
err,
} => {
let text = format!("{level} | Build {name} has failed");
let err = err
.as_ref()
.map(|log| {
let stdout = (!log.stdout.is_empty())
.then(|| format!("\nstdout: {}", log.stdout))
.unwrap_or_default();
let stderr = (!log.stderr.is_empty())
.then(|| format!("\nstderr: {}", log.stderr))
.unwrap_or_default();
format!("\nfailed at stage: {}{stdout}{stderr}", log.stage)
})
.unwrap_or_default();
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"build id: *{id}*\nbuild name: *{name}*\nversion: v{version}{err}",
)),
Block::section(resource_link(ResourceTargetVariant::Build, id))
];
(text, blocks.into())
}
@@ -249,7 +407,41 @@ fn fmt_docker_container_state(state: &DeploymentState) -> String {
fn fmt_level(level: SeverityLevel) -> &'static str {
match level {
SeverityLevel::Critical => "CRITICAL 🚨",
SeverityLevel::Warning => "WARNING 🚨",
SeverityLevel::Warning => "WARNING ‼️",
SeverityLevel::Ok => "OK ✅",
}
}
fn resource_link(
resource_type: ResourceTargetVariant,
id: &str,
) -> String {
let path = match resource_type {
ResourceTargetVariant::System => unreachable!(),
ResourceTargetVariant::Build => format!("/builds/{id}"),
ResourceTargetVariant::Builder => {
format!("/builders/{id}")
}
ResourceTargetVariant::Deployment => {
format!("/deployments/{id}")
}
ResourceTargetVariant::Server => {
format!("/servers/{id}")
}
ResourceTargetVariant::Repo => format!("/repos/{id}"),
ResourceTargetVariant::Alerter => {
format!("/alerters/{id}")
}
ResourceTargetVariant::Procedure => {
format!("/procedures/{id}")
}
ResourceTargetVariant::ServerTemplate => {
format!("/server-templates/{id}")
}
ResourceTargetVariant::ResourceSync => {
format!("/resource-syncs/{id}")
}
};
format!("{}{path}", core_config().host)
}
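For example, assuming a hypothetical core config `host` of `https://monitor.example.com`:
// resource_link(ResourceTargetVariant::Deployment, "<deployment id>")
//   -> "https://monitor.example.com/deployments/<deployment id>"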

View File

@@ -1,13 +1,14 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use mongo_indexed::Document;
use monitor_client::entities::{
permission::{Permission, PermissionLevel, UserTarget},
server::Server,
update::ResourceTarget,
user::User,
};
use mungos::mongodb::bson::doc;
use mungos::mongodb::bson::{doc, Bson};
use periphery_client::PeripheryClient;
use rand::{thread_rng, Rng};
@@ -20,6 +21,7 @@ pub mod channel;
pub mod procedure;
pub mod prune;
pub mod query;
pub mod sync;
pub mod update;
// pub mod resource;
@@ -116,3 +118,23 @@ pub async fn create_permission<T>(
error!("failed to create permission for {target:?} | {e:#}");
};
}
/// Flattens a document only one level deep
///
/// e.g. `{ config: { label: "yes", thing: { field1: "ok", field2: "ok" } } }` ->
/// `{ "config.label": "yes", "config.thing": { field1: "ok", field2: "ok" } }`
pub fn flatten_document(doc: Document) -> Document {
let mut target = Document::new();
for (outer_field, bson) in doc {
if let Bson::Document(doc) = bson {
for (inner_field, bson) in doc {
target.insert(format!("{outer_field}.{inner_field}"), bson);
}
} else {
target.insert(outer_field, bson);
}
}
target
}
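A small usage sketch (assuming `Document` here is the re-exported bson document type, so the output of the already-imported `doc!` macro can be passed directly):
let flattened = flatten_document(doc! {
    "config": { "label": "yes", "thing": { "field1": "ok", "field2": "ok" } },
    "name": "example",
});
// flattened == doc! {
//     "config.label": "yes",
//     "config.thing": { "field1": "ok", "field2": "ok" },
//     "name": "example",
// }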

View File

@@ -1,219 +1,78 @@
use std::time::{Duration, Instant};
use anyhow::{anyhow, Context, Ok};
use formatting::{bold, colored, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::execute::Execution,
entities::{
monitor_timestamp,
procedure::{EnabledExecution, Procedure, ProcedureType},
update::Update,
user::procedure_user,
procedure::Procedure, update::Update, user::procedure_user,
},
};
use resolver_api::Resolve;
use tokio::sync::Mutex;
use crate::state::State;
use crate::{api::execute::ExecuteRequest, state::State};
use super::update::update_update;
use super::update::{init_execution_update, update_update};
#[instrument]
#[instrument(skip_all)]
pub async fn execute_procedure(
procedure: &Procedure,
update: &Mutex<Update>,
) -> anyhow::Result<()> {
let start_ts = monitor_timestamp();
use ProcedureType::*;
match procedure.config.procedure_type {
Sequence => {
add_line_to_update(
update,
&format!(
"executing sequence: {} ({})",
procedure.name, procedure.id
),
)
.await;
execute_sequence(
filter_list_by_enabled(&procedure.config.executions),
&procedure.id,
&procedure.name,
update,
)
.await
.with_context(|| {
let time = Duration::from_millis(
(monitor_timestamp() - start_ts) as u64,
);
format!(
"failed sequence execution after {time:?}. {} ({})",
procedure.name, procedure.id
)
})?;
let time = Duration::from_millis(
(monitor_timestamp() - start_ts) as u64,
);
add_line_to_update(
update,
&format!(
"finished sequence execution in {time:?}: {} ({}) ✅",
procedure.name, procedure.id
),
)
.await;
Ok(())
for stage in &procedure.config.stages {
if !stage.enabled {
continue;
}
Parallel => {
add_line_to_update(
update,
&format!(
"executing parallel: {} ({})",
procedure.name, procedure.id
),
)
.await;
execute_parallel(
filter_list_by_enabled(&procedure.config.executions),
&procedure.id,
&procedure.name,
update,
)
.await
.with_context(|| {
let time = Duration::from_millis(
(monitor_timestamp() - start_ts) as u64,
);
format!(
"failed parallel execution after {time:?}. {} ({})",
procedure.name, procedure.id
)
})?;
let time = Duration::from_millis(
(monitor_timestamp() - start_ts) as u64,
);
add_line_to_update(
update,
&format!(
"finished parallel execution in {time:?}: {} ({}) ✅",
procedure.name, procedure.id
),
)
.await;
Ok(())
}
}
}
#[instrument]
async fn execute_execution(
execution: Execution,
// used to prevent recursive procedure
parent_id: &str,
parent_name: &str,
) -> anyhow::Result<()> {
let user = procedure_user().to_owned();
let update = match execution {
Execution::None(_) => return Ok(()),
Execution::RunProcedure(req) => {
if req.procedure == parent_id || req.procedure == parent_name {
return Err(anyhow!("Self referential procedure detected"));
}
State
.resolve(req, user)
.await
.context("failed at RunProcedure")?
}
Execution::RunBuild(req) => State
.resolve(req, user)
.await
.context("failed at RunBuild")?,
Execution::Deploy(req) => {
State.resolve(req, user).await.context("failed at Deploy")?
}
Execution::StartContainer(req) => State
.resolve(req, user)
.await
.context("failed at StartContainer")?,
Execution::StopContainer(req) => {
State
.resolve(req, user)
.await
.context("failed at StopContainer")?
}
Execution::StopAllContainers(req) => State
.resolve(req, user)
.await
.context("failed at StopAllContainers")?,
Execution::RemoveContainer(req) => State
.resolve(req, user)
.await
.context("failed at RemoveContainer")?,
Execution::CloneRepo(req) => State
.resolve(req, user)
.await
.context("failed at CloneRepo")?,
Execution::PullRepo(req) => State
.resolve(req, user)
.await
.context("failed at PullRepo")?,
Execution::PruneDockerNetworks(req) => State
.resolve(req, user)
.await
.context("failed at PruneDockerNetworks")?,
Execution::PruneDockerImages(req) => State
.resolve(req, user)
.await
.context("failed at PruneDockerImages")?,
Execution::PruneDockerContainers(req) => State
.resolve(req, user)
.await
.context("failed at PruneDockerContainers")?,
};
if update.success {
Ok(())
} else {
Err(anyhow!(
"execution not successful. see update {}",
update.id
))
}
}
#[instrument]
async fn execute_sequence(
executions: Vec<Execution>,
parent_id: &str,
parent_name: &str,
update: &Mutex<Update>,
) -> anyhow::Result<()> {
for execution in executions {
let now = Instant::now();
add_line_to_update(
update,
&format!("executing stage: {execution:?}"),
)
.await;
let fail_log = format!("failed on {execution:?}");
execute_execution(execution.clone(), parent_id, parent_name)
.await
.context(fail_log)?;
add_line_to_update(
update,
&format!(
"finished stage in {:?}: {execution:?}",
now.elapsed()
"{}: executing stage: '{}'",
muted("INFO"),
bold(&stage.name)
),
)
.await;
let timer = Instant::now();
execute_stage(
stage
.executions
.iter()
.filter(|item| item.enabled)
.map(|item| item.execution.clone())
.collect(),
&procedure.id,
&procedure.name,
update,
)
.await
.with_context(|| {
format!(
"{}: failed stage '{}' execution after {:?}",
colored("ERROR", Color::Red),
bold(&stage.name),
timer.elapsed(),
)
})?;
add_line_to_update(
update,
&format!(
"{}: {} stage '{}' execution in {:?}",
muted("INFO"),
colored("finished", Color::Green),
bold(&stage.name),
timer.elapsed()
),
)
.await;
}
Ok(())
}
#[instrument]
async fn execute_parallel(
#[instrument(skip(update))]
async fn execute_stage(
executions: Vec<Execution>,
parent_id: &str,
parent_name: &str,
@@ -223,10 +82,13 @@ async fn execute_parallel(
let now = Instant::now();
add_line_to_update(
update,
&format!("executing stage: {execution:?}"),
&format!("{}: executing: {execution:?}", muted("INFO")),
)
.await;
let fail_log = format!("failed on {execution:?}");
let fail_log = format!(
"{}: failed on {execution:?}",
colored("ERROR", Color::Red)
);
let res =
execute_execution(execution.clone(), parent_id, parent_name)
.await
@@ -234,7 +96,9 @@ async fn execute_parallel(
add_line_to_update(
update,
&format!(
"finished stage in {:?}: {execution:?}",
"{}: {} execution in {:?}: {execution:?}",
muted("INFO"),
colored("finished", Color::Green),
now.elapsed()
),
)
@@ -248,14 +112,181 @@ async fn execute_parallel(
Ok(())
}
fn filter_list_by_enabled(
list: &[EnabledExecution],
) -> Vec<Execution> {
list
.iter()
.filter(|item| item.enabled)
.map(|item| item.execution.clone())
.collect()
async fn execute_execution(
execution: Execution,
// used to prevent recursive procedure
parent_id: &str,
parent_name: &str,
) -> anyhow::Result<()> {
let user = procedure_user().to_owned();
let update = match execution {
Execution::None(_) => return Ok(()),
Execution::RunProcedure(req) => {
if req.procedure == parent_id || req.procedure == parent_name {
return Err(anyhow!("Self referential procedure detected"));
}
let req = ExecuteRequest::RunProcedure(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunProcedure(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunProcedure")?
}
Execution::RunBuild(req) => {
let req = ExecuteRequest::RunBuild(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunBuild")?
}
Execution::Deploy(req) => {
let req = ExecuteRequest::Deploy(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::Deploy(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at Deploy")?
}
Execution::StartContainer(req) => {
let req = ExecuteRequest::StartContainer(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StartContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StartContainer")?
}
Execution::StopContainer(req) => {
let req = ExecuteRequest::StopContainer(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StopContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StopContainer")?
}
Execution::StopAllContainers(req) => {
let req = ExecuteRequest::StopAllContainers(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StopAllContainers(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at StopAllContainers")?
}
Execution::RemoveContainer(req) => {
let req = ExecuteRequest::RemoveContainer(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RemoveContainer(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RemoveContainer")?
}
Execution::CloneRepo(req) => {
let req = ExecuteRequest::CloneRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::CloneRepo(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at CloneRepo")?
}
Execution::PullRepo(req) => {
let req = ExecuteRequest::PullRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PullRepo")?
}
Execution::PruneNetworks(req) => {
let req = ExecuteRequest::PruneNetworks(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PruneNetworks(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneNetworks")?
}
Execution::PruneImages(req) => {
let req = ExecuteRequest::PruneImages(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PruneImages(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneImages")?
}
Execution::PruneContainers(req) => {
let req = ExecuteRequest::PruneContainers(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PruneContainers(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at PruneContainers")?
}
Execution::RunSync(req) => {
let req = ExecuteRequest::RunSync(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State
.resolve(req, (user, update))
.await
.context("failed at RunSync")?
}
Execution::Sleep(req) => {
tokio::time::sleep(Duration::from_millis(
req.duration_ms as u64,
))
.await;
Update {
success: true,
..Default::default()
}
}
};
if update.success {
Ok(())
} else {
Err(anyhow!(
"{}: execution not successful. see update '{}'",
colored("ERROR", Color::Red),
bold(&update.id),
))
}
}
/// ASSUMES FIRST LOG IS ALREADY CREATED

View File

@@ -1,16 +1,24 @@
use anyhow::Context;
use async_timing_util::{
unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS,
};
use mungos::mongodb::bson::doc;
use futures::future::join_all;
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api::build::PruneImages;
use crate::{config::core_config, state::db_client};
use super::periphery_client;
pub fn spawn_prune_loop() {
tokio::spawn(async move {
loop {
wait_until_timelength(Timelength::OneDay, 5000).await;
let (stats_res, alerts_res) =
tokio::join!(prune_stats(), prune_alerts());
let (images_res, stats_res, alerts_res) =
tokio::join!(prune_images(), prune_stats(), prune_alerts());
if let Err(e) = images_res {
error!("error in pruning images | {e:#}");
}
if let Err(e) = stats_res {
error!("error in pruning stats | {e:#}");
}
@@ -21,6 +29,35 @@ pub fn spawn_prune_loop() {
});
}
async fn prune_images() -> anyhow::Result<()> {
let futures = find_collect(&db_client().await.servers, None, None)
.await
.context("failed to get servers from db")?
.into_iter()
// This could be done in the mongo query, but we'd rather have the Rust type system guarantee this.
.filter(|server| server.config.auto_prune)
.map(|server| async move {
(
async {
periphery_client(&server)?.request(PruneImages {}).await
}
.await,
server,
)
});
for (res, server) in join_all(futures).await {
if let Err(e) = res {
error!(
"failed to prune images on server {} ({}) | {e:#}",
server.name, server.id
)
}
}
Ok(())
}
async fn prune_stats() -> anyhow::Result<()> {
if core_config().keep_stats_for_days == 0 {
return Ok(());

View File

@@ -9,17 +9,21 @@ use monitor_client::entities::{
permission::PermissionLevel,
server::{Server, ServerState},
tag::Tag,
update::ResourceTargetVariant,
update::{ResourceTargetVariant, Update},
user::{admin_service_user, User},
variable::Variable,
Operation,
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::bson::{doc, oid::ObjectId, Document},
mongodb::{
bson::{doc, oid::ObjectId, Document},
options::FindOneOptions,
},
};
use crate::{resource, state::db_client};
use crate::{config::core_config, resource, state::db_client};
#[instrument(level = "debug")]
pub async fn get_user(user_id: &str) -> anyhow::Result<User> {
@@ -103,6 +107,18 @@ pub async fn get_tag_check_owner(
Err(anyhow!("user must be tag owner or admin"))
}
pub async fn get_id_to_tags(
filter: impl Into<Option<Document>>,
) -> anyhow::Result<HashMap<String, Tag>> {
let res = find_collect(&db_client().await.tags, filter, None)
.await
.context("failed to query db for tags")?
.into_iter()
.map(|tag| (tag.id.clone(), tag))
.collect();
Ok(res)
}
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
user_id: &str,
@@ -150,6 +166,11 @@ pub async fn get_user_permission_on_resource(
resource_variant: ResourceTargetVariant,
resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
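// In transparent mode every user starts with at least Read on every resource;
// the fold over explicit permissions below can only raise that level.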
let lowest_permission = if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
};
let permission = find_collect(
&db_client().await.permissions,
doc! {
@@ -163,7 +184,7 @@ pub async fn get_user_permission_on_resource(
.context("failed to query db for permissions")?
.into_iter()
// get the max permission user has between personal / any user groups
.fold(PermissionLevel::None, |level, permission| {
.fold(lowest_permission, |level, permission| {
if permission.level > level {
permission.level
} else {
@@ -226,3 +247,25 @@ pub async fn get_variable(name: &str) -> anyhow::Result<Variable> {
format!("no variable found with given name: {name}")
})
}
pub async fn get_latest_update(
resource_type: ResourceTargetVariant,
id: &str,
operation: Operation,
) -> anyhow::Result<Option<Update>> {
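// Sort descending on start_ts so find_one returns the most recent matching update.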
db_client()
.await
.updates
.find_one(
doc! {
"target.type": resource_type.as_ref(),
"target.id": id,
"operation": operation.as_ref()
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
)
.await
.context("failed to query db for latest update")
}

View File

@@ -0,0 +1,858 @@
use std::{collections::HashMap, time::Duration};
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use futures::future::join_all;
use monitor_client::{
api::{execute::Deploy, read::GetBuildVersions},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentImage, DeploymentState,
PartialDeploymentConfig,
},
sync::SyncUpdate,
tag::Tag,
toml::ResourceToml,
update::{Log, ResourceTarget},
user::sync_user,
},
};
use mungos::find::find_collect;
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update,
resource::MonitorResource,
state::{deployment_status_cache, State},
};
use super::resource::{
run_update_description, run_update_tags, AllResourcesById,
ResourceSync,
};
pub type ToUpdate = Vec<ToUpdateItem>;
pub type ToCreate = Vec<ResourceToml<PartialDeploymentConfig>>;
/// Vec of resource names
pub type ToDelete = Vec<String>;
type UpdatesResult = (ToCreate, ToUpdate, ToDelete);
pub struct ToUpdateItem {
pub id: String,
pub resource: ResourceToml<PartialDeploymentConfig>,
pub update_description: bool,
pub update_tags: bool,
pub deploy: bool,
}
/// Turns all the diffs into a readable string
pub async fn get_updates_for_view(
resources: Vec<ResourceToml<PartialDeploymentConfig>>,
delete: bool,
all_resources: &AllResourcesById,
id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<Option<SyncUpdate>> {
let map = find_collect(Deployment::coll().await, None, None)
.await
.context("failed to get deployments from db")?
.into_iter()
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let mut update = SyncUpdate {
log: format!("{} Updates", Deployment::resource_type()),
..Default::default()
};
let mut to_delete = Vec::<String>::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
update.to_delete += 1;
to_delete.push(resource.name.clone())
}
}
}
let mut to_deploy_cache = HashMap::<String, bool>::new();
let mut to_deploy_build_cache = HashMap::<String, String>::new();
for mut resource in resources.clone() {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: DeploymentConfig = resource.config.into();
resource.config = config.into();
Deployment::validate_partial_config(&mut resource.config);
let mut diff = Deployment::get_diff(
original.config.clone(),
resource.config,
all_resources,
)?;
Deployment::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
let (to_deploy, state, reason) = extract_to_deploy_and_state(
all_resources,
&map,
&resources,
resource.name.clone(),
&mut to_deploy_cache,
&mut to_deploy_build_cache,
)
.await?;
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& !to_deploy
&& resource.description == original.description
&& resource.tags == original_tags
{
if state == DeploymentState::Unknown {
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\nDeployment sync actions could not be computed due to Unknown deployment state\n-------------------",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&resource.name)
));
}
continue;
}
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("UPDATE", Color::Blue),
Deployment::resource_type(),
bold(&resource.name)
));
let mut lines = Vec::<String>::new();
if resource.description != original.description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.description, Color::Red),
muted("to"),
colored(&resource.description, Color::Green)
));
}
if resource.tags != original_tags {
let from =
colored(&format!("{:?}", original_tags), Color::Red);
let to =
colored(&format!("{:?}", resource.tags), Color::Green);
lines.push(format!(
"{}: 'tags'\n{}: {from}\n{}: {to}",
muted("field"),
muted("from"),
muted("to"),
));
}
lines.extend(diff.iter_field_diffs().map(
|FieldDiff { field, from, to }| {
format!(
"{}: '{field}'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(from, Color::Red),
muted("to"),
colored(to, Color::Green)
)
},
));
if state == DeploymentState::Unknown {
lines.push(format!(
"{}: Deployment sync actions {} due to Unknown deployment state",
colored("ERROR", Color::Red),
bold("could not be computed")
));
} else if to_deploy {
let mut line = if state == DeploymentState::Running {
format!(
"{}: {reason}, {}",
muted("deploy"),
bold("sync will trigger deploy")
)
} else {
format!(
"{}: deployment is currently in {} state, {}",
muted("deploy"),
colored(&state.to_string(), Color::Red),
bold("sync will trigger deploy")
)
};
if !resource.after.is_empty() {
line.push_str(&format!(
"\n{}: {:?}",
muted("deploy after"),
resource.after
));
}
lines.push(line);
}
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
None => {
update.to_create += 1;
let mut lines = vec![
format!(
"{}: {}",
muted("description"),
resource.description,
),
format!("{}: {:?}", muted("tags"), resource.tags,),
format!(
"{}: {}",
muted("config"),
serde_json::to_string_pretty(&resource.config)
.context("failed to serialize config to json")?
),
];
if resource.deploy {
lines.push(format!(
"{}: {}",
muted("will deploy"),
colored("true", Color::Green)
));
if !resource.after.is_empty() {
lines.push(format!(
"{}: {:?}",
muted("deploy after"),
resource.after
));
}
}
update.log.push_str(&format!(
"\n\n{}: {}: {}\n{}",
colored("CREATE", Color::Green),
Deployment::resource_type(),
bold(&resource.name),
lines.join("\n")
))
}
}
}
for name in to_delete {
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("DELETE", Color::Red),
Deployment::resource_type(),
bold(&name)
));
}
let any_change = update.to_create > 0
|| update.to_update > 0
|| update.to_delete > 0;
Ok(any_change.then_some(update))
}
/// Gets all the resources to update. For use in sync execution.
pub async fn get_updates_for_execution(
resources: Vec<ResourceToml<PartialDeploymentConfig>>,
delete: bool,
all_resources: &AllResourcesById,
id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<UpdatesResult> {
let map = find_collect(Deployment::coll().await, None, None)
.await
.context("failed to get deployments from db")?
.into_iter()
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let mut to_create = ToCreate::new();
let mut to_update = ToUpdate::new();
let mut to_delete = ToDelete::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
to_delete.push(resource.name.clone());
}
}
}
let mut to_deploy_cache = HashMap::<String, bool>::new();
let mut to_deploy_build_cache = HashMap::<String, String>::new();
for mut resource in resources.clone() {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: DeploymentConfig = resource.config.into();
resource.config = config.into();
Deployment::validate_partial_config(&mut resource.config);
let mut diff = Deployment::get_diff(
original.config.clone(),
resource.config,
all_resources,
)?;
Deployment::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
let (to_deploy, _state, _reason) =
extract_to_deploy_and_state(
all_resources,
&map,
&resources,
resource.name.clone(),
&mut to_deploy_cache,
&mut to_deploy_build_cache,
)
.await?;
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& !to_deploy
&& resource.description == original.description
&& resource.tags == original_tags
{
continue;
}
// Minimizes updates through diffing.
resource.config = diff.into();
let update = ToUpdateItem {
id: original.id.clone(),
update_description: resource.description
!= original.description,
update_tags: resource.tags != original_tags,
resource,
deploy: to_deploy,
};
to_update.push(update);
}
None => to_create.push(resource),
}
}
Ok((to_create, to_update, to_delete))
}
type Res<'a> = std::pin::Pin<
Box<
dyn std::future::Future<
Output = anyhow::Result<(bool, DeploymentState, String)>,
> + Send
+ 'a,
>,
>;
fn extract_to_deploy_and_state<'a>(
all_resources: &'a AllResourcesById,
map: &'a HashMap<String, Deployment>,
resources: &'a [ResourceToml<PartialDeploymentConfig>],
name: String,
// name to 'to_deploy'
cache: &'a mut HashMap<String, bool>,
// build id to latest built version string
build_cache: &'a mut HashMap<String, String>,
) -> Res<'a> {
Box::pin(async move {
let mut reason = String::new();
let Some(deployment) = resources.iter().find(|r| r.name == name)
else {
// this case should be unreachable, the names come from a loop over resources
cache.insert(name, false);
return Ok((false, DeploymentState::Unknown, reason));
};
if deployment.deploy {
let Some(original) = map.get(&name) else {
// not created, definitely deploy
cache.insert(name, true);
// Don't need reason here, will be populated automatically
return Ok((true, DeploymentState::NotDeployed, reason));
};
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: DeploymentConfig = deployment.config.clone().into();
let mut config: PartialDeploymentConfig = config.into();
Deployment::validate_partial_config(&mut config);
let mut diff = Deployment::get_diff(
original.config.clone(),
config,
all_resources,
)?;
Deployment::validate_diff(&mut diff);
let status = &deployment_status_cache()
.get_or_insert_default(&original.id)
.await
.curr;
let state = status.state;
let mut to_deploy = match state {
DeploymentState::Unknown => false,
DeploymentState::Running => {
// Needs to only check config fields that affect docker run
let changed = diff.server_id.is_some()
|| diff.image.is_some()
|| diff.image_registry.is_some()
|| diff.skip_secret_interp.is_some()
|| diff.network.is_some()
|| diff.restart.is_some()
|| diff.command.is_some()
|| diff.extra_args.is_some()
|| diff.ports.is_some()
|| diff.volumes.is_some()
|| diff.environment.is_some()
|| diff.labels.is_some();
if changed {
reason = String::from("deployment config has changed")
}
changed
}
// All other cases will require Deploy to enter Running state.
// Don't need reason here as this case is handled outside, using returned state.
_ => true,
};
// Check if build attached, version latest, and there is a new build.
if !to_deploy {
// only need to check the original; if diff.image were Some, to_deploy would already be true.
if let DeploymentImage::Build { build_id, version } =
&original.config.image
{
// check if version is none, i.e. the deployment follows the latest build
if version.is_none() {
let deployed_version = status
.container
.as_ref()
.and_then(|c| c.image.split(':').last())
.unwrap_or("0.0.0");
match build_cache.get(build_id) {
Some(version) if deployed_version != version => {
to_deploy = true;
reason = format!(
"attached build has new version ({version})"
);
}
Some(_) => {}
None => {
let Some(version) = State
.resolve(
GetBuildVersions {
build: build_id.to_string(),
limit: Some(1),
..Default::default()
},
sync_user().to_owned(),
)
.await
.context("failed to get build versions")?
.pop()
else {
// this case shouldn't ever happen, how would deployment be deployed if build was never built?
return Ok((
false,
DeploymentState::NotDeployed,
reason,
));
};
let version = version.version.to_string();
build_cache
.insert(build_id.to_string(), version.clone());
if deployed_version != version {
to_deploy = true;
reason = format!(
"attached build has new version ({version})"
);
}
}
};
}
}
}
// Still need to check whether any 'after' dependencies need to deploy
if !to_deploy {
for name in &deployment.after {
match cache.get(name) {
Some(will_deploy) if *will_deploy => {
to_deploy = true;
reason = format!(
"parent dependency '{}' is deploying",
bold(name)
);
break;
}
Some(_) => {}
None => {
let (will_deploy, _, _) = extract_to_deploy_and_state(
all_resources,
map,
resources,
name.to_string(),
cache,
build_cache,
)
.await?;
if will_deploy {
to_deploy = true;
reason = format!(
"parent dependency '{}' is deploying",
bold(name)
);
break;
}
}
}
}
}
cache.insert(name, to_deploy);
Ok((to_deploy, state, reason))
} else {
// The state in this case doesn't matter and won't be read (as long as it isn't 'Unknown' which will log in all cases)
cache.insert(name, false);
Ok((false, DeploymentState::NotDeployed, reason))
}
})
}
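/// Runs the collected create / update / delete operations, then deploys the
/// flagged deployments in dependency-ordered rounds.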
pub async fn run_updates(
to_create: ToCreate,
to_update: ToUpdate,
to_delete: ToDelete,
) -> Option<Vec<Log>> {
if to_create.is_empty()
&& to_update.is_empty()
&& to_delete.is_empty()
{
return None;
}
let mut has_error = false;
let mut log = String::new();
// Collect all the deployment names that need to be deployed
// and their 'after' dependencies
let mut to_deploy = Vec::<(String, Vec<String>)>::new();
for resource in to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match crate::resource::create::<Deployment>(
&resource.name,
resource.config,
sync_user(),
)
.await
{
Ok(resource) => resource.id,
Err(e) => {
has_error = true;
log.push_str(&format!(
"\n{}: failed to create {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&name)
));
continue;
}
};
run_update_tags::<Deployment>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
run_update_description::<Deployment>(
id,
&name,
description,
&mut log,
&mut has_error,
)
.await;
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("created", Color::Green),
Deployment::resource_type(),
bold(&name)
));
if resource.deploy {
to_deploy.push((resource.name, resource.after));
}
}
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
deploy,
} in to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if update_description {
run_update_description::<Deployment>(
id.clone(),
&name,
description,
&mut log,
&mut has_error,
)
.await;
}
if update_tags {
run_update_tags::<Deployment>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
}
let mut config_update_error = false;
if !resource.config.is_none() {
if let Err(e) = crate::resource::update::<Deployment>(
&id,
resource.config,
sync_user(),
)
.await
{
has_error = true;
config_update_error = true;
log.push_str(&format!(
"\n{}: failed to update config on {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&name),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}' configuration",
muted("INFO"),
colored("updated", Color::Blue),
Deployment::resource_type(),
bold(&name)
));
}
}
if !config_update_error && deploy {
to_deploy.push((resource.name, resource.after));
}
}
for resource in to_delete {
if let Err(e) =
crate::resource::delete::<Deployment>(&resource, sync_user())
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Deployment::resource_type(),
bold(&resource),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("deleted", Color::Red),
Deployment::resource_type(),
bold(&resource)
));
}
}
let mut logs = Vec::with_capacity(1);
let stage = format!("Update {}s", Deployment::resource_type());
if has_error {
let log = format!(
"running updates on {}s{log}",
Deployment::resource_type()
);
logs.push(Log::error(&stage, log));
return Some(logs);
} else if !log.is_empty() {
let log = format!(
"running updates on {}s{log}",
Deployment::resource_type()
);
logs.push(Log::simple(&stage, log));
}
if to_deploy.is_empty() {
return Some(logs);
}
let mut log = format!(
"{}: running executions to sync deployment state",
muted("INFO")
);
let mut round = 1;
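// Deploy in rounds: each round deploys every deployment whose 'after'
// dependencies are no longer waiting, then retries the rest.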
while !to_deploy.is_empty() {
// Collect all waiting deployments without waiting dependencies.
let good_to_deploy = to_deploy
.iter()
.filter(|(_, after)| {
to_deploy.iter().all(|(name, _)| !after.contains(name))
})
.map(|(name, _)| name.clone())
.collect::<Vec<_>>();
// Deploy the ones ready for deployment
let res =
join_all(good_to_deploy.iter().map(|name| async move {
let res = async {
let req = ExecuteRequest::Deploy(Deploy {
deployment: name.to_string(),
stop_signal: None,
stop_time: None,
});
let user = sync_user();
let update = init_execution_update(&req, user).await?;
let ExecuteRequest::Deploy(req) = req else {
unreachable!()
};
State.resolve(req, (user.to_owned(), update)).await
}
.await;
(name, res)
}))
.await;
// Log results of deploy
for (name, res) in res {
if let Err(e) = res {
has_error = true;
log.push_str(&format!(
"\n{}: failed to deploy '{}' in round {} | {e:#}",
colored("ERROR", Color::Red),
bold(name),
bold(round)
));
} else {
log.push_str(&format!(
"\n{}: deployed '{}' in round {}",
muted("INFO"),
bold(name),
bold(round)
));
}
}
// Early exit if any deploy has errors
if has_error {
log.push_str(&format!(
"\n{}: exited in round {} {}",
muted("INFO"),
bold(round),
colored("with errors", Color::Red)
));
logs.push(Log::error("Sync Deployment State", log));
return Some(logs);
}
// Remove the deployed ones from 'to_deploy'
to_deploy.retain(|(name, _)| !good_to_deploy.contains(name));
// If another round is needed, the remaining deployments depend on this round.
// Sleep for 1s to give this round's containers time to start up.
if !to_deploy.is_empty() {
// Increment the round
round += 1;
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
log.push_str(&format!(
"\n{}: finished after {} round{}",
muted("INFO"),
bold(round),
(round > 1).then_some("s").unwrap_or_default()
));
logs.push(Log::simple("Sync Deployment State", log));
Some(logs)
}
impl ResourceSync for Deployment {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Deployment(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace the server id with name
original.server_id = resources
.servers
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
// need to replace the build id with name
if let DeploymentImage::Build { build_id, version } =
&original.image
{
original.image = DeploymentImage::Build {
build_id: resources
.builds
.get(build_id)
.map(|b| b.name.clone())
.unwrap_or_default(),
version: *version,
};
}
Ok(original.partial_diff(update))
}
}
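// Not part of the diff above: a minimal sketch of how the two functions in
// this file compose into one deployment sync pass. Assumes it sits in this
// module so the imports and type aliases above are in scope, and that the
// (hypothetical) caller has already parsed the toml and loaded the id maps.
async fn sync_deployments_sketch(
  resources: Vec<ResourceToml<PartialDeploymentConfig>>,
  delete: bool,
  all_resources: &AllResourcesById,
  id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<Vec<Log>> {
  // Diff the toml deployments against the db into creates / updates / deletes,
  // each update carrying its computed 'deploy' flag.
  let (to_create, to_update, to_delete) =
    get_updates_for_execution(resources, delete, all_resources, id_to_tags)
      .await?;
  // Apply them and deploy in dependency-ordered rounds; None means no work.
  Ok(
    run_updates(to_create, to_update, to_delete)
      .await
      .unwrap_or_default(),
  )
}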


@@ -0,0 +1,82 @@
use std::{fs, path::Path};
use anyhow::{anyhow, Context};
use formatting::{colored, muted, Color};
use monitor_client::entities::{toml::ResourcesToml, update::Log};
use serde::de::DeserializeOwned;
pub fn read_resources(
path: &Path,
) -> anyhow::Result<(ResourcesToml, Log)> {
let mut res = ResourcesToml::default();
let mut log =
format!("{}: reading resources from {path:?}", muted("INFO"));
read_resources_recursive(path, &mut res, &mut log).with_context(
|| format!("failed to read resources from {path:?}"),
)?;
Ok((res, Log::simple("read remote resources", log)))
}
fn read_resources_recursive(
path: &Path,
resources: &mut ResourcesToml,
log: &mut String,
) -> anyhow::Result<()> {
let res =
fs::metadata(path).context("failed to get path metadata")?;
if res.is_file() {
if !path
.extension()
.map(|ext| ext == "toml")
.unwrap_or_default()
{
return Ok(());
}
let more = parse_toml_file::<ResourcesToml>(path)
.context("failed to parse resource file")?;
log.push('\n');
log.push_str(&format!(
"{}: {} from {}",
muted("INFO"),
colored("adding resources", Color::Green),
colored(path.display(), Color::Blue)
));
resources.servers.extend(more.servers);
resources.deployments.extend(more.deployments);
resources.builds.extend(more.builds);
resources.repos.extend(more.repos);
resources.procedures.extend(more.procedures);
resources.builders.extend(more.builders);
resources.alerters.extend(more.alerters);
resources.server_templates.extend(more.server_templates);
resources.resource_syncs.extend(more.resource_syncs);
resources.user_groups.extend(more.user_groups);
resources.variables.extend(more.variables);
Ok(())
} else if res.is_dir() {
let directory = fs::read_dir(path)
.context("failed to read directory contents")?;
for entry in directory.into_iter().flatten() {
let path = entry.path();
read_resources_recursive(&path, resources, log).with_context(
|| format!("failed to read resources from {path:?}"),
)?;
}
Ok(())
} else {
Err(anyhow!("resources path is neither file nor directory"))
}
}
fn parse_toml_file<T: DeserializeOwned>(
path: impl AsRef<std::path::Path>,
) -> anyhow::Result<T> {
let contents = std::fs::read_to_string(path)
.context("failed to read file contents")?;
toml::from_str(&contents)
// without this, the error comes through with multiple lines (\n) and formats badly
.map_err(|e| anyhow!("{e:#}"))
.context("failed to parse toml contents")
}
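// Not part of the diff: a minimal usage sketch for read_resources, assuming it
// runs inside this crate; the path below is hypothetical.
fn read_resources_sketch() -> anyhow::Result<()> {
  // Hypothetical checkout location of a resource sync repo.
  let path = std::path::PathBuf::from("/syncs/example/resources");
  // Recursively merges every *.toml under the path into one ResourcesToml;
  // the returned Log is what gets attached to the sync update.
  let (resources, _log) = read_resources(&path)?;
  println!(
    "read {} deployments and {} builds",
    resources.deployments.len(),
    resources.builds.len()
  );
  Ok(())
}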


@@ -0,0 +1,44 @@
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
api::write::RefreshResourceSyncPending, entities::user::sync_user,
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::state::{db_client, State};
pub mod deployment;
pub mod remote;
pub mod resource;
pub mod user_groups;
pub mod variables;
mod file;
mod resources;
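/// Spawns a background task that refreshes the pending updates
/// on every ResourceSync once every five minutes.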
pub fn spawn_sync_refresh_loop() {
tokio::spawn(async move {
let db = db_client().await;
let user = sync_user();
loop {
wait_until_timelength(Timelength::FiveMinutes, 0).await;
let Ok(syncs) = find_collect(&db.resource_syncs, None, None)
.await
.inspect_err(|e| warn!("failed to get resource syncs from db in refresh task | {e:#}")) else {
continue;
};
for sync in syncs {
State
.resolve(
RefreshResourceSyncPending { sync: sync.id },
user.clone(),
)
.await
.inspect_err(|e| {
warn!("failed to refresh resource sync in refresh task | sync: {} | {e:#}", sync.name)
})
.ok();
}
}
});
}


@@ -0,0 +1,72 @@
use std::fs;
use anyhow::{anyhow, Context};
use monitor_client::entities::{
sync::ResourceSync, to_monitor_name, toml::ResourcesToml,
update::Log, CloneArgs, LatestCommit,
};
use crate::{config::core_config, state::resource_sync_lock_cache};
pub async fn get_remote_resources(
sync: &ResourceSync,
) -> anyhow::Result<(
anyhow::Result<ResourcesToml>,
Vec<Log>,
// commit short hash
String,
// commit message
String,
)> {
let name = to_monitor_name(&sync.name);
let clone_args: CloneArgs = sync.into();
let config = core_config();
let github_token = clone_args
.github_account
.as_ref()
.map(|account| {
config.github_accounts.get(account).ok_or_else(|| {
anyhow!("did not find github token for account {account}")
})
})
.transpose()?
.cloned();
fs::create_dir_all(&config.sync_directory)
.context("failed to create sync directory")?;
// lock simultaneous access to same directory
let lock = resource_sync_lock_cache()
.get_or_insert_default(&sync.id)
.await;
let _lock = lock.lock().await;
let mut logs =
git::clone(clone_args, &config.sync_directory, github_token)
.await
.context("failed to clone resource repo")?;
let repo_dir = config.sync_directory.join(&name);
let LatestCommit { hash, message } =
git::get_commit_hash_info(&repo_dir)
.await
.context("failed to get commit hash info")?;
let repo_path = config.sync_directory.join(&sync.name);
let resource_path = repo_path.join(&sync.config.resource_path);
let res = super::file::read_resources(&resource_path).map(
|(resources, log)| {
logs.push(log);
resources
},
);
if let Err(e) = std::fs::remove_dir_all(&repo_path) {
warn!("failed to remove sync repo directory | {e:?}")
}
Ok((res, logs, hash, message))
}


@@ -0,0 +1,554 @@
use std::collections::HashMap;
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
self,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
sync::SyncUpdate,
tag::Tag,
toml::ResourceToml,
update::{Log, ResourceTarget},
user::sync_user,
},
};
use mungos::find::find_collect;
use partial_derive2::{Diff, FieldDiff, MaybeNone};
use resolver_api::Resolve;
use crate::{resource::MonitorResource, state::State};
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
pub type ToCreate<T> = Vec<ResourceToml<T>>;
/// Vec of resource names
pub type ToDelete = Vec<String>;
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>, ToDelete);
pub struct ToUpdateItem<T: Default> {
pub id: String,
pub resource: ResourceToml<T>,
pub update_description: bool,
pub update_tags: bool,
}
pub trait ResourceSync: MonitorResource + Sized {
fn resource_target(id: String) -> ResourceTarget;
/// Apply any changes to incoming toml partial config
/// before it is diffed against existing config
fn validate_partial_config(_config: &mut Self::PartialConfig) {}
/// Diffs the declared toml (partial) against the full existing config.
/// Removes all fields from toml (partial) that haven't changed.
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff>;
/// Apply any changes to computed config diff
/// before logging
fn validate_diff(_diff: &mut Self::ConfigDiff) {}
async fn run_updates(
to_create: ToCreate<Self::PartialConfig>,
to_update: ToUpdate<Self::PartialConfig>,
to_delete: ToDelete,
) -> Option<Log> {
if to_create.is_empty()
&& to_update.is_empty()
&& to_delete.is_empty()
{
return None;
}
let mut has_error = false;
let mut log =
format!("running updates on {}s", Self::resource_type());
for resource in to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match crate::resource::create::<Self>(
&resource.name,
resource.config,
sync_user(),
)
.await
{
Ok(resource) => resource.id,
Err(e) => {
has_error = true;
log.push_str(&format!(
"\n{}: failed to create {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name)
));
continue;
}
};
run_update_tags::<Self>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
run_update_description::<Self>(
id,
&name,
description,
&mut log,
&mut has_error,
)
.await;
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("created", Color::Green),
Self::resource_type(),
bold(&name)
));
}
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
} in to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if update_description {
run_update_description::<Self>(
id.clone(),
&name,
description,
&mut log,
&mut has_error,
)
.await;
}
if update_tags {
run_update_tags::<Self>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
}
if !resource.config.is_none() {
if let Err(e) = crate::resource::update::<Self>(
&id,
resource.config,
sync_user(),
)
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to update config on {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}' configuration",
muted("INFO"),
colored("updated", Color::Blue),
Self::resource_type(),
bold(&name)
));
}
}
}
for resource in to_delete {
if let Err(e) =
crate::resource::delete::<Self>(&resource, sync_user()).await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&resource),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("deleted", Color::Red),
Self::resource_type(),
bold(&resource)
));
}
}
let stage = format!("Update {}s", Self::resource_type());
Some(if has_error {
Log::error(&stage, log)
} else {
Log::simple(&stage, log)
})
}
}
/// Turns all the diffs into a readable string
pub async fn get_updates_for_view<Resource: ResourceSync>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<Option<SyncUpdate>> {
let map = find_collect(Resource::coll().await, None, None)
.await
.context("failed to get resources from db")?
.into_iter()
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let mut update = SyncUpdate {
log: format!("{} Updates", Resource::resource_type()),
..Default::default()
};
let mut to_delete = Vec::<String>::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
update.to_delete += 1;
to_delete.push(resource.name.clone())
}
}
}
for mut resource in resources {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: Resource::Config = resource.config.into();
resource.config = config.into();
Resource::validate_partial_config(&mut resource.config);
let mut diff = Resource::get_diff(
original.config.clone(),
resource.config,
all_resources,
)?;
Resource::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& resource.description == original.description
&& resource.tags == original_tags
{
continue;
}
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("UPDATE", Color::Blue),
Resource::resource_type(),
bold(&resource.name)
));
let mut lines = Vec::<String>::new();
if resource.description != original.description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.description, Color::Red),
muted("to"),
colored(&resource.description, Color::Green)
));
}
if resource.tags != original_tags {
let from =
colored(&format!("{:?}", original_tags), Color::Red);
let to =
colored(&format!("{:?}", resource.tags), Color::Green);
lines.push(format!(
"{}: 'tags'\n{}: {from}\n{}: {to}",
muted("field"),
muted("from"),
muted("to"),
));
}
lines.extend(diff.iter_field_diffs().map(
|FieldDiff { field, from, to }| {
format!(
"{}: '{field}'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(from, Color::Red),
muted("to"),
colored(to, Color::Green)
)
},
));
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
None => {
update.to_create += 1;
update.log.push_str(&format!(
"\n\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
colored("CREATE", Color::Green),
Resource::resource_type(),
bold(&resource.name),
muted("description"),
resource.description,
muted("tags"),
resource.tags,
muted("config"),
serde_json::to_string_pretty(&resource.config)
.context("failed to serialize config to json")?
))
}
}
}
for name in to_delete {
update.log.push_str(&format!(
"\n\n{}: {}: '{}'\n-------------------",
colored("DELETE", Color::Red),
Resource::resource_type(),
bold(&name)
));
}
let any_change = update.to_create > 0
|| update.to_update > 0
|| update.to_delete > 0;
Ok(any_change.then_some(update))
}
/// Gets all the resources to update. For use in sync execution.
pub async fn get_updates_for_execution<Resource: ResourceSync>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
let map = find_collect(Resource::coll().await, None, None)
.await
.context("failed to get resources from db")?
.into_iter()
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let mut to_create = ToCreate::<Resource::PartialConfig>::new();
let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
let mut to_delete = ToDelete::new();
if delete {
for resource in map.values() {
if !resources.iter().any(|r| r.name == resource.name) {
to_delete.push(resource.name.clone());
}
}
}
for mut resource in resources {
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: Resource::Config = resource.config.into();
resource.config = config.into();
Resource::validate_partial_config(&mut resource.config);
let mut diff = Resource::get_diff(
original.config.clone(),
resource.config,
all_resources,
)?;
Resource::validate_diff(&mut diff);
let original_tags = original
.tags
.iter()
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
// Only proceed if there are any fields to update,
// or a change to tags / description
if diff.is_none()
&& resource.description == original.description
&& resource.tags == original_tags
{
continue;
}
// Minimizes updates through diffing.
resource.config = diff.into();
let update = ToUpdateItem {
id: original.id.clone(),
update_description: resource.description
!= original.description,
update_tags: resource.tags != original_tags,
resource,
};
to_update.push(update);
}
None => to_create.push(resource),
}
}
Ok((to_create, to_update, to_delete))
}
pub async fn run_update_tags<Resource: ResourceSync>(
id: String,
name: &str,
tags: Vec<String>,
log: &mut String,
has_error: &mut bool,
) {
// Update tags
if let Err(e) = State
.resolve(
UpdateTagsOnResource {
target: Resource::resource_target(id),
tags,
},
sync_user().to_owned(),
)
.await
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to update tags on {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Resource::resource_type(),
bold(name),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}' tags",
muted("INFO"),
colored("updated", Color::Blue),
Resource::resource_type(),
bold(name)
));
}
}
pub async fn run_update_description<Resource: ResourceSync>(
id: String,
name: &str,
description: String,
log: &mut String,
has_error: &mut bool,
) {
if let Err(e) = State
.resolve(
UpdateDescription {
target: Resource::resource_target(id.clone()),
description,
},
sync_user().to_owned(),
)
.await
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to update description on {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Resource::resource_type(),
bold(name),
))
} else {
log.push_str(&format!(
"\n{}: {} {} '{}' description",
muted("INFO"),
colored("updated", Color::Blue),
Resource::resource_type(),
bold(name)
));
}
}
pub struct AllResourcesById {
pub servers: HashMap<String, Server>,
pub deployments: HashMap<String, Deployment>,
pub builds: HashMap<String, Build>,
pub repos: HashMap<String, Repo>,
pub procedures: HashMap<String, Procedure>,
pub builders: HashMap<String, Builder>,
pub alerters: HashMap<String, Alerter>,
pub templates: HashMap<String, ServerTemplate>,
pub syncs: HashMap<String, entities::sync::ResourceSync>,
}
impl AllResourcesById {
pub async fn load() -> anyhow::Result<Self> {
Ok(Self {
servers: crate::resource::get_id_to_resource_map::<Server>()
.await?,
deployments: crate::resource::get_id_to_resource_map::<
Deployment,
>()
.await?,
builds: crate::resource::get_id_to_resource_map::<Build>()
.await?,
repos: crate::resource::get_id_to_resource_map::<Repo>()
.await?,
procedures:
crate::resource::get_id_to_resource_map::<Procedure>().await?,
builders: crate::resource::get_id_to_resource_map::<Builder>()
.await?,
alerters: crate::resource::get_id_to_resource_map::<Alerter>()
.await?,
templates: crate::resource::get_id_to_resource_map::<
ServerTemplate,
>()
.await?,
syncs: crate::resource::get_id_to_resource_map::<
entities::sync::ResourceSync,
>()
.await?,
})
}
}
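// Not part of the diff: a minimal sketch of driving one resource type through
// the generic helpers above. Assumes the bounds on the MonitorResource
// associated types line up as they do for the functions in this file, and that
// the toml resources and tag map are already in hand.
async fn sync_resource_sketch<R: ResourceSync>(
  resources: Vec<ResourceToml<R::PartialConfig>>,
  id_to_tags: &HashMap<String, Tag>,
) -> anyhow::Result<Option<Log>> {
  // Id -> resource maps, used to swap ids for names while diffing.
  let all_resources = AllResourcesById::load().await?;
  // Split the toml resources into creates / updates / deletes against the db.
  let (to_create, to_update, to_delete) = get_updates_for_execution::<R>(
    resources,
    false,
    &all_resources,
    id_to_tags,
  )
  .await?;
  // Apply them via the trait's default (or overridden) run_updates.
  Ok(R::run_updates(to_create, to_update, to_delete).await)
}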


@@ -0,0 +1,450 @@
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::execute::Execution,
entities::{
self,
alerter::Alerter,
build::Build,
builder::{Builder, BuilderConfig},
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
update::{Log, ResourceTarget},
user::sync_user,
},
};
use partial_derive2::{MaybeNone, PartialDiff};
use crate::{
helpers::sync::resource::{
run_update_description, run_update_tags, ResourceSync,
ToUpdateItem,
},
resource::MonitorResource,
};
use super::resource::{
AllResourcesById, ToCreate, ToDelete, ToUpdate,
};
impl ResourceSync for Server {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Server(id)
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
}
impl ResourceSync for Build {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Build(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
original.builder_id = resources
.builders
.get(&original.builder_id)
.map(|b| b.name.clone())
.unwrap_or_default();
Ok(original.partial_diff(update))
}
fn validate_diff(diff: &mut Self::ConfigDiff) {
if let Some((_, to)) = &diff.version {
if to.is_none() {
diff.version = None;
}
}
}
}
impl ResourceSync for Repo {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Repo(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
// Need to replace server id with name
original.server_id = resources
.servers
.get(&original.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
Ok(original.partial_diff(update))
}
}
impl ResourceSync for Alerter {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Alerter(id)
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
}
impl ResourceSync for Builder {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Builder(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
// need to replace server builder id with name
if let BuilderConfig::Server(config) = &mut original {
config.server_id = resources
.servers
.get(&config.server_id)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Ok(original.partial_diff(update))
}
}
impl ResourceSync for ServerTemplate {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::ServerTemplate(id)
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
}
impl ResourceSync for entities::sync::ResourceSync {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::ResourceSync(id)
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
}
impl ResourceSync for Procedure {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Procedure(id)
}
fn get_diff(
mut original: Self::Config,
update: Self::PartialConfig,
resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
for stage in &mut original.stages {
for execution in &mut stage.executions {
match &mut execution.execution {
Execution::None(_) => {}
Execution::RunProcedure(config) => {
config.procedure = resources
.procedures
.get(&config.procedure)
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::RunBuild(config) => {
config.build = resources
.builds
.get(&config.build)
.map(|b| b.name.clone())
.unwrap_or_default();
}
Execution::Deploy(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartContainer(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopContainer(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RemoveContainer(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::CloneRepo(config) => {
config.repo = resources
.repos
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PullRepo(config) => {
config.repo = resources
.repos
.get(&config.repo)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopAllContainers(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneNetworks(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneImages(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneContainers(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RunSync(config) => {
config.sync = resources
.syncs
.get(&config.sync)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::Sleep(_) => {}
}
}
}
Ok(original.partial_diff(update))
}
async fn run_updates(
mut to_create: ToCreate<Self::PartialConfig>,
mut to_update: ToUpdate<Self::PartialConfig>,
to_delete: ToDelete,
) -> Option<Log> {
if to_create.is_empty()
&& to_update.is_empty()
&& to_delete.is_empty()
{
return None;
}
let mut has_error = false;
let mut log =
format!("running updates on {}s", Self::resource_type());
for name in to_delete {
if let Err(e) =
crate::resource::delete::<Procedure>(&name, sync_user()).await
{
has_error = true;
log.push_str(&format!(
"{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name),
))
} else {
log.push_str(&format!(
"{}: {} {} '{}'",
muted("INFO"),
colored("deleted", Color::Red),
Self::resource_type(),
bold(&name)
));
}
}
if to_update.is_empty() && to_create.is_empty() {
let stage = "Update Procedures";
return Some(if has_error {
Log::error(stage, log)
} else {
Log::simple(stage, log)
});
}
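// Procedures can reference other procedures that don't exist yet, so creation
// and updates are retried for up to 10 rounds, pulling out whatever succeeds
// each round, with errors only logged on the final round.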
for i in 0..10 {
let mut to_pull = Vec::new();
for ToUpdateItem {
id,
resource,
update_description,
update_tags,
} in &to_update
{
// Update resource
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
if *update_description {
run_update_description::<Procedure>(
id.clone(),
&name,
description,
&mut log,
&mut has_error,
)
.await;
}
if *update_tags {
run_update_tags::<Procedure>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
}
if !resource.config.is_none() {
if let Err(e) = crate::resource::update::<Procedure>(
id,
resource.config.clone(),
sync_user(),
)
.await
{
if i == 9 {
has_error = true;
log.push_str(&format!(
"\n{}: failed to update {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name)
));
}
continue;
}
}
log.push_str(&format!(
"\n{}: {} '{}' updated",
muted("INFO"),
Self::resource_type(),
bold(&name)
));
// have to clone the id out so to_update can be mutated after the loop
to_pull.push(id.clone());
}
// Remove the successfully updated items from to_update.
to_update.retain(|resource| !to_pull.contains(&resource.id));
let mut to_pull = Vec::new();
for resource in &to_create {
let name = resource.name.clone();
let tags = resource.tags.clone();
let description = resource.description.clone();
let id = match crate::resource::create::<Procedure>(
&name,
resource.config.clone(),
sync_user(),
)
.await
{
Ok(resource) => resource.id,
Err(e) => {
if i == 9 {
has_error = true;
log.push_str(&format!(
"\n{}: failed to create {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name)
));
}
continue;
}
};
run_update_tags::<Procedure>(
id.clone(),
&name,
tags,
&mut log,
&mut has_error,
)
.await;
run_update_description::<Procedure>(
id,
&name,
description,
&mut log,
&mut has_error,
)
.await;
log.push_str(&format!(
"\n{}: {} {} '{}'",
muted("INFO"),
colored("created", Color::Green),
Self::resource_type(),
bold(&name)
));
to_pull.push(name);
}
to_create.retain(|resource| !to_pull.contains(&resource.name));
if to_update.is_empty() && to_create.is_empty() {
let stage = "Update Procedures";
return Some(if has_error {
Log::error(stage, log)
} else {
Log::simple(stage, log)
});
}
}
warn!("procedure sync loop exited after max iterations");
todo!()
}
}


@@ -0,0 +1,652 @@
use std::{cmp::Ordering, collections::HashMap};
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::{
read::ListUserTargetPermissions,
write::{
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
UpdatePermissionOnTarget,
},
},
entities::{
permission::UserTarget,
sync::SyncUpdate,
toml::{PermissionToml, UserGroupToml},
update::{Log, ResourceTarget},
user::sync_user,
},
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::state::{db_client, State};
use super::resource::AllResourcesById;
pub struct UpdateItem {
user_group: UserGroupToml,
update_users: bool,
update_permissions: bool,
}
pub struct DeleteItem {
id: String,
name: String,
}
pub async fn get_updates_for_view(
user_groups: Vec<UserGroupToml>,
delete: bool,
all_resources: &AllResourcesById,
) -> anyhow::Result<Option<SyncUpdate>> {
let map = find_collect(&db_client().await.user_groups, None, None)
.await
.context("failed to query db for UserGroups")?
.into_iter()
.map(|ug| (ug.name.clone(), ug))
.collect::<HashMap<_, _>>();
let mut update = SyncUpdate {
log: String::from("User Group Updates"),
..Default::default()
};
let mut to_delete = Vec::<String>::new();
if delete {
for user_group in map.values() {
if !user_groups.iter().any(|ug| ug.name == user_group.name) {
update.to_delete += 1;
to_delete.push(user_group.name.clone());
}
}
}
let id_to_user = find_collect(&db_client().await.users, None, None)
.await
.context("failed to query db for Users")?
.into_iter()
.map(|user| (user.id.clone(), user))
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
update.to_create += 1;
update.log.push_str(&format!(
"\n\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
colored("CREATE", Color::Green),
colored(&user_group.name, Color::Green),
muted("users"),
user_group.users,
muted("permissions"),
user_group.permissions,
));
continue;
}
};
let mut original_users = original
.users
.into_iter()
.filter_map(|user_id| {
id_to_user.get(&user_id).map(|u| u.username.clone())
})
.collect::<Vec<_>>();
let mut original_permissions = State
.resolve(
ListUserTargetPermissions {
user_target: UserTarget::UserGroup(original.id),
},
sync_user().to_owned(),
)
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
ResourceTarget::System(_) => {}
ResourceTarget::Build(id) => {
*id = all_resources
.builds
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = all_resources
.builders
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id = all_resources
.deployments
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = all_resources
.servers
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = all_resources
.repos
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = all_resources
.alerters
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id = all_resources
.procedures
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = all_resources
.templates
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = all_resources
.syncs
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
level: p.level,
}
})
.collect::<Vec<_>>();
original_users.sort();
user_group.users.sort();
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_permissions =
user_group.permissions != original_permissions;
// only add to the log once a diff is detected
if update_users || update_permissions {
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: user group: '{}'\n-------------------",
colored("UPDATE", Color::Blue),
bold(&user_group.name),
));
let mut lines = Vec::<String>::new();
if update_users {
let adding = user_group
.users
.iter()
.filter(|user| !original_users.contains(user))
.map(|user| user.as_str())
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None")
} else {
colored(&adding.join(", "), Color::Green)
};
let removing = original_users
.iter()
.filter(|user| !user_group.users.contains(user))
.map(|user| user.as_str())
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None")
} else {
colored(&removing.join(", "), Color::Red)
};
lines.push(format!(
"{}: 'users'\n{}: {removing}\n{}: {adding}",
muted("field"),
muted("removing"),
muted("adding"),
))
}
if update_permissions {
let adding = user_group
.permissions
.iter()
.filter(|permission| {
!original_permissions.contains(permission)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let adding = if adding.is_empty() {
String::from("None")
} else {
colored(&adding.join(", "), Color::Green)
};
let removing = original_permissions
.iter()
.filter(|permission| {
!user_group.permissions.contains(permission)
})
.map(|permission| format!("{permission:?}"))
.collect::<Vec<_>>();
let removing = if removing.is_empty() {
String::from("None")
} else {
colored(&removing.join(", "), Color::Red)
};
lines.push(format!(
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
muted("field"),
muted("removing"),
muted("adding"),
))
}
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
}
for name in &to_delete {
update.log.push_str(&format!(
"\n\n{}: user group: '{}'\n-------------------",
colored("DELETE", Color::Red),
bold(name),
));
}
let any_change = update.to_create > 0
|| update.to_update > 0
|| update.to_delete > 0;
Ok(any_change.then_some(update))
}
pub async fn get_updates_for_execution(
user_groups: Vec<UserGroupToml>,
delete: bool,
all_resources: &AllResourcesById,
) -> anyhow::Result<(
Vec<UserGroupToml>,
Vec<UpdateItem>,
Vec<DeleteItem>,
)> {
let map = find_collect(&db_client().await.user_groups, None, None)
.await
.context("failed to query db for UserGroups")?
.into_iter()
.map(|ug| (ug.name.clone(), ug))
.collect::<HashMap<_, _>>();
let mut to_create = Vec::<UserGroupToml>::new();
let mut to_update = Vec::<UpdateItem>::new();
let mut to_delete = Vec::<DeleteItem>::new();
if delete {
for user_group in map.values() {
if !user_groups.iter().any(|ug| ug.name == user_group.name) {
to_delete.push(DeleteItem {
id: user_group.id.clone(),
name: user_group.name.clone(),
});
}
}
}
if user_groups.is_empty() {
return Ok((to_create, to_update, to_delete));
}
let id_to_user = find_collect(&db_client().await.users, None, None)
.await
.context("failed to query db for Users")?
.into_iter()
.map(|user| (user.id.clone(), user))
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
let original = match map.get(&user_group.name).cloned() {
Some(original) => original,
None => {
to_create.push(user_group);
continue;
}
};
let mut original_users = original
.users
.into_iter()
.filter_map(|user_id| {
id_to_user.get(&user_id).map(|u| u.username.clone())
})
.collect::<Vec<_>>();
let mut original_permissions = State
.resolve(
ListUserTargetPermissions {
user_target: UserTarget::UserGroup(original.id),
},
sync_user().to_owned(),
)
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
ResourceTarget::System(_) => {}
ResourceTarget::Build(id) => {
*id = all_resources
.builds
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = all_resources
.builders
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id = all_resources
.deployments
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = all_resources
.servers
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = all_resources
.repos
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = all_resources
.alerters
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id = all_resources
.procedures
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = all_resources
.templates
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = all_resources
.syncs
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
level: p.level,
}
})
.collect::<Vec<_>>();
original_users.sort();
user_group.users.sort();
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_permissions =
user_group.permissions != original_permissions;
// only push an update if a diff was detected
if update_users || update_permissions {
to_update.push(UpdateItem {
user_group,
update_users,
update_permissions,
});
}
}
Ok((to_create, to_update, to_delete))
}
/// Order permissions in a deterministic way so the lists can be compared.
fn sort_permissions(
a: &PermissionToml,
b: &PermissionToml,
) -> Ordering {
let (a_t, a_id) = a.target.extract_variant_id();
let (b_t, b_id) = b.target.extract_variant_id();
match (a_t.cmp(&b_t), a_id.cmp(b_id)) {
(Ordering::Greater, _) => Ordering::Greater,
(Ordering::Less, _) => Ordering::Less,
(_, Ordering::Greater) => Ordering::Greater,
(_, Ordering::Less) => Ordering::Less,
_ => Ordering::Equal,
}
}
pub async fn run_updates(
to_create: Vec<UserGroupToml>,
to_update: Vec<UpdateItem>,
to_delete: Vec<DeleteItem>,
) -> Option<Log> {
if to_create.is_empty()
&& to_update.is_empty()
&& to_delete.is_empty()
{
return None;
}
let mut has_error = false;
let mut log = String::from("running updates on UserGroups");
// Create the non-existent user groups
for user_group in to_create {
// Create the user group
if let Err(e) = State
.resolve(
CreateUserGroup {
name: user_group.name.clone(),
},
sync_user().to_owned(),
)
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to create user group '{}' | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group.name)
));
continue;
} else {
log.push_str(&format!(
"\n{}: {} user group '{}'",
muted("INFO"),
colored("created", Color::Green),
bold(&user_group.name)
))
};
set_users(
user_group.name.clone(),
user_group.users,
&mut log,
&mut has_error,
)
.await;
run_update_permissions(
user_group.name,
user_group.permissions,
&mut log,
&mut has_error,
)
.await;
}
// Update the existing user groups
for UpdateItem {
user_group,
update_users,
update_permissions,
} in to_update
{
if update_users {
set_users(
user_group.name.clone(),
user_group.users,
&mut log,
&mut has_error,
)
.await;
}
if update_permissions {
run_update_permissions(
user_group.name,
user_group.permissions,
&mut log,
&mut has_error,
)
.await;
}
}
for user_group in to_delete {
if let Err(e) = State
.resolve(
DeleteUserGroup { id: user_group.id },
sync_user().to_owned(),
)
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete user group '{}' | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group.name)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}'",
muted("INFO"),
colored("deleted", Color::Red),
bold(&user_group.name)
))
}
}
let stage = "Update UserGroups";
Some(if has_error {
Log::error(stage, log)
} else {
Log::simple(stage, log)
})
}
async fn set_users(
user_group: String,
users: Vec<String>,
log: &mut String,
has_error: &mut bool,
) {
if let Err(e) = State
.resolve(
SetUsersInUserGroup {
user_group: user_group.clone(),
users,
},
sync_user().to_owned(),
)
.await
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set users in group {} | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' users",
muted("INFO"),
colored("updated", Color::Blue),
bold(&user_group)
))
}
}
async fn run_update_permissions(
user_group: String,
permissions: Vec<PermissionToml>,
log: &mut String,
has_error: &mut bool,
) {
for PermissionToml { target, level } in permissions {
if let Err(e) = State
.resolve(
UpdatePermissionOnTarget {
user_target: UserTarget::UserGroup(user_group.clone()),
resource_target: target.clone(),
permission: level,
},
sync_user().to_owned(),
)
.await
{
*has_error = true;
log.push_str(&format!(
"\n{}: failed to set permssion in group {} | target: {target:?} | {e:#}",
colored("ERROR", Color::Red),
bold(&user_group)
))
} else {
log.push_str(&format!(
"\n{}: {} user group '{}' permissions",
muted("INFO"),
colored("updated", Color::Blue),
bold(&user_group)
))
}
}
}


@@ -0,0 +1,319 @@
use std::collections::HashMap;
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
api::write::{
CreateVariable, DeleteVariable, UpdateVariableDescription,
UpdateVariableValue,
},
entities::{
sync::SyncUpdate, update::Log, user::sync_user,
variable::Variable,
},
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::state::{db_client, State};
pub struct ToUpdateItem {
pub variable: Variable,
pub update_value: bool,
pub update_description: bool,
}
pub async fn get_updates_for_view(
variables: Vec<Variable>,
delete: bool,
) -> anyhow::Result<Option<SyncUpdate>> {
let map = find_collect(&db_client().await.variables, None, None)
.await
.context("failed to query db for variables")?
.into_iter()
.map(|v| (v.name.clone(), v))
.collect::<HashMap<_, _>>();
let mut update = SyncUpdate {
log: String::from("Variable Updates"),
..Default::default()
};
let mut to_delete = Vec::<String>::new();
if delete {
for variable in map.values() {
if !variables.iter().any(|v| v.name == variable.name) {
update.to_delete += 1;
to_delete.push(variable.name.clone());
}
}
}
for variable in variables {
match map.get(&variable.name) {
Some(original) => {
let item = ToUpdateItem {
update_value: original.value != variable.value,
update_description: original.description
!= variable.description,
variable,
};
if !item.update_value && !item.update_description {
continue;
}
update.to_update += 1;
update.log.push_str(&format!(
"\n\n{}: variable: '{}'\n-------------------",
colored("UPDATE", Color::Blue),
bold(&item.variable.name),
));
let mut lines = Vec::<String>::new();
if item.update_value {
lines.push(format!(
"{}: 'value'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.value, Color::Red),
muted("to"),
colored(&item.variable.value, Color::Green)
))
}
if item.update_description {
lines.push(format!(
"{}: 'description'\n{}: {}\n{}: {}",
muted("field"),
muted("from"),
colored(&original.description, Color::Red),
muted("to"),
colored(&item.variable.description, Color::Green)
))
}
update.log.push('\n');
update.log.push_str(&lines.join("\n-------------------\n"));
}
None => {
update.to_create += 1;
if variable.description.is_empty() {
update.log.push_str(&format!(
"\n\n{}: variable: {}\n{}: {}",
colored("CREATE", Color::Green),
colored(&variable.name, Color::Green),
muted("value"),
variable.value,
));
} else {
update.log.push_str(&format!(
"\n\n{}: variable: {}\n{}: {}\n{}: {}",
colored("CREATE", Color::Green),
colored(&variable.name, Color::Green),
muted("description"),
variable.description,
muted("value"),
variable.value,
));
}
}
}
}
for name in &to_delete {
update.log.push_str(&format!(
"\n\n{}: variable: '{}'\n-------------------",
colored("DELETE", Color::Red),
bold(name),
));
}
let any_change = update.to_create > 0
|| update.to_update > 0
|| update.to_delete > 0;
Ok(any_change.then_some(update))
}
pub async fn get_updates_for_execution(
variables: Vec<Variable>,
delete: bool,
) -> anyhow::Result<(Vec<Variable>, Vec<ToUpdateItem>, Vec<String>)> {
let map = find_collect(&db_client().await.variables, None, None)
.await
.context("failed to query db for variables")?
.into_iter()
.map(|v| (v.name.clone(), v))
.collect::<HashMap<_, _>>();
let mut to_create = Vec::<Variable>::new();
let mut to_update = Vec::<ToUpdateItem>::new();
let mut to_delete = Vec::<String>::new();
if delete {
for variable in map.values() {
if !variables.iter().any(|v| v.name == variable.name) {
to_delete.push(variable.name.clone());
}
}
}
for variable in variables {
match map.get(&variable.name) {
Some(original) => {
let item = ToUpdateItem {
update_value: original.value != variable.value,
update_description: original.description
!= variable.description,
variable,
};
if !item.update_value && !item.update_description {
continue;
}
to_update.push(item);
}
None => to_create.push(variable),
}
}
Ok((to_create, to_update, to_delete))
}
pub async fn run_updates(
to_create: Vec<Variable>,
to_update: Vec<ToUpdateItem>,
to_delete: Vec<String>,
) -> Option<Log> {
if to_create.is_empty()
&& to_update.is_empty()
&& to_delete.is_empty()
{
return None;
}
let mut has_error = false;
let mut log = String::from("running updates on Variables");
for variable in to_create {
if let Err(e) = State
.resolve(
CreateVariable {
name: variable.name.clone(),
value: variable.value,
description: variable.description,
},
sync_user().to_owned(),
)
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to create variable '{}' | {e:#}",
colored("ERROR", Color::Red),
bold(&variable.name)
));
} else {
log.push_str(&format!(
"\n{}: {} variable '{}'",
muted("INFO"),
colored("created", Color::Green),
bold(&variable.name)
))
};
}
for ToUpdateItem {
variable,
update_value,
update_description,
} in to_update
{
if update_value {
if let Err(e) = State
.resolve(
UpdateVariableValue {
name: variable.name.clone(),
value: variable.value,
},
sync_user().to_owned(),
)
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to update variable value for '{}' | {e:#}",
colored("ERROR", Color::Red),
bold(&variable.name)
))
} else {
log.push_str(&format!(
"\n{}: {} variable '{}' value",
muted("INFO"),
colored("updated", Color::Blue),
bold(&variable.name)
))
};
}
if update_description {
if let Err(e) = State
.resolve(
UpdateVariableDescription {
name: variable.name.clone(),
description: variable.description,
},
sync_user().to_owned(),
)
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to update variable description for '{}' | {e:#}",
colored("ERROR", Color::Red),
bold(&variable.name)
))
} else {
log.push_str(&format!(
"\n{}: {} variable '{}' description",
muted("INFO"),
colored("updated", Color::Blue),
bold(&variable.name)
))
};
}
}
for variable in to_delete {
if let Err(e) = State
.resolve(
DeleteVariable {
name: variable.clone(),
},
sync_user().to_owned(),
)
.await
{
has_error = true;
log.push_str(&format!(
"\n{}: failed to delete variable '{}' | {e:#}",
colored("ERROR", Color::Red),
bold(&variable)
))
} else {
log.push_str(&format!(
"\n{}: {} variable '{}'",
muted("INFO"),
colored("deleted", Color::Red),
bold(&variable)
))
}
}
let stage = "Update Variables";
Some(if has_error {
Log::error(stage, log)
} else {
Log::simple(stage, log)
})
}
