Compare commits

...

1481 Commits

Author SHA1 Message Date
mbecker20
9418a6d963 update client to 1.9.0 2024-06-23 02:30:50 -07:00
mbecker20
57646b750f clean up 2024-06-23 02:29:47 -07:00
mbecker20
0d57f9411c can deploy ecr 2024-06-23 02:27:19 -07:00
mbecker20
7d396dd539 clean up ecr 2024-06-23 02:22:14 -07:00
mbecker20
bfe762b71a install unzip 2024-06-23 01:37:12 -07:00
mbecker20
16ede84bac install aws cli core 2024-06-23 01:31:15 -07:00
mbecker20
4524db94db get ecr token using cli 2024-06-23 01:23:56 -07:00
mbecker20
580dab4acd improve error log formatting 2024-06-23 01:02:52 -07:00
mbecker20
645382856a update only flattens one level deep 2024-06-22 23:56:01 -07:00
mbecker20
5c4e6a6dbb select aws config 2024-06-22 23:33:35 -07:00
mbecker20
66810e1efb add method to get available aws ecr labels 2024-06-22 23:29:02 -07:00
mbecker20
69a84882f0 1.9.0 2024-06-22 23:06:53 -07:00
mbecker20
41648436a5 default periphery method fields 2024-06-22 22:59:51 -07:00
mbecker20
083a88aa7b implement aws ecr image registry 2024-06-22 22:57:26 -07:00
mbecker20
750f95c90d improve shortcut menu 2024-06-22 18:24:38 -07:00
mbecker20
129f3ecd82 add more kb shortcuts and shortcut menu 2024-06-22 02:56:57 -07:00
mbecker20
1b754f80ab fix double emojis 2024-06-22 01:54:45 -07:00
mbecker20
968a882012 fix alerter table 2024-06-22 01:29:31 -07:00
mbecker20
696ebdb26f label blacklist correctly 2024-06-22 01:25:38 -07:00
mbecker20
8fee04607d improve slack alerting 2024-06-22 01:10:13 -07:00
mbecker20
6fe250244b add alerter blacklist 2024-06-22 00:30:43 -07:00
mbecker20
b530af0eec send_alerts for sync alert 2024-06-21 23:09:38 -07:00
mbecker20
21e9361079 remove unused 2024-06-21 02:28:35 -07:00
mbecker20
524d2d956b fix alerts usage 2024-06-21 02:23:42 -07:00
mbecker20
aca9633941 add links and errors to slack messages 2024-06-21 01:12:46 -07:00
mbecker20
32e1bd2dda add badges for tag filter shortcuts 2024-06-21 00:15:40 -07:00
mbecker20
cb363d1559 add shift + T and shift + C to manage tags 2024-06-20 23:51:12 -07:00
mbecker20
63eb74b9c8 Add and configure build alerts 2024-06-20 23:41:28 -07:00
mbecker20
bbcc27704f bump rust builder version 2024-06-16 16:00:57 -07:00
mbecker20
0aa9513dd0 1.8.0 2024-06-16 15:36:51 -07:00
mbecker20
26b216b478 add resources page 2024-06-16 15:33:31 -07:00
mbecker20
166299bb57 sync docs 2024-06-16 14:35:09 -07:00
mbecker20
03c47eb3dc remove cli sync 2024-06-16 01:41:54 -07:00
mbecker20
1fcb4ad085 move / update changelog 2024-06-16 01:41:15 -07:00
mbecker20
f51af8fbe1 docs 2024-06-16 01:34:08 -07:00
mbecker20
4a975e1b92 update resource sync docs 2024-06-16 01:33:05 -07:00
mbecker20
ba556e3284 fix doc link 2024-06-16 00:31:23 -07:00
mbecker20
299a326942 log build has new version 2024-06-16 00:20:22 -07:00
mbecker20
a5d4b9aefb add cached results reasons 2024-06-16 00:04:05 -07:00
mbecker20
40b820ae42 add reason to deploy logs 2024-06-15 22:01:14 -07:00
mbecker20
7028bf2996 remove termination_signal for tokio signal 2024-06-15 21:48:54 -07:00
mbecker20
75ebd0e6c0 fix fe cancel logic error 2024-06-15 21:36:26 -07:00
mbecker20
426153df66 try improve toml parse error message 2024-06-15 21:33:53 -07:00
mbecker20
5bd423a6a6 sync deploy new build 2024-06-15 21:15:17 -07:00
mbecker20
c24131d383 nested propagate read resources error 2024-06-15 20:37:29 -07:00
mbecker20
9f54b6c26a 1.8.0. improve env config UI, add sync deploy state management 2024-06-15 20:15:33 -07:00
mbecker20
ab8ae51ece slight more colors 2024-06-15 20:14:25 -07:00
mbecker20
ef2a83ff16 add colors to procedure logs 2024-06-15 20:06:34 -07:00
mbecker20
7872771aee clean up sync log 2024-06-15 19:45:53 -07:00
mbecker20
b12cf858d8 sync deploy logs need \n 2024-06-15 19:36:46 -07:00
mbecker20
38dba91c3a sync deploy accounts for any dependencies in 'after' need deploy 2024-06-15 19:20:45 -07:00
mbecker20
ea8136aa57 add sync deployment state log 2024-06-15 17:31:49 -07:00
mbecker20
f956e12e28 move formatting to shared lib 2024-06-15 17:15:05 -07:00
mbecker20
207ea52b95 add finished log 2024-06-15 17:12:02 -07:00
mbecker20
caf28d3a26 sync deploy 2024-06-15 17:03:16 -07:00
mbecker20
8fff45649d implement sync deployment get updates for view with deploy action 2024-06-15 15:50:10 -07:00
mbecker20
de5df70e11 invert search FE 2024-06-15 00:58:03 -07:00
mbecker20
3df010ac2a read req error debug 2024-06-15 00:54:11 -07:00
mbecker20
2d3beb708e invert logs 2024-06-15 00:28:04 -07:00
mbecker20
1dc22d01c4 improve execute instrumentation 2024-06-15 00:20:28 -07:00
mbecker20
eb029d0408 clone repo to specific directory on host 2024-06-14 23:43:47 -07:00
mbecker20
f926932181 build / deployment env variable / secret selectors 2024-06-14 23:28:08 -07:00
mbecker20
cc96d80c6a string deser filter empty lines 2024-06-14 22:20:39 -07:00
mbecker20
144b49495c string deser can handle empty string 2024-06-14 22:15:02 -07:00
mbecker20
de9354bdc7 frontend manage env with string 2024-06-14 22:10:07 -07:00
mbecker20
38bfee84d7 read resources propagate error 2024-06-14 21:53:13 -07:00
mbecker20
ec33d9fb9e trim incoming value env var string, conversion string, before deserialize 2024-06-14 21:42:59 -07:00
mbecker20
0a66937b1d fix unused linting 2024-06-14 21:30:10 -07:00
mbecker20
43cc0c3bc1 remove @ in format date 2024-06-14 14:48:22 -07:00
mbecker20
c14b395c70 quick copy variable value 2024-06-12 12:15:29 -07:00
mbecker20
7b8529a7c6 tweak colors 2024-06-12 11:55:06 -07:00
mbecker20
547c089581 update colors 2024-06-12 11:53:39 -07:00
mbecker20
4fe5e461b3 use stroke for icons 2024-06-12 03:48:47 -07:00
mbecker20
edfb873f7c improve error logs 2024-06-12 03:22:51 -07:00
mbecker20
5ef5294c44 remove onkeydown causing redundant create 2024-06-12 03:15:07 -07:00
mbecker20
5d3c50e04f reorder procedure config table 2024-06-12 02:47:41 -07:00
mbecker20
f10efbb5ba add bg to body 2024-06-12 02:39:26 -07:00
mbecker20
39ce98161b add the colors, always plz 2024-06-12 02:21:49 -07:00
mbecker20
cff6e79eee fix omnibar all resource types 2024-06-12 01:46:30 -07:00
mbecker20
dedf22ede8 continue on disabled stage 2024-06-12 01:25:10 -07:00
mbecker20
6955b92a99 add same colors in update 2024-06-12 01:15:39 -07:00
mbecker20
5c63eeab02 better sync coloring 2024-06-12 01:13:33 -07:00
mbecker20
4c14a4ae20 create variable log skip description line if it's empty 2024-06-12 00:39:23 -07:00
mbecker20
29fd856a2d deal with deployment build version 2024-06-11 03:07:56 -07:00
mbecker20
195bdbd94a fix " to \" 2024-06-11 02:14:57 -07:00
mbecker20
298ccd945c improve export dialog sizing 2024-06-11 01:42:06 -07:00
mbecker20
436e4e79e9 toml include ResourceSync 2024-06-11 01:09:37 -07:00
mbecker20
8b8c89d976 1.7.3 procedure stage alias 2024-06-11 00:51:16 -07:00
mbecker20
25c8d25636 1.7.2 default resource config parsing 2024-06-11 00:44:41 -07:00
mbecker20
ea242de2e4 default the config if not exists 2024-06-11 00:34:11 -07:00
mbecker20
be03547407 reorder struct fields for improved toml 2024-06-11 00:04:20 -07:00
mbecker20
9c0d28b311 allow inline arrow up to max length 2024-06-10 23:53:23 -07:00
mbecker20
f269deb99c update toml_pretty 2024-06-10 23:30:17 -07:00
mbecker20
3df8163131 improve procedure toml 2024-06-10 23:14:04 -07:00
mbecker20
33a16a9bd2 need 2 \n 2024-06-10 22:36:17 -07:00
mbecker20
215e7d1bdc update toml_pretty 2024-06-10 22:11:40 -07:00
mbecker20
25e0905c0c fix deserializers 2024-06-10 21:31:17 -07:00
mbecker20
1c07ccea85 bump toml for multiline string 2024-06-10 19:26:01 -07:00
mbecker20
405ec1b8cc bump toml_pretty for fix 2024-06-10 18:58:33 -07:00
mbecker20
4f212bd06f update toml_pretty with skip empty strings 2024-06-10 18:43:53 -07:00
mbecker20
074f4ea2db fix toml 2024-06-10 18:07:05 -07:00
mbecker20
c9abccaf02 build use string serialized version 2024-06-10 17:59:03 -07:00
mbecker20
6428fa6de2 1.7.1 2024-06-10 17:37:22 -07:00
mbecker20
883f54431d custom to toml serializer for api 2024-06-10 17:34:56 -07:00
mbecker20
28dc030e2b custom Vec<EnvVar>, Vec<Conversion> deserializers to support config them as string 2024-06-10 14:39:51 -07:00
mbecker20
145d933e63 pt-2 2024-06-10 01:47:46 -07:00
mbecker20
9772ca1a1c add Resource Sync system user 2024-06-10 01:46:26 -07:00
mbecker20
4059b69201 core auto refreshes all syncs every 5 min 2024-06-09 23:49:02 -07:00
mbecker20
8e175ea5a1 add pending sync alert variant 2024-06-09 23:23:40 -07:00
mbecker20
d931b8b4e7 fix deployment when image_type None 2024-06-09 23:15:52 -07:00
mbecker20
0982800ad2 update client to 1.7.0 2024-06-09 22:47:49 -07:00
mbecker20
4382ad0b3b migrate 1.6 to 1.7 2024-06-09 22:46:21 -07:00
mbecker20
e7891f7870 update docs for ghcr 2024-06-09 21:56:01 -07:00
mbecker20
6bada46841 add export variables / user groups 2024-06-09 21:32:53 -07:00
mbecker20
eae6cbd228 label the image 2024-06-09 20:55:09 -07:00
mbecker20
a0ee6180b2 finish 1.7.0 2024-06-09 19:45:46 -07:00
mbecker20
3ce3de8768 configure registry 2024-06-09 19:34:49 -07:00
mbecker20
6c46993b61 New Monitor logo cr. George Weston 2024-06-09 18:38:58 -07:00
mbecker20
fbd9d14aaa change handler logging 2024-06-09 15:11:18 -07:00
mbecker20
1011ec60ab rename to ghcr 2024-06-09 14:55:26 -07:00
mbecker20
48e17a7c87 update config example 2024-06-09 03:43:26 -07:00
mbecker20
a94baded55 1.7.0 2024-06-09 03:06:17 -07:00
mbecker20
e97c0873cf get types 2024-06-09 03:05:07 -07:00
mbecker20
43a0b76811 small 2024-06-09 03:04:05 -07:00
mbecker20
2d2577e5ee ghcr 2024-06-09 02:46:57 -07:00
mbecker20
202ac77de3 from on the new types 2024-06-09 02:18:40 -07:00
mbecker20
568c963419 core / periphery support ghcr 2024-06-09 02:01:51 -07:00
mbecker20
5c3294241d add 1.6 build schema for 1.7 migration 2024-06-08 15:35:31 -07:00
mbecker20
648a04be88 add sleep execution for procedure 2024-06-08 14:51:19 -07:00
mbecker20
1b5822f649 custom version deserializer. support string versions 2024-06-08 14:23:26 -07:00
mbecker20
c41a008603 fix variable update 2024-06-08 05:22:32 -07:00
mbecker20
603243b0eb need partial default on alerter enabled 2024-06-08 04:56:52 -07:00
mbecker20
d09ab36696 any sync error shows up in log 2024-06-08 04:34:09 -07:00
mbecker20
ad168c87f7 use approp dialog menus 2024-06-08 04:12:55 -07:00
mbecker20
914f4c6197 seems to work 2024-06-08 03:35:22 -07:00
mbecker20
c73d918e18 no unnecessary user group sync 2024-06-08 02:56:53 -07:00
mbecker20
9d116f56cb sort lists by name 2024-06-08 02:21:32 -07:00
mbecker20
8a8dede5db resource sync state 2024-06-08 02:12:04 -07:00
mbecker20
d2cecf316c add pending update alert 2024-06-08 01:39:18 -07:00
mbecker20
cad1ee123e improve the sync 2024-06-08 00:50:30 -07:00
mbecker20
6aa801b705 lock sync dir access 2024-06-07 22:02:58 -07:00
mbecker20
078ba59002 ensure sync directory exist 2024-06-07 21:02:28 -07:00
mbecker20
5eacb7191b fix the fe errors with most boilerplate 2024-06-07 20:00:01 -07:00
mbecker20
45eafd10b9 finish sync backend? 2024-06-07 19:00:03 -07:00
mbecker20
42c486807c implement resource sync cli 2024-06-07 17:11:58 -07:00
mbecker20
8c31fcff02 backend for resource sync 2024-06-07 03:52:07 -07:00
mbecker20
49f1d40ce8 implement RunSync 2024-06-07 02:43:45 -07:00
mbecker20
bf85e886bd abit more 2024-06-06 03:02:25 -07:00
mbecker20
eda0b233ca implement sync 2024-06-06 02:38:47 -07:00
mbecker20
5efb227851 update ts client response 2024-06-05 23:46:05 -07:00
mbecker20
1a45fffe75 move some libraries out 2024-06-05 23:44:06 -07:00
mbecker20
fa72f2e5ef update execute task handling 2024-06-05 22:50:03 -07:00
mbecker20
c9152db300 unneeded import 2024-06-05 22:42:28 -07:00
mbecker20
25fcca7246 should fix procedure 2024-06-05 22:42:04 -07:00
mbecker20
ac449e38d5 init boilerplate 2024-06-05 17:29:59 -07:00
mbecker20
d6c66948ba skip update in execute task instrument 2024-06-05 16:16:21 -07:00
mbecker20
b6af790aef sort resources in selector 2024-06-05 15:39:47 -07:00
mbecker20
36a49210a0 fix filter by split 2024-06-05 15:16:30 -07:00
mbecker20
d2b2aa0550 its not really distributed 2024-06-05 02:41:08 -07:00
mbecker20
7f4c883416 hide / show to toggle alert area 2024-06-05 01:59:16 -07:00
mbecker20
676fb3c732 common filtering method 2024-06-04 05:50:36 -07:00
mbecker20
17da4bd2fa procedure setconfig on update 2024-06-04 04:46:40 -07:00
mbecker20
b44e57bbf6 improve update date format 2024-06-04 04:36:55 -07:00
mbecker20
6aa5b5faae use same search alg for all command inputs 2024-06-04 04:21:24 -07:00
mbecker20
9565855477 fix fe error 2024-06-04 02:23:53 -07:00
mbecker20
3504c083b4 export all resources toml filter resources by tag 2024-06-04 02:11:07 -07:00
mbecker20
5fdaa9a808 make overflowing tags wrap 2024-06-04 01:45:20 -07:00
mbecker20
ec35b14077 further improve BuildState if cancel. 2024-06-04 01:22:41 -07:00
mbecker20
158f3ad89b fmt update operation with regex everywhere 2024-06-03 16:36:53 -07:00
mbecker20
7257ecbaed version link to docs 2024-06-03 16:12:09 -07:00
mbecker20
a2a94f23ee publish client + cli 1.6.2 2024-06-03 15:03:43 -07:00
mbecker20
03cad5b23b partial config from files first merged onto full config default before diff with remote 2024-06-03 15:01:14 -07:00
mbecker20
2460b5edf7 update log internal scroll 2024-06-03 03:10:35 -07:00
mbecker20
83fdb180aa avoid deployment state change alert involving status Unknown 2024-06-03 03:01:23 -07:00
mbecker20
9b1d32ebdf base64 encode aws user data before send 2024-06-03 00:44:45 -07:00
mbecker20
ea4ae7651c readme 2024-06-02 21:19:46 -07:00
mbecker20
5f6fabd925 1.6.1 pass creds as args cli 2024-06-02 21:17:23 -07:00
mbecker20
38d9495ab1 fix cli readme 2024-06-02 21:03:37 -07:00
mbecker20
46ad5b3953 1.6.0 Improve procedure with multiple stages 2024-06-02 21:00:06 -07:00
mbecker20
e60b817208 improve saving 2024-06-02 20:57:44 -07:00
mbecker20
0ce5248292 improve changes made visibility 2024-06-02 20:54:41 -07:00
mbecker20
050c29f4a3 show when changes made 2024-06-02 20:30:06 -07:00
mbecker20
8580728933 alert config working 2024-06-02 20:15:49 -07:00
mbecker20
3c5868d111 alert refactor 2024-06-02 19:15:13 -07:00
mbecker20
40e1b1ff88 improve build cancel disabled logic to prevent redundant cancels 2024-06-02 17:50:33 -07:00
mbecker20
99641b2e39 improve update toast title 2024-06-02 17:42:15 -07:00
mbecker20
f0e7757eb4 improve validate CancelBuild 2024-06-02 17:39:13 -07:00
mbecker20
f7283b1fc1 update alerter to support type filtering. 2024-06-02 17:16:35 -07:00
mbecker20
771af21eae migrator support migrate permissions 2024-06-02 15:35:36 -07:00
mbecker20
0dda791ec7 fix build not try add_update 2024-06-02 04:56:37 -07:00
mbecker20
bc76b1c07e only push recently viewed if exists 2024-06-02 04:43:01 -07:00
mbecker20
8b537924fb correct execution target passed by name 2024-06-02 04:38:32 -07:00
mbecker20
f5ce3570e4 execute api returns update immediately 2024-06-02 04:14:51 -07:00
mbecker20
f1e51d275c move stages up / down 2024-06-02 02:39:48 -07:00
mbecker20
eaa10d96b5 finish new procedure config 2024-06-02 02:06:01 -07:00
mbecker20
037364068d refresh caches on create / update 2024-06-02 01:07:53 -07:00
mbecker20
2441bc8cbf fix lint 2024-06-02 00:44:47 -07:00
mbecker20
92ac003910 backend for updated procedure schema 2024-06-02 00:36:39 -07:00
mbecker20
693f24763f new deployment / repo from server page 2024-06-01 20:33:38 -07:00
mbecker20
d9d44ceee1 update readme with manual 2024-06-01 19:58:20 -07:00
mbecker20
30ab8ed17b update cli with execute features. 2024-06-01 19:47:46 -07:00
mbecker20
2bf2be54cc bookworm base 2024-05-29 13:09:52 -07:00
mbecker20
b7ea680958 alert table rename Target to Resource 2024-05-29 01:48:32 -07:00
mbecker20
2a56d09f89 improve periphery start command docs 2024-05-29 01:40:45 -07:00
mbecker20
2612f742b2 remove trailing whitespace in error log 2024-05-29 00:22:04 -07:00
mbecker20
29bdf5c71d pretty clone fail message 2024-05-29 00:20:58 -07:00
mbecker20
873d9ea433 builder instance failed reachability adds log that instance will be terminated 2024-05-29 00:16:55 -07:00
mbecker20
717f3afa89 fix build config when not builder 2024-05-28 14:33:52 -07:00
mbecker20
ec31d1af01 fix 2024-05-28 05:35:35 -07:00
mbecker20
9e5c52b9a4 update client version 2024-05-28 05:32:19 -07:00
mbecker20
762873d5be implement ui_write_disabled 2024-05-28 05:30:37 -07:00
mbecker20
67fa512975 core version in topbar 2024-05-28 05:06:39 -07:00
mbecker20
502dd3a4a8 update client version 2024-05-28 04:58:31 -07:00
mbecker20
8c22bdd473 1.5.4 add variable support to monitor cli 2024-05-28 04:57:41 -07:00
mbecker20
ba6801da11 cli much faster 2024-05-28 04:02:34 -07:00
mbecker20
309802093c 1.5.3 add ListFull methods 2024-05-28 03:42:35 -07:00
mbecker20
3d1e3009b3 add ListFull methods 2024-05-28 03:25:50 -07:00
mbecker20
fdc23c2650 improve docs 2024-05-28 03:06:42 -07:00
mbecker20
072ee6834e update dashboard screenshots 2024-05-28 01:44:11 -07:00
mbecker20
bedbf76349 red 2024-05-28 01:40:44 -07:00
mbecker20
e26d1211cc Cloud 2024-05-28 01:38:35 -07:00
mbecker20
0342ee4dd9 Hetzner 2024-05-28 01:38:08 -07:00
mbecker20
669d5c81b4 read. me. 2024-05-28 01:35:49 -07:00
mbecker20
defbab5955 monitor cli 2024-05-28 01:30:57 -07:00
mbecker20
9405295e4a update changelog 2024-05-28 01:23:44 -07:00
mbecker20
28c077ed4c remove hetzner automount 2024-05-26 02:30:56 -07:00
mbecker20
61406c1b00 add back wait for volume 2024-05-26 02:10:34 -07:00
mbecker20
64638730b9 waiting for volumes makes no difference. dont seem to automount 2024-05-26 01:34:14 -07:00
mbecker20
c0942c6d1d remove execute fail message 2024-05-26 00:42:33 -07:00
mbecker20
ff964cd0fe fix updates 2024-05-26 00:38:59 -07:00
mbecker20
d56f632a11 improve unknown server styling 2024-05-26 00:21:43 -07:00
mbecker20
a7f22b6cfb instrument ServerTemplate write api 2024-05-26 00:09:38 -07:00
mbecker20
6053fc1d99 hetzner poll volumes for ready before launch server 2024-05-26 00:04:31 -07:00
mbecker20
573ff1863c 1.5.2 2024-05-25 23:34:14 -07:00
mbecker20
dd4a9b0cb5 add defaults to Hetzner volume 2024-05-25 23:32:16 -07:00
mbecker20
d243cf2da7 all resources search case insensitive 2024-05-25 23:07:54 -07:00
mbecker20
4e06e788ae PushRecentlyViewed and SetLastSeenUpdate should be debug instrument 2024-05-25 21:13:25 -07:00
mbecker20
a0f71f8af5 table search not case sensitive 2024-05-25 21:07:53 -07:00
mbecker20
fcbb75d0c0 update some tracing stuff 2024-05-25 20:46:56 -07:00
mbecker20
0a8419bb13 update client to 1.5.1 2024-05-25 20:38:35 -07:00
mbecker20
40fe76cf27 1.5.1 move routes to /user 2024-05-25 20:36:52 -07:00
mbecker20
5594d3c1d9 add server to repo table / info 2024-05-25 19:45:23 -07:00
mbecker20
b12aeb259f clean up 2024-05-25 18:38:17 -07:00
mbecker20
b121b0ac07 fix remove from recently viewed 2024-05-25 18:34:05 -07:00
mbecker20
a9f1d91b1b update deps 2024-05-25 18:06:49 -07:00
mbecker20
abf48d0243 1.5.0 doc update and add other_data 2024-05-25 17:57:31 -07:00
mbecker20
447690d8bf remove TextUpdateMenu update on enter 2024-05-25 16:58:34 -07:00
mbecker20
a70c0a2697 increase hetzner polling time 2024-05-25 15:49:10 -07:00
mbecker20
0758e6ff81 get ip after instance is running 2024-05-25 15:29:11 -07:00
mbecker20
ea0e059ee1 hetzner response optional parsing 2024-05-25 14:26:26 -07:00
mbecker20
c9e0524794 add repos tab to server page 2024-05-25 14:05:02 -07:00
mbecker20
81ceaf1eae move automount 2024-05-25 13:50:59 -07:00
mbecker20
37c07ff748 only actually add automount if volumes nonempty 2024-05-25 13:43:06 -07:00
mbecker20
62e8943ebe improve client error message 2024-05-25 13:40:05 -07:00
mbecker20
99ccffbc38 configure hetzner template working 2024-05-25 03:09:31 -07:00
mbecker20
84dc29b77f update ts types 2024-05-25 01:37:05 -07:00
mbecker20
81bab4aa50 clean up some unused stuff 2024-05-25 01:35:28 -07:00
mbecker20
9fa2fd0f58 implement hetzner server launch 2024-05-25 01:16:54 -07:00
mbecker20
3745967690 ensure env overrides fully applied 2024-05-24 16:40:15 -07:00
mbecker20
e8cfc13342 implement transparent mode 2024-05-23 02:19:42 -07:00
mbecker20
ec47bb11ee start on hetzner 2024-05-23 01:47:02 -07:00
mbecker20
d008c95853 no destructive update toasts 2024-05-22 04:27:33 -07:00
mbecker20
4986d70506 remove unneeded build toasts 2024-05-22 04:23:53 -07:00
mbecker20
1372a5fb39 3s 2024-05-22 04:22:06 -07:00
mbecker20
f54224650f fix update 2024-05-22 04:20:48 -07:00
mbecker20
2eee1459e7 fix ws 2024-05-22 04:02:37 -07:00
mbecker20
5a3fd891c4 huh 2024-05-22 04:00:43 -07:00
mbecker20
ba3f288c2d improve toasts 2024-05-22 03:55:26 -07:00
mbecker20
6d5fd7dc5d improve update table 2024-05-22 03:25:49 -07:00
mbecker20
df3fd7c4e9 update builder ami 2024-05-22 03:15:30 -07:00
mbecker20
395f032ee2 fix update details name 2024-05-22 03:15:23 -07:00
mbecker20
de2bd800c4 update client to 1.4.1 2024-05-22 02:01:40 -07:00
mbecker20
75352a91ff 1.4.1 fix cli - shouldn't send update if no change 2024-05-22 01:59:25 -07:00
mbecker20
9b12270d04 update client published version 2024-05-22 00:29:34 -07:00
mbecker20
7fc378798f fix cli toml patch 2024-05-22 00:28:21 -07:00
mbecker20
3db2c93303 expand resource table status column 2024-05-22 00:25:54 -07:00
mbecker20
150d6562bf improve table with better row sizing 2024-05-21 01:31:46 -07:00
mbecker20
c3b549b051 resource to_list_item should be infallible 2024-05-21 00:46:13 -07:00
mbecker20
931f2bd92d log auto bottom and increase height 2024-05-20 22:56:32 -07:00
mbecker20
6b6324d79c theme toggle indicator 2024-05-20 22:52:25 -07:00
mbecker20
2c65d924f9 dashboard recents 2 cols unless 2xl 2024-05-20 22:46:21 -07:00
mbecker20
dd1fecf190 a little smaller 2024-05-20 22:36:01 -07:00
mbecker20
aa96a37db4 decrease sidebar vertical size 2024-05-20 21:40:14 -07:00
mbecker20
ec9e9638f5 more gap between resources on dashboard 2024-05-20 03:55:58 -07:00
mbecker20
e33019cab8 add prune images to prune loop 2024-05-20 03:55:01 -07:00
mbecker20
951cb82e0c supports 2024-05-20 03:34:59 -07:00
mbecker20
0643f96053 log 2024-05-20 03:34:07 -07:00
mbecker20
56d835f2d2 not 2024-05-20 03:33:02 -07:00
mbecker20
d8fb8f8649 fix 2024-05-20 03:31:52 -07:00
mbecker20
7197d628e5 fit 2024-05-20 03:30:34 -07:00
mbecker20
96083178dd add changelog to readme 2024-05-20 03:29:45 -07:00
mbecker20
9d1b705ab1 improve login page 2024-05-20 01:36:14 -07:00
mbecker20
2582bc9ba3 simplify resource components, improve update details loading behavior 2024-05-19 15:27:02 -07:00
mbecker20
44f34b9b40 show core secrets on variables page 2024-05-19 02:56:32 -07:00
mbecker20
bbb18d8280 standard search bar location 2024-05-19 02:38:24 -07:00
mbecker20
da95b7d074 add lizard logo / favicon 2024-05-19 02:20:43 -07:00
mbecker20
6b25309aed variables working 2024-05-19 01:51:09 -07:00
mbecker20
f8e371af31 periphery needs axum feature 2024-05-18 16:00:20 -07:00
mbecker20
a0f5ae8c7f add core side interpolation to update 2024-05-18 15:58:23 -07:00
mbecker20
2f371af288 1.4.0 2024-05-18 15:37:30 -07:00
mbecker20
76840efddc implement core variables / secrets on the backend 2024-05-18 15:34:58 -07:00
mbecker20
8f01e441a4 increase per page limit of alerts and updates 2024-05-18 02:42:03 -07:00
mbecker20
41a6e0a65a improve dashboard 2024-05-18 02:34:47 -07:00
mbecker20
40027f7430 improve alert table 2024-05-18 01:07:23 -07:00
mbecker20
a2c69aba87 improve deployment build version selector 2024-05-18 00:49:57 -07:00
mbecker20
a5d3fbedc6 remove build versions pagination 2024-05-18 00:12:08 -07:00
mbecker20
b311b11785 label recents 2024-05-18 00:05:44 -07:00
mbecker20
7a0b29b387 move mongo / dockers deps to client features 2024-05-17 23:48:05 -07:00
mbecker20
d3a87fdb5f update snippet 2024-05-17 22:40:23 -07:00
mbecker20
9b7ab6d98a update resolver and remove async-trait 2024-05-17 22:38:06 -07:00
mbecker20
c302e28d86 improve omnibar search suggestion 2024-05-17 02:13:53 -07:00
mbecker20
33be989e3a dashboard2 2024-05-17 02:07:48 -07:00
mbecker20
c9d65300c9 work on dashboard 2 2024-05-17 01:35:34 -07:00
mbecker20
e96b676366 add build / repo / procedure state stuff to summary 2024-05-17 01:10:48 -07:00
mbecker20
0bff4a5e51 10 recents per resource type 2024-05-16 00:02:09 -07:00
mbecker20
9b12334922 update ts types and fix stuff 2024-05-15 19:17:54 -07:00
mbecker20
68659630fc split recently viewed by resource type 2024-05-15 19:15:03 -07:00
mbecker20
8b33647620 add prop 2024-05-15 02:32:21 -07:00
mbecker20
871aba62d5 move description to right 2024-05-15 02:21:06 -07:00
mbecker20
c649094a8a intro 2024-05-14 23:15:57 -07:00
mbecker20
e3c11db89e update deps 2024-05-14 02:10:10 -07:00
mbecker20
c43293109d add repo and homepage 2024-05-14 01:55:13 -07:00
mbecker20
d3e4f9f638 1.3.0 clean publish 2024-05-14 01:43:01 -07:00
mbecker20
eecf583b0e add dotenv for docsite convenience 2024-05-14 01:37:50 -07:00
mbecker20
a518806d8b docs 2024-05-14 01:33:38 -07:00
mbecker20
8a4611c380 fix link 2024-05-14 01:20:30 -07:00
mbecker20
d679fbe72f client readme 2024-05-14 01:17:55 -07:00
mbecker20
1cf02bc4b4 docs 2024-05-14 01:00:04 -07:00
mbecker20
bf703eef35 frontend support procedure state 2024-05-13 22:28:45 -07:00
mbecker20
985058afb0 ts types 2024-05-13 15:30:37 -07:00
mbecker20
eac1145958 add ProcedureState 2024-05-13 15:30:23 -07:00
mbecker20
1b408d92d9 theme 2024-05-13 01:32:24 -07:00
mbecker20
ee95c2c76b update screenshots 2024-05-13 01:31:49 -07:00
mbecker20
e83124ebff build ConfigOrDeployments 2024-05-13 01:11:12 -07:00
mbecker20
e912ae050a deploy working 2024-05-13 01:01:06 -07:00
mbecker20
99253d6182 build extra arg suggestions 2024-05-13 00:14:12 -07:00
mbecker20
ef91577ac5 check disabled 2024-05-12 23:48:53 -07:00
mbecker20
b97f9b30b3 aws config looking good 2024-05-12 23:44:06 -07:00
mbecker20
7cb11dbc5d improve builder config 2024-05-12 23:33:33 -07:00
mbecker20
6d815629fc everything looking good 2024-05-12 23:12:54 -07:00
mbecker20
f8021d8541 repo config good 2024-05-12 23:01:25 -07:00
mbecker20
1f444fdbc2 builds looking good 2024-05-12 22:58:03 -07:00
mbecker20
af76dd1be4 more config 2024-05-12 22:38:57 -07:00
mbecker20
5cb91c6f8d improve config 2024-05-12 21:35:47 -07:00
mbecker20
de5502aec7 dark less blue 2024-05-12 19:39:23 -07:00
mbecker20
ef4ae4c5f2 state aware repo actions 2024-05-12 16:03:56 -07:00
mbecker20
866eb6d81b looking good 2024-05-12 15:55:49 -07:00
mbecker20
58d6c16eea update sheet from top 2024-05-12 15:37:56 -07:00
mbecker20
ccbf13ae84 get username return avatar 2024-05-12 13:44:32 -07:00
mbecker20
21f6acd3d7 add other resources to omnisearch 2024-05-12 13:11:52 -07:00
mbecker20
dce59d1383 center search 2024-05-12 13:07:10 -07:00
mbecker20
2fb544c3b0 sidebar gap 2024-05-12 13:02:47 -07:00
mbecker20
1ba288be79 split sidebar home 2024-05-12 12:59:45 -07:00
mbecker20
1ff21d2986 more cli logging 2024-05-12 12:21:38 -07:00
mbecker20
79cc2c1bb7 improve cli logging 2024-05-12 12:15:25 -07:00
mbecker20
17b2e6660c rename alert_logger -> alerter. cli colors 2024-05-12 11:59:49 -07:00
mbecker20
4ef095fe55 improve server stats tables 2024-05-12 04:23:38 -07:00
mbecker20
fb0a7352e3 filter out docker overlay disks 2024-05-12 03:49:29 -07:00
mbecker20
9a087e5975 clean up resource / tree view 2024-05-12 03:26:45 -07:00
mbecker20
814e47031d dope search 2024-05-12 02:55:13 -07:00
mbecker20
1304565e40 fix log to bottom 2024-05-12 01:54:31 -07:00
mbecker20
85616d0669 command loops 2024-05-12 01:46:22 -07:00
mbecker20
feff4647e7 add display for repo latest hash / message 2024-05-12 01:41:41 -07:00
mbecker20
549e15bfe2 add more image tags 2024-05-12 01:24:43 -07:00
mbecker20
a08baf8432 actually mount GetLatestCommit 2024-05-12 01:18:33 -07:00
mbecker20
99c47ce133 just mess with stuff 2024-05-12 01:09:58 -07:00
mbecker20
26a4691c0b server page - swap version / stats link 2024-05-11 23:55:14 -07:00
mbecker20
addb35aa69 give placeholders 2024-05-11 23:38:48 -07:00
mbecker20
16bf78f9ad enable commit config 2024-05-11 23:11:41 -07:00
mbecker20
3ed4f91d82 periphery cli args name to periphery 2024-05-11 23:06:04 -07:00
mbecker20
653fb894a2 delete file 2024-05-11 22:44:13 -07:00
mbecker20
0f9798a5f2 1.2.0 2024-05-11 22:34:35 -07:00
mbecker20
6776a20ec5 build repo state cache 2024-05-11 22:30:33 -07:00
mbecker20
fb21e8586f clone commit hash 2024-05-11 21:38:29 -07:00
mbecker20
8b2c4d604a periphery get repo status 2024-05-11 18:42:21 -07:00
mbecker20
c0b010d5ce rename DockerContainerState -> DeploymentState 2024-05-11 18:29:40 -07:00
mbecker20
97de34a088 align state / status distinction 2024-05-11 18:22:24 -07:00
mbecker20
6c0b76a270 add big icons 2024-05-11 18:04:30 -07:00
mbecker20
eebd44ab9b common resource filter 2024-05-11 17:42:38 -07:00
mbecker20
783250c5ce sort build / repo status update query by most recent 2024-05-11 16:12:11 -07:00
mbecker20
70ff93050f choose between config / log for deployment 2024-05-11 16:06:36 -07:00
mbecker20
1cc1813185 fix dev fe build 2024-05-11 14:50:05 -07:00
mbecker20
b4f9b87d06 update resources, cli 2024-05-11 14:48:26 -07:00
mbecker20
26b09a767e unlink 2024-05-11 13:44:51 -07:00
mbecker20
bba6c4d8b6 add status to build / repo 2024-05-10 22:59:20 -07:00
mbecker20
ea440235c4 configure enable / disable action on webhook receive 2024-05-10 21:54:41 -07:00
mbecker20
f9949bf988 readOnly webhook copy 2024-05-10 20:40:54 -07:00
mbecker20
b978db012e slight style 2024-05-10 20:37:56 -07:00
mbecker20
bc2fbdd657 remove rws and code faster reconnect 2024-05-10 18:04:35 -07:00
mbecker20
a5571bcf4d 2xl show 8 recents 2024-05-10 17:30:20 -07:00
mbecker20
683a528dd9 mx-8 2024-05-10 17:23:28 -07:00
mbecker20
4a283b6052 remove container on topbar 2024-05-10 17:21:11 -07:00
mbecker20
37224ee1ad dont req for username of built in users 2024-05-10 17:14:55 -07:00
mbecker20
5e7445b10d add a god damn sidebar 2024-05-10 17:07:51 -07:00
mbecker20
1829a7da34 update diff looking good 2024-05-10 03:59:51 -07:00
mbecker20
4a1a653bd9 rust 1.78.0 2024-05-10 03:40:30 -07:00
mbecker20
840c1a87d0 try putting the html in th diff directly 2024-05-10 03:19:18 -07:00
mbecker20
c90368e2af add periphery build repo 2024-05-10 02:29:47 -07:00
mbecker20
1f9d74fadb fix default creds path 2024-05-10 02:28:06 -07:00
mbecker20
5b261058fe add updates to dashboard 2024-05-10 02:04:23 -07:00
mbecker20
cf6632ba02 partial_derive2 0.4.2 better diffs 2024-05-10 01:50:08 -07:00
mbecker20
c7124bd63c colored diff 2024-05-10 01:03:23 -07:00
mbecker20
ba19e45607 improve config styling 2024-05-10 00:50:29 -07:00
mbecker20
20282ffcbb update frontend deps 2024-05-10 00:10:40 -07:00
mbecker20
cb8ad90838 cli readme and default creds path 2024-05-09 22:29:40 -07:00
mbecker20
caac3fdcc4 v{version} 2024-05-09 15:51:54 -07:00
mbecker20
44da282060 improve server version 2024-05-09 15:47:58 -07:00
mbecker20
a2e27b09fc show version 2024-05-09 15:39:30 -07:00
mbecker20
c1b1f397fd comments 2024-05-09 15:36:15 -07:00
mbecker20
1d0f239594 delete binary before recurl 2024-05-09 15:34:03 -07:00
mbecker20
549bc78799 log version 2024-05-09 15:28:32 -07:00
mbecker20
9eb9b57e36 install load latest version automatically if its not passed 2024-05-09 15:25:57 -07:00
mbecker20
c38849961e add Repos to search 2024-05-09 14:08:01 -07:00
mbecker20
3acfa0c4b1 more client docs 2024-05-09 03:25:37 -07:00
mbecker20
62b083c3be docsite 2024-05-09 03:12:53 -07:00
mbecker20
ee6fc4c590 include description / tags in diff 2024-05-09 01:53:29 -07:00
mbecker20
4fa550b3d3 monrun diffing 2024-05-09 01:33:03 -07:00
mbecker20
1c44ae98fb monrun / api better diffs 2024-05-09 00:34:21 -07:00
mbecker20
148223f995 config diff update log 2024-05-08 00:34:41 -07:00
mbecker20
4678f83542 fix clippy lints 2024-05-08 00:21:37 -07:00
mbecker20
340bac078f unneeded in cargo toml 2024-05-07 23:31:31 -07:00
mbecker20
3f8e75bbd8 update to partial derive 0.4.0 2024-05-07 23:29:57 -07:00
mbecker20
c4278d14a9 only show actions if more than 0 Actions on resource 2024-05-07 04:08:12 -07:00
mbecker20
4909106c3c turn off build icon spinning. still turns green 2024-05-07 03:45:00 -07:00
mbecker20
0f04a2848a clean up action states 2024-05-07 03:43:20 -07:00
mbecker20
ba073bf8b2 minimize update diffs 2024-05-07 03:40:30 -07:00
mbecker20
640809aa6b use trait for resource crud 2024-05-07 02:54:01 -07:00
mbecker20
943bb4c61a handle update config / description / tags separately 2024-05-06 03:10:57 -07:00
mbecker20
ef43cb7920 clean up deployments toml 2024-05-06 02:39:04 -07:00
mbecker20
1abe634679 export individual resources 2024-05-06 02:24:35 -07:00
mbecker20
18ab18b6f6 fix for server template sync 2024-05-06 01:55:14 -07:00
mbecker20
aad971a599 sync working 2024-05-06 01:44:20 -07:00
mbecker20
3d9da97d7b monrun better check for empty 2024-05-06 01:36:22 -07:00
mbecker20
2d0a09f760 monrun 2024-05-06 01:12:14 -07:00
mbecker20
568c317d6d create repo does not clone 2024-05-06 00:47:27 -07:00
mbecker20
f0e1f253f4 add queue to listeners using locks 2024-05-06 00:33:19 -07:00
mbecker20
7634b9fa1d example 2024-05-06 00:21:03 -07:00
mbecker20
2e1795eba6 webhook 2024-05-06 00:16:22 -07:00
mbecker20
0f6b3d6e9b add github webhook copiers 2024-05-06 00:00:47 -07:00
mbecker20
a70afcc461 fix build client 2024-05-05 23:20:45 -07:00
mbecker20
d78fc2b282 improve onpull 2024-05-05 23:17:27 -07:00
mbecker20
249610afce frontend onpull 2024-05-05 22:55:37 -07:00
mbecker20
b93b639b40 add extra args style 2024-05-05 22:40:27 -07:00
mbecker20
13ceb55fe8 fix primary dropdown templates 2024-05-05 20:36:56 -07:00
mbecker20
7ebd38d350 slight style 2024-05-05 20:32:19 -07:00
mbecker20
c12c74005e finish configure server templates 2024-05-05 19:08:47 -07:00
mbecker20
c44dc1c6f6 clean server template schema 2024-05-05 18:39:10 -07:00
mbecker20
cd2b9ec4ed validate name not taken on LaunchServer 2024-05-05 16:35:55 -07:00
mbecker20
dc62442c00 LaunchServer revise 2024-05-05 14:16:19 -07:00
mbecker20
81a90f7ae4 launch server 2024-05-05 14:08:49 -07:00
mbecker20
af092e0d88 include server template in monrun export / import 2024-05-05 13:18:50 -07:00
mbecker20
0e70015fd1 add server template page 2024-05-05 13:02:25 -07:00
mbecker20
a214deef86 fix GetServerTemplatesSummary 2024-05-05 13:00:46 -07:00
mbecker20
0efb2966b0 set build version to 0.0.0 on Copy 2024-05-04 23:54:09 -07:00
mbecker20
1e8422a506 custom placeholder for AccountSelector 2024-05-04 23:10:46 -07:00
mbecker20
7bb386e6d0 Account Selector can set back to None 2024-05-04 18:57:42 -07:00
mbecker20
c464ca5612 resources clear their config after update completes 2024-05-04 18:11:23 -07:00
mbecker20
742630fdee create new resources navs to created resource 2024-05-04 17:54:21 -07:00
mbecker20
154fd899fe fix all the links 2024-05-04 17:49:06 -07:00
mbecker20
aa25f9d4c9 servers init disabled 2024-05-04 17:33:05 -07:00
mbecker20
eeed94c8fd === PERIPHERY INSTALLER === 2024-05-04 16:45:39 -07:00
mbecker20
b2f771199d default repos path 2024-05-04 16:39:20 -07:00
mbecker20
4bceb97a66 comment out fields that are just being set to the default 2024-05-04 16:25:55 -07:00
mbecker20
8734e6fc4c clean up periphery example config 2024-05-04 16:22:01 -07:00
mbecker20
effd9315cb curl silent 2024-05-04 16:12:54 -07:00
mbecker20
d45f60f604 fix system case 2024-05-04 16:10:30 -07:00
mbecker20
1c7582d0a2 note about root user 2024-05-04 16:05:43 -07:00
mbecker20
566e1090da clearer 2024-05-04 15:46:36 -07:00
mbecker20
a9b3054de3 readme 2024-05-04 15:45:34 -07:00
mbecker20
73d179b355 docs for scripts 2024-05-04 15:45:24 -07:00
mbecker20
97486242a0 setup scripts 2024-05-04 15:43:09 -07:00
mbecker20
a0ddff0618 dashboard 2024-05-04 15:37:19 -07:00
mbecker20
f91e95bf63 readme 2024-05-04 15:35:57 -07:00
mbecker20
431ef82f3e add screenshots 2024-05-04 15:27:02 -07:00
mbecker20
a07bc9fbca make alert details text larger 2024-05-04 15:18:47 -07:00
mbecker20
1e13cd9261 add setup-periphery.py and readme 2024-05-04 15:09:50 -07:00
mbecker20
ae0b59179d version 1.0.0 2024-05-04 15:05:41 -07:00
mbecker20
a9ef12d687 finish api docs 2024-05-04 01:22:52 -07:00
mbecker20
b73c13172a docs 2024-05-04 00:54:54 -07:00
mbecker20
c9b41e7449 server and builder GetAccount includes account in core config 2024-05-03 04:59:16 -07:00
mbecker20
fccc15df4a add docsite (docusaurus v3) 2024-05-03 04:48:18 -07:00
Maxwell Becker
4f7eeacebc Merge pull request #5 from mbecker20/next
merge v1 into main
2024-05-03 04:10:16 -07:00
mbecker20
9b172a833a 1.1.0 2024-05-03 04:08:54 -07:00
mbecker20
fd6fc925d6 core can pass creds to peripheries 2024-05-03 04:06:37 -07:00
mbecker20
6318670b6c implement core pass tokens 2024-05-03 03:50:50 -07:00
mbecker20
7fa5fd83d2 ServerTemplate table 2024-05-03 03:12:49 -07:00
mbecker20
e286acb123 kind of implement server template frontend 2024-05-03 02:51:17 -07:00
mbecker20
ace6dd5a9a implement ServerTemplate api 2024-05-03 02:23:38 -07:00
mbecker20
de70ab5eda rework builder / server 2024-05-03 00:25:46 -07:00
mbecker20
56ab104ac5 frontend implement suggest for extra args 2024-05-02 00:42:43 -07:00
mbecker20
56da5c04f2 add refetch to a lot of important status 2024-05-02 00:14:17 -07:00
mbecker20
3d73f325fe ListCommonExtraArgs 2024-05-02 00:02:59 -07:00
mbecker20
7b778631f3 remove container user param 2024-05-01 23:41:33 -07:00
mbecker20
de4c657868 remove updated_at from ResourceToml 2024-05-01 23:22:19 -07:00
mbecker20
b6df4a08b1 improve export toml endpoint 2024-05-01 23:04:40 -07:00
mbecker20
923c1d6cf6 improve export styling 2024-05-01 22:23:16 -07:00
mbecker20
ff892afa16 add example toml for the configs 2024-04-28 23:24:26 -07:00
mbecker20
3e9e0a9be4 finish docs for config 2024-04-28 18:26:51 -07:00
mbecker20
dc796900cd document core config 2024-04-28 15:41:01 -07:00
mbecker20
b8afd43d07 move config entities to client for docs 2024-04-28 15:16:57 -07:00
mbecker20
ba52ce79fc description trigger placeholder 2024-04-28 14:57:57 -07:00
mbecker20
9936f9f357 fix process args placeholder 2024-04-28 14:56:40 -07:00
mbecker20
677e1a3830 move to sync mutex for action state 2024-04-28 14:44:48 -07:00
mbecker20
d46ff30540 doc 2024-04-28 13:47:47 -07:00
mbecker20
98453580c0 fmt. add procedure webhook 2024-04-28 04:57:33 -07:00
mbecker20
5683929bbe monrun diffing 2024-04-28 04:33:27 -07:00
mbecker20
ca368340d5 diff resources in sync 2024-04-28 03:56:08 -07:00
mbecker20
e66d2fac95 implement all the diffs 2024-04-28 02:32:18 -07:00
mbecker20
1c74d388dc update partial_derive2 0.3.0 2024-04-28 02:26:09 -07:00
mbecker20
6e698fec05 prep for diffs 2024-04-28 02:22:57 -07:00
mbecker20
d06b2abea4 doccos 2024-04-28 01:04:36 -07:00
mbecker20
e7a4a364c2 rustdoc resource write api 2024-04-27 21:26:19 -07:00
mbecker20
31bcbf36dd finish documenting read api 2024-04-27 21:01:53 -07:00
mbecker20
afbf28668b fix fe 2024-04-27 20:17:27 -07:00
mbecker20
7427a6d6d1 trim down deployment api 2024-04-27 20:16:43 -07:00
mbecker20
fab4e8e534 improve the resource busy locks 2024-04-27 15:09:30 -07:00
mbecker20
0fc6e89ffe docs 2024-04-27 12:20:58 -07:00
mbecker20
9509b23dc1 doc more api 2024-04-27 02:20:09 -07:00
mbecker20
b5ea6d43f3 export to toml work. log scroll button better 2024-04-26 04:52:52 -07:00
mbecker20
92ef0addd5 ResourceToml 2024-04-26 03:09:17 -07:00
mbecker20
a61d50b049 update first 2024-04-26 02:27:29 -07:00
mbecker20
9021f1beea just install ca-certs 2024-04-26 02:23:41 -07:00
mbecker20
f3ecf30b3d install ssl in dockerfile 2024-04-26 02:19:18 -07:00
mbecker20
ea40073fcc mount 2024-04-26 01:55:04 -07:00
mbecker20
ed7e0a38d8 implement toml export 2024-04-26 01:51:45 -07:00
mbecker20
0d8d41f85d use debian:bullseye-slim for core 2024-04-26 01:51:36 -07:00
mbecker20
45bf8ae6b0 pb-12 2024-04-25 23:05:11 -07:00
mbecker20
db9c2d924c standard update text menu 2024-04-23 22:24:43 -07:00
mbecker20
62b34ab9a5 user group toml 2024-04-23 21:26:18 -07:00
mbecker20
85157ddfb9 block non-existent resource hook 2024-04-23 21:26:09 -07:00
mbecker20
de746096ab user group sync 2024-04-22 23:38:38 -07:00
mbecker20
f272612e74 clean up logger init 2024-04-22 16:57:36 -07:00
mbecker20
d85bd25ed4 fix fe type 2024-04-22 01:47:54 -07:00
mbecker20
f39c786a64 mount SetUsersInUserGroup 2024-04-22 01:42:39 -07:00
mbecker20
26cae20505 Set user group users whole vec 2024-04-22 01:40:21 -07:00
mbecker20
58a8ebee0c push user to user group with $addToSet for idempotence 2024-04-22 01:23:31 -07:00
mbecker20
5c8adf031c better user group user modify and update permission api 2024-04-22 01:13:59 -07:00
mbecker20
9b168d35d6 works on docs 2024-04-21 23:56:30 -07:00
mbecker20
c4ece715f9 update frontend dockerfile 2024-04-21 23:03:20 -07:00
mbecker20
8e02de909f no use client 2024-04-21 23:02:13 -07:00
mbecker20
176b12f18c server stats chart 2024-04-21 19:22:22 -07:00
mbecker20
43514acc92 manage user groups 2024-04-21 17:37:31 -07:00
mbecker20
4894568651 ListApiKeysForServiceUser 2024-04-21 16:22:15 -07:00
mbecker20
f16d079e66 fix fe build 2024-04-21 15:08:33 -07:00
mbecker20
4ed71812a4 rename GetUsers to ListUsers for consistency 2024-04-21 15:01:58 -07:00
mbecker20
577b93e2dc able to query for UserGroup permissions 2024-04-21 14:36:32 -07:00
mbecker20
c676b5168a fmt. fix ws tracing span 2024-04-21 04:28:32 -07:00
mbecker20
4674af2f1b update list item fix username 2024-04-21 04:00:01 -07:00
mbecker20
2429ab050d fix query resource ids for non admin 2024-04-21 03:49:52 -07:00
mbecker20
8698c0f5be non admins able to get procedure updates 2024-04-21 03:22:03 -07:00
mbecker20
c583a5dc62 deployment actions show build if attached 2024-04-21 02:45:04 -07:00
mbecker20
528b74d156 only show actions when user has write or execute 2024-04-21 02:27:15 -07:00
mbecker20
ebedebd761 return write permission for all admin 2024-04-21 02:23:59 -07:00
mbecker20
ee2953b2e9 fix frontend errors 2024-04-21 02:21:49 -07:00
mbecker20
7a2044b395 pass through disabled 2024-04-21 02:07:31 -07:00
mbecker20
3123d021d9 GetPermissionLevel 2024-04-21 01:59:07 -07:00
mbecker20
c46b2cf59d add ListPermissions route 2024-04-21 01:46:01 -07:00
mbecker20
79b4bae40a fix query for user permission on resource 2024-04-21 00:59:30 -07:00
mbecker20
cb9e0ae252 sortable resource table 2024-04-21 00:18:47 -07:00
mbecker20
a7a7d0552b fix permissioning endpoint 2024-04-20 17:37:22 -07:00
mbecker20
01ea85e627 login flow nice 2024-04-20 15:29:12 -07:00
mbecker20
e81be79cb4 update aws deps 2024-04-20 13:06:24 -07:00
mbecker20
fca324480f update serror 2024-04-20 13:04:53 -07:00
mbecker20
762317e08a update deps 2024-04-18 14:57:25 -07:00
mbecker20
6d2f43e40a node 20.12 2024-04-18 04:49:23 -07:00
mbecker20
3823df8362 work on sortable tables 2024-04-18 04:42:32 -07:00
mbecker20
17398fc932 fix tags filter 2024-04-18 03:17:57 -07:00
mbecker20
9b0e96a59a design some improvements 2024-04-18 02:15:33 -07:00
mbecker20
c5fdb914ff refactor RequiredResourceComponents 2024-04-18 01:31:41 -07:00
mbecker20
65ae5d9465 fix ListUserPermissions 2024-04-17 22:58:10 -07:00
mbecker20
c4548f9e7e fix existing frontend for user permissions 2024-04-17 22:56:18 -07:00
mbecker20
86cfb2ebc7 update ts client 2024-04-17 18:25:46 -07:00
mbecker20
3bad049682 implement user group api 2024-04-17 18:19:52 -07:00
mbecker20
5352afee06 api check UserGroup for access 2024-04-17 02:23:41 -07:00
mbecker20
a83dedbcd0 work on docs 2024-04-17 01:30:35 -07:00
mbecker20
beee584cc2 rework / simplify procedure model 2024-04-15 23:17:55 -07:00
mbecker20
ccc7852576 prog on doc entity 2024-04-15 04:31:32 -07:00
mbecker20
0bdb3ddfea alert details use GetAlert 2024-04-15 03:56:54 -07:00
mbecker20
0f2b23bb6c prog on alert pages 2024-04-15 03:29:24 -07:00
mbecker20
a5537a0758 refactor server info 2024-04-14 02:27:47 -07:00
mbecker20
a64723269f update frontend with new stats api 2024-04-14 01:35:38 -07:00
mbecker20
2b93aa3dca protect system info 2024-04-14 01:27:48 -07:00
mbecker20
992054f943 rework / simplify server system stats 2024-04-14 01:22:06 -07:00
mbecker20
784aa754f7 disable stats monitoring on enabled server 2024-04-13 23:53:42 -07:00
mbecker20
5124d3aae8 log search work 2024-04-12 05:29:31 -07:00
mbecker20
0fb746bc03 fix log finally? maybe? 2024-04-12 05:00:17 -07:00
mbecker20
a2d301bfbc SearchLog allow for search Or / And the multiple terms 2024-04-12 04:36:10 -07:00
mbecker20
4523f3e112 writes shouldn't be debug 2024-04-12 03:54:31 -07:00
mbecker20
012cea8fce debug ExchangeForJwt 2024-04-12 03:48:48 -07:00
mbecker20
48a62232a7 skip user on the task instrumentation 2024-04-12 03:29:03 -07:00
mbecker20
46ee857c22 improve instrumentation / serror 2024-04-12 03:07:09 -07:00
mbecker20
7bbd22b4b2 mount the route periphery 2024-04-12 02:05:38 -07:00
mbecker20
862c5b7a7c further improve SearchLog 2024-04-12 01:05:23 -07:00
mbecker20
5eb7e27732 implement aws terminate instance with retry 2024-04-11 23:32:32 -07:00
mbecker20
1231193c89 search log 2024-04-11 22:45:36 -07:00
mbecker20
719938f442 tail docker logs search for some length safety 2024-04-11 22:33:01 -07:00
mbecker20
7b5f2ea69b update styling for light mode 2024-04-11 22:21:47 -07:00
mbecker20
01709deced stop all deployments adds update before finalize / update update 2024-04-11 22:18:27 -07:00
mbecker20
23c87bfaa4 frontend add labels config 2024-04-11 18:36:31 -07:00
mbecker20
0ef8e1861b ContainerSummary contains labels 2024-04-11 17:52:22 -07:00
mbecker20
10b278a141 implement docker tag support 2024-04-11 17:47:58 -07:00
mbecker20
72c884f9ec log elapsed at the end 2024-04-11 17:16:37 -07:00
mbecker20
ecdae0e8df add startup config log 2024-04-11 17:10:51 -07:00
mbecker20
2e67b16ba2 move logs outside of tokio spawn 2024-04-11 04:41:31 -07:00
mbecker20
18d31235d4 PeripheryRequest is debug 2024-04-11 03:55:06 -07:00
mbecker20
4f8c150c0b add deployment table to build page 2024-04-11 03:41:54 -07:00
mbecker20
f26c937747 finish instrumenting 2024-04-11 03:37:57 -07:00
mbecker20
34176c336e config examples 2024-04-11 02:49:10 -07:00
mbecker20
635f3678ef enable otlp exporting to url 2024-04-11 02:45:56 -07:00
mbecker20
12bd8cc265 skip user capture in tracing span 2024-04-11 01:22:02 -07:00
mbecker20
64316a8a61 update merge config files 2024-04-11 00:51:49 -07:00
mbecker20
b85c3b25f7 fix auth log spam 2024-04-11 00:43:01 -07:00
mbecker20
144344bcfc full env core configuration 2024-04-11 00:15:05 -07:00
mbecker20
7736ba8999 core and periphery both env overrides 2024-04-10 23:08:28 -07:00
mbecker20
90c7bd56bf version before repo 2024-04-10 22:53:10 -07:00
mbecker20
714edd70fb lame 2024-04-10 22:52:12 -07:00
mbecker20
9c31813a16 fix logger with global subscriber 2024-04-10 22:48:52 -07:00
mbecker20
d6b76134a3 update config examples 2024-04-10 22:24:40 -07:00
mbecker20
b7692e39c8 add log config to core and periphery config 2024-04-10 22:14:54 -07:00
mbecker20
60b5179b3e add json and loki logging options 2024-04-10 22:10:24 -07:00
mbecker20
46a1c86cb6 implement logger with loki support 2024-04-10 21:34:13 -07:00
mbecker20
393363b33e instrument most of core 2024-04-10 20:15:11 -07:00
mbecker20
e762363d96 add tracing to core client websocket subscription 2024-04-10 19:00:07 -07:00
mbecker20
5d2082b478 add tracing spans to periphery 2024-04-10 18:23:35 -07:00
mbecker20
ad09fd81b4 improve logging 2024-04-10 05:57:16 -07:00
mbecker20
54fdbf9fd0 one line logging 2024-04-10 04:13:41 -07:00
mbecker20
8c3de939da core serves the frontend 2024-04-10 01:39:26 -07:00
mbecker20
2f3a3f8f23 log on one line 2024-04-10 00:56:15 -07:00
mbecker20
daf13d693d improve tags config 2024-04-10 00:43:52 -07:00
mbecker20
bbd5384589 improve dashboard 2024-04-10 00:09:52 -07:00
mbecker20
fb72bbf81e fix short date formatting 2024-04-09 23:56:47 -07:00
mbecker20
29edd6b9b5 add set title hook to pages 2024-04-09 23:53:30 -07:00
mbecker20
215b35575d improve dark mode styling 2024-04-09 23:36:38 -07:00
mbecker20
6394275570 improve serverunreachable alerting behavior 2024-04-08 15:53:44 -07:00
mbecker20
eaca72991b alert logger 2024-04-08 05:47:39 -07:00
mbecker20
634e895469 fix default PORT 2024-04-08 02:19:25 -07:00
mbecker20
77c8033d22 only show redeploy on build if image type if Build 2024-04-08 01:43:26 -07:00
mbecker20
7508ae21b8 websocket show real connected 2024-04-08 01:13:56 -07:00
mbecker20
fd90a62af1 fix auto redeploy service user 2024-04-07 22:50:36 -07:00
mbecker20
707dd682ed override api host with build arg 2024-04-07 22:29:45 -07:00
mbecker20
7396988032 add frontend PORT override 2024-04-07 22:09:06 -07:00
mbecker20
58960cdc6e style 2024-04-07 21:59:42 -07:00
mbecker20
31a42dce7e new resource name reset on close 2024-04-07 21:58:13 -07:00
mbecker20
9b248a8fa4 clean up NewResource 2024-04-07 21:54:48 -07:00
mbecker20
f099e5c6b7 config alerter enabled 2024-04-07 21:26:25 -07:00
mbecker20
5c093c81ab enable / disable alerters backend 2024-04-07 21:23:51 -07:00
mbecker20
e73146533a GetCoreInfo give github webhook base url 2024-04-07 21:02:10 -07:00
mbecker20
db1237184f repo pull add latest commit hash log 2024-04-07 20:56:33 -07:00
mbecker20
1e5ed1d29e fix repo list item extra info 2024-04-07 17:37:19 -07:00
mbecker20
d68ea2c28f add repo info 2024-04-07 17:23:34 -07:00
mbecker20
9700ab2cb6 show / set description on resources 2024-04-07 16:53:24 -07:00
mbecker20
d8321c873e rename confirm button 2024-04-07 16:23:55 -07:00
mbecker20
44784487a0 consolidate Resource components 2024-04-07 16:08:23 -07:00
mbecker20
76471fa694 go back to resource type list on resource delete 2024-04-07 15:32:22 -07:00
mbecker20
6ec7b4305c add alerts to other pages 2024-04-07 06:10:33 -07:00
mbecker20
896a344ac7 add more resource 2024-04-07 05:35:57 -07:00
mbecker20
34cefcdaf6 frontend build work 2024-04-07 04:55:03 -07:00
mbecker20
4c8eb68611 add reason (err) server unreachable in alert 2024-04-07 04:52:58 -07:00
mbecker20
50866659ea overide signals 2024-04-07 04:09:42 -07:00
mbecker20
8262cb858e look into deployment term signal 2024-04-07 03:40:02 -07:00
mbecker20
e710317cac deployment log show ansi colors 2024-04-07 02:54:49 -07:00
mbecker20
138cf781f3 improve procedure update logs 2024-04-07 02:31:14 -07:00
mbecker20
70d315b2d7 remove nav actually 2024-04-07 02:13:55 -07:00
mbecker20
bca8ca52da navigate on table row click 2024-04-07 02:11:32 -07:00
mbecker20
42c769ed56 fix only show version if non none 2024-04-07 02:00:17 -07:00
mbecker20
592af39550 service user detection 2024-04-07 01:51:22 -07:00
mbecker20
79620030bc alert / update logger resources 2024-04-07 00:54:04 -07:00
kv
294bc8712b fix recents on safari 2024-04-06 19:41:39 -07:00
mbecker20
f75d30d8ce resource updates page open details 2024-04-06 05:41:26 -07:00
mbecker20
de81e1e790 backend resolve server alerts before deleting server 2024-04-06 04:32:10 -07:00
mbecker20
450f5c45a1 server stats 2024-04-06 04:23:28 -07:00
mbecker20
b864b32cb2 basic server stats 2024-04-06 03:58:40 -07:00
mbecker20
0d13ac8f38 start server stats 2024-04-06 03:31:17 -07:00
mbecker20
edd517e21c add git clone and pull 2024-04-06 03:17:19 -07:00
mbecker20
32d356600a fix server actions 2024-04-06 03:07:09 -07:00
mbecker20
9e290278d0 add Danger zone (delete + copy) 2024-04-06 02:56:41 -07:00
mbecker20
4140dcb9dc manage build version 2024-04-06 02:07:17 -07:00
mbecker20
1ed3d31011 alert details 2024-04-06 01:54:20 -07:00
mbecker20
3915f921f1 builder config 2024-04-04 22:17:00 -07:00
mbecker20
050571196c alerter 2024-04-04 22:03:24 -07:00
mbecker20
3c1a129ac9 repo config 2024-04-04 02:27:00 -07:00
mbecker20
57a3561aa8 finish build config 2024-04-04 01:45:10 -07:00
mbecker20
ec37bfc0c6 all server config 2024-04-03 01:30:05 -07:00
mbecker20
82387e95da periphery secrets in dep env working 2024-04-03 01:24:13 -07:00
mbecker20
36b018151d periphery secrets visible 2024-04-03 00:59:47 -07:00
mbecker20
b4754b64d5 improve PruneAll using docker system prune -a -f 2024-04-03 00:28:58 -07:00
mbecker20
3a5530a186 fix frontend 2024-04-01 04:31:17 -07:00
mbecker20
ee22044b34 add service user replacement in procedure 2024-04-01 04:18:19 -07:00
mbecker20
8f5570128d improve topbar navigation 2024-04-01 02:28:51 -07:00
mbecker20
24d2e744a4 actions 2024-03-31 23:20:13 -07:00
mbecker20
bc9877133c smth 2024-03-31 22:41:40 -07:00
mbecker20
05886bed71 fix update details too thin 2024-03-31 13:33:31 -07:00
mbecker20
c882c12890 fix combo box 2024-03-31 13:23:05 -07:00
mbecker20
12647896c4 update shadcn 2024-03-31 12:56:22 -07:00
mbecker20
ebbec7d68c clean up resource links 2024-03-31 12:24:47 -07:00
mbecker20
cf054395bb improve info 2024-03-31 12:04:36 -07:00
mbecker20
8c04cf3db2 finish standard fmt 2024-03-31 10:32:45 -07:00
mbecker20
516690b260 standardize coloring 2024-03-31 10:29:21 -07:00
mbecker20
fc602054ba add state 2024-03-31 09:39:16 -07:00
mbecker20
e2244429ce add repo to build list item 2024-03-30 18:56:24 -07:00
mbecker20
b60ac6e1be favicon and port 2024-03-30 18:37:14 -07:00
mbecker20
6999fb8c2a fix google scopes 2024-03-30 18:22:31 -07:00
mbecker20
fd84b5920b cleanup 2024-03-30 18:05:04 -07:00
mbecker20
d02df02bbe manage users 2024-03-30 18:04:45 -07:00
mbecker20
1f143b814e ListUserPermissions 2024-03-30 15:45:07 -07:00
mbecker20
65700ad70e login looking good 2024-03-30 14:42:20 -07:00
mbecker20
6a16f1344a oauth login should work 2024-03-30 14:37:55 -07:00
mbecker20
3ecacc69a5 update useUser 2024-03-30 04:45:10 -07:00
mbecker20
9db2e858d2 frontend 2024-03-30 04:34:09 -07:00
mbecker20
f8cdf6bf45 update ts 2024-03-30 04:17:24 -07:00
mbecker20
1b97d85023 move GetUser to auth api. Doesn't require user enabled to call 2024-03-30 04:16:04 -07:00
mbecker20
74a5f429e9 resource query tags match on name or id, All or Any mode 2024-03-30 01:55:30 -07:00
mbecker20
7554055767 api sanitize update tags api with name->id replacement, tag auto create 2024-03-29 21:55:10 -07:00
mbecker20
f617f91d96 fix tags update validate 2024-03-29 21:05:53 -07:00
mbecker20
7b95e6b276 more logs use serror 2024-03-29 06:24:56 -07:00
mbecker20
1510ed2147 do stuff 2024-03-29 03:35:22 -07:00
mbecker20
f8237217b0 fix incorrect resource type 2024-03-29 03:19:05 -07:00
mbecker20
5f95fe16fd procedure working 2024-03-29 02:34:15 -07:00
mbecker20
cf8fd893fa getting there 2024-03-28 06:31:39 -07:00
mbecker20
1032629800 fix validate and prevent recurse a bit 2024-03-28 04:27:49 -07:00
mbecker20
12576711fa validate UpdateProcedure config 2024-03-28 04:18:37 -07:00
mbecker20
233c8085b5 work on procedure config 2024-03-28 04:13:53 -07:00
mbecker20
1b1650eba4 sync working clean 2024-03-27 06:35:57 -07:00
mbecker20
73c17c68c4 fix 2024-03-27 05:54:40 -07:00
mbecker20
3492eb3126 validate 2024-03-27 05:54:15 -07:00
mbecker20
6dcc4b30de validate attached resources by name / id 2024-03-27 05:33:34 -07:00
mbecker20
a5e15f9c5e fix the type change 2024-03-27 04:18:57 -07:00
mbecker20
851c1f450c update procedure 2024-03-27 03:59:51 -07:00
mbecker20
fd12921f6d refactor StateResource 2024-03-27 03:20:08 -07:00
mbecker20
1100e160de impl procedure monrun 2024-03-27 00:48:43 -07:00
mbecker20
bfcdf011c4 change default periphery port to 8120 2024-03-26 23:37:55 -07:00
mbecker20
48ff1ebefa monrun sync work 2024-03-26 04:42:24 -07:00
mbecker20
f1c9d05abc resources sync ready for test 2024-03-26 04:25:00 -07:00
mbecker20
73217c9178 change to tags. begin test monrun 2024-03-26 02:42:45 -07:00
mbecker20
7c5acb8c21 api for service users 2024-03-26 01:09:54 -07:00
mbecker20
10662ec679 use enum for UserConfig 2024-03-25 23:49:35 -07:00
mbecker20
65aba12dd0 permissions table 2024-03-25 23:19:08 -07:00
mbecker20
8d1410b181 more resource permissions to own table 2024-03-25 20:03:10 -07:00
mbecker20
d9fbccb644 guard local user empty username 2024-03-25 18:09:34 -07:00
mbecker20
c9334074f6 fix show updates 2024-03-25 16:34:15 -07:00
mbecker20
f4b77598c3 add updates page basic 2024-03-25 08:04:30 -07:00
mbecker20
478a046074 fix build cancel 2024-03-25 07:24:51 -07:00
mbecker20
c4cb1f9c66 update ts types 2024-03-25 06:54:32 -07:00
mbecker20
f84a3a9034 improve build cancel 2024-03-25 06:53:27 -07:00
mbecker20
fa389515cd dont show save if no change 2024-03-25 06:29:08 -07:00
mbecker20
06c480ef00 wrap table in section 2024-03-25 06:24:19 -07:00
mbecker20
71dd07ac28 add deployment table to server 2024-03-25 06:19:34 -07:00
mbecker20
30f15cf7e8 fix frontend build issue 2024-03-25 06:07:09 -07:00
mbecker20
cc99e91935 resource tags look better 2024-03-25 05:59:02 -07:00
mbecker20
6544c1887a implement redirect oauth 2024-03-25 05:11:01 -07:00
mbecker20
9636d4d347 build table tags 2024-03-25 04:49:21 -07:00
mbecker20
c7ce80c5e3 cancel build 2024-03-25 04:41:47 -07:00
mbecker20
5d0050535d all the dashboards 2024-03-25 04:00:37 -07:00
mbecker20
340d013bb3 deleting tags deletes across 2024-03-25 01:14:45 -07:00
mbecker20
f6c99c4c20 resource type dropdown handle keys and tree 2024-03-24 19:08:55 -07:00
mbecker20
7c6eecb0e9 manage api keys 2024-03-24 18:39:08 -07:00
mbecker20
59b09e580e api key page 2024-03-24 17:33:09 -07:00
mbecker20
4174eebffe TagsWithBadge 2024-03-24 16:07:52 -07:00
mbecker20
200aefac54 remove RequestUser 2024-03-24 15:44:48 -07:00
mbecker20
b200088093 monrun sync 2024-03-24 06:35:56 -07:00
mbecker20
8669ed8c73 need to add back async db 2024-03-24 05:14:52 -07:00
mbecker20
8a96724247 fix unused import 2024-03-24 04:40:30 -07:00
mbecker20
3a913fba44 fix frontend build 2024-03-24 04:35:18 -07:00
mbecker20
05a08d8640 run resource up 2024-03-24 04:26:45 -07:00
mbecker20
2bea16d003 parse resource files 2024-03-24 03:51:07 -07:00
mbecker20
475f0438f9 remove the await on tram_db call 2024-03-24 02:28:02 -07:00
mbecker20
c9720c15b9 extract periphery client lib from bin 2024-03-20 18:14:54 -07:00
mbecker20
0232fc1c2c update deps, remove db_client crate 2024-03-20 17:27:54 -07:00
mbecker20
9d5550bf5f default log level to logger::Level 2024-03-20 16:04:55 -07:00
mbecker20
873d9c2df6 remove more impl on State 2024-03-19 05:32:38 -07:00
mbecker20
5266b01e4c refactor core to use global &'statics 2024-03-19 05:18:42 -07:00
mbecker20
12315e90de clean up periphery 2024-03-19 02:12:04 -07:00
mbecker20
c32905cca4 support enable / disable components of sequence / parallel procedure 2024-01-21 15:43:46 -08:00
kv
71571e2625 update tags in deployment table 2024-01-17 00:45:19 -08:00
kv
25eb29946b update resource updates UI 2024-01-17 00:45:14 -08:00
kv
bb98d3209d clear input when opening tags dropdown 2024-01-17 00:12:58 -08:00
kv
73f624d6a1 invalidate list tags when creating a tag 2024-01-17 00:09:52 -08:00
kv
d1f8c130a1 fix tags behaviour 2024-01-17 00:01:24 -08:00
kv
93e16c4b7c tags ui 2024-01-16 11:40:52 -08:00
mbecker20
ee91d1d83e add use public id to build log 2024-01-16 03:33:35 -08:00
mbecker20
46d408056c add use_public_ip under builder config 2024-01-16 03:30:39 -08:00
mbecker20
d8ff64c1d9 difference between assign and use public ip 2024-01-16 03:25:57 -08:00
kv
453600a70c init tags 2024-01-16 02:43:59 -08:00
kv
597e466d84 update ts types 2024-01-16 02:43:56 -08:00
kv
65164bd8ef update ui components 2024-01-16 02:43:49 -08:00
kv
dbf589da91 update tags api 2024-01-16 02:43:41 -08:00
mbecker20
0857cbfa92 update serror and rustc build version 2024-01-16 02:20:40 -08:00
mbecker20
225edce50a update serror to 0.1.6 2024-01-15 12:09:15 -08:00
mbecker20
5893fdefaf remove line from log 2024-01-15 01:57:11 -08:00
mbecker20
2b4ebd6e10 implement tracing 2024-01-15 01:29:26 -08:00
mbecker20
f97e48e886 comment out recently viewed 2024-01-15 01:03:13 -08:00
mbecker20
b788bc6b6a comment out recently viewed 2024-01-15 01:00:15 -08:00
mbecker20
8c91b01dd9 fix up the tables 2024-01-15 00:56:40 -08:00
kv
30b6dac7dd improve Icon 2024-01-14 14:09:00 -08:00
kv
b6ec89e4aa merge kv changes 2024-01-14 13:57:37 -08:00
mbecker20
39a0fcb358 update deps and fix react query mutate isLoading -> isPending 2024-01-14 13:36:22 -08:00
mbecker20
9d353806a3 make some prog on the row data 2024-01-14 06:28:18 -08:00
mbecker20
d24f8d5c7c add deployment filtering by build id 2024-01-14 05:43:01 -08:00
mbecker20
cd7d51a16b update derive_default_builder 2024-01-14 04:30:10 -08:00
mbecker20
d438fbba49 builders for the resource queries 2024-01-14 03:19:12 -08:00
mbecker20
d73c71e18b fix typeshare error 2024-01-13 18:10:14 -08:00
kv
3f4d1983e1 improve some updates stuff 2024-01-13 17:23:18 -08:00
kv
1c87ac0546 various 2024-01-13 17:16:51 -08:00
mbecker20
a11aa5c751 add created_at to ListItem 2024-01-13 17:16:29 -08:00
kv
5cae0b99c7 i fixed everything 2024-01-13 14:25:16 -08:00
mbecker20
58d0b6b458 implement new list api 2024-01-13 14:05:27 -08:00
mbecker20
4815d225d7 AddFilters for all resource queries 2024-01-13 13:51:04 -08:00
kv
fd487350f5 update ws login message 2024-01-13 13:42:26 -08:00
kv
dafcf22d49 fix ts types 2024-01-13 13:40:17 -08:00
kv
78cefe19bd update ts client 2024-01-13 13:40:14 -08:00
mbecker20
bbca105077 implement ResourceQuery 2024-01-13 13:10:33 -08:00
mbecker20
e3e4278206 sanitize the api keys 2024-01-13 01:12:58 -08:00
mbecker20
e479e62cce implement ListApiKeys 2024-01-13 00:44:05 -08:00
mbecker20
8f73b08fbf update logger 2024-01-12 23:50:38 -08:00
mbecker20
5d02a8874f client ws again 2024-01-12 23:49:50 -08:00
mbecker20
2ec5789d71 ws login support api keys 2024-01-12 23:40:47 -08:00
mbecker20
1c7a159e40 periphery use AppError 2024-01-08 03:13:10 -08:00
mbecker20
81a4caf23c update auth 2024-01-08 03:07:27 -08:00
kv
9065b6034b child procedures 2024-01-07 18:55:10 -08:00
kv
49104922c1 fix procedure selectors 2024-01-07 18:49:46 -08:00
kv
409e064452 start permissions UI etc 2024-01-07 18:28:56 -08:00
mbecker20
61f7efaa85 inserted_id as_object_id first before to_string 2024-01-07 15:57:34 -08:00
mbecker20
f0baa7496f stop log if fail to record server stats 2024-01-07 15:23:53 -08:00
mbecker20
7ed115cddb CRUD for procedures 2024-01-07 14:54:57 -08:00
mbecker20
cb6dc29469 get procedure action state 2024-01-07 14:48:20 -08:00
mbecker20
44032e2a9e Execution type None 2024-01-07 14:44:09 -08:00
mbecker20
7390a9421d gen types 2024-01-07 14:19:14 -08:00
mbecker20
f1cbe6fba9 replace ProcedureConfigVariant 2024-01-07 14:18:54 -08:00
mbecker20
5925cf5cc9 fix runfile 2024-01-07 14:17:49 -08:00
mbecker20
219a914cb1 update ts client 2024-01-07 14:15:48 -08:00
kv
6b6d6337c8 fix action type 2024-01-07 14:09:34 -08:00
kv
c511ae09a1 add stashed frontend updates 2024-01-07 13:50:05 -08:00
mbecker20
e658cb3aa0 procedure execution api 2024-01-07 01:12:50 -08:00
mbecker20
9f4cf475b6 move types into client 2024-01-06 19:44:29 -08:00
mbecker20
af53cbebed update axum to 0.7 2024-01-06 18:56:52 -08:00
mbecker20
1895ebcf25 update to sysinfo 0.30 2024-01-06 17:26:44 -08:00
mbecker20
02a313d70b move back to /frontend 2024-01-06 15:20:29 -08:00
mbecker20
57ed905140 rename frontend-v2 to frontend 2024-01-06 15:16:24 -08:00
mbecker20
d46741c5ae strictly handle unknown instance type 2024-01-06 15:15:08 -08:00
mbecker20
ba3692e085 update to mungos 0.5 2024-01-06 14:24:03 -08:00
mbecker20
593cad65dd update runfile 2023-11-26 12:44:58 -05:00
mbecker20
8bd05144da update api name 2023-11-03 01:16:56 -04:00
mbecker20
899ebcd681 update default monitor api url 2023-11-03 00:52:28 -04:00
mbecker20
f4917762c0 migrator finished 2023-10-23 02:20:53 -04:00
mbecker20
88c35281bf reformat to 2 spaces for tab 2023-10-23 01:46:29 -04:00
mbecker20
7e1810a0a7 implement conversion from legacy -> next for build, deployment, server 2023-10-22 14:44:38 -04:00
mbecker20
7eb62de74b control type of alerts that are enabled 2023-10-15 02:50:23 -04:00
mbecker20
69bb7ef1fd stop all containers on server action 2023-10-09 21:57:11 -04:00
mbecker20
57f0404a02 option to disable send_alerts on various servers + deployments 2023-10-09 21:14:50 -04:00
mbecker20
d8dccda1c0 update GetAvailableAccounts method name 2023-10-09 08:40:01 -04:00
mbecker20
16f3c4190d frontend use correct call for GetDockerNetworks 2023-10-09 00:41:02 -04:00
mbecker20
823878e963 add Read, Write, Execute traits on requests 2023-10-08 16:00:58 -04:00
mbecker20
f4246e3e0a terminate aws instance if fail to connect 2023-09-30 20:28:33 +03:00
mbecker20
9a8730a832 improve start aws builder log 2023-09-30 16:33:22 +03:00
kv
31d01aef11 header updates and resource updates show status better 2023-09-16 15:00:50 -07:00
mbecker20
a61da39038 fix build auto increment 2023-09-16 16:12:48 -04:00
kv
7b9def2aeb more permissions stuff 2023-09-15 13:46:44 -07:00
mbecker20
92b8cc9f6b tests cargo toml 2023-09-15 03:09:11 -04:00
kv
d3d5f7a745 invalidate update details on new update msg with same id 2023-09-14 12:27:31 -07:00
mbecker20
be78e4c48a add get users route (admin only) 2023-09-14 04:19:44 -04:00
karamvir
be9937355b hide permissions when only admins 2023-09-12 22:28:10 -07:00
karamvir
f1d36f0f4a fix version selector etc 2023-09-12 16:22:29 -07:00
kv
2a3f223a2b init repo config 2023-09-12 14:24:38 -07:00
karamvir
21401c544d add resource permissions 2023-09-12 13:14:09 -07:00
karamvir
6ceb7404bb slight signup 2023-09-11 23:01:34 -07:00
karamvir
a60d2e7f0f fix for build 2023-09-11 16:37:08 -07:00
karamvir
4480eff46d fix resources page responsive 2023-09-11 09:45:35 -07:00
karamvir
b265bd68b3 responsive header hotfix 2023-09-11 09:44:52 -07:00
karamvir
9e47eb3470 update header dropdowns 2023-09-11 09:34:51 -07:00
karamvir
a050f419e4 cleanup 2023-09-11 00:36:50 -07:00
kv
d1373aa5b3 add resource icon to dropdown 2023-09-11 00:28:43 -07:00
kv
c53704849a fix resource page header 2023-09-11 00:04:39 -07:00
kv
e27a25b147 cleanup type imports 2023-09-10 23:41:47 -07:00
karamvir
b1a88a9c2d add row mode to resources page 2023-09-10 22:23:53 -07:00
karamvir
780fd0490e improve styles, add notif dot for unseen updates 2023-09-09 14:11:11 -07:00
karamvir
b3d0ee7080 add home option to omnibar 2023-09-09 13:27:57 -07:00
karamvir
6d2287c4e5 force scrollbar, add header updates etc 2023-09-09 13:22:13 -07:00
karamvir
8ae8c43c3c cleanup 2023-09-09 04:39:27 -07:00
karamvir
2a18321225 redesign header slight 2023-09-09 04:14:28 -07:00
karamvir
2f67e4de94 improve invalidations etc 2023-09-09 03:50:51 -07:00
karamvir
84c6af1ee9 more cleanup and refactor 2023-09-09 03:46:32 -07:00
karamvir
caae39b2de cleanup 2023-09-09 03:25:51 -07:00
karamvir
2af30856ba refactor resources etc 2023-09-09 03:24:55 -07:00
karamvir
a85e842911 plenty more configs etc 2023-09-09 03:16:37 -07:00
karamvir
2f9805e97b refactor alerter 2023-09-09 03:09:06 -07:00
karamvir
2c66b9ecc5 new resource, slight cleanup etc 2023-09-09 02:30:09 -07:00
karamvir
35e5f8928a make charts clickable 2023-09-09 01:48:02 -07:00
mbecker20
4c647db584 refetch list deployments 2023-09-09 04:29:53 -04:00
karamvir
e523ea0cc3 lockfile 2023-09-09 01:18:55 -07:00
karamvir
404343e5e7 fix copy 2023-09-09 01:00:46 -07:00
karamvir
27f5353ee6 init frontend v2 2023-09-09 00:47:46 -07:00
karamvir
848fd4d4c8 fix node version 2023-09-07 04:44:16 -07:00
karamvir
2f58bf2dd1 fix button intent 2023-09-07 04:33:13 -07:00
karamvir
dbe32056d0 update some components, maybe fix invalidations? 2023-09-07 04:27:56 -07:00
karamvir
c88e210136 init new shadcn cli 2023-09-06 20:36:18 -07:00
mbecker20
c002ba2f00 update deps 2023-09-05 03:44:01 -04:00
mbecker20
a8c076f2c9 fmt 2023-09-04 02:57:46 -04:00
mbecker20
99da264f31 update api host 2023-09-02 16:37:18 -04:00
mbecker20
6c862be9b9 fix update server 2023-08-29 23:56:10 -04:00
mbecker20
6c75d64b5d fix copy build / deployment to enforce to_monitor_name 2023-08-29 23:51:02 -04:00
mbecker20
bf59390f68 slow reconnect 2023-08-29 23:46:59 -04:00
karamvir
406cfaefef cleanup 2023-08-29 20:07:27 -07:00
karamvir
1a1919f38c fuck me 2023-08-29 20:04:28 -07:00
mbecker20
3d99c300a4 rename server 2023-08-29 22:50:55 -04:00
karamvir
1a63be7746 rename server, deployment 2023-08-29 19:31:58 -07:00
mbecker20
ae7e50396e fix config parse error 2023-08-29 22:30:33 -04:00
mbecker20
f7ffb41dea server name on DeploymentStateChange 2023-08-29 02:27:53 -04:00
mbecker20
1e6987a229 send message on connect 2023-08-29 02:09:00 -04:00
mbecker20
d0bc5caebf wrap env values in quotes 2023-08-29 02:06:48 -04:00
mbecker20
59c7481fd7 add update logger dockerfile 2023-08-29 01:59:05 -04:00
mbecker20
70a11de316 monitor client update ws subscription with reconnect 2023-08-29 01:57:42 -04:00
mbecker20
3da88135c2 prune state and alerts based on config 2023-08-28 02:33:27 -04:00
mbecker20
d2a706fd9d properly forward periphery trace into core error trace 2023-08-27 13:59:59 -04:00
mbecker20
8f57af0667 uniform alert ts 2023-08-27 02:40:27 -04:00
mbecker20
cb46029f35 dont call create many on empty alerts 2023-08-27 02:24:24 -04:00
mbecker20
e35c63ff5d alert logger needs 1.71.1 2023-08-27 02:21:16 -04:00
mbecker20
440d54f500 use rust 1.71.1 to build 2023-08-27 06:13:35 +00:00
mbecker20
f4a78ef397 workspace resolver 2 2023-08-27 06:02:37 +00:00
mbecker20
96831e5973 update list alert api to take mongo document query 2023-08-27 01:54:55 -04:00
mbecker20
c5f9cc104c create deployment status alerts on db 2023-08-27 01:45:53 -04:00
mbecker20
dc6c391798 implement serror for core and periphery errors 2023-08-27 01:36:19 -04:00
mbecker20
80e5879c43 stop requests to disabled periphery 2023-08-27 01:07:59 -04:00
mbecker20
d9864f31c1 GetCoreInfo api 2023-08-22 00:08:08 -04:00
mbecker20
75b0d25d3e fix include resolved 2023-08-21 03:41:29 -04:00
mbecker20
625de2e38e resolved shoudl be tru 2023-08-21 03:40:40 -04:00
mbecker20
0f7d64350f alert logger 7000. fix alert open when empty 2023-08-21 03:21:42 -04:00
mbecker20
63620f246b update startup log 2023-08-21 03:17:40 -04:00
mbecker20
f416d628ae alert logger 2023-08-21 02:55:13 -04:00
mbecker20
24bf642dfa fix ts 2023-08-21 02:42:33 -04:00
mbecker20
8a0321bf67 alerting should work 2023-08-21 02:40:52 -04:00
mbecker20
5e033e1f76 handle cpu and mem alerts 2023-08-20 22:41:49 -04:00
mbecker20
ec47fa42b8 ListAlerts api 2023-08-20 17:16:20 -04:00
mbecker20
080ecf9fd9 prog on monitoring 2023-08-20 04:29:55 -04:00
mbecker20
f0deda32dc prog on alerting 2023-08-20 03:55:30 -04:00
mbecker20
7b3298e9e3 update aws sdk 2023-08-18 18:06:58 +00:00
mbecker20
f26661459d format periphery request error 2023-08-18 18:04:53 +00:00
mbecker20
b4c0047d45 prog on alerting 2023-08-18 03:12:18 -04:00
karamvir
4e03cf9675 add refresh to logs 2023-08-17 21:03:33 -07:00
karamvir
33eb5fc020 reduce config button gap 2023-08-17 21:03:29 -07:00
karamvir
01d62a6a15 shrink omniar on mobile 2023-08-17 21:03:06 -07:00
karamvir
bae22e0104 update favicon 2023-08-17 21:02:40 -07:00
karamvir
33b49d46ce add standalone display mode 2023-08-17 20:38:48 -07:00
karamvir
c6beda1b8f add pwa icons 2023-08-17 20:38:08 -07:00
karamvir
8faba6ec5b init manifest 2023-08-17 19:47:41 -07:00
karamvir
e2824da846 update builder config 2023-08-17 19:00:12 -07:00
karamvir
b4dcadbf17 add docker accounts and github accounts to builder config 2023-08-17 18:18:30 -07:00
karamvir
e96e1ebb9c update configs 2023-08-17 17:38:49 -07:00
karamvir
b59158d314 split server warnigs by type 2023-08-17 17:24:19 -07:00
karamvir
82588a0f83 split configs into cards within each tab 2023-08-17 17:17:04 -07:00
karamvir
c6c91b6b11 add keys to auto config components 2023-08-17 16:59:29 -07:00
karamvir
a8918156f6 cleanup some styles etc 2023-08-17 16:52:06 -07:00
karamvir
df56123ceb upgrade server stats to shadcn progress 2023-08-17 16:34:11 -07:00
karamvir
76d331ef03 re-cast to any to ensure build 2023-08-17 15:38:02 -07:00
karamvir
71524b2073 cleanup 2023-08-17 15:34:51 -07:00
karamvir
f286efb174 ensure proper number handling by configinput 2023-08-17 15:34:44 -07:00
karamvir
feea9384c1 add security group ids 2023-08-17 15:31:02 -07:00
karamvir
7dfcc9a3b6 update to consts 2023-08-17 15:05:47 -07:00
karamvir
43efd8e60b remove any cast 2023-08-17 15:05:36 -07:00
karamvir
28fce80476 upgrade env component 2023-08-17 15:05:26 -07:00
karamvir
cf4c4664e1 rm console log 2023-08-17 15:05:19 -07:00
karamvir
9a8b2a308d integrate action state to deployment 2023-08-17 15:04:59 -07:00
karamvir
188a70de21 rm conditonal hook call, make consistent 2023-08-17 15:04:43 -07:00
karamvir
5fccd1064f improve types 2023-08-17 15:03:58 -07:00
karamvir
9f58943dfc improve types, remove any cast 2023-08-17 15:03:43 -07:00
karamvir
9111f8b8c7 provide loader for deployment actions 2023-08-17 15:03:28 -07:00
karamvir
89dba2dbc0 improve recently viewed to avoid calling conditionally 2023-08-17 15:03:12 -07:00
karamvir
0b79f125ed fix confirm button onclick behaviour 2023-08-17 10:41:07 -07:00
karamvir
59bc429ce5 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-17 10:36:05 -07:00
karamvir
46db31c695 upgrade environment to textarea 2023-08-17 10:36:02 -07:00
karamvir
c45a617a90 pass classname to config items 2023-08-17 10:34:43 -07:00
karamvir
2982ac051f cleanup main 2023-08-17 10:34:25 -07:00
karamvir
a6b1b385e5 always toast write and exec fails 2023-08-17 10:33:53 -07:00
karamvir
3f8c62c1a4 defer invalidation by 100ms 2023-08-17 10:33:08 -07:00
mbecker20
e31443b285 1.0.1 2023-08-17 01:36:11 -04:00
mbecker20
10beeab4b5 progress on alerting 2023-08-16 23:31:23 -04:00
karamvir
a5fdfc3b67 add log tails 2023-08-16 09:30:11 -07:00
karamvir
73d0e2589b add process args and extra args 2023-08-16 08:45:15 -07:00
karamvir
2c6d2a30d7 add volumes, fix double inputs, etc 2023-08-16 08:34:03 -07:00
karamvir
d0566bf336 add placeholders to doubl input, fix ports congi 2023-08-16 08:14:42 -07:00
karamvir
e93e00f4f7 fix restart and network 2023-08-16 07:55:44 -07:00
karamvir
720252f6ae add keys to tabs 2023-08-16 07:55:37 -07:00
karamvir
43ee9b05a3 fix restart mode selector values, and format names 2023-08-16 07:49:36 -07:00
karamvir
bcfb847412 rm old config stuff 2023-08-15 15:17:00 -07:00
karamvir
402a7a730d Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-15 14:26:20 -07:00
karamvir
5e975a6acd major cleanup and refactor 2023-08-15 14:26:15 -07:00
mbecker20
f1639126df work on alerting logic 2023-08-13 01:54:14 -04:00
karamvir
7aa048a512 refactor accounts selector 2023-08-12 16:39:36 -07:00
karamvir
76b958ed89 add accounts selectors for github and docker 2023-08-12 16:37:39 -07:00
karamvir
d0c8e0bb49 modify to generic account selector, and enable docker accounts 2023-08-12 16:16:25 -07:00
karamvir
7f1df2d24c add github account selector 2023-08-12 16:12:12 -07:00
mbecker20
7331fdc083 move some files around 2023-08-12 01:59:57 -04:00
karamvir
980d675572 cast to any to prevent awful recursive type 2023-08-11 18:08:53 -07:00
karamvir
749df9e554 add image type selector for depl config 2023-08-11 18:06:13 -07:00
karamvir
7bb97dadf4 move config etc 2023-08-11 01:38:56 -07:00
karamvir
a223357ef1 more cleanup 2023-08-11 01:36:34 -07:00
karamvir
9ef1e3cd9b cleanup build config 2023-08-11 01:28:54 -07:00
karamvir
1459b05189 init cleanup build config 2023-08-11 01:21:15 -07:00
karamvir
2c992a0d23 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-11 01:11:25 -07:00
karamvir
f6a712c17b generic resource selector, what a revelation 2023-08-11 01:11:23 -07:00
mbecker20
97c03d97ad implement get builder avaialabel accounts 2023-08-11 03:54:35 -04:00
mbecker20
b80589ea63 get available accounts 2023-08-11 03:49:35 -04:00
mbecker20
4fd3e606fd LaunchServer 2023-08-11 03:17:58 -04:00
karamvir
d1ae925454 give action buttons 150px 2023-08-10 23:36:01 -07:00
karamvir
3e260e4464 delete server 2023-08-10 23:35:21 -07:00
karamvir
b7d841be14 decent build config 2023-08-10 23:31:27 -07:00
karamvir
854ce07e8f add username to toast 2023-08-10 23:10:59 -07:00
mbecker20
5e572ce5ef clean up ws correctly 2023-08-11 02:09:47 -04:00
karamvir
bec8b3ce0f cleanup for build 2023-08-10 22:46:00 -07:00
karamvir
df302015a3 migrate deployment configs 2023-08-10 22:17:28 -07:00
karamvir
fd9c7848c2 fix defaults on variant configs 2023-08-10 21:23:49 -07:00
karamvir
3c6a2e2546 config.... again? 2023-08-10 21:13:55 -07:00
karamvir
af7229acb0 updates 2023-08-10 20:30:54 -07:00
karamvir
e1fbbe7c4e updatse 2023-08-10 18:14:41 -07:00
karamvir
cdc7827b7e config..... again? 2023-08-10 16:43:39 -07:00
mbecker20
f24005e02e generic list item 2023-08-10 02:27:36 -04:00
mbecker20
0ff9a7e50d load migrator config 2023-08-10 01:46:07 -04:00
mbecker20
3ec810ed82 start migrator 2023-08-10 01:37:14 -04:00
karamvir
93fc8f7452 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-09 21:36:42 -07:00
karamvir
172b3daeab init some manual configs 2023-08-09 21:36:39 -07:00
karamvir
1b3e3b19cf rm unnecessary console log 2023-08-09 17:09:54 -07:00
mbecker20
29b31ffa1e create / delete docker network on server api 2023-08-09 01:48:29 -04:00
karamvir
f1aca3df4d add icons to create resource dropdown 2023-08-08 03:01:29 -07:00
karamvir
180ac60d21 add keydown listner to omnibar 2023-08-08 02:56:15 -07:00
karamvir
b22621e3c4 cleanup 2023-08-08 02:39:53 -07:00
karamvir
db1d61f1e3 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-08 02:33:05 -07:00
karamvir
8e0c5b3792 update serach button 2023-08-08 02:32:58 -07:00
karamvir
1b30a8edd4 add theme toggle to header 2023-08-08 02:32:41 -07:00
mbecker20
fe1376a70c SetLastSeenUpdate 2023-08-08 03:52:50 -04:00
karamvir
2dbce8f695 update login page 2023-08-07 00:10:14 -07:00
karamvir
b1700f3d14 update login flow 2023-08-07 00:10:02 -07:00
karamvir
63e3e0b9d3 rm old useuser hooks 2023-08-07 00:09:54 -07:00
karamvir
cdd55ee61d disble builder config for the moment 2023-08-07 00:08:06 -07:00
karamvir
c72050d329 add new builder selection config 2023-08-06 22:23:12 -07:00
mbecker20
9a89b82f83 builder tyeps 2023-08-07 01:10:29 -04:00
mbecker20
37becd9327 make server a builder type 2023-08-07 01:06:20 -04:00
karamvir
d5f2ae7871 cleanup 2023-08-06 21:22:30 -07:00
karamvir
08b74b5304 fix security group ids override 2023-08-06 21:21:28 -07:00
karamvir
6183dbf653 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-06 21:19:46 -07:00
karamvir
939f3db719 always pass type for builder config 2023-08-06 21:19:44 -07:00
karamvir
d3e2ee2974 ensure confirm update closes on click 2023-08-06 21:19:27 -07:00
mbecker20
0b6d955672 client 2023-08-06 23:17:15 -04:00
mbecker20
6884453b0f GetAvailableAccounts / GetAvailableNetworks 2023-08-06 23:13:00 -04:00
mbecker20
0d37600cda send UpdateListItem over ws 2023-08-06 22:57:05 -04:00
mbecker20
c32211aa7a Merge branch 'feat/Resource' into next 2023-08-06 21:40:42 -04:00
mbecker20
e0776952ee use generic Resource struct 2023-08-06 21:40:29 -04:00
karamvir
d92e4a014a improve new resource dialog 2023-08-06 15:10:34 -07:00
karamvir
48fc51a89a add label to builder type selector 2023-08-06 14:47:37 -07:00
karamvir
0af29e146d fix styles 2023-08-06 14:46:03 -07:00
karamvir
4467c64edd builder config is mostly go 2023-08-06 14:44:18 -07:00
karamvir
1314d7744f init builder config 2023-08-06 14:29:26 -07:00
karamvir
9a984e7e41 fix builder configs 2023-08-06 13:52:56 -07:00
karamvir
89e4010566 builder config 2023-08-06 13:44:03 -07:00
karamvir
1ecfc56f19 init builder config 2023-08-06 13:32:38 -07:00
karamvir
579c6eaf77 add volumes config 2023-08-06 13:32:28 -07:00
karamvir
4cecfd1611 improve button text for clarity 2023-08-06 11:24:05 -07:00
karamvir
5a9899c63d add ports config override 2023-08-06 11:23:00 -07:00
karamvir
a1d3c46127 improve state mapping for deployment status icon 2023-08-06 11:19:09 -07:00
karamvir
902f3f7d00 hide logs when deployment not deployed 2023-08-06 10:56:10 -07:00
karamvir
8ef32c50c2 cleanup for build 2023-08-06 10:49:31 -07:00
karamvir
e0d0512777 add image config, confirm update, etc 2023-08-06 10:48:52 -07:00
karamvir
9c73ffd7da fix dom nesting err 2023-08-06 10:48:39 -07:00
karamvir
0e2ca7c0d9 cleanup, add keyed fragment 2023-08-06 10:48:29 -07:00
karamvir
2d8f5ef337 add confirm update dialog 2023-08-06 10:48:09 -07:00
karamvir
a02e9b8318 deployment image config override component 2023-08-06 01:37:26 -07:00
karamvir
4f844f673f init alerter config 2023-08-06 01:20:07 -07:00
karamvir
1a068ed82b add image selector stuff 2023-08-06 01:20:02 -07:00
karamvir
b8df4caf75 fix resource updates 2023-08-06 01:19:32 -07:00
karamvir
11da11c326 fix deployment action button show states 2023-08-05 23:22:56 -07:00
karamvir
3e41a5568f fix omnibar types 2023-08-05 23:14:39 -07:00
karamvir
3f73ee6cff cleanup 2023-08-05 23:14:15 -07:00
karamvir
866137e128 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-05 23:13:35 -07:00
karamvir
66b0ac7d51 add build args and extra args 2023-08-05 23:13:34 -07:00
mbecker20
7ac7edb437 to monitor name create 2023-08-06 01:54:52 -04:00
mbecker20
82ed74429c fix delete resource also remove from user recently viewed 2023-08-06 01:49:12 -04:00
karamvir
417dc7dac1 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-08-05 22:45:19 -07:00
karamvir
999ce044cb add ui for extra args override 2023-08-05 22:45:17 -07:00
mbecker20
8759508c86 update resource flatten set 2023-08-06 01:25:45 -04:00
karamvir
56a40edc66 add alerter and repo cards to recently viewed 2023-08-05 22:17:06 -07:00
karamvir
5021c77e53 ensure all resource cards are equal height 2023-08-05 22:16:34 -07:00
karamvir
3fbe859c28 fix types for resources 2023-08-05 22:14:41 -07:00
karamvir
560e94a232 update builder card 2023-08-05 22:14:29 -07:00
karamvir
d982ccd4d1 update alerter card 2023-08-05 22:14:21 -07:00
karamvir
45b38c23a5 add resource updates to build page 2023-08-05 22:06:40 -07:00
karamvir
442180014d fix missing fields in delpoyment card 2023-08-05 22:04:37 -07:00
karamvir
867a968b5b add proper server region to card 2023-08-05 22:00:51 -07:00
karamvir
334529ab67 improve build config 2023-08-05 21:58:24 -07:00
mbecker20
c66fc2d6de add fields to list resources 2023-08-04 20:43:57 -04:00
mbecker20
5eea95d118 update description api 2023-08-04 04:24:31 -04:00
karamvir
804a215190 server config 2023-08-04 00:29:22 -07:00
karamvir
da6dfc9bcc refactor new config component, apply to server 2023-08-04 00:20:11 -07:00
karamvir
0d4ec308d6 flex responsive for last 3 dashboard resource cards 2023-08-04 00:05:43 -07:00
karamvir
24437e7b10 new config who dis 2023-08-04 00:00:15 -07:00
karamvir
1abaff6625 cleanup for build 2023-08-03 21:57:21 -07:00
karamvir
0c1692866c add operator username to update card 2023-08-03 21:53:25 -07:00
karamvir
d28e9bb51c init new config layout 2023-08-03 21:51:40 -07:00
karamvir
760c5d521b refactor config 2023-08-03 21:48:37 -07:00
karamvir
6ce14ffc80 place servers first on dashboard 2023-08-03 17:22:51 -07:00
karamvir
cc54725011 add new repo, complete resource adding options 2023-08-03 17:04:54 -07:00
karamvir
c1a6f2b957 fix alerter and repo routing, alerter card link 2023-08-03 17:01:17 -07:00
karamvir
5d5578f89f add all resources to omnibar 2023-08-03 16:56:32 -07:00
karamvir
e7bb58397e cleanup for build 2023-08-03 16:19:02 -07:00
karamvir
04690f1949 add clickable escape context to confirm button 2023-08-03 16:18:28 -07:00
karamvir
4df449c724 add confirm button, update actions 2023-08-03 16:10:27 -07:00
karamvir
315be1b61d update config pages 2023-08-03 15:58:42 -07:00
karamvir
4c30fb09ab fix new resource buttons 2023-08-03 15:26:00 -07:00
karamvir
2a24d1d9b7 only display update operation 2023-08-03 15:18:19 -07:00
karamvir
fe771928ee cleanup message registrars 2023-08-03 15:16:04 -07:00
karamvir
768baccea7 refactor create resource button 2023-08-03 15:04:21 -07:00
karamvir
c5a6f06c35 cleanup for build 2023-08-03 14:57:32 -07:00
karamvir
e57a90ca01 improve dashboard, overview cards etc 2023-08-03 14:56:11 -07:00
karamvir
9d95750092 improve layout standardisation, dashboard 2023-08-03 10:50:52 -07:00
karamvir
3c5a218152 refactor add to recently viewed to own hook 2023-08-03 09:20:59 -07:00
karamvir
cf6148eca5 upgrade to new resource component 2023-08-03 00:25:03 -07:00
karamvir
df1945fb38 cleanup files and imports 2023-08-03 00:10:51 -07:00
karamvir
ed93efd1bd add script to regen client 2023-08-02 23:55:44 -07:00
karamvir
2d48073262 init alerters 2023-08-02 23:52:25 -07:00
karamvir
8bba27361f fix responsive 2023-08-02 23:41:31 -07:00
karamvir
de54794506 make server card info text sm 2023-08-02 23:38:29 -07:00
karamvir
0aee4d8466 refactor updates to use new apis 2023-08-02 23:35:42 -07:00
karamvir
1d8e0c7b1d add update username 2023-08-02 23:32:13 -07:00
karamvir
6fbec9fbd7 add event listeners in a useeffect 2023-08-02 23:28:04 -07:00
mbecker20
6d20457f0e GetUpdate 2023-08-03 02:09:34 -04:00
karamvir
73dc18fb78 fix recents hooks 2023-08-02 22:37:34 -07:00
mbecker20
440284663a fix update list nonsense 2023-08-03 01:16:46 -04:00
mbecker20
796793eefd get username and list updates list item 2023-08-03 01:13:59 -04:00
karamvir
260fd572bf improve recents, builder card 2023-08-02 22:09:24 -07:00
mbecker20
d6c03a51b1 add recently viewed max count 2023-08-03 00:54:21 -04:00
mbecker20
fe89c5835c push recently viewed 2023-08-03 00:48:54 -04:00
karamvir
ddcc35ec58 add resource updates to builder 2023-08-02 21:34:47 -07:00
karamvir
0f39497bee add res target type to resource udpates 2023-08-02 21:34:40 -07:00
karamvir
58f1346124 add new builder, builder page, etc 2023-08-02 21:31:38 -07:00
karamvir
997366655c add recently viewed 2023-08-02 21:05:47 -07:00
karamvir
de057fd08f update server card 2023-08-02 21:05:35 -07:00
karamvir
d15607048e reduce gap 2023-08-02 20:40:07 -07:00
karamvir
5460d19f80 hide tags 2023-08-02 20:39:59 -07:00
karamvir
ed4ae5efb8 hide tags 2023-08-02 20:30:15 -07:00
karamvir
530da2589d add 1 to month ffs 2023-08-02 20:29:00 -07:00
karamvir
05b2a16746 copy name when click it 2023-08-02 20:28:49 -07:00
mbecker20
35cbad2177 rename post_image to process_args 2023-08-02 17:05:16 +00:00
karamvir
f13ac340b1 cleanup files 2023-08-02 09:59:12 -07:00
karamvir
14dc5e40fe init refactor 2023-08-01 21:14:39 -07:00
karamvir
4820313158 fix tabs value for logs 2023-08-01 11:35:42 -07:00
karamvir
9d573932f5 update resource cards, deployment logs 2023-08-01 10:42:44 -07:00
karamvir
ab88fbc1bd init confirmation dialog 2023-08-01 08:43:34 -07:00
karamvir
1a806c209b cleanup for build 2023-07-31 21:59:55 -07:00
karamvir
f15540e860 add basic socket toast and invalidate 2023-07-31 21:50:04 -07:00
mbecker20
d1be56923f testss 2023-08-01 00:20:03 -04:00
karamvir
493044f4e4 deployments chart uses summary 2023-07-31 21:02:20 -07:00
mbecker20
d15ca72e07 update should sort by start_ts 2023-07-31 23:53:55 -04:00
karamvir
9a16634c2e add max height to dropdown 2023-07-31 20:50:13 -07:00
karamvir
997940b4ab add omnibar etc 2023-07-31 20:44:00 -07:00
karamvir
fee4585ca4 cleanup for build 2023-07-31 19:40:26 -07:00
karamvir
94c1df79ef improve depl config 2023-07-31 19:37:50 -07:00
karamvir
b4e6626feb init server stats page 2023-07-31 12:29:55 -07:00
mbecker20
b8ad77085c add the configs 2023-07-30 15:51:59 -04:00
mbecker20
750f1fc379 collection name Tag 2023-07-30 14:04:32 -04:00
mbecker20
8b93b5d273 rename system stats collection 2023-07-30 14:02:01 -04:00
mbecker20
2ffbb9d9fa work on monitoring 2023-07-30 13:52:31 -04:00
mbecker20
97180c2f04 server stats history api 2023-07-30 01:08:34 -04:00
mbecker20
a9b9ed2d99 resource to list item 2023-07-29 22:17:13 -04:00
mbecker20
097f6c3b64 new search method 2023-07-29 14:50:35 -04:00
mbecker20
a279982187 implement list resources with ListItem on Resource trait 2023-07-29 14:35:36 -04:00
mbecker20
26c692978a paginated list updates 2023-07-29 04:08:36 -04:00
mbecker20
482f8bb862 build monthly stats and build versions 2023-07-28 01:54:29 -04:00
kv
3ac7cc1a59 cleanup for build 2023-07-27 14:02:59 -07:00
kv
7c5aff44a9 resource update cards now clickable to reveal details 2023-07-27 14:02:32 -07:00
kv
a55d4e3c6f fix deployment update cards 2023-07-27 13:55:48 -07:00
kv
485b337c72 fix update sheet trigger, scrolling 2023-07-27 13:55:43 -07:00
kv
02f16168d3 cleanup for build 2023-07-27 13:42:32 -07:00
kv
bd42af9c3e update deployment page, more to come 2023-07-27 13:41:44 -07:00
karamvir
ce5a5a6e50 Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-07-27 12:55:40 -07:00
karamvir
bf893ca8b4 first draft of updates and config pages 2023-07-27 12:55:38 -07:00
mbecker20
20a450d1d4 alerter api ts. fmt 2023-07-27 03:03:03 -04:00
mbecker20
70d11c3f5d replace those helpers with Resource trait 2023-07-27 03:00:36 -04:00
mbecker20
dc7690f4cf Config 2023-07-26 03:36:27 -04:00
karamvir
603260df5d Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-07-25 22:29:05 -07:00
karamvir
51bdc6b221 cleanup unnecessary type cast 2023-07-25 22:29:04 -07:00
mbecker20
4f6a9545e9 builder and repo summary 2023-07-26 00:59:20 -04:00
mbecker20
14edb4f913 finalize create build update before add 2023-07-25 21:06:11 -04:00
mbecker20
ffe0459994 remove solid snippets 2023-07-25 19:56:19 -04:00
karamvir
5138ce2040 cleanup 2023-07-25 16:45:34 -07:00
karamvir
db85de0722 update cards, fix responsive layouts 2023-07-25 16:44:43 -07:00
karamvir
082a76bfb7 improve updates 2023-07-25 16:06:37 -07:00
karamvir
9a3b160e62 improve cards, add tags and accordion 2023-07-25 14:27:48 -07:00
karamvir
c8b312bcb1 update cards etc 2023-07-25 13:23:55 -07:00
karamvir
9cc3fa0897 improve updates 2023-07-25 12:24:36 -07:00
karamvir
49063cac26 cleanup 2023-07-25 11:58:18 -07:00
karamvir
59a06c84a1 consolidate page layouts, cleanup 2023-07-25 11:56:57 -07:00
karamvir
6f5a7b97b5 update use read to now take config as well 2023-07-25 10:23:58 -07:00
karamvir
0db97fd5d5 refactor useRead, improve dx alot 2023-07-25 02:39:15 -07:00
karamvir
b31f74f0c5 cleanup for build 2023-07-25 01:27:43 -07:00
karamvir
ffb3957d4f playing around with new resource page, etc 2023-07-25 01:20:28 -07:00
karamvir
3b46e9a6bb Merge branch 'next' of https://github.com/mbecker20/monitor into next 2023-07-25 00:42:38 -07:00
karamvir
b67cb3bd9e fix dockerfile 2023-07-25 00:42:36 -07:00
mbecker20
91a6485c20 add other status cases for summaries 2023-07-25 02:53:34 -04:00
mbecker20
da24634774 implement the summaries 2023-07-25 02:51:46 -04:00
karamvir
88198f7696 add summary stuff, cleanup vite build errors 2023-07-24 23:41:34 -07:00
mbecker20
2bdc7cd39a list updates for admin 2023-07-25 02:33:48 -04:00
karamvir
ec4fcbc553 add summaries 2023-07-24 23:20:14 -07:00
mbecker20
30820a8d81 add summary response types 2023-07-24 23:10:29 -04:00
mbecker20
1c439a0392 add summary and update api 2023-07-24 22:31:38 -04:00
mbecker20
d6fe3461f7 get resource summaries 2023-07-24 22:28:47 -04:00
mbecker20
18a211592d fix default url 2023-07-24 21:39:11 -04:00
karamvir
8396c04b4a ooooo thats a sexy dashboard 2023-07-24 16:51:20 -07:00
karamvir
872254e865 cleanup header layout 2023-07-24 15:43:45 -07:00
karamvir
2a8fb84dc3 cleanup 2023-07-24 13:56:58 -07:00
karamvir
008dab08f8 normalise resources, better card hoverability 2023-07-24 13:55:07 -07:00
karamvir
781a3958d8 add template for deployment info 2023-07-24 11:37:19 -07:00
karamvir
ec344cafb5 improve styles, actions 2023-07-24 10:30:50 -07:00
karamvir
ec20b288be improve execute hooks, cleanup a bit etc 2023-07-24 09:24:02 -07:00
karamvir
9206e00a99 refactor component layout 2023-07-24 09:07:32 -07:00
karamvir
438a172e77 improve hoverable prop behavior, add link to /builds on dashboard 2023-07-24 01:24:50 -07:00
karamvir
8aaa559fbb slightly improve login / logout 2023-07-24 01:18:35 -07:00
karamvir
79b3d30c1e fixes 2023-07-24 01:09:49 -07:00
karamvir
3c5293b60b add actions, refactor deployments 2023-07-24 00:59:17 -07:00
karamvir
d16da528af fixes 2023-07-24 00:34:00 -07:00
karamvir
6407a3a06d add builds page, build page, etc 2023-07-24 00:21:20 -07:00
karamvir
145804c05d add deployments back to header, fix pie variants 2023-07-23 23:53:54 -07:00
karamvir
7b5da511fe ok better dashboard 2023-07-23 23:49:19 -07:00
karamvir
cc65d5ac23 add servers list page, upgrade server card with stats 2023-07-23 23:38:29 -07:00
karamvir
6c41362bfa add single deployment page etc 2023-07-23 23:31:19 -07:00
karamvir
b57688033e add some basic dashboard components, add server page 2023-07-23 16:08:02 -07:00
mbecker20
9c120118e9 fix types imports 2023-07-22 12:30:52 -04:00
mbecker20
70ce321c75 remove server to_notify 2023-07-22 11:42:46 -04:00
mbecker20
ed3df2beba fix 2 responses 2023-07-22 11:25:38 -04:00
mbecker20
613d445c99 vite config resolve monitor client 2023-07-22 02:53:21 -04:00
mbecker20
5cc3885e32 fix the ts client 2023-07-22 02:25:02 -04:00
mbecker20
7e66eca6ae Merge branch 'next-fe' into next 2023-07-21 23:16:57 -04:00
Karamvir
eef1c3f41d init frontend 2023-07-20 14:36:01 -07:00
Karamvir
09defb60a5 update client 2023-07-20 14:35:51 -07:00
mbecker20
4fd931c508 add cors layer 2023-07-20 21:02:14 +00:00
mbecker20
9b656a13e6 add container log search via | grep 2023-07-18 18:41:53 +00:00
mbecker20
e95fe8f410 fix doc indexes 2023-07-17 06:14:16 +00:00
mbecker20
91628c61ee update tests 2023-07-17 05:18:51 +00:00
mbecker20
0747bc9bc1 env override config port 2023-07-17 04:40:57 +00:00
mbecker20
2df881d43f include commit msg in commit hash log 2023-07-16 22:51:23 -04:00
mbecker20
cd10eaf9db smth 2023-07-16 22:39:55 -04:00
mbecker20
e0f69a6420 work on monitoring 2023-07-16 22:24:44 -04:00
mbecker20
e8828ed74b update slack version 2023-07-16 12:21:03 -04:00
mbecker20
ce29e0685c move helpers around 2023-07-16 15:38:01 +00:00
mbecker20
6c7dda2b67 alert enum and send alert logic 2023-07-16 05:51:45 +00:00
mbecker20
7694e3f565 update ts lib 2023-07-15 04:21:10 -04:00
mbecker20
b8c9731081 work on ts client 2023-07-15 04:14:46 -04:00
mbecker20
14ec8b2bcb Merge branch 'temp' into next 2023-07-15 03:54:58 -04:00
mbecker20
e9d7543c43 tag crud 2023-07-15 03:54:49 -04:00
mbecker20
311e090826 update update after build clone 2023-07-14 06:37:18 +00:00
mbecker20
1c1172e0e7 fix core dockerfile 2023-07-14 06:24:22 +00:00
mbecker20
2a1b719f67 move core, periphery, tests under /bin 2023-07-14 06:06:16 +00:00
mbecker20
9b9cf8ad9f add commit hash to clone log 2023-07-14 04:32:38 +00:00
mbecker20
946f427c4b standard logger 2023-07-13 23:42:28 -04:00
beckerinj
100f34a32b pretty debug print error 2023-07-13 17:12:24 -04:00
beckerinj
7a9b506958 update mungos version, remove created_at (its in _id) 2023-07-13 16:34:03 -04:00
mbecker20
4cd074ee2a implement cancel build 2023-07-13 08:24:47 +00:00
mbecker20
e2a76274ef update partial derive version 2023-07-12 08:24:00 +00:00
beckerinj
aa197bd89e cleanup 2023-07-11 14:55:19 -04:00
beckerinj
fde1165408 update startup log 2023-07-11 14:54:51 -04:00
beckerinj
731b91e0a9 startup log after logger init 2023-07-11 14:46:22 -04:00
mbecker20
60205ece9d use partial derive to impl Froms 2023-07-11 03:12:52 -04:00
mbecker20
6b8fc5d624 alerter api 2023-07-11 03:03:19 -04:00
mbecker20
54df0af48e implement some alerter crud 2023-07-10 05:10:13 +00:00
mbecker20
f1101cae5a update mungos & fmt 2023-07-09 18:26:03 +00:00
mbecker20
f7a1edb7e5 update mungos to 0.4.10 2023-07-09 18:11:19 +00:00
mbecker20
4e7311bdfe define more api for alerter / tag 2023-07-09 06:03:21 +00:00
mbecker20
818210805f create alerter resource 2023-07-09 05:46:20 +00:00
mbecker20
0e1026a100 more dynamic mongo info loading from config 2023-07-08 05:54:12 +00:00
mbecker20
553a2e3dad create the example configs 2023-07-08 03:17:50 +00:00
mbecker20
c12eb40459 add api to search resources by tag 2023-07-07 06:55:47 +00:00
mbecker20
9b68283eb3 improve typeshare types 2023-07-06 20:54:27 +00:00
mbecker20
e9ee467940 derive EnumVariant enum 2023-07-06 20:30:27 +00:00
mbecker20
91f1de5d95 change tags api, add tags requests, get list items 2023-07-06 06:06:14 +00:00
mbecker20
6ce6b55b68 get server / deployment status from cache 2023-07-05 23:48:07 +00:00
mbecker20
d0b5e3e241 split api into read, write, execute 2023-07-04 05:34:13 +00:00
mbecker20
f16176f298 get build action states 2023-07-04 03:56:48 +00:00
mbecker20
2dc4d41524 move github listener to potentially add more listeners 2023-07-04 03:44:45 +00:00
mbecker20
2bf2901dcd move clients 2023-07-04 03:20:11 +00:00
mbecker20
99c2d6195d crud for builder 2023-07-04 00:02:27 +00:00
mbecker20
c5b1079be9 repo api 2023-07-03 22:12:54 +00:00
mbecker20
c25e05fde7 work on repo api 2023-07-03 20:09:44 +00:00
mbecker20
c6290cb7f6 start repo crud / actions 2023-07-03 07:05:01 +00:00
mbecker20
b08d5090a2 github build listener 2023-07-03 06:48:58 +00:00
mbecker20
3da824b4c3 finish permissions routes 2023-07-03 06:27:44 +00:00
mbecker20
9a1619c554 test periphery api 2023-07-03 06:09:30 +00:00
mbecker20
312f7a9003 work on permissions api 2023-07-03 01:11:36 -04:00
mbecker20
a197948837 change bin name for monitor core 2023-07-02 07:58:54 +00:00
mbecker20
765d510127 some more core routes 2023-07-02 07:49:13 +00:00
mbecker20
b5179e443b setup some more deployment reqs 2023-06-30 05:41:32 +00:00
mbecker20
fe6c371a9f fmt. copy deployment / build 2023-06-30 05:29:15 +00:00
mbecker20
d66c5ed618 ts client 2023-06-30 04:52:08 +00:00
mbecker20
70ba120cfb ts client 2023-06-29 07:40:35 +00:00
mbecker20
33f1d85194 work on typescript client 2023-06-29 06:55:32 +00:00
mbecker20
e3f1b449e2 add the deployment actions 2023-06-28 12:16:25 -04:00
beckerinj
7e580efc77 add temp monitoring stuff and start of builder crud 2023-06-25 02:38:16 -04:00
mbecker20
b33501cce6 implement basic deployment api 2023-06-24 07:21:15 +00:00
mbecker20
402259ef11 implement build request 2023-06-24 00:42:14 +00:00
mbecker20
2d4c682fac work on Aws builds / RunBuild 2023-06-23 08:07:35 +00:00
mbecker20
2affc5f555 work on the state change monitoring 2023-06-22 07:57:25 +00:00
mbecker20
9ce1dded38 crud for deployment and build 2023-06-22 06:39:57 +00:00
mbecker20
383b0d125c implement some deployment crud 2023-06-20 08:02:48 +00:00
mbecker20
8023710702 make deployment request resolver file 2023-06-20 07:23:20 +00:00
mbecker20
82c0cbc8bd refactor api router location 2023-06-20 07:20:44 +00:00
mbecker20
893a7f22a4 refactor api router out of main 2023-06-20 07:15:53 +00:00
mbecker20
b903e511b5 finish server api 2023-06-20 07:13:14 +00:00
mbecker20
86379d1c79 rename server 2023-06-19 08:03:58 +00:00
mbecker20
6320d6fbeb implement the ws 2023-06-19 07:34:36 +00:00
mbecker20
636023167d start implementing core server api 2023-06-18 21:12:26 +00:00
mbecker20
8a86825357 fix useless conversion 2023-06-18 17:55:58 +00:00
mbecker20
f5d68b4d78 add api secret create / delete methods 2023-06-18 17:55:37 +00:00
mbecker20
7295e656eb fix dockerfile 2023-06-18 06:18:52 +00:00
mbecker20
5e51050fa3 initialize monitor client 2023-06-18 06:15:31 +00:00
mbecker20
c510a64207 add dockerfile 2023-06-18 05:54:21 +00:00
mbecker20
7269050cf4 auth should work 2023-06-18 05:49:32 +00:00
mbecker20
5730e5d2ae implement db and some auth functions 2023-06-18 05:07:05 +00:00
mbecker20
76ddbe1869 start working on core 2023-06-16 06:20:26 +00:00
mbecker20
b5ef174312 finish basic periphery resolvers 2023-06-15 08:02:51 +00:00
mbecker20
31007f3d78 template the entities 2023-06-11 17:51:46 +00:00
mbecker20
c5a66709ad partial derive working 2023-06-11 17:49:14 +00:00
mbecker20
aa4815b01d restructure and add more requests 2023-06-11 05:48:46 +00:00
mbecker20
7d6b975592 implement resolver api 2023-06-10 22:34:42 +00:00
mbecker20
3a57712dd2 add periphery client to core deps 2023-06-10 08:42:25 +00:00
mbecker20
62eb35340e move periphery api requests to separate crate 2023-06-10 08:41:34 +00:00
mbecker20
e9880d00ab resolve request 2023-06-10 08:15:07 +00:00
mbecker20
73c1792fea implement the periphery stats api 2023-06-10 07:14:08 +00:00
mbecker20
d36f399d08 rename core AppState to State 2023-06-09 15:50:37 +00:00
mbecker20
c8a24dbcbc work to make backend Req -> Res typesafe using Resolve trait 2023-06-09 15:50:13 +00:00
mbecker20
fecc5134ad resolver trait 2023-06-09 09:01:55 +00:00
mbecker20
8ad84a8cec rework the stats routes 2023-06-09 08:31:46 +00:00
mbecker20
51ebee2910 add types for system stats 2023-06-08 16:17:48 +00:00
mbecker20
20d496e617 implement the periphery request guard 2023-06-08 06:38:36 +00:00
mbecker20
44a16dd214 init monitor next (v5) 2023-06-07 07:30:50 +00:00
mbecker20
4695b79b73 delete everything for next rewrite 2023-06-07 04:59:00 +00:00
mbecker20
9c0be07ae1 refactor caching to use custom Cache struct 2023-05-28 08:31:18 +00:00
mbecker20
ab945aadde fix set termination timeout 0 2023-05-25 20:18:39 +00:00
mbecker20
c1c461c273 add command crud and run api 2023-05-24 07:45:52 +00:00
mbecker20
336742ee69 update author and clap args 2023-05-24 05:28:46 +00:00
beckerinj
405dacce1c sanitize container logs for any script tags 2023-05-13 02:23:05 -04:00
beckerinj
9acd45aa93 fix log whitespace non preservation issue 2023-05-13 02:14:14 -04:00
beckerinj
c889c2cc03 clean up log component imports 2023-05-13 01:49:06 -04:00
mbecker20
7ac91ef416 view images on server 2023-05-12 07:22:56 +00:00
mbecker20
8e28669aa1 potentially fix deployment update getting crossed with another deployment 2023-05-09 21:17:10 +00:00
mbecker20
6cdb91f8b8 more readable container state in header 2023-05-04 01:12:55 +00:00
mbecker20
e892474713 modify create deployment initializer 2023-05-03 21:03:57 +00:00
mbecker20
abdae98816 core handle term signal 2023-05-03 20:02:37 +00:00
mbecker20
ab4fe49f33 deployment / build config reset 2023-05-03 19:31:59 +00:00
mbecker20
1ace35103b fix ansi-to-html install 2023-05-03 07:21:38 +00:00
beckerinj
dbee729eee show ansi colors in the logs correctly 2023-05-03 03:13:42 -04:00
mbecker20
792576ce59 add auto redeploy user 2023-05-02 17:21:25 +00:00
mbecker20
a07624e9b9 0.3.4 fix docker stop --signal on older docker versions 2023-05-01 21:03:25 +00:00
mbecker20
bb8054af8a log version first 2023-05-01 08:43:00 +00:00
mbecker20
7738f3e066 core logs version on startup 2023-05-01 08:34:20 +00:00
mbecker20
5dee16a100 0.3.3 add default term signal and timeout to deployment 2023-05-01 08:28:12 +00:00
mbecker20
35f3bcdf2f update core version 2023-05-01 03:05:51 +00:00
mbecker20
130ca8e1f1 bump versions to 0.3.2 2023-05-01 01:53:58 +00:00
mbecker20
ced4c21688 update monitor client to 0.3.1 2023-05-01 01:41:23 +00:00
mbecker20
6ec7078024 custom termination signals 2023-04-30 06:52:27 +00:00
mbecker20
b28d8f2506 update frontend types 2023-04-30 03:26:01 +00:00
mbecker20
c88a9291a0 support auto redeploy and custom stop signals 2023-04-30 00:10:59 +00:00
mbecker20
1e82d19306 build summary defaults to time view 2023-04-21 16:34:03 +00:00
mbecker20
dd87e50cb2 build stats summary 2023-04-21 08:52:17 +00:00
mbecker20
4c8f96a30f build stats card 2023-04-21 08:08:15 +00:00
mbecker20
c4f45e05f1 finish build stats api 2023-04-21 08:08:01 +00:00
mbecker20
6aa382c7c1 finish build stats api 2023-04-20 16:34:14 +00:00
mbecker20
ccb9f059e6 get build stats api 2023-04-20 07:34:49 +00:00
mbecker20
1cdcea0771 start on route to get daily build stats (time, count) 2023-04-19 07:02:21 +00:00
mbecker20
88dda0de80 update rename deployment to check whether deployment has repo attached, and if so, reclone it to account for name change. 2023-04-19 06:44:58 +00:00
mbecker20
30ed99e2b0 publish monitor client 0.3.1 with Readme 2023-04-18 07:56:30 +00:00
mbecker20
e5953b7541 monitor client readme 2023-04-18 07:55:05 +00:00
mbecker20
1f9d01c59f new home servers png 2023-04-18 06:31:23 +00:00
mbecker20
cc5210a3d8 fix server children add new button 2023-04-18 06:17:42 +00:00
mbecker20
26559e2d3b delete builds screenshots 2023-04-18 03:20:20 +00:00
mbecker20
7eeddb300f add link to screenshots docsite 2023-04-18 02:59:37 +00:00
mbecker20
1e01bae16b add screenshots to monitor readme 2023-04-18 02:47:46 +00:00
mbecker20
87c03924e5 remove second universal search 2023-04-18 02:46:46 +00:00
mbecker20
f0998b1d43 add universal search screenshot 2023-04-18 02:42:21 +00:00
mbecker20
1995a04244 add screenshots 2023-04-18 02:38:07 +00:00
mbecker20
420fe6bcd5 add build time to version selector 2023-04-17 08:26:34 +00:00
mbecker20
d4e26c0553 fix docker repo reference 2023-04-16 19:33:28 +00:00
mbecker20
5f5e7cb45e add note about oauth 2023-04-16 07:44:34 +00:00
beckerinj
8aa0304738 core setup doc 2023-04-16 03:32:06 -04:00
beckerinj
8ec98c33a4 first user is auto enabled and made admin 2023-04-16 03:23:30 -04:00
beckerinj
2667182ca3 update core example config 2023-04-16 02:49:00 -04:00
mbecker20
1cd0018b93 0.3.0 check whether pre_build / on_clone / on_pull are non empty before running 2023-04-14 22:58:10 +00:00
beckerinj
359789ee29 update aws sdk version 2023-04-14 12:52:52 -04:00
mbecker20
e79c860c0f make update hover 2023-04-14 15:06:05 +00:00
mbecker20
765f53f30e types doc 2023-04-14 06:14:33 +00:00
beckerinj
3c3c21d7f5 move header to top. redesign build 2023-04-14 01:57:10 -04:00
mbecker20
eb700cb500 improve search functionality 2023-04-14 03:53:21 +00:00
beckerinj
b3b723a717 build show unknown if builds[id] cannot be found 2023-04-13 11:19:51 -04:00
beckerinj
555c230d2e update menu stays open through updates 2023-04-13 03:07:31 -04:00
beckerinj
adf4b97aef lots of api docs 2023-04-13 02:23:40 -04:00
beckerinj
32c38d796b docs 2023-04-12 18:19:13 -04:00
beckerinj
c8829e15ed get most of servers api docs 2023-04-10 17:58:52 -04:00
beckerinj
453df417d0 finish deployment api doc 2023-04-10 04:39:47 -04:00
mbecker20
02a7741a9c don't show group editing ui if user doesn't have permissions 2023-04-09 22:55:30 +00:00
mbecker20
96fc5b0ca8 group page / edit work with non admin users 2023-04-09 22:38:36 +00:00
mbecker20
b13e624a66 support manage user permissions on groups 2023-04-09 19:24:07 +00:00
beckerinj
6a8f66f272 work on deployment api docs 2023-04-09 02:15:08 -04:00
mbecker20
0c638a08fd fix problems with build config page breaking 2023-04-07 20:15:21 +00:00
mbecker20
b07f8af8e5 deployment extra args fix as well 2023-04-07 19:50:16 +00:00
mbecker20
3bbb2a985f extra args frontend needs to account for when they don't exist 2023-04-07 19:50:06 +00:00
beckerinj
afdf71c545 work on api docs 2023-04-07 11:12:44 -04:00
beckerinj
8de8d2df9a work on API docs 2023-04-06 01:03:01 -04:00
mbecker20
1dffdbddc2 sort by ts increasing 2023-04-05 18:40:12 +00:00
mbecker20
11fff633b0 don't use $mod in stats query 2023-04-05 18:17:56 +00:00
mbecker20
61bc44d1f4 add hover class to home tree build 2023-04-04 21:18:13 +00:00
beckerinj
e8fabb8cfa Update index.mdx 2023-04-03 16:40:15 -04:00
mbecker20
7a50885847 exit group view with Escape in addition to ArrowLeft 2023-04-03 16:35:44 +00:00
beckerinj
6239da45f4 delete old docs and provide link to docsite 2023-04-03 11:28:50 -04:00
mbecker20
af597eb3c7 clean up builds 2023-04-03 05:55:37 +00:00
mbecker20
d66cda068c connect up delete group 2023-04-03 05:52:58 +00:00
beckerinj
91fcd07c1c make group behavior more sensible frontend 2023-04-03 01:50:45 -04:00
mbecker20
85aa470da1 publish docs 2023-04-03 02:48:00 +00:00
mbecker20
6f0d5f37a5 update home page description for sso 2023-04-02 22:30:37 +00:00
beckerinj
1b4d604404 deploymentBranch 2023-04-02 18:22:04 -04:00
beckerinj
a7f6cbe0b9 small docsite fixes 2023-04-02 12:44:35 -04:00
beckerinj
9cf28bf123 improve docs 2023-04-02 04:15:06 -04:00
beckerinj
c92e04294a monitor docs site working 2023-04-02 04:03:01 -04:00
beckerinj
36f059b455 docsite 2023-04-02 01:40:56 -04:00
mbecker20
4aac301852 0.2.14 only try to merge files, filter nested directories 2023-04-01 19:35:04 +00:00
mbecker20
b375708bbd 0.2.13 support config directories 2023-04-01 19:02:43 +00:00
mbecker20
10b6a9482b update aws sdk version and implement merge_config_files 2023-04-01 07:06:17 +00:00
mbecker20
84d45c5df8 0.2.12 fix docker build command interp 2023-03-31 18:06:18 +00:00
mbecker20
c6559814b1 frontend for docker build extra args and use buildx 2023-03-31 17:31:39 +00:00
mbecker20
c8c080183f remove publish for cli 2023-03-31 17:04:15 +00:00
mbecker20
597b67f799 0.2.11 support buildx and arbitrary extra args 2023-03-31 17:03:38 +00:00
mbecker20
ec52d5f422 support docker buildx build and passing arbitrary extra args 2023-03-31 16:57:02 +00:00
mbecker20
34806304d6 add center menu title bottom border and adjust copy menu 2023-03-31 05:41:35 +00:00
beckerinj
87953d5495 menu padding 2rem 2023-03-31 01:27:17 -04:00
beckerinj
b6c7c80c95 full width input for copy menu 2023-03-31 01:26:19 -04:00
beckerinj
77e568d5c3 small 2023-03-27 12:41:59 -04:00
mbecker20
699fc51cf7 link to build if click on image deployment header 2023-03-27 15:30:11 +00:00
mbecker20
21029c90b7 info page on stats page 2023-03-27 05:13:12 +00:00
mbecker20
6b0530eb7f brush up server stats page 2023-03-26 23:15:58 +00:00
beckerinj
f7061c7225 toggle to show absolutes for mem and disk stat graphs 2023-03-26 18:47:21 -04:00
mbecker20
750f698369 updates page 2023-03-26 02:20:39 +00:00
mbecker20
ec5ef42298 add max height / scrolling to copy menu target selector 2023-03-24 00:45:47 +00:00
beckerinj
46820b0044 increase the tab title padding 2023-03-23 20:36:31 -04:00
beckerinj
425a6648f7 improve summary styling 2023-03-23 03:13:19 -04:00
mbecker20
349fc297ce 0.2.10 add renaming functionality 2023-03-22 20:33:26 +00:00
mbecker20
5ad87c03ed show none when none 2023-03-22 07:16:04 +00:00
mbecker20
d16006f28f improve design 2023-03-22 07:03:28 +00:00
beckerinj
7f0452a5f5 improve pie chart home page 2023-03-22 02:59:29 -04:00
mbecker20
c605b2f6fc implement pie chart summary 2023-03-22 06:41:57 +00:00
beckerinj
6c2d8a8494 unnecessary import 2023-03-21 23:10:01 -07:00
mbecker20
874691f729 add a pie chart component 2023-03-21 09:44:00 +00:00
beckerinj
cdf702e17d orange 2023-03-21 00:52:25 -07:00
mbecker20
25fdb32627 rename deployments 2023-03-19 08:14:54 +00:00
mbecker20
e976ea0a3a improve the behavior 2023-03-17 20:55:37 +00:00
mbecker20
34e6b4fc69 rename server working 2023-03-17 20:40:19 +00:00
mbecker20
a2d77567b3 dont need to 'to_monitor_name' servers 2023-03-15 07:35:54 +00:00
mbecker20
ecb460f9b5 add rename deployment to monitor client 2023-03-14 20:15:27 +00:00
mbecker20
63444b089c rename deployment func 2023-03-12 23:36:20 +00:00
mbecker20
c787984b77 initialize mongo with builder 2023-03-12 22:03:31 +00:00
mbecker20
bf3d03e801 fix problem of repeated query for docker accounts, secrets, etc 2023-03-12 05:07:55 +00:00
mbecker20
bc2e69b975 use resource to load stuff 2023-03-12 03:45:49 +00:00
mbecker20
7b94fcf3da 0.2.9 finish implement secret helpers on frontend 2023-03-12 00:48:18 +00:00
mbecker20
9cf03b8b88 add route to get available secret keys 2023-03-12 00:16:03 +00:00
mbecker20
a288edcf61 0.2.8 implement secret interpolation on builds and deployments 2023-03-11 23:34:17 +00:00
mbecker20
89cc18ad37 update tokio version 2023-03-10 19:27:40 +00:00
mbecker20
ffa3b671e1 change default alerting thresholds 2023-03-09 07:08:38 +00:00
beckerinj
f32eeb413b add label to home sort by 2023-03-08 16:10:23 -05:00
mbecker20
b5a5103cfc move core dockerfile 2023-03-08 18:26:42 +00:00
mbecker20
c5697e59f3 delete sample file 2023-03-08 18:24:15 +00:00
mbecker20
f030667ff4 update image in deployment header as well 2023-03-07 17:41:00 +00:00
mbecker20
e9fef5d97c change get_deployment_deployed_version to 'unknown' if not known 2023-03-07 17:39:59 +00:00
beckerinj
f5818ac7ea actually return image 2023-03-07 12:37:45 -05:00
mbecker20
c85ab4110d show derived image is container.image is sha256: 2023-03-07 16:30:59 +00:00
mbecker20
9690ea35b8 make description text area larger 2023-03-07 08:44:31 +00:00
mbecker20
6300c8011b fix modify global user permissions operator - make operator the admin, instead of the target 2023-03-06 17:09:12 +00:00
mbecker20
97f582b381 customizable page title 2023-03-06 02:07:08 +00:00
mbecker20
5135a9c228 show server name under deployment on admin user manage page 2023-03-06 01:46:40 +00:00
mbecker20
b7d1212a82 make resources links in account page 2023-03-05 21:50:15 +00:00
mbecker20
7d9d0a9fc4 add view of resources you can access on account page 2023-03-05 21:42:11 +00:00
beckerinj
ed9aef4321 add resources to account page 2023-03-05 16:33:40 -05:00
mbecker20
0aa638bdf4 only do daily update if servers not empty 2023-03-05 20:19:06 +00:00
mbecker20
0ec39d793d one page to view all permissions for user 2023-03-05 09:24:59 +00:00
mbecker20
5579ba869c v0.2.7 remove passkeys from periphery startup log 2023-03-03 17:27:42 +00:00
mbecker20
210940038c hide passkeys on periphery startup config log 2023-03-03 17:24:28 +00:00
mbecker20
98a1a60362 /home/ubuntu/example 2023-03-03 08:15:17 +00:00
mbecker20
86cf9116ba update builds and deployments docs with link to file paths doc 2023-03-03 08:09:41 +00:00
mbecker20
8b2defe0d9 add doc about file paths 2023-03-03 07:58:09 +00:00
mbecker20
50b14b3ce5 0.2.6 store ami name instead of ami_id (because the id has to change sometimes) 2023-03-03 07:11:55 +00:00
820 changed files with 86819 additions and 32690 deletions

View File

@@ -1,4 +1,8 @@
/target
/config_example
config.*
.env
readme.md
typeshare.toml
LICENSE
*.code-workspace
*/node_modules
*/dist

21
.gitignore vendored
View File

@@ -1,12 +1,9 @@
target
/frontend/build
node_modules
dist
.env
.env.development
repos
config.json
config.toml
secrets.json
secrets.toml
target
/frontend/build
node_modules
/lib/ts_client/build
dist
.env
.env.development
creds.toml
core.config.toml

25
.vscode/resolver.code-snippets vendored Normal file
View File

@@ -0,0 +1,25 @@
{
"resolve": {
"scope": "rust",
"prefix": "resolve",
"body": [
"impl Resolve<${1}, User> for State {",
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
"\t\ttodo!()",
"\t}",
"}"
]
},
"static": {
"scope": "rust",
"prefix": "static",
"body": [
"fn ${1}() -> &'static ${2} {",
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
"\t${3}.get_or_init(|| {",
"\t\t${0}",
"\t})",
"}"
]
}
}

View File

@@ -1,64 +0,0 @@
{
"component": {
"scope": "typescriptreact,javascriptreact",
"prefix": "comp",
"body": [
"import { Component } from \"solid-js\";",
"",
"const ${1:$TM_FILENAME_BASE}: Component<{}> = (p) => {",
"\treturn (",
"\t\t<div>",
"\t\t\t${0}",
"\t\t</div>",
"\t);",
"}",
"",
"export default ${1:$TM_FILENAME_BASE};"
]
},
"component-with-css": {
"scope": "typescriptreact,javascriptreact",
"prefix": "css-comp",
"body": [
"import { Component } from \"solid-js\";",
"import s from \"./${1:$TM_FILENAME_BASE}.module.scss\";",
"",
"const ${2:$TM_FILENAME_BASE}: Component<{}> = (p) => {",
"\treturn (",
"\t\t<div class={s.${2:$TM_FILENAME_BASE}} >",
"\t\t\t${0}",
"\t\t</div>",
"\t);",
"}",
"",
"export default ${2:$TM_FILENAME_BASE};"
]
},
"context": {
"scope": "typescriptreact,javascriptreact",
"prefix": "provider",
"body": [
"import { ParentComponent, createContext, useContext } from \"solid-js\";",
"",
"const value = () => {",
"\treturn {};",
"}",
"",
"export type Value = ReturnType<typeof value>;",
"",
"const context = createContext<Value>();",
"",
"export const Provider: ParentComponent<{}> = (p) => {",
"\treturn (",
"\t\t<context.Provider value={value()}>",
"\t\t\t{p.children}",
"\t\t</context.Provider>",
"\t);",
"}",
"",
"export function useValue() {",
"\treturn useContext(context) as Value;",
"}"
]
}
}

97
.vscode/tasks.json vendored
View File

@@ -24,14 +24,14 @@
"label": "start dev",
"dependsOn": [
"run core",
"yarn: start frontend"
"start frontend"
],
"problemMatcher": []
},
{
"type": "shell",
"command": "yarn start",
"label": "yarn: start frontend",
"label": "start frontend",
"options": {
"cwd": "${workspaceFolder}/frontend"
},
@@ -44,7 +44,7 @@
"command": "run",
"label": "run core",
"options": {
"cwd": "${workspaceFolder}/core"
"cwd": "${workspaceFolder}/bin/core"
},
"presentation": {
"group": "start"
@@ -55,24 +55,7 @@
"command": "run",
"label": "run periphery",
"options": {
"cwd": "${workspaceFolder}/periphery"
}
},
{
"type": "shell",
"command": "cargo install --path . && if pgrep periphery; then pkill periphery; fi && periphery --daemon --config-path ~/.monitor/local.periphery.config.toml",
"label": "run periphery daemon",
"options": {
"cwd": "${workspaceFolder}/periphery"
},
"problemMatcher": []
},
{
"type": "cargo",
"command": "run",
"label": "run cli",
"options": {
"cwd": "${workspaceFolder}/cli"
"cwd": "${workspaceFolder}/bin/periphery"
}
},
{
@@ -80,14 +63,14 @@
"command": "run",
"label": "run tests",
"options": {
"cwd": "${workspaceFolder}/tests"
"cwd": "${workspaceFolder}/bin/tests"
}
},
{
"type": "cargo",
"command": "publish",
"args": ["--allow-dirty"],
"label": "publish monitor types",
"label": "publish types",
"options": {
"cwd": "${workspaceFolder}/lib/types"
}
@@ -95,76 +78,14 @@
{
"type": "cargo",
"command": "publish",
"label": "publish monitor client",
"label": "publish rs client",
"options": {
"cwd": "${workspaceFolder}/lib/monitor_client"
}
},
{
"type": "cargo",
"command": "publish",
"label": "publish monitor cli",
"options": {
"cwd": "${workspaceFolder}/cli"
"cwd": "${workspaceFolder}/lib/rs_client"
}
},
{
"type": "shell",
"command": "docker compose up -d",
"label": "docker compose up",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose down",
"label": "docker compose down",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose build",
"label": "docker compose build",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose down && docker compose up -d",
"label": "docker compose restart",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose build && docker compose down && docker compose up -d",
"label": "docker compose build and restart",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose build periphery",
"label": "docker compose build periphery",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "typeshare ./lib/types --lang=typescript --output-file=./frontend/src/types.ts && typeshare ./core --lang=typescript --output-file=./frontend/src/util/client_types.ts",
"command": "node ./client/ts/generate_types.mjs",
"label": "generate typescript types",
"problemMatcher": []
}

3412
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,14 +1,104 @@
[workspace]
members = [
"cli",
"core",
"periphery",
"tests",
"lib/axum_oauth2",
"lib/db_client",
"lib/helpers",
"lib/periphery_client",
"lib/types",
"lib/monitor_client"
]
[workspace]
resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.9.0"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/mbecker20/monitor"
homepage = "https://docs.monitor.mogh.tech"
[patch.crates-io]
monitor_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_client = "1.9.0"
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
logger = { path = "lib/logger" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.4.3", default-features = false }
slack = { version = "0.1.0", package = "slack_client_rs" }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
async_timing_util = "0.1.14"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "0.3.0"
resolver_api = "1.1.0"
toml_pretty = "1.1.2"
parse_csl = "0.1.0"
mungos = "0.5.6"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.38.0", features = ["full"] }
reqwest = { version = "0.12.4", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
futures-util = "0.3.30"
# SERVER
axum = { version = "0.7.5", features = ["ws", "json"] }
axum-extra = { version = "0.9.3", features = ["typed-header"] }
tower = { version = "0.4.13", features = ["timeout"] }
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
tokio-tungstenite = "0.23.1"
# SER/DE
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.203", features = ["derive"] }
strum = { version = "0.26.2", features = ["derive"] }
serde_json = "1.0.117"
toml = "0.8.14"
# ERROR
anyhow = "1.0.86"
thiserror = "1.0.61"
# LOGGING
opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.18", features = ["json"] }
tracing-opentelemetry = "0.24.0"
opentelemetry-otlp = "0.16.0"
opentelemetry = "0.23.0"
tracing = "0.1.40"
# CONFIG
clap = { version = "4.5.7", features = ["derive"] }
dotenv = "0.15.0"
envy = "0.4.2"
# CRYPTO
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
urlencoding = "2.1.3"
bcrypt = "0.15.1"
base64 = "0.22.1"
hmac = "0.12.1"
sha2 = "0.10.8"
rand = "0.8.5"
jwt = "0.16.0"
hex = "0.4.3"
# SYSTEM
bollard = "0.16.1"
sysinfo = "0.30.12"
# CLOUD
aws-config = "1.5.1"
aws-sdk-ec2 = "1.51.1"
aws-sdk-ecr = "1.33.0"
# MISC
derive_builder = "0.20.0"
typeshare = "1.0.3"
colored = "2.1.0"
bson = "2.11.0"

View File

@@ -1,23 +0,0 @@
FROM rust:latest as builder
WORKDIR /builder
COPY ./core ./core
COPY ./lib/types ./lib/types
COPY ./lib/helpers ./lib/helpers
COPY ./lib/db_client ./lib/db_client
COPY ./lib/periphery_client ./lib/periphery_client
COPY ./lib/axum_oauth2 ./lib/axum_oauth2
RUN cd core && cargo build --release
FROM gcr.io/distroless/cc
COPY ./frontend/build /frontend
COPY --from=builder /builder/core/target/release/core /
EXPOSE 9000
CMD ["./core"]

View File

@@ -1,22 +0,0 @@
FROM rust:latest as builder
WORKDIR /builder
COPY ./periphery ./periphery
COPY ./lib/types ./lib/types
COPY ./lib/helpers ./lib/helpers
RUN cd periphery && cargo build --release
FROM debian:stable-slim
ARG DEPS_INSTALLER
COPY ./${DEPS_INSTALLER}.sh ./
RUN sh ./${DEPS_INSTALLER}.sh
COPY --from=builder /builder/periphery/target/release/periphery /usr/local/bin/periphery
EXPOSE 8000
CMD "periphery"

23
bin/alerter/Cargo.toml Normal file
View File

@@ -0,0 +1,23 @@
[package]
name = "alerter"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# local
monitor_client.workspace = true
logger.workspace = true
# external
tokio.workspace = true
tracing.workspace = true
axum.workspace = true
anyhow.workspace = true
serde.workspace = true
dotenv.workspace = true
envy.workspace = true

14
bin/alerter/Dockerfile Normal file
View File

@@ -0,0 +1,14 @@
FROM rust:1.71.1 as builder
WORKDIR /builder
COPY . .
RUN cargo build -p alert_logger --release
FROM gcr.io/distroless/cc
COPY --from=builder /builder/target/release/alert_logger /
EXPOSE 7000
CMD ["./alert_logger"]

4
bin/alerter/README.md Normal file
View File

@@ -0,0 +1,4 @@
# Alerter
This crate sets up a basic axum server that listens for incoming alert POSTs.
It can be used as a monitor alerting endpoint, and serves as a template for other custom alerter implementations.

71
bin/alerter/src/main.rs Normal file
View File

@@ -0,0 +1,71 @@
#[macro_use]
extern crate tracing;
use std::{net::SocketAddr, str::FromStr};
use anyhow::Context;
use axum::{routing::post, Json, Router};
use monitor_client::entities::{
alert::Alert, server::stats::SeverityLevel,
};
use serde::Deserialize;
/// Environment configuration for the alerter, parsed via `envy`.
#[derive(Deserialize)]
struct Env {
// Port to listen on; falls back to `default_port` (7000) when `PORT` is unset.
#[serde(default = "default_port")]
port: u16,
}
/// Fallback listen port used when the `PORT` env var is not provided.
/// Matches the port exposed by the alerter's Dockerfile (`EXPOSE 7000`).
fn default_port() -> u16 {
  7000
}
/// Initializes env/logging, then serves a single POST route on
/// `0.0.0.0:{port}` that logs each incoming [`Alert`] at a tracing level
/// matching its severity. Runs until the server exits (returns an error).
async fn app() -> anyhow::Result<()> {
  dotenv::dotenv().ok();
  logger::init(&Default::default())?;

  // Listening port comes from the environment (see `Env`).
  let Env { port } =
    envy::from_env().context("failed to parse env")?;
  let socket_addr = SocketAddr::from_str(&format!("0.0.0.0:{port}"))
    .context("invalid socket addr")?;
  info!("v {} | {socket_addr}", env!("CARGO_PKG_VERSION"));

  // Resolved alerts are logged at info level regardless of severity;
  // active alerts map Ok -> info, Warning -> warn, Critical -> error.
  let handler = |Json(alert): Json<Alert>| async move {
    if alert.resolved {
      info!("Alert Resolved!: {alert:?}");
      return;
    }
    match alert.level {
      SeverityLevel::Ok => info!("{alert:?}"),
      SeverityLevel::Warning => warn!("{alert:?}"),
      SeverityLevel::Critical => error!("{alert:?}"),
    }
  };
  let router = Router::new().route("/", post(handler));

  let listener = tokio::net::TcpListener::bind(socket_addr)
    .await
    .context("failed to bind tcp listener")?;
  axum::serve(listener, router).await.context("server crashed")
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
  use tokio::signal::unix::{signal, SignalKind};
  // Register the SIGTERM handler before starting the server so a
  // container stop signal is never missed.
  let mut sigterm = signal(SignalKind::terminate())?;
  // Run the server on its own task and race it against termination;
  // whichever completes first decides the process exit.
  let server = tokio::spawn(app());
  tokio::select! {
    // `?` unwraps the JoinError; the inner result is the app's outcome.
    outcome = server => outcome?,
    _ = sigterm.recv() => Ok(()),
  }
}

34
bin/cli/Cargo.toml Normal file
View File

@@ -0,0 +1,34 @@
[package]
name = "monitor_cli"
description = "Command line tool to sync monitor resources and execute file defined procedures"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
[[bin]]
name = "monitor"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# local
monitor_client.workspace = true
# mogh
partial_derive2.workspace = true
# external
tracing-subscriber.workspace = true
merge_config_files.workspace = true
serde_json.workspace = true
futures.workspace = true
tracing.workspace = true
colored.workspace = true
anyhow.workspace = true
tokio.workspace = true
serde.workspace = true
strum.workspace = true
toml.workspace = true
clap.workspace = true

92
bin/cli/README.md Normal file
View File

@@ -0,0 +1,92 @@
# Monitor CLI
Monitor CLI is a tool to sync monitor resources and execute operations.
## Install
```sh
cargo install monitor_cli
```
## Usage
### Credentials
Configure a file `~/.config/monitor/creds.toml` file with contents:
```toml
url = "https://your.monitor.address"
key = "YOUR-API-KEY"
secret = "YOUR-API-SECRET"
```
Note. You can specify a different creds file by using `--creds ./other/path.toml`.
You can also bypass using any file and pass the information using `--url`, `--key`, `--secret`:
```sh
monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
```
### Run Syncs
```sh
## Sync resources in a single file
monitor sync ./resources/deployments.toml
## Sync resources gathered across multiple files in a directory
monitor sync ./resources
## Path defaults to './resources', in this case you can just use:
monitor sync
```
#### Manual
```md
Runs syncs on resource files
Usage: monitor sync [OPTIONS] [PATH]
Arguments:
[PATH] The path of the resource folder / file Folder paths will recursively incorporate all the resources it finds under the folder [default: ./resources]
Options:
--delete Will delete any resources that aren't included in the resource files
-h, --help Print help
```
### Run Executions
```sh
# Triggers an example build
monitor execute run-build test_build
```
#### Manual
```md
Runs an execution
Usage: monitor execute <COMMAND>
Commands:
none The "null" execution. Does nothing
run-procedure Runs the target procedure. Response: [Update]
run-build Runs the target build. Response: [Update]
deploy Deploys the container for the target deployment. Response: [Update]
start-container Starts the container for the target deployment. Response: [Update]
stop-container Stops the container for the target deployment. Response: [Update]
stop-all-containers Stops all deployments on the target server. Response: [Update]
remove-container Stops and removes the container for the target deployment. Response: [Update]
clone-repo Clones the target repo. Response: [Update]
pull-repo Pulls the target repo. Response: [Update]
prune-networks Prunes the docker networks on the target server. Response: [Update]
prune-images Prunes the docker images on the target server. Response: [Update]
prune-containers Prunes the docker containers on the target server. Response: [Update]
help Print this message or the help of the given subcommand(s)
Options:
-h, --help Print help
```
### --yes
You can use `--yes` to avoid any human prompt to continue, for use in automated environments.

66
bin/cli/src/args.rs Normal file
View File

@@ -0,0 +1,66 @@
use clap::{Parser, Subcommand};
use monitor_client::api::execute::Execution;
use serde::Deserialize;
// Top-level CLI arguments (clap derive). NOTE: the `///` doc comments on the
// struct fields double as `--help` text, so they are left exactly as-is;
// reviewer notes below use plain `//` comments to avoid changing CLI output.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
pub struct CliArgs {
/// Sync or Exec
#[command(subcommand)]
pub command: Command,
/// The path to a creds file.
///
/// Note: If each of `url`, `key` and `secret` are passed,
/// no file is required at this path.
// Default is computed at parse time from $HOME (see `default_creds`).
#[arg(long, default_value_t = default_creds())]
pub creds: String,
/// Pass url in args instead of creds file
#[arg(long)]
pub url: Option<String>,
/// Pass api key in args instead of creds file
#[arg(long)]
pub key: Option<String>,
/// Pass api secret in args instead of creds file
#[arg(long)]
pub secret: Option<String>,
/// Always continue on user confirmation prompts.
// `short` gives `-y` in addition to `--yes`.
#[arg(long, short, default_value_t = false)]
pub yes: bool,
}
/// Default credentials file path: `$HOME/.config/monitor/creds.toml`.
///
/// # Panics
/// Panics when the `HOME` env var is unset, since no default config
/// path can be derived without it (the CLI can still run via `--creds`
/// or `--url`/`--key`/`--secret`).
fn default_creds() -> String {
  let home = std::env::var("HOME")
    .expect("no HOME env var. cannot get default config path.");
  // Plain string join: the result is shown verbatim in clap help output.
  [&home, "/.config/monitor/creds.toml"].concat()
}
// CLI subcommands. NOTE: the `///` doc comments feed clap's generated help
// text, so they are preserved byte-for-byte; added notes use `//` comments.
#[derive(Debug, Clone, Subcommand)]
pub enum Command {
/// Runs syncs on resource files
Sync {
/// The path of the resource folder / file
/// Folder paths will recursively incorporate all the resources it finds under the folder
#[arg(default_value_t = String::from("./resources"))]
path: String,
/// Will delete any resources that aren't included in the resource files.
#[arg(long, default_value_t = false)]
delete: bool,
},
/// Runs an execution
Execute {
// Delegates the full execution variant set to monitor_client's Execution.
#[command(subcommand)]
execution: Execution,
},
}
/// Shape of the toml creds file read from the `--creds` path.
/// Any field may be overridden by the corresponding CLI arg.
#[derive(Debug, Deserialize)]
pub struct CredsFile {
  /// Base url of the monitor core server.
  pub url: String,
  /// Api key.
  pub key: String,
  /// Api secret.
  pub secret: String,
}

130
bin/cli/src/exec.rs Normal file
View File

@@ -0,0 +1,130 @@
use std::time::Duration;
use colored::Colorize;
use monitor_client::api::execute::Execution;
use crate::{
helpers::wait_for_enter,
state::{cli_args, monitor_client},
};
/// Runs a single [Execution] against the monitor core api.
///
/// Flow:
/// 1. `Execution::None` is handled up front: logs, sleeps 3s, and
///    exits the process with code 0.
/// 2. The execution's data is printed for review; unless `--yes` was
///    passed, the user must press ENTER to continue.
/// 3. The request is dispatched via the monitor client and the
///    resulting update (or error) is printed.
///
/// Note: `Sleep` is handled locally (no api call) and also exits the
/// process when finished, so it never reaches the result printing.
pub async fn run(execution: Execution) -> anyhow::Result<()> {
  if matches!(execution, Execution::None(_)) {
    println!("Got 'none' execution. Doing nothing...");
    tokio::time::sleep(Duration::from_secs(3)).await;
    println!("Finished doing nothing. Exiting...");
    std::process::exit(0);
  }
  println!("\n{}: Execution", "Mode".dimmed());
  // Print the target execution data so the user can review exactly
  // what will run before confirming.
  match &execution {
    Execution::None(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::RunProcedure(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::RunBuild(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::Deploy(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::StartContainer(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::StopContainer(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::StopAllContainers(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::RemoveContainer(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::CloneRepo(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::PullRepo(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::PruneNetworks(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::PruneImages(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::PruneContainers(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::RunSync(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
    Execution::Sleep(data) => {
      println!("{}: {data:?}", "Data".dimmed())
    }
  }
  // Interactive confirmation gate, skipped with `--yes`.
  if !cli_args().yes {
    wait_for_enter("run execution")?;
  }
  info!("Running Execution...");
  // Each variant must be unpacked individually so the concrete
  // request type can be passed to the typed `execute` call.
  let res = match execution {
    Execution::RunProcedure(request) => {
      monitor_client().execute(request).await
    }
    Execution::RunBuild(request) => {
      monitor_client().execute(request).await
    }
    Execution::Deploy(request) => {
      monitor_client().execute(request).await
    }
    Execution::StartContainer(request) => {
      monitor_client().execute(request).await
    }
    Execution::StopContainer(request) => {
      monitor_client().execute(request).await
    }
    Execution::StopAllContainers(request) => {
      monitor_client().execute(request).await
    }
    Execution::RemoveContainer(request) => {
      monitor_client().execute(request).await
    }
    Execution::CloneRepo(request) => {
      monitor_client().execute(request).await
    }
    Execution::PullRepo(request) => {
      monitor_client().execute(request).await
    }
    Execution::PruneNetworks(request) => {
      monitor_client().execute(request).await
    }
    Execution::PruneImages(request) => {
      monitor_client().execute(request).await
    }
    Execution::PruneContainers(request) => {
      monitor_client().execute(request).await
    }
    Execution::RunSync(request) => {
      monitor_client().execute(request).await
    }
    Execution::Sleep(request) => {
      let duration =
        Duration::from_millis(request.duration_ms as u64);
      tokio::time::sleep(duration).await;
      println!("Finished sleeping!");
      // Exits directly — never falls through to result printing.
      std::process::exit(0)
    }
    // Guarded by the matches! check at the top of the function.
    Execution::None(_) => unreachable!(),
  };
  match res {
    Ok(update) => println!("\n{}: {update:#?}", "SUCCESS".green()),
    Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
  }
  Ok(())
}

17
bin/cli/src/helpers.rs Normal file
View File

@@ -0,0 +1,17 @@
use std::io::Read;
use anyhow::Context;
use colored::Colorize;
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
println!(
"\nPress {} to {}\n",
"ENTER".green(),
press_enter_to.bold()
);
let buffer = &mut [0u8];
std::io::stdin()
.read_exact(buffer)
.context("failed to read ENTER")?;
Ok(())
}

32
bin/cli/src/main.rs Normal file
View File

@@ -0,0 +1,32 @@
#[macro_use]
extern crate tracing;
use colored::Colorize;
use monitor_client::api::read::GetVersion;
mod args;
mod exec;
mod helpers;
mod maps;
mod state;
mod sync;
/// CLI entrypoint: initializes logging, checks connectivity by
/// fetching the core version, then dispatches to the chosen
/// subcommand (`sync` or `execute`).
#[tokio::main]
async fn main() -> anyhow::Result<()> {
  // Hide the tracing target for cleaner CLI log lines.
  tracing_subscriber::fmt().with_target(false).init();
  // Doubles as an early connectivity / credentials check before any
  // work is attempted.
  let version =
    state::monitor_client().read(GetVersion {}).await?.version;
  info!("monitor version: {}", version.to_string().blue().bold());
  match &state::cli_args().command {
    args::Command::Sync { path, delete } => {
      sync::run(path, *delete).await?
    }
    args::Command::Execute { execution } => {
      exec::run(execution.to_owned()).await?
    }
  }
  Ok(())
}

328
bin/cli/src/maps.rs Normal file
View File

@@ -0,0 +1,328 @@
use std::{collections::HashMap, sync::OnceLock};
use monitor_client::{
api::read,
entities::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
sync::ResourceSync, tag::Tag, user::User, user_group::UserGroup,
variable::Variable,
},
};
use crate::state::monitor_client;
/// Map of build name -> full build, fetched once and cached for the
/// lifetime of the process.
///
/// NOTE(review): uses `futures::executor::block_on` — confirm this is
/// never first-called from inside an async task on the same runtime,
/// where blocking could stall the executor.
pub fn name_to_build() -> &'static HashMap<String, Build> {
  static MAP: OnceLock<HashMap<String, Build>> = OnceLock::new();
  MAP.get_or_init(|| {
    let builds = futures::executor::block_on(
      monitor_client().read(read::ListFullBuilds::default()),
    )
    .expect("failed to get builds from monitor");
    builds.into_iter().map(|b| (b.name.clone(), b)).collect()
  })
}

/// Map of build id -> full build, fetched once and cached for the
/// lifetime of the process.
pub fn id_to_build() -> &'static HashMap<String, Build> {
  static MAP: OnceLock<HashMap<String, Build>> = OnceLock::new();
  MAP.get_or_init(|| {
    let builds = futures::executor::block_on(
      monitor_client().read(read::ListFullBuilds::default()),
    )
    .expect("failed to get builds from monitor");
    builds.into_iter().map(|b| (b.id.clone(), b)).collect()
  })
}
/// Map of deployment name -> full deployment, fetched once and cached
/// for the lifetime of the process.
pub fn name_to_deployment() -> &'static HashMap<String, Deployment> {
  static MAP: OnceLock<HashMap<String, Deployment>> = OnceLock::new();
  MAP.get_or_init(|| {
    let deployments = futures::executor::block_on(
      monitor_client().read(read::ListFullDeployments::default()),
    )
    .expect("failed to get deployments from monitor");
    deployments.into_iter().map(|d| (d.name.clone(), d)).collect()
  })
}

/// Map of deployment id -> full deployment, fetched once and cached
/// for the lifetime of the process.
pub fn id_to_deployment() -> &'static HashMap<String, Deployment> {
  static MAP: OnceLock<HashMap<String, Deployment>> = OnceLock::new();
  MAP.get_or_init(|| {
    let deployments = futures::executor::block_on(
      monitor_client().read(read::ListFullDeployments::default()),
    )
    .expect("failed to get deployments from monitor");
    deployments.into_iter().map(|d| (d.id.clone(), d)).collect()
  })
}
/// Map of server name -> full server, fetched once and cached for the
/// lifetime of the process.
pub fn name_to_server() -> &'static HashMap<String, Server> {
  static MAP: OnceLock<HashMap<String, Server>> = OnceLock::new();
  MAP.get_or_init(|| {
    let servers = futures::executor::block_on(
      monitor_client().read(read::ListFullServers::default()),
    )
    .expect("failed to get servers from monitor");
    servers.into_iter().map(|s| (s.name.clone(), s)).collect()
  })
}

/// Map of server id -> full server, fetched once and cached for the
/// lifetime of the process.
pub fn id_to_server() -> &'static HashMap<String, Server> {
  static MAP: OnceLock<HashMap<String, Server>> = OnceLock::new();
  MAP.get_or_init(|| {
    let servers = futures::executor::block_on(
      monitor_client().read(read::ListFullServers::default()),
    )
    .expect("failed to get servers from monitor");
    servers.into_iter().map(|s| (s.id.clone(), s)).collect()
  })
}
/// Map of builder name -> full builder, fetched once and cached for
/// the lifetime of the process.
pub fn name_to_builder() -> &'static HashMap<String, Builder> {
  static MAP: OnceLock<HashMap<String, Builder>> = OnceLock::new();
  MAP.get_or_init(|| {
    let builders = futures::executor::block_on(
      monitor_client().read(read::ListFullBuilders::default()),
    )
    .expect("failed to get builders from monitor");
    builders.into_iter().map(|b| (b.name.clone(), b)).collect()
  })
}

/// Map of builder id -> full builder, fetched once and cached for the
/// lifetime of the process.
pub fn id_to_builder() -> &'static HashMap<String, Builder> {
  static MAP: OnceLock<HashMap<String, Builder>> = OnceLock::new();
  MAP.get_or_init(|| {
    let builders = futures::executor::block_on(
      monitor_client().read(read::ListFullBuilders::default()),
    )
    .expect("failed to get builders from monitor");
    builders.into_iter().map(|b| (b.id.clone(), b)).collect()
  })
}
/// Map of alerter name -> full alerter, fetched once and cached for
/// the lifetime of the process.
pub fn name_to_alerter() -> &'static HashMap<String, Alerter> {
  static MAP: OnceLock<HashMap<String, Alerter>> = OnceLock::new();
  MAP.get_or_init(|| {
    let alerters = futures::executor::block_on(
      monitor_client().read(read::ListFullAlerters::default()),
    )
    .expect("failed to get alerters from monitor");
    alerters.into_iter().map(|a| (a.name.clone(), a)).collect()
  })
}

/// Map of alerter id -> full alerter, fetched once and cached for the
/// lifetime of the process.
pub fn id_to_alerter() -> &'static HashMap<String, Alerter> {
  static MAP: OnceLock<HashMap<String, Alerter>> = OnceLock::new();
  MAP.get_or_init(|| {
    let alerters = futures::executor::block_on(
      monitor_client().read(read::ListFullAlerters::default()),
    )
    .expect("failed to get alerters from monitor");
    alerters.into_iter().map(|a| (a.id.clone(), a)).collect()
  })
}
/// Map of repo name -> full repo, fetched once and cached for the
/// lifetime of the process.
pub fn name_to_repo() -> &'static HashMap<String, Repo> {
  // Fix: the static was misnamed NAME_TO_ALERTER (copy-paste from the
  // alerter maps). Function-local, so purely a naming defect.
  static NAME_TO_REPO: OnceLock<HashMap<String, Repo>> =
    OnceLock::new();
  NAME_TO_REPO.get_or_init(|| {
    futures::executor::block_on(
      monitor_client().read(read::ListFullRepos::default()),
    )
    .expect("failed to get repos from monitor")
    .into_iter()
    .map(|repo| (repo.name.clone(), repo))
    .collect()
  })
}

/// Map of repo id -> full repo, fetched once and cached for the
/// lifetime of the process.
pub fn id_to_repo() -> &'static HashMap<String, Repo> {
  // Fix: was misnamed ID_TO_ALERTER.
  static ID_TO_REPO: OnceLock<HashMap<String, Repo>> =
    OnceLock::new();
  ID_TO_REPO.get_or_init(|| {
    futures::executor::block_on(
      monitor_client().read(read::ListFullRepos::default()),
    )
    .expect("failed to get repos from monitor")
    .into_iter()
    .map(|repo| (repo.id.clone(), repo))
    .collect()
  })
}
/// Map of procedure name -> full procedure, fetched once and cached
/// for the lifetime of the process.
pub fn name_to_procedure() -> &'static HashMap<String, Procedure> {
  static MAP: OnceLock<HashMap<String, Procedure>> = OnceLock::new();
  MAP.get_or_init(|| {
    let procedures = futures::executor::block_on(
      monitor_client().read(read::ListFullProcedures::default()),
    )
    .expect("failed to get procedures from monitor");
    procedures.into_iter().map(|p| (p.name.clone(), p)).collect()
  })
}

/// Map of procedure id -> full procedure, fetched once and cached for
/// the lifetime of the process.
pub fn id_to_procedure() -> &'static HashMap<String, Procedure> {
  static MAP: OnceLock<HashMap<String, Procedure>> = OnceLock::new();
  MAP.get_or_init(|| {
    let procedures = futures::executor::block_on(
      monitor_client().read(read::ListFullProcedures::default()),
    )
    .expect("failed to get procedures from monitor");
    procedures.into_iter().map(|p| (p.id.clone(), p)).collect()
  })
}
/// Map of server template name -> full template, fetched once and
/// cached for the lifetime of the process.
pub fn name_to_server_template(
) -> &'static HashMap<String, ServerTemplate> {
  static MAP: OnceLock<HashMap<String, ServerTemplate>> =
    OnceLock::new();
  MAP.get_or_init(|| {
    let templates = futures::executor::block_on(
      monitor_client().read(read::ListFullServerTemplates::default()),
    )
    .expect("failed to get server templates from monitor");
    // renamed the closure binding: these are templates, not procedures
    templates
      .into_iter()
      .map(|template| (template.name.clone(), template))
      .collect()
  })
}

/// Map of server template id -> full template, fetched once and
/// cached for the lifetime of the process.
pub fn id_to_server_template(
) -> &'static HashMap<String, ServerTemplate> {
  static MAP: OnceLock<HashMap<String, ServerTemplate>> =
    OnceLock::new();
  MAP.get_or_init(|| {
    let templates = futures::executor::block_on(
      monitor_client().read(read::ListFullServerTemplates::default()),
    )
    .expect("failed to get server templates from monitor");
    templates
      .into_iter()
      .map(|template| (template.id.clone(), template))
      .collect()
  })
}
/// Map of resource sync name -> full sync, fetched once and cached
/// for the lifetime of the process.
pub fn name_to_resource_sync(
) -> &'static HashMap<String, ResourceSync> {
  static MAP: OnceLock<HashMap<String, ResourceSync>> =
    OnceLock::new();
  MAP.get_or_init(|| {
    let syncs = futures::executor::block_on(
      monitor_client().read(read::ListFullResourceSyncs::default()),
    )
    .expect("failed to get syncs from monitor");
    syncs.into_iter().map(|s| (s.name.clone(), s)).collect()
  })
}

/// Map of resource sync id -> full sync, fetched once and cached for
/// the lifetime of the process.
pub fn id_to_resource_sync() -> &'static HashMap<String, ResourceSync>
{
  static MAP: OnceLock<HashMap<String, ResourceSync>> =
    OnceLock::new();
  MAP.get_or_init(|| {
    let syncs = futures::executor::block_on(
      monitor_client().read(read::ListFullResourceSyncs::default()),
    )
    .expect("failed to get syncs from monitor");
    syncs.into_iter().map(|s| (s.id.clone(), s)).collect()
  })
}
/// Map of user group name -> user group, fetched once and cached for
/// the lifetime of the process.
pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
  static MAP: OnceLock<HashMap<String, UserGroup>> = OnceLock::new();
  MAP.get_or_init(|| {
    let groups = futures::executor::block_on(
      monitor_client().read(read::ListUserGroups::default()),
    )
    .expect("failed to get user groups from monitor");
    groups.into_iter().map(|g| (g.name.clone(), g)).collect()
  })
}
/// Map of variable name -> variable, fetched once and cached for the
/// lifetime of the process.
pub fn name_to_variable() -> &'static HashMap<String, Variable> {
  static NAME_TO_VARIABLE: OnceLock<HashMap<String, Variable>> =
    OnceLock::new();
  NAME_TO_VARIABLE.get_or_init(|| {
    futures::executor::block_on(
      monitor_client().read(read::ListVariables::default()),
    )
    // Fix: message previously said "user groups" (copy-paste from
    // name_to_user_group), but this call fetches variables.
    .expect("failed to get variables from monitor")
    .variables
    .into_iter()
    .map(|variable| (variable.name.clone(), variable))
    .collect()
  })
}
/// Map of user id -> user, fetched once and cached for the lifetime
/// of the process.
pub fn id_to_user() -> &'static HashMap<String, User> {
  static MAP: OnceLock<HashMap<String, User>> = OnceLock::new();
  MAP.get_or_init(|| {
    let users = futures::executor::block_on(
      monitor_client().read(read::ListUsers::default()),
    )
    .expect("failed to get users from monitor");
    users.into_iter().map(|u| (u.id.clone(), u)).collect()
  })
}

/// Map of tag id -> tag, fetched once and cached for the lifetime of
/// the process.
pub fn id_to_tag() -> &'static HashMap<String, Tag> {
  static MAP: OnceLock<HashMap<String, Tag>> = OnceLock::new();
  MAP.get_or_init(|| {
    let tags = futures::executor::block_on(
      monitor_client().read(read::ListTags::default()),
    )
    .expect("failed to get tags from monitor");
    tags.into_iter().map(|t| (t.id.clone(), t)).collect()
  })
}

46
bin/cli/src/state.rs Normal file
View File

@@ -0,0 +1,46 @@
use std::sync::OnceLock;
use clap::Parser;
use merge_config_files::parse_config_file;
use monitor_client::MonitorClient;
/// Parsed CLI arguments, lazily initialized and cached for the
/// lifetime of the process.
pub fn cli_args() -> &'static crate::args::CliArgs {
  static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
  CLI_ARGS.get_or_init(|| crate::args::CliArgs::parse())
}
/// Lazily initialized monitor client, cached for the lifetime of the
/// process.
///
/// Credentials resolution: when `--url`, `--key` and `--secret` are
/// all passed, the creds file is not read at all. Otherwise the file
/// at `--creds` is parsed, and any individually passed args override
/// the corresponding file values.
pub fn monitor_client() -> &'static MonitorClient {
  static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
  MONITOR_CLIENT.get_or_init(|| {
    let args = cli_args();
    let creds = match (&args.url, &args.key, &args.secret) {
      // All three given on the command line: no file required.
      (Some(url), Some(key), Some(secret)) => {
        crate::args::CredsFile {
          url: url.clone(),
          key: key.clone(),
          secret: secret.clone(),
        }
      }
      // Load the file, then apply per-field CLI overrides.
      (url, key, secret) => {
        let mut creds: crate::args::CredsFile =
          parse_config_file(args.creds.as_str())
            .expect("failed to parse monitor credentials");
        if let Some(url) = url {
          creds.url.clone_from(url);
        }
        if let Some(key) = key {
          creds.key.clone_from(key);
        }
        if let Some(secret) = secret {
          creds.secret.clone_from(secret);
        }
        creds
      }
    };
    futures::executor::block_on(MonitorClient::new(
      creds.url,
      creds.key,
      creds.secret,
    ))
    .expect("failed to initialize monitor client")
  })
}

80
bin/cli/src/sync/file.rs Normal file
View File

@@ -0,0 +1,80 @@
use std::{
fs,
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::{anyhow, Context};
use colored::Colorize;
use monitor_client::entities::toml::ResourcesToml;
use serde::de::DeserializeOwned;
pub fn read_resources(path: &str) -> anyhow::Result<ResourcesToml> {
let mut res = ResourcesToml::default();
let path =
PathBuf::from_str(path).context("invalid resources path")?;
read_resources_recursive(&path, &mut res)?;
Ok(res)
}
/// Recursively collects resources from `path` into `resources`.
///
/// - A file is only considered when it has a `.toml` extension; parse
///   failures are logged and skipped so one bad file doesn't abort
///   the whole sync.
/// - A directory is traversed recursively; failures on individual
///   entries are logged and skipped.
/// - Errors only for paths that can't be stat'd or read as a
///   directory, or are neither file nor directory.
fn read_resources_recursive(
  path: &Path,
  resources: &mut ResourcesToml,
) -> anyhow::Result<()> {
  let res =
    fs::metadata(path).context("failed to get path metadata")?;
  if res.is_file() {
    // Silently ignore non-toml files.
    if !path
      .extension()
      .map(|ext| ext == "toml")
      .unwrap_or_default()
    {
      return Ok(());
    }
    let more = match parse_toml_file::<ResourcesToml>(path) {
      Ok(res) => res,
      Err(e) => {
        warn!("failed to parse {:?}. skipping file | {e:#}", path);
        return Ok(());
      }
    };
    info!(
      "{} from {}",
      "adding resources".green().bold(),
      path.display().to_string().blue().bold()
    );
    resources.servers.extend(more.servers);
    resources.deployments.extend(more.deployments);
    resources.builds.extend(more.builds);
    resources.repos.extend(more.repos);
    resources.procedures.extend(more.procedures);
    resources.builders.extend(more.builders);
    resources.alerters.extend(more.alerters);
    resources.server_templates.extend(more.server_templates);
    resources.resource_syncs.extend(more.resource_syncs);
    resources.user_groups.extend(more.user_groups);
    resources.variables.extend(more.variables);
    Ok(())
  } else if res.is_dir() {
    let directory = fs::read_dir(path)
      .context("failed to read directory contents")?;
    for entry in directory.into_iter().flatten() {
      if let Err(e) =
        read_resources_recursive(&entry.path(), resources)
      {
        // Fix: the message referred to "path" but never printed it,
        // making the warning impossible to act on.
        warn!(
          "failed to read additional resources at path {:?} | {e:#}",
          entry.path()
        );
      }
    }
    Ok(())
  } else {
    Err(anyhow!("resources path is neither file nor directory"))
  }
}
/// Reads the file at `path` and deserializes its toml contents into
/// any `T: DeserializeOwned`.
fn parse_toml_file<T: DeserializeOwned>(
  path: impl AsRef<std::path::Path>,
) -> anyhow::Result<T> {
  let contents = std::fs::read_to_string(path)
    .context("failed to read file contents")?;
  let parsed = toml::from_str::<T>(&contents)
    .context("failed to parse toml contents")?;
  Ok(parsed)
}

174
bin/cli/src/sync/mod.rs Normal file
View File

@@ -0,0 +1,174 @@
use colored::Colorize;
use monitor_client::entities::{
self, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
};
use crate::{helpers::wait_for_enter, state::cli_args};
mod file;
mod resource;
mod resources;
mod user_group;
mod variables;
use resource::ResourceSync;
/// Runs a full sync of the resource files at `path` against the
/// monitor core.
///
/// Steps:
/// 1. Read / merge all resource tomls under `path`.
/// 2. Compute create / update / delete sets per resource type
///    (deletes only when `delete` is true).
/// 3. If anything needs doing, confirm with the user (unless
///    `--yes`), then apply the updates in dependency order so
///    referenced resources exist before their dependents.
pub async fn run(path: &str, delete: bool) -> anyhow::Result<()> {
  info!("resources path: {}", path.blue().bold());
  if delete {
    warn!("Delete mode {}", "enabled".bold());
  }
  let resources = file::read_resources(path)?;
  info!("computing sync actions...");
  // Diff every resource type against the current core state.
  let (server_creates, server_updates, server_deletes) =
    resource::get_updates::<Server>(resources.servers, delete)?;
  let (deployment_creates, deployment_updates, deployment_deletes) =
    resource::get_updates::<Deployment>(
      resources.deployments,
      delete,
    )?;
  let (build_creates, build_updates, build_deletes) =
    resource::get_updates::<Build>(resources.builds, delete)?;
  let (repo_creates, repo_updates, repo_deletes) =
    resource::get_updates::<Repo>(resources.repos, delete)?;
  let (procedure_creates, procedure_updates, procedure_deletes) =
    resource::get_updates::<Procedure>(resources.procedures, delete)?;
  let (builder_creates, builder_updates, builder_deletes) =
    resource::get_updates::<Builder>(resources.builders, delete)?;
  let (alerter_creates, alerter_updates, alerter_deletes) =
    resource::get_updates::<Alerter>(resources.alerters, delete)?;
  let (
    server_template_creates,
    server_template_updates,
    server_template_deletes,
  ) = resource::get_updates::<ServerTemplate>(
    resources.server_templates,
    delete,
  )?;
  let (
    resource_sync_creates,
    resource_sync_updates,
    resource_sync_deletes,
  ) = resource::get_updates::<entities::sync::ResourceSync>(
    resources.resource_syncs,
    delete,
  )?;
  let (variable_creates, variable_updates, variable_deletes) =
    variables::get_updates(resources.variables, delete)?;
  let (user_group_creates, user_group_updates, user_group_deletes) =
    user_group::get_updates(resources.user_groups, delete).await?;
  // Bail early when every change set is empty.
  if resource_sync_creates.is_empty()
    && resource_sync_updates.is_empty()
    && resource_sync_deletes.is_empty()
    && server_template_creates.is_empty()
    && server_template_updates.is_empty()
    && server_template_deletes.is_empty()
    && server_creates.is_empty()
    && server_updates.is_empty()
    && server_deletes.is_empty()
    && deployment_creates.is_empty()
    && deployment_updates.is_empty()
    && deployment_deletes.is_empty()
    && build_creates.is_empty()
    && build_updates.is_empty()
    && build_deletes.is_empty()
    && builder_creates.is_empty()
    && builder_updates.is_empty()
    && builder_deletes.is_empty()
    && alerter_creates.is_empty()
    && alerter_updates.is_empty()
    && alerter_deletes.is_empty()
    && repo_creates.is_empty()
    && repo_updates.is_empty()
    && repo_deletes.is_empty()
    && procedure_creates.is_empty()
    && procedure_updates.is_empty()
    && procedure_deletes.is_empty()
    && user_group_creates.is_empty()
    && user_group_updates.is_empty()
    && user_group_deletes.is_empty()
    && variable_creates.is_empty()
    && variable_updates.is_empty()
    && variable_deletes.is_empty()
  {
    info!("{}. exiting.", "nothing to do".green().bold());
    return Ok(());
  }
  // Interactive confirmation gate, skipped with `--yes`.
  if !cli_args().yes {
    wait_for_enter("run sync")?;
  }
  // No dependencies on other resources
  entities::sync::ResourceSync::run_updates(
    resource_sync_creates,
    resource_sync_updates,
    resource_sync_deletes,
  )
  .await;
  ServerTemplate::run_updates(
    server_template_creates,
    server_template_updates,
    server_template_deletes,
  )
  .await;
  Server::run_updates(server_creates, server_updates, server_deletes)
    .await;
  Alerter::run_updates(
    alerter_creates,
    alerter_updates,
    alerter_deletes,
  )
  .await;
  // Dependent on server
  Builder::run_updates(
    builder_creates,
    builder_updates,
    builder_deletes,
  )
  .await;
  Repo::run_updates(repo_creates, repo_updates, repo_deletes).await;
  // Dependent on builder
  Build::run_updates(build_creates, build_updates, build_deletes)
    .await;
  // Dependent on server / build
  Deployment::run_updates(
    deployment_creates,
    deployment_updates,
    deployment_deletes,
  )
  .await;
  // Dependent on everything
  Procedure::run_updates(
    procedure_creates,
    procedure_updates,
    procedure_deletes,
  )
  .await;
  variables::run_updates(
    variable_creates,
    variable_updates,
    variable_deletes,
  )
  .await;
  user_group::run_updates(
    user_group_creates,
    user_group_updates,
    user_group_deletes,
  )
  .await;
  Ok(())
}

View File

@@ -0,0 +1,358 @@
use std::collections::HashMap;
use colored::Colorize;
use monitor_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
resource::Resource, toml::ResourceToml, update::ResourceTarget,
},
};
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use serde::Serialize;
use crate::maps::id_to_tag;
/// Resources needing an update, paired with what exactly changed.
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
/// Resources that don't exist yet and must be created.
pub type ToCreate<T> = Vec<ResourceToml<T>>;
/// Vec of resource names
pub type ToDelete = Vec<String>;
/// (creates, updates, deletes) as computed by [get_updates].
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>, ToDelete);
/// One pending update: the target resource id, the (diff-minimized)
/// toml definition, and flags for which metadata also changed.
pub struct ToUpdateItem<T: Default> {
  // Id of the existing resource on the monitor core.
  pub id: String,
  // Declared toml state; config holds only changed fields after diffing.
  pub resource: ResourceToml<T>,
  // True when the description differs from the existing resource.
  pub update_description: bool,
  // True when the tag set differs from the existing resource.
  pub update_tags: bool,
}
/// Per-resource-type sync behavior: how to create / update / delete a
/// resource, and how to diff a declared toml config against the
/// existing config. `run_updates` drives the three phases using these
/// hooks.
pub trait ResourceSync: Sized {
  /// The full (non-partial) config type for this resource.
  type Config: Clone
    + Default
    + Send
    + From<Self::PartialConfig>
    + PartialDiff<Self::PartialConfig, Self::ConfigDiff>
    + 'static;
  /// Server-managed info attached to the resource (not synced).
  type Info: Default + 'static;
  /// Partial config as declared in toml; unset fields come through as None.
  type PartialConfig: std::fmt::Debug
    + Clone
    + Send
    + Default
    + From<Self::Config>
    + From<Self::ConfigDiff>
    + Serialize
    + MaybeNone
    + 'static;
  /// Field-level diff between existing and declared config.
  type ConfigDiff: Diff + MaybeNone;
  /// Lowercase human name used in log lines, e.g. "build".
  fn display() -> &'static str;
  /// Wraps the id into the matching [ResourceTarget] variant.
  fn resource_target(id: String) -> ResourceTarget;
  /// Cached name -> existing resource map for this type.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>;
  /// Creates the resource and returns created id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String>;
  /// Updates the resource at id with the partial config.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()>;
  /// Apply any changes to incoming toml partial config
  /// before it is diffed against existing config
  fn validate_partial_config(_config: &mut Self::PartialConfig) {}
  /// Diffs the declared toml (partial) against the full existing config.
  /// Removes all fields from toml (partial) that haven't changed.
  fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff>;
  /// Apply any changes to computed config diff
  /// before logging
  fn validate_diff(_diff: &mut Self::ConfigDiff) {}
  /// Deletes the target resource
  async fn delete(id_or_name: String) -> anyhow::Result<()>;
  /// Applies creates, then updates, then deletes. Failures on
  /// individual resources are logged and skipped so the rest of the
  /// sync proceeds.
  async fn run_updates(
    to_create: ToCreate<Self::PartialConfig>,
    to_update: ToUpdate<Self::PartialConfig>,
    to_delete: ToDelete,
  ) {
    for resource in to_create {
      // Cloned up front: `resource` is moved into `create`.
      let name = resource.name.clone();
      let tags = resource.tags.clone();
      let description = resource.description.clone();
      let id = match Self::create(resource).await {
        Ok(id) => id,
        Err(e) => {
          warn!(
            "failed to create {} {name} | {e:#}",
            Self::display(),
          );
          continue;
        }
      };
      // Tags / description are set via separate api calls after create.
      run_update_tags::<Self>(id.clone(), &name, tags).await;
      run_update_description::<Self>(id, &name, description).await;
      info!(
        "{} {} '{}'",
        "created".green().bold(),
        Self::display(),
        name.bold(),
      );
    }
    for ToUpdateItem {
      id,
      resource,
      update_description,
      update_tags,
    } in to_update
    {
      // Update resource
      let name = resource.name.clone();
      let tags = resource.tags.clone();
      let description = resource.description.clone();
      if update_description {
        run_update_description::<Self>(
          id.clone(),
          &name,
          description,
        )
        .await;
      }
      if update_tags {
        run_update_tags::<Self>(id.clone(), &name, tags).await;
      }
      // Skip the config api call when the diff left nothing to send.
      if !resource.config.is_none() {
        if let Err(e) = Self::update(id, resource).await {
          warn!(
            "failed to update config on {} {name} | {e:#}",
            Self::display()
          );
        } else {
          info!(
            "{} {} '{}' configuration",
            "updated".blue().bold(),
            Self::display(),
            name.bold(),
          );
        }
      }
    }
    for resource in to_delete {
      // `resource` here is a name (see ToDelete) — delete takes id or name.
      if let Err(e) = Self::delete(resource.clone()).await {
        warn!(
          "failed to delete {} {resource} | {e:#}",
          Self::display()
        );
      } else {
        info!(
          "{} {} '{}'",
          "deleted".red().bold(),
          Self::display(),
          resource.bold(),
        );
      }
    }
  }
}
/// Gets all the resources to update, logging along the way.
///
/// Splits the declared toml `resources` into (creates, updates,
/// deletes) against the resource map currently on the core:
/// - names not on the core -> create
/// - names on the core with a config / tag / description change -> update
/// - (when `delete`) names on the core but absent from toml -> delete
///
/// Each planned action is also pretty-printed for the user to review.
pub fn get_updates<Resource: ResourceSync>(
  resources: Vec<ResourceToml<Resource::PartialConfig>>,
  delete: bool,
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
  let map = Resource::name_to_resource();
  let mut to_create = ToCreate::<Resource::PartialConfig>::new();
  let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
  let mut to_delete = ToDelete::new();
  if delete {
    // Anything on the core that isn't declared in the tomls gets deleted.
    for resource in map.values() {
      if !resources.iter().any(|r| r.name == resource.name) {
        to_delete.push(resource.name.clone());
      }
    }
  }
  for mut resource in resources {
    match map.get(&resource.name) {
      Some(original) => {
        // First merge toml resource config (partial) onto default resource config.
        // Makes sure things that aren't defined in toml (come through as None) actually get removed.
        let config: Resource::Config = resource.config.into();
        resource.config = config.into();
        Resource::validate_partial_config(&mut resource.config);
        let mut diff = Resource::get_diff(
          original.config.clone(),
          resource.config,
        )?;
        Resource::validate_diff(&mut diff);
        // The core stores tag ids; toml declares tag names — convert
        // before comparing.
        let original_tags = original
          .tags
          .iter()
          .filter_map(|id| {
            id_to_tag().get(id).map(|t| t.name.clone())
          })
          .collect::<Vec<_>>();
        // Only proceed if there are any fields to update,
        // or a change to tags / description
        if diff.is_none()
          && resource.description == original.description
          && resource.tags == original_tags
        {
          continue;
        }
        println!(
          "\n{}: {}: '{}'\n-------------------",
          "UPDATE".blue(),
          Resource::display(),
          resource.name.bold(),
        );
        // Human-readable field-by-field change report.
        let mut lines = Vec::<String>::new();
        if resource.description != original.description {
          lines.push(format!(
            "{}: 'description'\n{}: {}\n{}: {}",
            "field".dimmed(),
            "from".dimmed(),
            original.description.red(),
            "to".dimmed(),
            resource.description.green()
          ))
        }
        if resource.tags != original_tags {
          let from = format!("{:?}", original_tags).red();
          let to = format!("{:?}", resource.tags).green();
          lines.push(format!(
            "{}: 'tags'\n{}: {from}\n{}: {to}",
            "field".dimmed(),
            "from".dimmed(),
            "to".dimmed(),
          ));
        }
        lines.extend(diff.iter_field_diffs().map(
          |FieldDiff { field, from, to }| {
            format!(
              "{}: '{field}'\n{}: {}\n{}: {}",
              "field".dimmed(),
              "from".dimmed(),
              from.red(),
              "to".dimmed(),
              to.green()
            )
          },
        ));
        println!("{}", lines.join("\n-------------------\n"));
        // Minimizes updates through diffing.
        resource.config = diff.into();
        let update = ToUpdateItem {
          id: original.id.clone(),
          update_description: resource.description
            != original.description,
          update_tags: resource.tags != original_tags,
          resource,
        };
        to_update.push(update);
      }
      None => {
        println!(
          "\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
          "CREATE".green(),
          Resource::display(),
          resource.name.bold().green(),
          "description".dimmed(),
          resource.description,
          "tags".dimmed(),
          resource.tags,
          "config".dimmed(),
          serde_json::to_string_pretty(&resource.config)?
        );
        to_create.push(resource);
      }
    }
  }
  for name in &to_delete {
    println!(
      "\n{}: {}: '{}'\n-------------------",
      "DELETE".red(),
      Resource::display(),
      name.bold(),
    );
  }
  Ok((to_create, to_update, to_delete))
}
/// Sets the tags on the target resource, logging success or failure
/// (never propagates the error — callers treat this as best-effort).
pub async fn run_update_tags<Resource: ResourceSync>(
  id: String,
  name: &str,
  tags: Vec<String>,
) {
  let request = UpdateTagsOnResource {
    target: Resource::resource_target(id),
    tags,
  };
  match crate::state::monitor_client().write(request).await {
    Err(e) => warn!(
      "failed to update tags on {} {name} | {e:#}",
      Resource::display(),
    ),
    Ok(_) => info!(
      "{} {} '{}' tags",
      "updated".blue().bold(),
      Resource::display(),
      name.bold(),
    ),
  }
}
/// Sets the description on the target resource, logging success or
/// failure (never propagates the error — best-effort like tags).
pub async fn run_update_description<Resource: ResourceSync>(
  id: String,
  name: &str,
  description: String,
) {
  let request = UpdateDescription {
    target: Resource::resource_target(id.clone()),
    description,
  };
  match crate::state::monitor_client().write(request).await {
    Err(e) => {
      warn!("failed to update resource {id} description | {e:#}")
    }
    Ok(_) => info!(
      "{} {} '{}' description",
      "updated".blue().bold(),
      Resource::display(),
      name.bold(),
    ),
  }
}

View File

@@ -0,0 +1,77 @@
use partial_derive2::PartialDiff;
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateAlerter, DeleteAlerter, UpdateAlerter},
entities::{
alerter::{
Alerter, AlerterConfig, AlerterConfigDiff, PartialAlerterConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use crate::{
maps::name_to_alerter, state::monitor_client,
sync::resource::ResourceSync,
};
/// Sync behavior for [Alerter] resources: thin wrappers around the
/// Create/Update/DeleteAlerter write api calls, plus a plain
/// partial_diff (no id -> name substitutions needed).
impl ResourceSync for Alerter {
  type Config = AlerterConfig;
  type Info = ();
  type PartialConfig = PartialAlerterConfig;
  type ConfigDiff = AlerterConfigDiff;
  fn display() -> &'static str {
    "alerter"
  }
  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Alerter(id)
  }
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_alerter()
  }
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateAlerter {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateAlerter {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }
  fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    Ok(original.partial_diff(update))
  }
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteAlerter { id }).await?;
    Ok(())
  }
}

View File

@@ -0,0 +1,93 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateBuild, DeleteBuild, UpdateBuild},
entities::{
build::{
Build, BuildConfig, BuildConfigDiff, BuildInfo,
PartialBuildConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_builder, name_to_build},
state::monitor_client,
sync::resource::ResourceSync,
};
/// Sync behavior for [Build] resources. Notable: the stored builder
/// id is translated to its name before diffing (toml declares builder
/// by name), and no-op version diffs are dropped.
impl ResourceSync for Build {
  type Config = BuildConfig;
  type Info = BuildInfo;
  type PartialConfig = PartialBuildConfig;
  type ConfigDiff = BuildConfigDiff;
  fn display() -> &'static str {
    "build"
  }
  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Build(id)
  }
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_build()
  }
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateBuild {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateBuild {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace the builder id with name
    // (an unknown id falls back to empty string, which will then
    // show up as a diff against the declared builder name)
    original.builder_id = id_to_builder()
      .get(&original.builder_id)
      .map(|b| b.name.clone())
      .unwrap_or_default();
    Ok(original.partial_diff(update))
  }
  fn validate_diff(diff: &mut Self::ConfigDiff) {
    // A version diff whose "to" side is None carries no declared
    // change — drop it so it isn't reported / sent as an update.
    if let Some((_, to)) = &diff.version {
      if to.is_none() {
        diff.version = None;
      }
    }
  }
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteBuild { id }).await?;
    Ok(())
  }
}

View File

@@ -0,0 +1,86 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateBuilder, DeleteBuilder, UpdateBuilder},
entities::{
builder::{
Builder, BuilderConfig, BuilderConfigDiff, PartialBuilderConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_server, name_to_builder},
state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Builder {
  type Config = BuilderConfig;
  type Info = ();
  type PartialConfig = PartialBuilderConfig;
  type ConfigDiff = BuilderConfigDiff;

  /// Resource kind name used in log output.
  fn display() -> &'static str {
    "builder"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Builder(id)
  }

  /// Existing builders on the target instance, keyed by name.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_builder()
  }

  /// Create the builder via the API and return the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    let created = monitor_client()
      .write(CreateBuilder {
        name: resource.name,
        config: resource.config,
      })
      .await?;
    Ok(created.id)
  }

  /// Apply the partial config to an existing builder.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    let request = UpdateBuilder {
      id,
      config: resource.config,
    };
    monitor_client().write(request).await.map(|_| ())
  }

  /// Diff the stored config against the toml partial. Server-type
  /// builders reference a server by id; swap in the server's name so
  /// the diff lines up with the toml form.
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    if let BuilderConfig::Server(config) = &mut original {
      let name = id_to_server()
        .get(&config.server_id)
        .map(|s| s.name.clone());
      config.server_id = name.unwrap_or_default();
    }
    Ok(original.partial_diff(update))
  }

  /// Delete the builder with the given id via the API.
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client()
      .write(DeleteBuilder { id })
      .await
      .map(|_| ())
  }
}

View File

@@ -0,0 +1,98 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{self, DeleteDeployment},
entities::{
deployment::{
Deployment, DeploymentConfig, DeploymentConfigDiff,
DeploymentImage, PartialDeploymentConfig,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_build, id_to_server, name_to_deployment},
state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Deployment {
  type Config = DeploymentConfig;
  type Info = ();
  type PartialConfig = PartialDeploymentConfig;
  type ConfigDiff = DeploymentConfigDiff;

  /// Resource kind name used in log output.
  fn display() -> &'static str {
    "deployment"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Deployment(id)
  }

  /// Existing deployments on the target instance, keyed by name.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_deployment()
  }

  /// Create the deployment via the API and return the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    let created = monitor_client()
      .write(write::CreateDeployment {
        name: resource.name,
        config: resource.config,
      })
      .await?;
    Ok(created.id)
  }

  /// Apply the partial config to an existing deployment.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    let request = write::UpdateDeployment {
      id,
      config: resource.config,
    };
    monitor_client().write(request).await.map(|_| ())
  }

  /// Diff the stored config against the toml partial. The server id
  /// (and, for build-based images, the build id) is first replaced
  /// with the resource name, since the toml refers to resources by
  /// name.
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    original.server_id =
      match id_to_server().get(&original.server_id) {
        Some(server) => server.name.clone(),
        None => Default::default(),
      };
    if let DeploymentImage::Build { build_id, version } =
      &original.image
    {
      let name = id_to_build()
        .get(build_id)
        .map(|b| b.name.clone())
        .unwrap_or_default();
      original.image = DeploymentImage::Build {
        build_id: name,
        version: *version,
      };
    }
    Ok(original.partial_diff(update))
  }

  /// Delete the deployment with the given id via the API.
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client()
      .write(DeleteDeployment { id })
      .await
      .map(|_| ())
  }
}

View File

@@ -0,0 +1,9 @@
mod alerter;
mod build;
mod builder;
mod deployment;
mod procedure;
mod repo;
mod server;
mod server_template;
mod sync;

View File

@@ -0,0 +1,275 @@
use std::collections::HashMap;
use colored::Colorize;
use monitor_client::{
api::{
execute::Execution,
write::{CreateProcedure, DeleteProcedure, UpdateProcedure},
},
entities::{
procedure::{
PartialProcedureConfig, Procedure, ProcedureConfig,
ProcedureConfigDiff,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::{MaybeNone, PartialDiff};
use crate::{
maps::{
id_to_build, id_to_deployment, id_to_procedure, id_to_repo,
id_to_resource_sync, id_to_server, name_to_procedure,
},
state::monitor_client,
sync::resource::{
run_update_description, run_update_tags, ResourceSync, ToCreate,
ToDelete, ToUpdate, ToUpdateItem,
},
};
impl ResourceSync for Procedure {
  type Config = ProcedureConfig;
  type Info = ();
  type PartialConfig = PartialProcedureConfig;
  type ConfigDiff = ProcedureConfigDiff;

  /// Resource kind name used in log output.
  fn display() -> &'static str {
    "procedure"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Procedure(id)
  }

  /// Existing procedures on the target instance, keyed by name.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_procedure()
  }

  /// Create the procedure via the API, returning the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateProcedure {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|p| p.id)
  }

  /// Apply the partial config to an existing procedure.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateProcedure {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Procedure-specific sync driver.
  ///
  /// Procedures can reference other procedures in their executions
  /// (see `Execution::RunProcedure` in `get_diff` below), so a single
  /// create / update pass can fail while a referenced procedure does
  /// not exist yet. Deletes run first; then creates and updates are
  /// retried for up to 10 passes, dropping the items that succeed
  /// after each pass. Persistent failures are only logged on the
  /// final pass (i == 9).
  async fn run_updates(
    mut to_create: ToCreate<Self::PartialConfig>,
    mut to_update: ToUpdate<Self::PartialConfig>,
    to_delete: ToDelete,
  ) {
    for name in to_delete {
      // NOTE(review): the name is passed in the `id` field —
      // presumably DeleteProcedure accepts id or name; confirm.
      if let Err(e) = crate::state::monitor_client()
        .write(DeleteProcedure { id: name.clone() })
        .await
      {
        warn!("failed to delete procedure {name} | {e:#}",);
      } else {
        info!(
          "{} procedure '{}'",
          "deleted".red().bold(),
          name.bold(),
        );
      }
    }

    if to_update.is_empty() && to_create.is_empty() {
      return;
    }

    for i in 0..10 {
      // ids of the updates that succeeded this pass
      let mut to_pull = Vec::new();
      for ToUpdateItem {
        id,
        resource,
        update_description,
        update_tags,
      } in &to_update
      {
        // Update resource
        let name = resource.name.clone();
        let tags = resource.tags.clone();
        let description = resource.description.clone();

        if *update_description {
          run_update_description::<Procedure>(
            id.clone(),
            &name,
            description,
          )
          .await;
        }

        if *update_tags {
          run_update_tags::<Procedure>(id.clone(), &name, tags).await;
        }

        // only push a config update when the toml actually has one
        if !resource.config.is_none() {
          if let Err(e) =
            Self::update(id.clone(), resource.clone()).await
          {
            if i == 9 {
              // final pass: surface the persistent failure
              warn!(
                "failed to update {} {name} | {e:#}",
                Self::display()
              );
            }
            // keep it in `to_update` to retry next pass
            continue;
          }
        }

        info!("{} {name} updated", Self::display());

        // have to clone out so to_update is mutable
        to_pull.push(id.clone());
      }
      // drop the items that completed this pass
      to_update.retain(|resource| !to_pull.contains(&resource.id));

      // names of the creates that succeeded this pass
      let mut to_pull = Vec::new();
      for resource in &to_create {
        let name = resource.name.clone();
        let tags = resource.tags.clone();
        let description = resource.description.clone();
        let id = match Self::create(resource.clone()).await {
          Ok(id) => id,
          Err(e) => {
            if i == 9 {
              // final pass: surface the persistent failure
              warn!(
                "failed to create {} {name} | {e:#}",
                Self::display(),
              );
            }
            // keep it in `to_create` to retry next pass
            continue;
          }
        };
        run_update_tags::<Procedure>(id.clone(), &name, tags).await;
        run_update_description::<Procedure>(id, &name, description)
          .await;
        info!("{} {name} created", Self::display());
        to_pull.push(name);
      }
      to_create.retain(|resource| !to_pull.contains(&resource.name));

      if to_update.is_empty() && to_create.is_empty() {
        // info!("all procedures synced");
        return;
      }
    }
    warn!("procedure sync loop exited after max iterations");
  }

  /// Diff the stored config against the toml partial. Every resource
  /// id referenced by a stage execution is first replaced with the
  /// resource's name, since the toml refers to resources by name.
  /// Ids that no longer resolve become the empty string.
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    for stage in &mut original.stages {
      for execution in &mut stage.executions {
        match &mut execution.execution {
          // no resource reference to rewrite
          Execution::None(_) | Execution::Sleep(_) => {}
          Execution::RunProcedure(config) => {
            config.procedure = id_to_procedure()
              .get(&config.procedure)
              .map(|p| p.name.clone())
              .unwrap_or_default();
          }
          Execution::RunBuild(config) => {
            config.build = id_to_build()
              .get(&config.build)
              .map(|b| b.name.clone())
              .unwrap_or_default();
          }
          Execution::Deploy(config) => {
            config.deployment = id_to_deployment()
              .get(&config.deployment)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::StartContainer(config) => {
            config.deployment = id_to_deployment()
              .get(&config.deployment)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::StopContainer(config) => {
            config.deployment = id_to_deployment()
              .get(&config.deployment)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::RemoveContainer(config) => {
            config.deployment = id_to_deployment()
              .get(&config.deployment)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::CloneRepo(config) => {
            config.repo = id_to_repo()
              .get(&config.repo)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::PullRepo(config) => {
            config.repo = id_to_repo()
              .get(&config.repo)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::StopAllContainers(config) => {
            config.server = id_to_server()
              .get(&config.server)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::PruneNetworks(config) => {
            config.server = id_to_server()
              .get(&config.server)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::PruneImages(config) => {
            config.server = id_to_server()
              .get(&config.server)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::PruneContainers(config) => {
            config.server = id_to_server()
              .get(&config.server)
              .map(|d| d.name.clone())
              .unwrap_or_default();
          }
          Execution::RunSync(config) => {
            config.sync = id_to_resource_sync()
              .get(&config.sync)
              .map(|s| s.name.clone())
              .unwrap_or_default();
          }
        }
      }
    }
    Ok(original.partial_diff(update))
  }

  /// Never called: this impl overrides `run_updates`, which performs
  /// deletes itself.
  async fn delete(_: String) -> anyhow::Result<()> {
    unreachable!()
  }
}

View File

@@ -0,0 +1,84 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateRepo, DeleteRepo, UpdateRepo},
entities::{
repo::{
PartialRepoConfig, Repo, RepoConfig, RepoConfigDiff, RepoInfo,
},
resource::Resource,
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::{id_to_server, name_to_repo},
state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Repo {
  type Config = RepoConfig;
  type Info = RepoInfo;
  type PartialConfig = PartialRepoConfig;
  type ConfigDiff = RepoConfigDiff;

  /// Resource kind name used in log output.
  fn display() -> &'static str {
    "repo"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Repo(id)
  }

  /// Existing repos on the target instance, keyed by name.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_repo()
  }

  /// Create the repo via the API and return the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    let created = monitor_client()
      .write(CreateRepo {
        name: resource.name,
        config: resource.config,
      })
      .await?;
    Ok(created.id)
  }

  /// Apply the partial config to an existing repo.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    let request = UpdateRepo {
      id,
      config: resource.config,
    };
    monitor_client().write(request).await.map(|_| ())
  }

  /// Diff the stored config against the toml partial. The server id
  /// is first replaced with the server name, since the toml refers
  /// to servers by name.
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    original.server_id =
      match id_to_server().get(&original.server_id) {
        Some(server) => server.name.clone(),
        None => Default::default(),
      };
    Ok(original.partial_diff(update))
  }

  /// Delete the repo with the given id via the API.
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client()
      .write(DeleteRepo { id })
      .await
      .map(|_| ())
  }
}

View File

@@ -0,0 +1,77 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{CreateServer, DeleteServer, UpdateServer},
entities::{
resource::Resource,
server::{
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::name_to_server, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for Server {
  type Config = ServerConfig;
  type Info = ();
  type PartialConfig = PartialServerConfig;
  type ConfigDiff = ServerConfigDiff;

  /// Resource kind name used in log output.
  fn display() -> &'static str {
    "server"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Server(id)
  }

  /// Existing servers on the target instance, keyed by name.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_server()
  }

  /// Create the server via the API and return the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    let created = monitor_client()
      .write(CreateServer {
        name: resource.name,
        config: resource.config,
      })
      .await?;
    Ok(created.id)
  }

  /// Apply the partial config to an existing server.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    let request = UpdateServer {
      id,
      config: resource.config,
    };
    monitor_client().write(request).await.map(|_| ())
  }

  /// Diff the stored config against the toml partial. Server configs
  /// contain no references to other resources, so no id rewriting is
  /// needed.
  fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    let diff = original.partial_diff(update);
    Ok(diff)
  }

  /// Delete the server with the given id via the API.
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client()
      .write(DeleteServer { id })
      .await
      .map(|_| ())
  }
}

View File

@@ -0,0 +1,80 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{
CreateServerTemplate, DeleteServerTemplate, UpdateServerTemplate,
},
entities::{
resource::Resource,
server_template::{
PartialServerTemplateConfig, ServerTemplate,
ServerTemplateConfig, ServerTemplateConfigDiff,
},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::name_to_server_template, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for ServerTemplate {
  type Config = ServerTemplateConfig;
  type Info = ();
  type PartialConfig = PartialServerTemplateConfig;
  type ConfigDiff = ServerTemplateConfigDiff;

  /// Resource kind name used in log output.
  fn display() -> &'static str {
    "server template"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::ServerTemplate(id)
  }

  /// Existing server templates on the target instance, keyed by name.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_server_template()
  }

  /// Create the server template via the API and return the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    let created = monitor_client()
      .write(CreateServerTemplate {
        name: resource.name,
        config: resource.config,
      })
      .await?;
    Ok(created.id)
  }

  /// Apply the partial config to an existing server template.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    let request = UpdateServerTemplate {
      id,
      config: resource.config,
    };
    monitor_client().write(request).await.map(|_| ())
  }

  /// Diff the stored config against the toml partial. No id
  /// rewriting is needed for server templates.
  fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    let diff = original.partial_diff(update);
    Ok(diff)
  }

  /// Delete the server template with the given id via the API.
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client()
      .write(DeleteServerTemplate { id })
      .await
      .map(|_| ())
  }
}

View File

@@ -0,0 +1,81 @@
use std::collections::HashMap;
use monitor_client::{
api::write::{
CreateResourceSync, DeleteResourceSync, UpdateResourceSync,
},
entities::{
self,
resource::Resource,
sync::{
PartialResourceSyncConfig, ResourceSyncConfig,
ResourceSyncConfigDiff, ResourceSyncInfo,
},
toml::ResourceToml,
update::ResourceTarget,
},
};
use partial_derive2::PartialDiff;
use crate::{
maps::name_to_resource_sync, state::monitor_client,
sync::resource::ResourceSync,
};
impl ResourceSync for entities::sync::ResourceSync {
  type Config = ResourceSyncConfig;
  type Info = ResourceSyncInfo;
  type PartialConfig = PartialResourceSyncConfig;
  type ConfigDiff = ResourceSyncConfigDiff;

  /// Resource kind name used in log output.
  fn display() -> &'static str {
    "resource sync"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::ResourceSync(id)
  }

  /// Existing resource syncs on the target instance, keyed by name.
  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_resource_sync()
  }

  /// Create the resource sync via the API and return the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    let created = monitor_client()
      .write(CreateResourceSync {
        name: resource.name,
        config: resource.config,
      })
      .await?;
    Ok(created.id)
  }

  /// Apply the partial config to an existing resource sync.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    let request = UpdateResourceSync {
      id,
      config: resource.config,
    };
    monitor_client().write(request).await.map(|_| ())
  }

  /// Diff the stored config against the toml partial. No id
  /// rewriting is needed for resource syncs.
  fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    let diff = original.partial_diff(update);
    Ok(diff)
  }

  /// Delete the resource sync with the given id via the API.
  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client()
      .write(DeleteResourceSync { id })
      .await
      .map(|_| ())
  }
}

View File

@@ -0,0 +1,388 @@
use std::cmp::Ordering;
use anyhow::Context;
use colored::Colorize;
use monitor_client::{
api::{
read::ListUserTargetPermissions,
write::{
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
UpdatePermissionOnTarget,
},
},
entities::{
permission::UserTarget,
toml::{PermissionToml, UserGroupToml},
update::ResourceTarget,
},
};
use crate::maps::{
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
id_to_procedure, id_to_repo, id_to_resource_sync, id_to_server,
id_to_server_template, id_to_user, name_to_user_group,
};
/// A user group that exists both in the toml and on the instance,
/// with flags recording which parts differ and need to be pushed.
pub struct UpdateItem {
  user_group: UserGroupToml,
  /// the users list differs from the instance copy
  update_users: bool,
  /// the permissions list differs from the instance copy
  update_permissions: bool,
}
/// A user group present on the instance but absent from the toml,
/// scheduled for deletion.
pub struct DeleteItem {
  /// instance id, used for the DeleteUserGroup call
  id: String,
  /// group name, used for log / print output
  name: String,
}
/// Compute the user group sync plan by diffing the toml definitions
/// against the user groups currently on the instance.
///
/// Returns `(to_create, to_update, to_delete)`. Each planned action
/// is also printed to stdout so the user can review the plan before
/// it is executed by [`run_updates`].
///
/// `delete`: when true, groups that exist on the instance but are
/// not mentioned in the toml are scheduled for deletion.
///
/// # Errors
/// Fails only when the existing permissions of a group cannot be
/// queried from the API.
pub async fn get_updates(
  user_groups: Vec<UserGroupToml>,
  delete: bool,
) -> anyhow::Result<(
  Vec<UserGroupToml>,
  Vec<UpdateItem>,
  Vec<DeleteItem>,
)> {
  // name -> user group currently on the instance
  let map = name_to_user_group();

  let mut to_create = Vec::<UserGroupToml>::new();
  let mut to_update = Vec::<UpdateItem>::new();
  let mut to_delete = Vec::<DeleteItem>::new();

  if delete {
    // anything on the instance the toml does not mention gets deleted
    for user_group in map.values() {
      if !user_groups.iter().any(|ug| ug.name == user_group.name) {
        to_delete.push(DeleteItem {
          id: user_group.id.clone(),
          name: user_group.name.clone(),
        });
      }
    }
  }

  let id_to_user = id_to_user();

  for mut user_group in user_groups {
    // no existing group with this name -> schedule a create
    let original = match map.get(&user_group.name).cloned() {
      Some(original) => original,
      None => {
        println!(
          "\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
          "CREATE".green(),
          user_group.name.bold().green(),
          "users".dimmed(),
          user_group.users,
          "permissions".dimmed(),
          user_group.permissions,
        );
        to_create.push(user_group);
        continue;
      }
    };

    // existing membership as usernames; ids that no longer resolve
    // to a user are silently dropped
    let mut original_users = original
      .users
      .into_iter()
      .filter_map(|user_id| {
        id_to_user.get(&user_id).map(|u| u.username.clone())
      })
      .collect::<Vec<_>>();

    // existing permissions, with resource ids replaced by resource
    // names so they compare equal to the toml form
    let mut original_permissions = crate::state::monitor_client()
      .read(ListUserTargetPermissions {
        user_target: UserTarget::UserGroup(original.id),
      })
      .await
      .context("failed to query for existing UserGroup permissions")?
      .into_iter()
      .map(|mut p| {
        // replace the ids with names
        match &mut p.resource_target {
          ResourceTarget::System(_) => {}
          ResourceTarget::Build(id) => {
            *id = id_to_build()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::Builder(id) => {
            *id = id_to_builder()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::Deployment(id) => {
            *id = id_to_deployment()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::Server(id) => {
            *id = id_to_server()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::Repo(id) => {
            *id = id_to_repo()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::Alerter(id) => {
            *id = id_to_alerter()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::Procedure(id) => {
            *id = id_to_procedure()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::ServerTemplate(id) => {
            *id = id_to_server_template()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
          ResourceTarget::ResourceSync(id) => {
            *id = id_to_resource_sync()
              .get(id)
              .map(|b| b.name.clone())
              .unwrap_or_default()
          }
        }
        PermissionToml {
          target: p.resource_target,
          level: p.level,
        }
      })
      .collect::<Vec<_>>();

    // sort both sides so the Vec equality below is order-independent
    original_users.sort();
    user_group.users.sort();

    user_group.permissions.sort_by(sort_permissions);
    original_permissions.sort_by(sort_permissions);

    let update_users = user_group.users != original_users;
    let update_permissions =
      user_group.permissions != original_permissions;

    // only push update after failed diff
    if update_users || update_permissions {
      println!(
        "\n{}: user group: '{}'\n-------------------",
        "UPDATE".blue(),
        user_group.name.bold(),
      );
      let mut lines = Vec::<String>::new();
      if update_users {
        // users in the toml but not on the instance
        let adding = user_group
          .users
          .iter()
          .filter(|user| !original_users.contains(user))
          .map(|user| user.as_str())
          .collect::<Vec<_>>();
        let adding = if adding.is_empty() {
          String::from("None").into()
        } else {
          adding.join(", ").green()
        };
        // users on the instance but not in the toml
        let removing = original_users
          .iter()
          .filter(|user| !user_group.users.contains(user))
          .map(|user| user.as_str())
          .collect::<Vec<_>>();
        let removing = if removing.is_empty() {
          String::from("None").into()
        } else {
          removing.join(", ").red()
        };
        lines.push(format!(
          "{}: 'users'\n{}: {removing}\n{}: {adding}",
          "field".dimmed(),
          "removing".dimmed(),
          "adding".dimmed(),
        ))
      }
      if update_permissions {
        // permissions in the toml but not on the instance
        let adding = user_group
          .permissions
          .iter()
          .filter(|permission| {
            !original_permissions.contains(permission)
          })
          .map(|permission| format!("{permission:?}"))
          .collect::<Vec<_>>();
        let adding = if adding.is_empty() {
          String::from("None").into()
        } else {
          adding.join(", ").green()
        };
        // permissions on the instance but not in the toml
        let removing = original_permissions
          .iter()
          .filter(|permission| {
            !user_group.permissions.contains(permission)
          })
          .map(|permission| format!("{permission:?}"))
          .collect::<Vec<_>>();
        let removing = if removing.is_empty() {
          String::from("None").into()
        } else {
          removing.join(", ").red()
        };
        lines.push(format!(
          "{}: 'permissions'\n{}: {removing}\n{}: {adding}",
          "field".dimmed(),
          "removing".dimmed(),
          "adding".dimmed()
        ))
      }
      println!("{}", lines.join("\n-------------------\n"));
      to_update.push(UpdateItem {
        user_group,
        update_users,
        update_permissions,
      });
    }
  }

  for d in &to_delete {
    println!(
      "\n{}: user group: '{}'\n-------------------",
      "DELETE".red(),
      d.name.bold(),
    );
  }

  Ok((to_create, to_update, to_delete))
}
/// Order permissions in a deterministic way, so two permission lists
/// can be compared with `==` after sorting.
///
/// Sorts lexicographically: first by target variant, then by target
/// id (which has already been replaced with the resource name at the
/// point this is used).
fn sort_permissions(
  a: &PermissionToml,
  b: &PermissionToml,
) -> Ordering {
  let (a_t, a_id) = a.target.extract_variant_id();
  let (b_t, b_id) = b.target.extract_variant_id();
  // `Ordering::then` expresses the (variant, id) lexicographic order
  // directly, replacing the manual match over the pair of orderings.
  a_t.cmp(&b_t).then(a_id.cmp(b_id))
}
/// Execute the user group sync plan produced by [`get_updates`]:
/// creates first, then updates, then deletes. Failures are logged
/// and skipped so one bad group does not abort the rest.
pub async fn run_updates(
  to_create: Vec<UserGroupToml>,
  to_update: Vec<UpdateItem>,
  to_delete: Vec<DeleteItem>,
) {
  // Create the non-existant user groups
  for user_group in to_create {
    // Create the user group
    if let Err(e) = crate::state::monitor_client()
      .write(CreateUserGroup {
        name: user_group.name.clone(),
      })
      .await
    {
      warn!(
        "failed to create user group {} | {e:#}",
        user_group.name
      );
      // group does not exist, so skip setting users / permissions
      continue;
    } else {
      info!(
        "{} user group '{}'",
        "created".green().bold(),
        user_group.name.bold(),
      );
    };
    // a freshly created group gets the full users + permissions lists
    set_users(user_group.name.clone(), user_group.users).await;
    run_update_permissions(user_group.name, user_group.permissions)
      .await;
  }

  // Update the existing user groups, only touching the parts that
  // were found to differ in get_updates.
  for UpdateItem {
    user_group,
    update_users,
    update_permissions,
  } in to_update
  {
    if update_users {
      set_users(user_group.name.clone(), user_group.users).await;
    }
    if update_permissions {
      run_update_permissions(user_group.name, user_group.permissions)
        .await;
    }
  }

  for user_group in to_delete {
    if let Err(e) = crate::state::monitor_client()
      .write(DeleteUserGroup { id: user_group.id })
      .await
    {
      warn!(
        "failed to delete user group {} | {e:#}",
        user_group.name
      );
    } else {
      info!(
        "{} user group '{}'",
        "deleted".red().bold(),
        user_group.name.bold(),
      );
    }
  }
}
/// Replace the full membership of a user group. Failure is logged,
/// not propagated.
async fn set_users(user_group: String, users: Vec<String>) {
  let res = crate::state::monitor_client()
    .write(SetUsersInUserGroup {
      user_group: user_group.clone(),
      users,
    })
    .await;
  match res {
    Ok(_) => info!(
      "{} user group '{}' users",
      "updated".blue().bold(),
      user_group.bold(),
    ),
    Err(e) => {
      warn!("failed to set users in group {user_group} | {e:#}")
    }
  }
}
/// Set each permission in `permissions` on the user group, one
/// UpdatePermissionOnTarget call per entry. Failures are logged and
/// skipped so one bad target does not abort the rest.
async fn run_update_permissions(
  user_group: String,
  permissions: Vec<PermissionToml>,
) {
  for PermissionToml { target, level } in permissions {
    if let Err(e) = crate::state::monitor_client()
      .write(UpdatePermissionOnTarget {
        user_target: UserTarget::UserGroup(user_group.clone()),
        resource_target: target.clone(),
        permission: level,
      })
      .await
    {
      // fixed typo in log message: "permssion" -> "permission"
      warn!(
        "failed to set permission in group {user_group} | target: {target:?} | {e:#}",
      );
    } else {
      info!(
        "{} user group '{}' permissions",
        "updated".blue().bold(),
        user_group.bold(),
      );
    }
  }
}

View File

@@ -0,0 +1,206 @@
use colored::Colorize;
use monitor_client::{
api::write::{
CreateVariable, DeleteVariable, UpdateVariableDescription,
UpdateVariableValue,
},
entities::variable::Variable,
};
use crate::{maps::name_to_variable, state::monitor_client};
/// A variable that exists both in the toml and on the instance, with
/// flags recording which fields differ and need to be pushed.
pub struct ToUpdateItem {
  pub variable: Variable,
  /// the value differs from the instance copy
  pub update_value: bool,
  /// the description differs from the instance copy
  pub update_description: bool,
}
/// Compute the variable sync plan by diffing the toml variables
/// against the variables currently on the instance.
///
/// Returns `(to_create, to_update, to_delete)` — deletes are by
/// variable name. Each planned action is printed to stdout so the
/// user can review the plan before [`run_updates`] executes it.
///
/// `delete`: when true, instance variables missing from the toml are
/// scheduled for deletion.
///
/// # Errors
/// Currently never fails; the Result return matches the signature of
/// the sibling `get_updates` functions.
pub fn get_updates(
  variables: Vec<Variable>,
  delete: bool,
) -> anyhow::Result<(Vec<Variable>, Vec<ToUpdateItem>, Vec<String>)> {
  // name -> variable currently on the instance
  let map = name_to_variable();

  let mut to_create = Vec::<Variable>::new();
  let mut to_update = Vec::<ToUpdateItem>::new();
  let mut to_delete = Vec::<String>::new();

  if delete {
    // anything on the instance the toml does not mention gets deleted
    for variable in map.values() {
      if !variables.iter().any(|v| v.name == variable.name) {
        to_delete.push(variable.name.clone());
      }
    }
  }

  for variable in variables {
    match map.get(&variable.name) {
      Some(original) => {
        let item = ToUpdateItem {
          update_value: original.value != variable.value,
          update_description: original.description
            != variable.description,
          variable,
        };
        // nothing differs -> no action needed
        if !item.update_value && !item.update_description {
          continue;
        }

        println!(
          "\n{}: variable: '{}'\n-------------------",
          "UPDATE".blue(),
          item.variable.name.bold(),
        );
        let mut lines = Vec::<String>::new();
        if item.update_value {
          lines.push(format!(
            "{}: 'value'\n{}: {}\n{}: {}",
            "field".dimmed(),
            "from".dimmed(),
            original.value.red(),
            "to".dimmed(),
            item.variable.value.green()
          ))
        }
        if item.update_description {
          lines.push(format!(
            "{}: 'description'\n{}: {}\n{}: {}",
            "field".dimmed(),
            "from".dimmed(),
            original.description.red(),
            "to".dimmed(),
            item.variable.description.green()
          ))
        }
        println!("{}", lines.join("\n-------------------\n"));

        to_update.push(item);
      }
      None => {
        // new variable -> create; the empty description line is
        // omitted from the printout
        if variable.description.is_empty() {
          println!(
            "\n{}: variable: {}\n{}: {}",
            "CREATE".green(),
            variable.name.bold().green(),
            "value".dimmed(),
            variable.value,
          );
        } else {
          println!(
            "\n{}: variable: {}\n{}: {}\n{}: {}",
            "CREATE".green(),
            variable.name.bold().green(),
            "description".dimmed(),
            variable.description,
            "value".dimmed(),
            variable.value,
          );
        }
        to_create.push(variable)
      }
    }
  }

  for name in &to_delete {
    println!(
      "\n{}: variable: '{}'\n-------------------",
      "DELETE".red(),
      name.bold(),
    );
  }

  Ok((to_create, to_update, to_delete))
}
/// Execute the variable sync plan produced by [`get_updates`]:
/// creates, then updates (value / description independently), then
/// deletes. Failures are logged and skipped so one bad variable does
/// not abort the rest.
pub async fn run_updates(
  to_create: Vec<Variable>,
  to_update: Vec<ToUpdateItem>,
  to_delete: Vec<String>,
) {
  for variable in to_create {
    if let Err(e) = monitor_client()
      .write(CreateVariable {
        name: variable.name.clone(),
        value: variable.value,
        description: variable.description,
      })
      .await
    {
      warn!("failed to create variable {} | {e:#}", variable.name);
    } else {
      info!(
        "{} variable '{}'",
        "created".green().bold(),
        variable.name.bold(),
      );
    };
  }

  // value and description have separate write endpoints, so each is
  // only pushed when its flag is set
  for ToUpdateItem {
    variable,
    update_value,
    update_description,
  } in to_update
  {
    if update_value {
      if let Err(e) = monitor_client()
        .write(UpdateVariableValue {
          name: variable.name.clone(),
          value: variable.value,
        })
        .await
      {
        warn!(
          "failed to update variable value for {} | {e:#}",
          variable.name
        );
      } else {
        info!(
          "{} variable '{}' value",
          "updated".blue().bold(),
          variable.name.bold(),
        );
      };
    }
    if update_description {
      if let Err(e) = monitor_client()
        .write(UpdateVariableDescription {
          name: variable.name.clone(),
          description: variable.description,
        })
        .await
      {
        warn!(
          "failed to update variable description for {} | {e:#}",
          variable.name
        );
      } else {
        info!(
          "{} variable '{}' description",
          "updated".blue().bold(),
          variable.name.bold(),
        );
      };
    }
  }

  // variables are deleted by name
  for variable in to_delete {
    if let Err(e) = crate::state::monitor_client()
      .write(DeleteVariable {
        name: variable.clone(),
      })
      .await
    {
      warn!("failed to delete variable {variable} | {e:#}",);
    } else {
      info!(
        "{} variable '{}'",
        "deleted".red().bold(),
        variable.bold(),
      );
    }
  }
}

67
bin/core/Cargo.toml Normal file
View File

@@ -0,0 +1,67 @@
[package]
name = "monitor_core"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
[[bin]]
name = "core"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# local
monitor_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
formatting.workspace = true
logger.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
merge_config_files.workspace = true
async_timing_util.workspace = true
partial_derive2.workspace = true
derive_variants.workspace = true
mongo_indexed.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
run_command.workspace = true
parse_csl.workspace = true
mungos.workspace = true
slack.workspace = true
svi.workspace = true
# external
ordered_hash_map.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-sdk-ecr.workspace = true
aws-config.workspace = true
tokio-util.workspace = true
axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
typeshare.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
anyhow.workspace = true
dotenv.workspace = true
bcrypt.workspace = true
base64.workspace = true
tokio.workspace = true
tower.workspace = true
serde.workspace = true
strum.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true
envy.workspace = true
rand.workspace = true
hmac.workspace = true
sha2.workspace = true
jwt.workspace = true
hex.workspace = true

37
bin/core/Dockerfile Normal file
View File

@@ -0,0 +1,37 @@
# Build Core
# Stage 1: compile the core binary from the full workspace.
FROM rust:1.79.0-bookworm as core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_core --release

# Build Frontend
# Stage 2: build the typescript client, yarn-link it, then build the
# frontend against the linked client.
FROM node:20.12-alpine as frontend-builder
WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @monitor/client && yarn && yarn build

# Final Image
FROM debian:bookworm-slim

# Install Deps
# git + ca-certificates for repo operations; curl/unzip fetch and
# unpack the AWS CLI v2 installer (appears to be needed at runtime for
# ECR auth per recent commits — confirm).
RUN apt update && apt install -y git curl unzip ca-certificates && \
  curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
  unzip awscliv2.zip && \
  ./aws/install

# Copy
# Default config is baked in; the built binary and frontend assets come
# from the two builder stages above.
COPY ./config_example/core.config.example.toml /config/config.toml
COPY --from=core-builder /builder/target/release/core /
COPY --from=frontend-builder /builder/frontend/dist /frontend

# Hint at the port
EXPOSE 9000

# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
LABEL org.opencontainers.image.description="A tool to build and deploy software across many servers"
LABEL org.opencontainers.image.licenses=GPL-3.0

CMD ["./core"]

128
bin/core/src/api/auth.rs Normal file
View File

@@ -0,0 +1,128 @@
use std::{sync::OnceLock, time::Instant};
use anyhow::anyhow;
use axum::{http::HeaderMap, routing::post, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::auth::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::{
get_user_id_from_headers,
github::{self, client::github_oauth_client},
google::{self, client::google_oauth_client},
},
config::core_config,
helpers::query::get_user,
state::{jwt_client, State},
};
/// The union of requests accepted at `POST /auth`, dispatched via the
/// Resolver derive against [`State`] with the request `HeaderMap` as
/// resolver args. Serialized as `{ "type": ..., "params": ... }`.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(HeaderMap)]
#[serde(tag = "type", content = "params")]
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
pub enum AuthRequest {
  GetLoginOptions(GetLoginOptions),
  CreateLocalUser(CreateLocalUser),
  LoginLocalUser(LoginLocalUser),
  ExchangeForJwt(ExchangeForJwt),
  GetUser(GetUser),
}
/// Build the `/auth` router. The github / google oauth subrouters are
/// only nested when the corresponding oauth client is configured.
pub fn router() -> Router {
  let base = Router::new().route("/", post(handler));
  let base = if github_oauth_client().is_some() {
    base.nest("/github", github::router())
  } else {
    base
  };
  if google_oauth_client().is_some() {
    base.nest("/google", google::router())
  } else {
    base
  }
}
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
async fn handler(
headers: HeaderMap,
Json(request): Json<AuthRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
debug!("/auth request {req_id} | METHOD: {}", request.req_type());
let res = State.resolve_request(request, headers).await.map_err(
|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
},
);
if let Err(e) = &res {
debug!("/auth request {req_id} | error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/auth request {req_id} | resolve time: {elapsed:?}");
Ok((TypedHeader(ContentType::json()), res?))
}
/// Lazily compute (once) the login options derived from the core config.
/// A provider counts as enabled only when it is switched on AND has both
/// a non-empty id and secret configured.
fn login_options_reponse() -> &'static GetLoginOptionsResponse {
  static GET_LOGIN_OPTIONS_RESPONSE: OnceLock<
    GetLoginOptionsResponse,
  > = OnceLock::new();
  GET_LOGIN_OPTIONS_RESPONSE.get_or_init(|| {
    let config = core_config();
    let github = &config.github_oauth;
    let google = &config.google_oauth;
    GetLoginOptionsResponse {
      local: config.local_auth,
      github: github.enabled
        && !github.id.is_empty()
        && !github.secret.is_empty(),
      google: google.enabled
        && !google.id.is_empty()
        && !google.secret.is_empty(),
    }
  })
}
impl Resolve<GetLoginOptions, HeaderMap> for State {
  /// Return the cached login options (no auth required).
  #[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
  async fn resolve(
    &self,
    _: GetLoginOptions,
    _: HeaderMap,
  ) -> anyhow::Result<GetLoginOptionsResponse> {
    let options = login_options_reponse();
    Ok(*options)
  }
}
impl Resolve<ExchangeForJwt, HeaderMap> for State {
  /// Redeem a one-time exchange token for a JWT via the jwt client.
  #[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
  async fn resolve(
    &self,
    ExchangeForJwt { token }: ExchangeForJwt,
    _: HeaderMap,
  ) -> anyhow::Result<ExchangeForJwtResponse> {
    Ok(ExchangeForJwtResponse {
      jwt: jwt_client().redeem_exchange_token(&token).await?,
    })
  }
}
impl Resolve<GetUser, HeaderMap> for State {
  /// Authenticate the request headers and load the matching user.
  #[instrument(name = "GetUser", level = "debug", skip(self))]
  async fn resolve(
    &self,
    GetUser {}: GetUser,
    headers: HeaderMap,
  ) -> anyhow::Result<User> {
    let user_id = get_user_id_from_headers(&headers).await?;
    let user = get_user(&user_id).await?;
    Ok(user)
  }
}

View File

@@ -0,0 +1,800 @@
use std::{collections::HashSet, time::Duration};
use anyhow::{anyhow, Context};
use formatting::{format_serror, muted};
use futures::future::join_all;
use monitor_client::{
api::execute::{
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
},
entities::{
alert::{Alert, AlertData},
all_logs_success,
build::{Build, CloudRegistryConfig, ImageRegistry},
builder::{AwsBuilderConfig, Builder, BuilderConfig},
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
deployment::DeploymentState,
monitor_timestamp,
permission::PermissionLevel,
server::{stats::SeverityLevel, Server},
server_template::aws::AwsServerTemplateConfig,
to_monitor_name,
update::{Log, Update},
user::{auto_redeploy_user, User},
},
};
use mungos::{
by_id::update_one_by_id,
find::find_collect,
mongodb::{
bson::{doc, to_bson, to_document},
options::FindOneOptions,
},
};
use periphery_client::{
api::{self, GetVersionResponse},
PeripheryClient,
};
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
use crate::{
cloud::{
aws::{
ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
ecr,
},
BuildCleanupData,
},
config::core_config,
helpers::{
alert::send_alerts,
channel::build_cancel_channel,
periphery_client,
query::{get_deployment_state, get_global_variables},
update::update_update,
},
resource::{self, refresh_build_state_cache},
state::{action_states, db_client, State},
};
use crate::helpers::update::init_execution_update;
use super::ExecuteRequest;
impl Resolve<RunBuild, (User, Update)> for State {
  /// Run the full build pipeline on a builder periphery:
  /// permission check -> registry validation -> version bump ->
  /// builder acquisition -> repo clone -> variable interpolation ->
  /// docker build -> cleanup + db/cache bookkeeping + alerts.
  /// Cancellable at the clone and build steps via the build cancel channel.
  #[instrument(name = "RunBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    RunBuild { build }: RunBuild,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // User needs Execute permission on the build.
    let mut build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // Fail fast if the configured image registry is missing an account /
    // credentials. Returns the core-held registry token and (for ECR)
    // the registry config to forward to periphery.
    let (registry_token, aws_ecr) =
      validate_account_extract_registry_token_aws_ecr(&build).await?;
    // get the action state for the build (or insert default).
    let action_state =
      action_states().build.get_or_insert_default(&build.id).await;
    // This will set action state back to default when dropped.
    // Will also check to ensure build not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.building = true)?;
    // Bump the build version for this run and reflect it on the update.
    build.config.version.increment();
    update.version = build.config.version;
    update_update(update.clone()).await?;
    // Cancellation plumbing: a spawned task listens on the broadcast
    // cancel channel and trips `cancel` when a CancelBuild for this
    // build id arrives. The select arms below observe `cancel`.
    let cancel = CancellationToken::new();
    let cancel_clone = cancel.clone();
    let mut cancel_recv =
      build_cancel_channel().receiver.resubscribe();
    let build_id = build.id.clone();
    tokio::spawn(async move {
      let poll = async {
        loop {
          let (incoming_build_id, mut update) = tokio::select! {
            _ = cancel_clone.cancelled() => return Ok(()),
            id = cancel_recv.recv() => id?
          };
          // Ignore cancels targeting other builds.
          if incoming_build_id == build_id {
            update.push_simple_log(
              "cancel acknowledged",
              "the build cancellation has been queued, it may still take some time",
            );
            update.finalize();
            let id = update.id.clone();
            if let Err(e) = update_update(update).await {
              warn!("failed to update Update {id} | {e:#}");
            }
            cancel_clone.cancel();
            return Ok(());
          }
        }
        #[allow(unreachable_code)]
        anyhow::Ok(())
      };
      // Stop polling either on cancel or when poll itself returns.
      tokio::select! {
        _ = cancel_clone.cancelled() => {}
        _ = poll => {}
      }
    });
    // GET BUILDER PERIPHERY
    // Either an existing server-attached periphery or a freshly
    // launched AWS instance (cleanup_data says which to tear down).
    let (periphery, cleanup_data) =
      match get_build_builder(&build, &mut update).await {
        Ok(builder) => {
          info!("got builder for build");
          builder
        }
        Err(e) => {
          warn!("failed to get builder | {e:#}");
          update.logs.push(Log::error(
            "get builder",
            format_serror(&e.context("failed to get builder").into()),
          ));
          return handle_early_return(update, build.id, build.name)
            .await;
        }
      };
    let core_config = core_config();
    let variables = get_global_variables().await?;
    // CLONE REPO
    let github_token = core_config
      .github_accounts
      .get(&build.config.github_account)
      .cloned();
    // Race the clone against cancellation; on cancel, tear down the
    // builder before returning.
    let res = tokio::select! {
      res = periphery
        .request(api::git::CloneRepo {
          args: (&build).into(),
          github_token,
        }) => res,
      _ = cancel.cancelled() => {
        info!("build cancelled during clone, cleaning up builder");
        update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
        cleanup_builder_instance(periphery, cleanup_data, &mut update)
          .await;
        info!("builder cleaned up");
        return handle_early_return(update, build.id, build.name).await
      },
    };
    match res {
      Ok(clone_logs) => {
        info!("finished repo clone");
        update.logs.extend(clone_logs);
      }
      Err(e) => {
        warn!("failed build at clone repo | {e:#}");
        update.push_error_log(
          "clone repo",
          format_serror(&e.context("failed to clone repo").into()),
        );
      }
    }
    update_update(update.clone()).await?;
    // Only proceed to the docker build if the clone fully succeeded.
    if all_logs_success(&update.logs) {
      // Interpolate variables / secrets into build args
      let mut global_replacers = HashSet::new();
      let mut secret_replacers = HashSet::new();
      for arg in &mut build.config.build_args {
        // first pass - global variables
        let (res, more_replacers) = svi::interpolate_variables(
          &arg.value,
          &variables,
          svi::Interpolator::DoubleBrackets,
          false,
        )
        .context("failed to interpolate global variables")?;
        global_replacers.extend(more_replacers);
        // second pass - core secrets
        let (res, more_replacers) = svi::interpolate_variables(
          &res,
          &core_config.secrets,
          svi::Interpolator::DoubleBrackets,
          false,
        )
        .context("failed to interpolate core secrets")?;
        secret_replacers.extend(more_replacers);
        arg.value = res;
      }
      // Show which variables were interpolated (values shown for
      // globals, only names for secrets).
      if !global_replacers.is_empty() {
        update.push_simple_log(
          "interpolate global variables",
          global_replacers
            .into_iter()
            .map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
            .collect::<Vec<_>>()
            .join("\n"),
        );
      }
      if !secret_replacers.is_empty() {
        update.push_simple_log(
          "interpolate core secrets",
          secret_replacers
            .iter()
            .map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
            .collect::<Vec<_>>()
            .join("\n"),
        );
      }
      // Race the docker build against cancellation. `replacers` lets
      // periphery redact secret values from returned logs.
      let res = tokio::select! {
        res = periphery
          .request(api::build::Build {
            build: build.clone(),
            registry_token,
            aws_ecr,
            replacers: secret_replacers.into_iter().collect(),
          }) => res.context("failed at call to periphery to build"),
        _ = cancel.cancelled() => {
          info!("build cancelled during build, cleaning up builder");
          update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
          cleanup_builder_instance(periphery, cleanup_data, &mut update)
            .await;
          return handle_early_return(update, build.id, build.name).await
        },
      };
      match res {
        Ok(logs) => {
          info!("finished build");
          update.logs.extend(logs);
        }
        Err(e) => {
          warn!("error in build | {e:#}");
          update.push_error_log(
            "build",
            format_serror(&e.context("failed to build").into()),
          )
        }
      };
    }
    update.finalize();
    let db = db_client().await;
    // Persist the bumped version / last built timestamp only on success.
    if update.success {
      let _ = db
        .builds
        .update_one(
          doc! { "name": &build.name },
          doc! {
            "$set": {
              "config.version": to_bson(&build.config.version)
                .context("failed at converting version to bson")?,
              "info.last_built_at": monitor_timestamp(),
            }
          },
          None,
        )
        .await;
    }
    // stop the cancel listening task from going forever
    cancel.cancel();
    cleanup_builder_instance(periphery, cleanup_data, &mut update)
      .await;
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db.updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_build_state_cache().await;
    }
    update_update(update.clone()).await?;
    if update.success {
      // don't hold response up for user
      tokio::spawn(async move {
        handle_post_build_redeploy(&build.id).await;
        info!("post build redeploy handled");
      });
    } else {
      // Fire a (pre-resolved) BuildFailed alert off-thread.
      let target = update.target.clone();
      let version = update.version;
      let err = update.logs.iter().find(|l| !l.success).cloned();
      tokio::spawn(async move {
        let alert = Alert {
          id: Default::default(),
          target,
          ts: monitor_timestamp(),
          resolved_ts: Some(monitor_timestamp()),
          resolved: true,
          level: SeverityLevel::Warning,
          data: AlertData::BuildFailed {
            id: build.id,
            name: build.name,
            err,
            version,
          },
        };
        send_alerts(&[alert]).await
      });
    }
    Ok(update)
  }
}
/// Finalize and persist an update for a build run that bailed out early
/// (builder failure or user cancel), refreshing the build state cache
/// and firing a BuildFailed alert when the update was not successful.
#[instrument(skip(update))]
async fn handle_early_return(
  mut update: Update,
  build_id: String,
  build_name: String,
) -> anyhow::Result<Update> {
  update.finalize();
  // Manually write the update doc before the cache refresh / broadcast.
  // The Err case of to_document should be unreachable, but the cache
  // refresh is skipped if it somehow fails.
  if let Ok(update_doc) = to_document(&update) {
    let _ = update_one_by_id(
      &db_client().await.updates,
      &update.id,
      mungos::update::Update::Set(update_doc),
      None,
    )
    .await;
    refresh_build_state_cache().await;
  }
  update_update(update.clone()).await?;
  if update.success {
    return Ok(update);
  }
  // Alert on failure, without holding up the response.
  let target = update.target.clone();
  let version = update.version;
  let err = update.logs.iter().find(|log| !log.success).cloned();
  tokio::spawn(async move {
    send_alerts(&[Alert {
      id: Default::default(),
      target,
      ts: monitor_timestamp(),
      resolved_ts: Some(monitor_timestamp()),
      resolved: true,
      level: SeverityLevel::Warning,
      data: AlertData::BuildFailed {
        id: build_id,
        name: build_name,
        version,
        err,
      },
    }])
    .await
  });
  Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
if let ExecuteRequest::CancelBuild(req) = request {
let build = resource::get::<Build>(&req.build).await?;
let db = db_client().await;
let (latest_build, latest_cancel) = tokio::try_join!(
db.updates.find_one(
doc! {
"operation": "RunBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
),
db.updates.find_one(
doc! {
"operation": "CancelBuild",
"target.id": &build.id,
},
FindOneOptions::builder()
.sort(doc! { "start_ts": -1 })
.build(),
)
)?;
match (latest_build, latest_cancel) {
(Some(build), Some(cancel)) => {
if cancel.start_ts > build.start_ts {
return Err(anyhow!("Build has already been cancelled"));
}
}
(None, _) => return Err(anyhow!("No build in progress")),
_ => {}
};
}
Ok(())
}
impl Resolve<CancelBuild, (User, Update)> for State {
  /// Trigger cancellation of a running build by broadcasting
  /// (build id, update) on the build cancel channel. The RunBuild task
  /// listening on that channel acknowledges and finalizes the update.
  #[instrument(name = "CancelBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    CancelBuild { build }: CancelBuild,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<CancelBuildResponse> {
    // User needs Execute permission on the build.
    let build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // make sure the build is building
    if !action_states()
      .build
      .get(&build.id)
      .await
      .and_then(|s| s.get().ok().map(|s| s.building))
      .unwrap_or_default()
    {
      return Err(anyhow!("Build is not building."));
    }
    update.push_simple_log(
      "cancel triggered",
      "the build cancel has been triggered",
    );
    update_update(update.clone()).await?;
    let update_id = update.id.clone();
    // Broadcast the cancel; ownership of the update moves to the
    // RunBuild task which finalizes it on acknowledgement.
    build_cancel_channel()
      .sender
      .lock()
      .await
      .send((build.id, update))?;
    // Make sure cancel is set to complete after some time in case
    // no receiver is there to do it. Prevents update stuck in InProgress.
    tokio::spawn(async move {
      tokio::time::sleep(Duration::from_secs(60)).await;
      if let Err(e) = update_one_by_id(
        &db_client().await.updates,
        &update_id,
        doc! { "$set": { "status": "Complete" } },
        None,
      )
      .await
      {
        warn!("failed to set BuildCancel Update status Complete after timeout | {e:#}")
      }
    });
    Ok(CancelBuildResponse {})
  }
}
/// Seconds between periphery version polls while waiting for a freshly
/// launched AWS build instance to come online.
const BUILDER_POLL_RATE_SECS: u64 = 2;
/// Maximum poll attempts before the instance is terminated and the
/// build fails (30 tries x 2s = ~60s).
const BUILDER_POLL_MAX_TRIES: usize = 30;
/// Resolve the build's configured builder into a connected
/// [`PeripheryClient`] plus the cleanup data describing how to tear it
/// down afterwards (delete repo for server builders, terminate instance
/// for AWS builders).
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
async fn get_build_builder(
  build: &Build,
  update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
  if build.config.builder_id.is_empty() {
    return Err(anyhow!("build has not configured a builder"));
  }
  let builder =
    resource::get::<Builder>(&build.config.builder_id).await?;
  match builder.config {
    BuilderConfig::Aws(config) => {
      // Launch a fresh EC2 instance and wait for periphery to respond.
      get_aws_builder(build, config, update).await
    }
    BuilderConfig::Server(config) => {
      if config.server_id.is_empty() {
        return Err(anyhow!("builder has not configured a server"));
      }
      let server = resource::get::<Server>(&config.server_id).await?;
      let cleanup = BuildCleanupData::Server {
        repo_name: build.name.clone(),
      };
      Ok((periphery_client(&server)?, cleanup))
    }
  }
}
/// Launch a dedicated EC2 build instance from the builder config, then
/// poll its periphery agent until it responds (or give up and terminate
/// the instance). Progress is logged onto the update as it happens.
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
async fn get_aws_builder(
  build: &Build,
  config: AwsBuilderConfig,
  update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
  let start_create_ts = monitor_timestamp();
  let instance_name =
    format!("BUILDER-{}-v{}", build.name, build.config.version);
  let Ec2Instance { instance_id, ip } = launch_ec2_instance(
    &instance_name,
    AwsServerTemplateConfig::from_builder_config(&config),
  )
  .await?;
  info!("ec2 instance launched");
  // Log the launch parameters onto the update so the user can see them.
  let log = Log {
    stage: "start build instance".to_string(),
    success: true,
    stdout: start_aws_builder_log(&instance_id, &ip, &config),
    start_ts: start_create_ts,
    end_ts: monitor_timestamp(),
    ..Default::default()
  };
  update.logs.push(log);
  update_update(update.clone()).await?;
  let periphery_address = format!("http://{ip}:{}", config.port);
  let periphery =
    PeripheryClient::new(&periphery_address, &core_config().passkey);
  let start_connect_ts = monitor_timestamp();
  // Placeholder Ok; overwritten by the last poll result each iteration,
  // so after the loop it holds the final connection error.
  let mut res = Ok(GetVersionResponse {
    version: String::new(),
  });
  for _ in 0..BUILDER_POLL_MAX_TRIES {
    let version = periphery
      .request(api::GetVersion {})
      .await
      .context("failed to reach periphery client on builder");
    if let Ok(GetVersionResponse { version }) = &version {
      // Instance is up - log the connection and hand back the client.
      let connect_log = Log {
        stage: "build instance connected".to_string(),
        success: true,
        stdout: format!(
          "established contact with periphery on builder\nperiphery version: v{}",
          version
        ),
        start_ts: start_connect_ts,
        end_ts: monitor_timestamp(),
        ..Default::default()
      };
      update.logs.push(connect_log);
      update_update(update.clone()).await?;
      return Ok((
        periphery,
        BuildCleanupData::Aws {
          instance_id,
          region: config.region,
        },
      ));
    }
    res = version;
    tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
      .await;
  }
  // Spawn terminate task in failure case (if loop is passed without return)
  tokio::spawn(async move {
    let _ =
      terminate_ec2_instance_with_retry(config.region, &instance_id)
        .await;
  });
  // Unwrap is safe, only way to get here is after check Ok / early return, so it must be err
  Err(
    res.err().unwrap().context(
      "failed to start usable builder. terminating instance.",
    ),
  )
}
/// Tear down whatever `get_build_builder` provisioned: delete the cloned
/// repo for server builders, or queue termination of the EC2 instance
/// for AWS builders. Both paths are best-effort (errors are ignored).
#[instrument(skip(periphery, update))]
async fn cleanup_builder_instance(
  periphery: PeripheryClient,
  cleanup_data: BuildCleanupData,
  update: &mut Update,
) {
  match cleanup_data {
    BuildCleanupData::Server { repo_name } => {
      let _ = periphery
        .request(api::git::DeleteRepo { name: repo_name })
        .await;
    }
    BuildCleanupData::Aws {
      instance_id,
      region,
    } => {
      // Terminate in the background; don't block build completion on it.
      let id_for_task = instance_id.clone();
      tokio::spawn(async move {
        let _ =
          terminate_ec2_instance_with_retry(region, &id_for_task)
            .await;
      });
      update.push_simple_log(
        "terminate instance",
        format!("termination queued for instance id {instance_id}"),
      );
    }
  }
}
/// After a successful build, redeploy every Running deployment that uses
/// this build's image and has `redeploy_on_build` enabled. Everything is
/// best effort: query failures skip the whole step, per-deployment
/// failures are only logged.
#[instrument]
async fn handle_post_build_redeploy(build_id: &str) {
  let query_res = find_collect(
    &db_client().await.deployments,
    doc! {
      "config.image.params.build_id": build_id,
      "config.redeploy_on_build": true
    },
    None,
  )
  .await;
  let Ok(redeploy_deployments) = query_res else {
    return;
  };
  // Run all redeploys concurrently as the auto-redeploy system user.
  let futures =
    redeploy_deployments
      .into_iter()
      .map(|deployment| async move {
        let state =
          get_deployment_state(&deployment).await.unwrap_or_default();
        if state != DeploymentState::Running {
          return None;
        }
        let req = super::ExecuteRequest::Deploy(Deploy {
          deployment: deployment.id.clone(),
          stop_signal: None,
          stop_time: None,
        });
        let user = auto_redeploy_user().to_owned();
        let res = async {
          let update = init_execution_update(&req, &user).await?;
          State
            .resolve(
              Deploy {
                deployment: deployment.id.clone(),
                stop_signal: None,
                stop_time: None,
              },
              (user, update),
            )
            .await
        }
        .await;
        Some((deployment.id.clone(), res))
      });
  for outcome in join_all(futures).await {
    if let Some((id, Err(e))) = outcome {
      warn!("failed post build redeploy for deployment {id}: {e:#}");
    }
  }
}
/// Render the human-readable log body describing the freshly launched
/// AWS build instance, one muted `key: value` line per field.
fn start_aws_builder_log(
  instance_id: &str,
  ip: &str,
  config: &AwsBuilderConfig,
) -> String {
  let AwsBuilderConfig {
    ami_id,
    instance_type,
    volume_gb,
    subnet_id,
    assign_public_ip,
    security_group_ids,
    use_public_ip,
    ..
  } = config;
  let mut lines = Vec::with_capacity(9);
  lines.push(format!("{}: {instance_id}", muted("instance id")));
  lines.push(format!("{}: {ip}", muted("ip")));
  lines.push(format!("{}: {ami_id}", muted("ami id")));
  lines.push(format!("{}: {instance_type}", muted("instance type")));
  lines.push(format!("{}: {volume_gb} GB", muted("volume size")));
  lines.push(format!("{}: {subnet_id}", muted("subnet id")));
  lines.push(format!(
    "{}: {}",
    muted("security groups"),
    security_group_ids.join(", ")
  ));
  lines
    .push(format!("{}: {assign_public_ip}", muted("assign public ip")));
  lines.push(format!("{}: {use_public_ip}", muted("use public ip")));
  lines.join("\n")
}
/// This will make sure that a build with non-none image registry has an account attached,
/// and will check the core config for a token / aws ecr config matching requirements.
/// Otherwise it is left to periphery.
///
/// Returns `(registry_token, aws_ecr_config)` to forward to the builder
/// periphery. Either may be `None` when core holds no matching entry.
async fn validate_account_extract_registry_token_aws_ecr(
  build: &Build,
) -> anyhow::Result<(Option<String>, Option<AwsEcrConfig>)> {
  match &build.config.image_registry {
    // No registry - nothing to push, nothing to validate.
    ImageRegistry::None(_) => Ok((None, None)),
    ImageRegistry::DockerHub(CloudRegistryConfig {
      account, ..
    }) => {
      if account.is_empty() {
        return Err(anyhow!(
          "Must attach account to use DockerHub image registry"
        ));
      }
      // Token may be None if core doesn't hold this account - periphery
      // then falls back to its own configured accounts.
      Ok((core_config().docker_accounts.get(account).cloned(), None))
    }
    ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
      if account.is_empty() {
        return Err(anyhow!(
          "Must attach account to use GithubContainerRegistry"
        ));
      }
      Ok((core_config().github_accounts.get(account).cloned(), None))
    }
    ImageRegistry::AwsEcr(label) => {
      let config = core_config().aws_ecr_registries.get(label);
      let token = match config {
        Some(AwsEcrConfigWithCredentials {
          region,
          access_key_id,
          secret_access_key,
          ..
        }) => {
          // Mint a short-lived ECR login token with the stored
          // credentials, then ensure the target repo exists before
          // the build tries to push to it.
          let token = ecr::get_ecr_token(
            region,
            access_key_id,
            secret_access_key,
          )
          .await
          .context("failed to get aws ecr token")?;
          ecr::maybe_create_repo(
            &to_monitor_name(&build.name),
            region.to_string(),
            access_key_id,
            secret_access_key,
          )
          .await
          .context("failed to create aws ecr repo")?;
          Some(token)
        }
        None => None,
      };
      Ok((token, config.map(AwsEcrConfig::from)))
    }
    ImageRegistry::Custom(_) => {
      Err(anyhow!("Custom image registry is not implemented"))
    }
  }
}

View File

@@ -0,0 +1,532 @@
use std::collections::HashSet;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
api::execute::*,
entities::{
build::{Build, ImageRegistry},
config::core::AwsEcrConfig,
deployment::{Deployment, DeploymentImage},
get_image_name,
permission::PermissionLevel,
server::ServerState,
update::{Log, Update},
user::User,
Version,
},
};
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
cloud::aws::ecr,
config::core_config,
helpers::{
periphery_client,
query::{get_global_variables, get_server_with_status},
update::update_update,
},
monitor::update_cache_for_server,
resource,
state::{action_states, db_client, State},
};
use crate::helpers::update::init_execution_update;
impl Resolve<Deploy, (User, Update)> for State {
  /// Deploy a deployment onto its configured server:
  /// permission / server checks -> resolve build image + version ->
  /// interpolate variables & secrets into the environment ->
  /// resolve registry credentials -> call periphery Deploy.
  #[instrument(name = "Deploy", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    Deploy {
      deployment,
      stop_signal,
      stop_time,
    }: Deploy,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // User needs Execute permission on the deployment.
    let mut deployment =
      resource::get_check_permissions::<Deployment>(
        &deployment,
        &user,
        PermissionLevel::Execute,
      )
      .await?;
    if deployment.config.server_id.is_empty() {
      return Err(anyhow!("deployment has no server configured"));
    }
    // get the action state for the deployment (or insert default).
    let action_state = action_states()
      .deployment
      .get_or_insert_default(&deployment.id)
      .await;
    // Will check to ensure deployment not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.deploying = true)?;
    let (server, status) =
      get_server_with_status(&deployment.config.server_id).await?;
    if status != ServerState::Ok {
      return Err(anyhow!(
        "cannot send action when server is unreachable or disabled"
      ));
    }
    let periphery = periphery_client(&server)?;
    // This block gets the version of the image to deploy in the Build case.
    // It also gets the name of the image from the build and attaches it directly.
    let version = match deployment.config.image {
      DeploymentImage::Build { build_id, version } => {
        let build = resource::get::<Build>(&build_id).await?;
        let image_name = get_image_name(&build, |label| {
          core_config()
            .aws_ecr_registries
            .get(label)
            .map(AwsEcrConfig::from)
        })
        .context("failed to create image name")?;
        // A "none" version pin means: follow the build's latest version.
        let version = if version.is_none() {
          build.config.version
        } else {
          version
        };
        // replace image with corresponding build image.
        deployment.config.image = DeploymentImage::Image {
          image: format!("{image_name}:{version}"),
        };
        // set image registry to match build docker account if it's not overridden by deployment
        if matches!(
          &deployment.config.image_registry,
          ImageRegistry::None(_)
        ) {
          deployment.config.image_registry =
            build.config.image_registry;
        }
        version
      }
      DeploymentImage::Image { .. } => Version::default(),
    };
    let variables = get_global_variables().await?;
    let core_config = core_config();
    // Interpolate variables into environment
    let mut global_replacers = HashSet::new();
    let mut secret_replacers = HashSet::new();
    for env in &mut deployment.config.environment {
      // first pass - global variables
      let (res, more_replacers) = svi::interpolate_variables(
        &env.value,
        &variables,
        svi::Interpolator::DoubleBrackets,
        false,
      )
      .context("failed to interpolate global variables")?;
      global_replacers.extend(more_replacers);
      // second pass - core secrets
      let (res, more_replacers) = svi::interpolate_variables(
        &res,
        &core_config.secrets,
        svi::Interpolator::DoubleBrackets,
        false,
      )
      .context("failed to interpolate core secrets")?;
      secret_replacers.extend(more_replacers);
      // set env value with the result
      env.value = res;
    }
    // Show which variables were interpolated (values for globals,
    // names only for secrets).
    if !global_replacers.is_empty() {
      update.push_simple_log(
        "interpolate global variables",
        global_replacers
          .into_iter()
          .map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
          .collect::<Vec<_>>()
          .join("\n"),
      );
    }
    if !secret_replacers.is_empty() {
      update.push_simple_log(
        "interpolate core secrets",
        secret_replacers
          .iter()
          .map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
          .collect::<Vec<_>>()
          .join("\n"),
      );
    }
    update.version = version;
    update_update(update.clone()).await?;
    // Resolve registry credentials for the pull on the target server.
    let (registry_token, aws_ecr) = match &deployment
      .config
      .image_registry
    {
      ImageRegistry::None(_) => (None, None),
      ImageRegistry::DockerHub(params) => (
        core_config.docker_accounts.get(&params.account).cloned(),
        None,
      ),
      ImageRegistry::Ghcr(params) => (
        core_config.github_accounts.get(&params.account).cloned(),
        None,
      ),
      ImageRegistry::AwsEcr(label) => {
        let config = core_config
          .aws_ecr_registries
          .get(label)
          .with_context(|| {
            format!(
              "did not find config for aws ecr registry {label}"
            )
          })?;
        (
          Some(
            ecr::get_ecr_token(
              &config.region,
              &config.access_key_id,
              &config.secret_access_key,
            )
            .await
            .context("failed to create aws ecr login token")?,
          ),
          Some(AwsEcrConfig::from(config)),
        )
      }
      ImageRegistry::Custom(_) => {
        return Err(anyhow!("Custom ImageRegistry not yet supported"))
      }
    };
    // `replacers` lets periphery redact secret values from logs.
    match periphery
      .request(api::container::Deploy {
        deployment,
        stop_signal,
        stop_time,
        registry_token,
        aws_ecr,
        replacers: secret_replacers.into_iter().collect(),
      })
      .await
    {
      Ok(log) => update.logs.push(log),
      Err(e) => {
        update.push_error_log(
          "deploy container",
          format_serror(
            &e.context("failed to deploy container").into(),
          ),
        );
      }
    };
    update_cache_for_server(&server).await;
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<StartContainer, (User, Update)> for State {
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartContainer { deployment }: StartContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.starting = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::StartContainer {
name: deployment.name.clone(),
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"start container",
format_serror(&e.context("failed to start container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StopContainer, (User, Update)> for State {
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopContainer {
deployment,
signal,
time,
}: StopContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let deployment = resource::get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
.deployment
.get_or_insert_default(&deployment.id)
.await;
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.stopping = true)?;
if deployment.config.server_id.is_empty() {
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::StopContainer {
name: deployment.name.clone(),
signal: signal
.unwrap_or(deployment.config.termination_signal)
.into(),
time: time
.unwrap_or(deployment.config.termination_timeout)
.into(),
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StopAllContainers, (User, Update)> for State {
  /// Stop every deployment container on a server by fanning out
  /// concurrent StopContainer executions, collecting per-deployment
  /// failures into the update's logs.
  #[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    StopAllContainers { server }: StopAllContainers,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    let (server, status) = get_server_with_status(&server).await?;
    if status != ServerState::Ok {
      return Err(anyhow!(
        "cannot send action when server is unreachable or disabled"
      ));
    }
    // get the action state for the server (or insert default).
    let action_state = action_states()
      .server
      .get_or_insert_default(&server.id)
      .await;
    // Will check to ensure server not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard = action_state
      .update(|state| state.stopping_containers = true)?;
    let deployments = find_collect(
      &db_client().await.deployments,
      doc! {
        "config.server_id": &server.id
      },
      None,
    )
    .await
    .context("failed to find deployments on server")?;
    // One StopContainer execution per deployment, each with its own
    // execution update. `user` is borrowed by each future and cloned
    // only per resolve call.
    let futures = deployments.iter().map(|deployment| async {
      let req = super::ExecuteRequest::StopContainer(StopContainer {
        deployment: deployment.id.clone(),
        signal: None,
        time: None,
      });
      (
        async {
          let update = init_execution_update(&req, &user).await?;
          State
            .resolve(
              StopContainer {
                deployment: deployment.id.clone(),
                signal: None,
                time: None,
              },
              (user.clone(), update),
            )
            .await
        }
        .await,
        deployment.name.clone(),
        deployment.id.clone(),
      )
    });
    let results = join_all(futures).await;
    let deployment_names = deployments
      .iter()
      .map(|d| format!("{} ({})", d.name, d.id))
      .collect::<Vec<_>>()
      .join("\n");
    update.push_simple_log("stopping containers", deployment_names);
    // Surface each failed stop as its own error log on the update.
    for (res, name, id) in results {
      if let Err(e) = res {
        update.push_error_log(
          "stop container failure",
          format_serror(
            &e.context(format!(
              "failed to stop container {name} ({id})"
            ))
            .into(),
          ),
        );
      }
    }
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<RemoveContainer, (User, Update)> for State {
  /// Remove (destroy) the deployment's container on its configured
  /// server, using the deployment's termination signal / timeout as
  /// defaults when the request does not specify them.
  #[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    RemoveContainer {
      deployment,
      signal,
      time,
    }: RemoveContainer,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    let deployment = resource::get_check_permissions::<Deployment>(
      &deployment,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // get the action state for the deployment (or insert default).
    let action_state = action_states()
      .deployment
      .get_or_insert_default(&deployment.id)
      .await;
    // Will check to ensure deployment not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.removing = true)?;
    if deployment.config.server_id.is_empty() {
      return Err(anyhow!("deployment has no server configured"));
    }
    let (server, status) =
      get_server_with_status(&deployment.config.server_id).await?;
    if status != ServerState::Ok {
      return Err(anyhow!(
        "cannot send action when server is unreachable or disabled"
      ));
    }
    let periphery = periphery_client(&server)?;
    let log = match periphery
      .request(api::container::RemoveContainer {
        name: deployment.name.clone(),
        signal: signal
          .unwrap_or(deployment.config.termination_signal)
          .into(),
        time: time
          .unwrap_or(deployment.config.termination_timeout)
          .into(),
      })
      .await
    {
      Ok(log) => log,
      // Fix: these previously said "stop container" / "failed to stop
      // container" - copy-pasted from the StopContainer resolver.
      Err(e) => Log::error(
        "remove container",
        format_serror(
          &e.context("failed to remove container").into(),
        ),
      ),
    };
    update.logs.push(log);
    // Refresh server cache before finalizing, consistent with the
    // Start/Stop/Deploy resolvers in this file.
    update_cache_for_server(&server).await;
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}

View File

@@ -0,0 +1,156 @@
use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
update::{Log, Update},
user::User,
},
};
use mungos::by_id::find_one_by_id;
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
state::{db_client, State},
};
mod build;
mod deployment;
mod procedure;
mod repo;
mod server;
mod server_template;
mod sync;
/// All execution operations accepted by `POST /execute`.
/// Serialized with serde's adjacent tagging:
/// `{ "type": "<variant>", "params": { ... } }`.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args((User, Update))]
#[serde(tag = "type", content = "params")]
pub enum ExecuteRequest {
  // ==== SERVER ====
  PruneContainers(PruneContainers),
  PruneImages(PruneImages),
  PruneNetworks(PruneNetworks),
  // ==== DEPLOYMENT ====
  Deploy(Deploy),
  StartContainer(StartContainer),
  StopContainer(StopContainer),
  StopAllContainers(StopAllContainers),
  RemoveContainer(RemoveContainer),
  // ==== BUILD ====
  RunBuild(RunBuild),
  CancelBuild(CancelBuild),
  // ==== REPO ====
  CloneRepo(CloneRepo),
  PullRepo(PullRepo),
  // ==== PROCEDURE ====
  RunProcedure(RunProcedure),
  // ==== SERVER TEMPLATE ====
  LaunchServer(LaunchServer),
  // ==== SYNC ====
  RunSync(RunSync),
}
/// Build the `/execute` router: a single POST endpoint,
/// guarded by the auth middleware.
pub fn router() -> Router {
  let routes = Router::new().route("/", post(handler));
  // Auth runs before the handler for every request.
  routes.layer(middleware::from_fn(auth_request))
}
/// Axum handler for `POST /execute`.
///
/// Creates the initial `Update` record, spawns the execution as a
/// background task, and responds immediately with the in-progress
/// update. A second watchdog task records task errors / panics onto
/// the stored update.
async fn handler(
  Extension(user): Extension<User>,
  Json(request): Json<ExecuteRequest>,
) -> serror::Result<Json<Update>> {
  let req_id = Uuid::new_v4();
  // need to validate no cancel is active before any update is created.
  build::validate_cancel_build(&request).await?;
  let update = init_execution_update(&request, &user).await?;
  let handle =
    tokio::spawn(task(req_id, request, user, update.clone()));
  // Watchdog: if the execution task returned an error or panicked,
  // append an error log to the persisted update so the failure is
  // visible to clients polling it.
  tokio::spawn({
    let update_id = update.id.clone();
    async move {
      let log = match handle.await {
        Ok(Err(e)) => {
          warn!("/execute request {req_id} task error: {e:#}",);
          Log::error("task error", format_serror(&e.into()))
        }
        Err(e) => {
          // JoinError: the task panicked or was cancelled.
          warn!("/execute request {req_id} spawn error: {e:?}",);
          Log::error("spawn error", format!("{e:#?}"))
        }
        // Task completed without error: nothing to record here.
        _ => return,
      };
      let res = async {
        let mut update =
          find_one_by_id(&db_client().await.updates, &update_id)
            .await
            .context("failed to query to db")?
            .context("no update exists with given id")?;
        update.logs.push(log);
        update.finalize();
        update_update(update).await
      }
      .await;
      if let Err(e) = res {
        warn!("failed to update update with task error log | {e:#}");
      }
    }
  });
  // Respond right away; the background task finalizes the update.
  Ok(Json(update))
}
#[instrument(name = "ExecuteRequest", skip(user, update), fields(user_id = user.id, update_id = update.id))]
async fn task(
req_id: Uuid,
request: ExecuteRequest,
user: User,
update: Update,
) -> anyhow::Result<String> {
info!(
"/execute request {req_id} | user: {} ({})",
user.username, user.id
);
let timer = Instant::now();
let res = State
.resolve_request(request, (user, update))
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/execute request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
res
}

View File

@@ -0,0 +1,114 @@
use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use monitor_client::{
api::execute::RunProcedure,
entities::{
permission::PermissionLevel, procedure::Procedure,
update::Update, user::User,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use tokio::sync::Mutex;
use crate::{
helpers::{procedure::execute_procedure, update::update_update},
resource::{self, refresh_procedure_state_cache},
state::{action_states, db_client, State},
};
impl Resolve<RunProcedure, (User, Update)> for State {
  /// Run a procedure. Thin wrapper around [`resolve_inner`], which
  /// returns a boxed future.
  #[instrument(name = "RunProcedure", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    RunProcedure { procedure }: RunProcedure,
    (user, update): (User, Update),
  ) -> anyhow::Result<Update> {
    resolve_inner(procedure, user, update).await
  }
}
// Returns a boxed future rather than using `async fn` — presumably
// because procedure execution can indirectly recurse back into this
// resolver, which would make an unboxed future infinitely sized.
// TODO(review): confirm the recursion path through execute_procedure.
fn resolve_inner(
  procedure: String,
  user: User,
  mut update: Update,
) -> Pin<
  Box<
    dyn std::future::Future<Output = anyhow::Result<Update>> + Send,
  >,
> {
  Box::pin(async move {
    // Caller must hold Execute permission on the procedure.
    let procedure = resource::get_check_permissions::<Procedure>(
      &procedure,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // Need to push the initial log, as execute_procedure
    // assumes first log is already created
    // and will panic otherwise.
    update.push_simple_log(
      "execute_procedure",
      format!(
        "{}: executing procedure '{}'",
        muted("INFO"),
        bold(&procedure.name)
      ),
    );
    // get the action state for the procedure (or insert default).
    let action_state = action_states()
      .procedure
      .get_or_insert_default(&procedure.id)
      .await;
    // This will set action state back to default when dropped.
    // Will also check to ensure procedure not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.running = true)?;
    // Hand the update to execute_procedure behind a Mutex so its
    // stages can append logs; reclaim exclusive ownership after.
    let update = Mutex::new(update);
    let res = execute_procedure(&procedure, &update).await;
    let mut update = update.into_inner();
    match res {
      Ok(_) => {
        update.push_simple_log(
          "execution ok",
          format!(
            "{}: the procedure has {} with no errors",
            muted("INFO"),
            colored("completed", Color::Green)
          ),
        );
      }
      Err(e) => update
        .push_error_log("execution error", format_serror(&e.into())),
    }
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db_client().await.updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_procedure_state_cache().await;
    }
    update_update(update.clone()).await?;
    Ok(update)
  })
}

View File

@@ -0,0 +1,192 @@
use anyhow::anyhow;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
monitor_timestamp, optional_string,
permission::PermissionLevel,
repo::Repo,
server::Server,
update::{Log, Update},
user::User,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{periphery_client, update::update_update},
resource::{self, refresh_repo_state_cache},
state::{action_states, db_client, State},
};
impl Resolve<CloneRepo, (User, Update)> for State {
  /// Clone the repo onto its attached server via the periphery agent,
  /// appending the periphery logs to the update.
  #[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    CloneRepo { repo }: CloneRepo,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // Caller must hold Execute permission on the repo.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // get the action state for the repo (or insert default).
    let action_state =
      action_states().repo.get_or_insert_default(&repo.id).await;
    // This will set action state back to default when dropped.
    // Will also check to ensure repo not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.cloning = true)?;
    if repo.config.server_id.is_empty() {
      return Err(anyhow!("repo has no server attached"));
    }
    let server =
      resource::get::<Server>(&repo.config.server_id).await?;
    let periphery = periphery_client(&server)?;
    // Token for the repo's configured github account, if core has one.
    let github_token = core_config()
      .github_accounts
      .get(&repo.config.github_account)
      .cloned();
    let logs = match periphery
      .request(api::git::CloneRepo {
        args: (&repo).into(),
        github_token,
      })
      .await
    {
      Ok(logs) => logs,
      Err(e) => {
        vec![Log::error(
          "clone repo",
          format_serror(&e.context("failed to clone repo").into()),
        )]
      }
    };
    update.logs.extend(logs);
    update.finalize();
    // A successful clone also counts as a pull for bookkeeping.
    if update.success {
      update_last_pulled_time(&repo.name).await;
    }
    handle_update_return(update).await
  }
}
impl Resolve<PullRepo, (User, Update)> for State {
  /// Pull the repo on its attached server via the periphery agent,
  /// appending the periphery logs to the update.
  #[instrument(name = "PullRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    PullRepo { repo }: PullRepo,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // Caller must hold Execute permission on the repo.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // get the action state for the repo (or insert default).
    let action_state =
      action_states().repo.get_or_insert_default(&repo.id).await;
    // This will set action state back to default when dropped.
    // Will also check to ensure repo not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.pulling = true)?;
    if repo.config.server_id.is_empty() {
      return Err(anyhow!("repo has no server attached"));
    }
    let server =
      resource::get::<Server>(&repo.config.server_id).await?;
    let periphery = periphery_client(&server)?;
    let logs = match periphery
      .request(api::git::PullRepo {
        name: repo.name.clone(),
        branch: optional_string(&repo.config.branch),
        commit: optional_string(&repo.config.commit),
        on_pull: repo.config.on_pull.into_option(),
      })
      .await
    {
      Ok(logs) => logs,
      Err(e) => {
        vec![Log::error(
          "pull repo",
          format_serror(&e.context("failed to pull repo").into()),
        )]
      }
    };
    update.logs.extend(logs);
    update.finalize();
    if update.success {
      update_last_pulled_time(&repo.name).await;
    }
    handle_update_return(update).await
  }
}
/// Persist the finalized update and refresh the repo state cache,
/// then broadcast it via `update_update`.
#[instrument(skip_all, fields(update_id = update.id))]
async fn handle_update_return(
  update: Update,
) -> anyhow::Result<Update> {
  // Need to manually update the update before cache refresh,
  // and before broadcast with add_update.
  // The Err case of to_document should be unreachable,
  // but will fail to update cache in that case.
  if let Ok(update_doc) = to_document(&update) {
    let _ = update_one_by_id(
      &db_client().await.updates,
      &update.id,
      mungos::update::Update::Set(update_doc),
      None,
    )
    .await;
    refresh_repo_state_cache().await;
  }
  update_update(update.clone()).await?;
  Ok(update)
}
/// Best-effort bump of the repo's `info.last_pulled_at` timestamp.
/// Failures are logged and swallowed — they should not fail the
/// surrounding clone / pull execution.
#[instrument]
async fn update_last_pulled_time(repo_name: &str) {
  let client = db_client().await;
  let result = client
    .repos
    .update_one(
      doc! { "name": repo_name },
      doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
      None,
    )
    .await;
  match result {
    Ok(_) => {}
    Err(e) => warn!(
      "failed to update repo last_pulled_at | repo: {repo_name} | {e:#}",
    ),
  }
}

View File

@@ -0,0 +1,175 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::execute::*,
entities::{
monitor_timestamp,
permission::PermissionLevel,
server::Server,
update::{Log, Update, UpdateStatus},
user::User,
},
};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{periphery_client, update::update_update},
resource,
state::{action_states, State},
};
impl Resolve<PruneContainers, (User, Update)> for State {
#[instrument(name = "PruneContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneContainers { server }: PruneContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pruning_containers = true)?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::PruneContainers {})
.await
.context(format!(
"failed to prune containers on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"prune containers",
format_serror(
&e.context("failed to prune containers").into(),
),
),
};
update.success = log.success;
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.logs.push(log);
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PruneNetworks, (User, Update)> for State {
#[instrument(name = "PruneNetworks", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneNetworks { server }: PruneNetworks,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pruning_networks = true)?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::network::PruneNetworks {})
.await
.context(format!(
"failed to prune networks on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"prune networks",
format_serror(&e.context("failed to prune networks").into()),
),
};
update.success = log.success;
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.logs.push(log);
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PruneImages, (User, Update)> for State {
#[instrument(name = "PruneImages", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneImages { server }: PruneImages,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pruning_images = true)?;
let periphery = periphery_client(&server)?;
let log =
match periphery.request(api::build::PruneImages {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune images",
format!(
"failed to prune images on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -0,0 +1,144 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::{execute::LaunchServer, write::CreateServer},
entities::{
permission::PermissionLevel,
server::PartialServerConfig,
server_template::{ServerTemplate, ServerTemplateConfig},
update::Update,
user::User,
},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
cloud::{
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
},
helpers::update::update_update,
resource,
state::{db_client, State},
};
impl Resolve<LaunchServer, (User, Update)> for State {
  /// Launch a cloud server from a server template (AWS EC2 or
  /// Hetzner), then register it as a monitor Server resource.
  /// Launch / create failures are recorded on the update and returned
  /// as Ok(update) rather than Err.
  #[instrument(name = "LaunchServer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    LaunchServer {
      name,
      server_template,
    }: LaunchServer,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // validate name isn't already taken by another server
    if db_client()
      .await
      .servers
      .find_one(
        doc! {
          "name": &name
        },
        None,
      )
      .await
      .context("failed to query db for servers")?
      .is_some()
    {
      return Err(anyhow!("name is already taken"));
    }
    // Caller must hold Execute permission on the template.
    let template = resource::get_check_permissions::<ServerTemplate>(
      &server_template,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    update.push_simple_log(
      "launching server",
      format!("{:#?}", template.config),
    );
    update_update(update.clone()).await?;
    // Provider-specific launch. Each arm produces the partial server
    // config pointing at the new instance's periphery address.
    let config = match template.config {
      ServerTemplateConfig::Aws(config) => {
        let region = config.region.clone();
        let instance = match launch_ec2_instance(&name, config).await
        {
          Ok(instance) => instance,
          Err(e) => {
            // Record the failure and finish the update early.
            update.push_error_log(
              "launch server",
              format!("failed to launch aws instance\n\n{e:#?}"),
            );
            update.finalize();
            update_update(update.clone()).await?;
            return Ok(update);
          }
        };
        update.push_simple_log(
          "launch server",
          format!(
            "successfully launched server {name} on ip {}",
            instance.ip
          ),
        );
        // 8120 is the periphery agent port. TODO(review): confirm
        // this matches the default periphery config.
        PartialServerConfig {
          address: format!("http://{}:8120", instance.ip).into(),
          region: region.into(),
          ..Default::default()
        }
      }
      ServerTemplateConfig::Hetzner(config) => {
        let datacenter = config.datacenter;
        let server = match launch_hetzner_server(&name, config).await
        {
          Ok(server) => server,
          Err(e) => {
            // Record the failure and finish the update early.
            update.push_error_log(
              "launch server",
              format!("failed to launch hetzner server\n\n{e:#?}"),
            );
            update.finalize();
            update_update(update.clone()).await?;
            return Ok(update);
          }
        };
        update.push_simple_log(
          "launch server",
          format!(
            "successfully launched server {name} on ip {}",
            server.ip
          ),
        );
        PartialServerConfig {
          address: format!("http://{}:8120", server.ip).into(),
          region: datacenter.as_ref().to_string().into(),
          ..Default::default()
        }
      }
    };
    // Register the launched instance as a Server resource; stash the
    // new server id on the update for consumers.
    match self.resolve(CreateServer { name, config }, user).await {
      Ok(server) => {
        update.push_simple_log(
          "create server",
          format!("created server {} ({})", server.name, server.id),
        );
        update.other_data = server.id;
      }
      Err(e) => {
        update.push_error_log(
          "create server",
          format_serror(&e.context("failed to create server").into()),
        );
      }
    };
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}

View File

@@ -0,0 +1,394 @@
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use mongo_indexed::doc;
use monitor_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{
self,
alerter::Alerter,
build::Build,
builder::Builder,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
update::{Log, Update},
user::{sync_user, User},
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use crate::{
helpers::{
query::get_id_to_tags,
sync::{
deployment,
resource::{
get_updates_for_execution, AllResourcesById, ResourceSync,
},
},
update::update_update,
},
resource::{self, refresh_resource_sync_state_cache},
state::{db_client, State},
};
impl Resolve<RunSync, (User, Update)> for State {
  /// Execute a resource sync: pull the declared resources from the
  /// sync's repo, diff them against current state, and apply the
  /// resulting creates / updates / deletes in dependency order.
  #[instrument(name = "RunSync", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    &self,
    RunSync { sync }: RunSync,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    // Caller must hold Execute permission on the sync.
    let sync = resource::get_check_permissions::<
      entities::sync::ResourceSync,
    >(&sync, &user, PermissionLevel::Execute)
    .await?;
    if sync.config.repo.is_empty() {
      return Err(anyhow!("resource sync repo not configured"));
    }
    // Fetch the declared resources from the remote repo, along with
    // clone/read logs and the commit hash / message that was synced.
    let (res, logs, hash, message) =
      crate::helpers::sync::remote::get_remote_resources(&sync)
        .await
        .context("failed to get remote resources")?;
    update.logs.extend(logs);
    update_update(update.clone()).await?;
    let resources = res?;
    // Snapshot current state + tags for diffing.
    let all_resources = AllResourcesById::load().await?;
    let id_to_tags = get_id_to_tags(None).await?;
    // Compute (create, update, delete) sets per resource type.
    let (servers_to_create, servers_to_update, servers_to_delete) =
      get_updates_for_execution::<Server>(
        resources.servers,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (
      deployments_to_create,
      deployments_to_update,
      deployments_to_delete,
    ) = deployment::get_updates_for_execution(
      resources.deployments,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    let (builds_to_create, builds_to_update, builds_to_delete) =
      get_updates_for_execution::<Build>(
        resources.builds,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (repos_to_create, repos_to_update, repos_to_delete) =
      get_updates_for_execution::<Repo>(
        resources.repos,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (
      procedures_to_create,
      procedures_to_update,
      procedures_to_delete,
    ) = get_updates_for_execution::<Procedure>(
      resources.procedures,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    let (builders_to_create, builders_to_update, builders_to_delete) =
      get_updates_for_execution::<Builder>(
        resources.builders,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (alerters_to_create, alerters_to_update, alerters_to_delete) =
      get_updates_for_execution::<Alerter>(
        resources.alerters,
        sync.config.delete,
        &all_resources,
        &id_to_tags,
      )
      .await?;
    let (
      server_templates_to_create,
      server_templates_to_update,
      server_templates_to_delete,
    ) = get_updates_for_execution::<ServerTemplate>(
      resources.server_templates,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    let (
      resource_syncs_to_create,
      resource_syncs_to_update,
      resource_syncs_to_delete,
    ) = get_updates_for_execution::<entities::sync::ResourceSync>(
      resources.resource_syncs,
      sync.config.delete,
      &all_resources,
      &id_to_tags,
    )
    .await?;
    let (
      variables_to_create,
      variables_to_update,
      variables_to_delete,
    ) = crate::helpers::sync::variables::get_updates_for_execution(
      resources.variables,
      sync.config.delete,
    )
    .await?;
    let (
      user_groups_to_create,
      user_groups_to_update,
      user_groups_to_delete,
    ) = crate::helpers::sync::user_groups::get_updates_for_execution(
      resources.user_groups,
      sync.config.delete,
      &all_resources,
    )
    .await?;
    // Short-circuit when there is nothing at all to apply.
    if resource_syncs_to_create.is_empty()
      && resource_syncs_to_update.is_empty()
      && resource_syncs_to_delete.is_empty()
      && server_templates_to_create.is_empty()
      && server_templates_to_update.is_empty()
      && server_templates_to_delete.is_empty()
      && servers_to_create.is_empty()
      && servers_to_update.is_empty()
      && servers_to_delete.is_empty()
      && deployments_to_create.is_empty()
      && deployments_to_update.is_empty()
      && deployments_to_delete.is_empty()
      && builds_to_create.is_empty()
      && builds_to_update.is_empty()
      && builds_to_delete.is_empty()
      && builders_to_create.is_empty()
      && builders_to_update.is_empty()
      && builders_to_delete.is_empty()
      && alerters_to_create.is_empty()
      && alerters_to_update.is_empty()
      && alerters_to_delete.is_empty()
      && repos_to_create.is_empty()
      && repos_to_update.is_empty()
      && repos_to_delete.is_empty()
      && procedures_to_create.is_empty()
      && procedures_to_update.is_empty()
      && procedures_to_delete.is_empty()
      && user_groups_to_create.is_empty()
      && user_groups_to_update.is_empty()
      && user_groups_to_delete.is_empty()
      && variables_to_create.is_empty()
      && variables_to_update.is_empty()
      && variables_to_delete.is_empty()
    {
      update.push_simple_log(
        "No Changes",
        format!(
          "{}. exiting.",
          colored("nothing to do", Color::Green)
        ),
      );
      update.finalize();
      update_update(update.clone()).await?;
      return Ok(update);
    }
    // Apply the changes in dependency order. Each run_updates call
    // returns an optional summary log appended to this update.
    // =================
    // No deps
    maybe_extend(
      &mut update.logs,
      crate::helpers::sync::variables::run_updates(
        variables_to_create,
        variables_to_update,
        variables_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      crate::helpers::sync::user_groups::run_updates(
        user_groups_to_create,
        user_groups_to_update,
        user_groups_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      entities::sync::ResourceSync::run_updates(
        resource_syncs_to_create,
        resource_syncs_to_update,
        resource_syncs_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      ServerTemplate::run_updates(
        server_templates_to_create,
        server_templates_to_update,
        server_templates_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      Server::run_updates(
        servers_to_create,
        servers_to_update,
        servers_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      Alerter::run_updates(
        alerters_to_create,
        alerters_to_update,
        alerters_to_delete,
      )
      .await,
    );
    // Dependent on server
    maybe_extend(
      &mut update.logs,
      Builder::run_updates(
        builders_to_create,
        builders_to_update,
        builders_to_delete,
      )
      .await,
    );
    maybe_extend(
      &mut update.logs,
      Repo::run_updates(
        repos_to_create,
        repos_to_update,
        repos_to_delete,
      )
      .await,
    );
    // Dependant on builder
    maybe_extend(
      &mut update.logs,
      Build::run_updates(
        builds_to_create,
        builds_to_update,
        builds_to_delete,
      )
      .await,
    );
    // Dependant on server / build
    if let Some(res) = deployment::run_updates(
      deployments_to_create,
      deployments_to_update,
      deployments_to_delete,
    )
    .await
    {
      update.logs.extend(res);
    }
    // Dependant on everything
    maybe_extend(
      &mut update.logs,
      Procedure::run_updates(
        procedures_to_create,
        procedures_to_update,
        procedures_to_delete,
      )
      .await,
    );
    // Record which commit was just synced; best-effort.
    let db = db_client().await;
    if let Err(e) = update_one_by_id(
      &db.resource_syncs,
      &sync.id,
      doc! {
        "$set": {
          "info.last_sync_ts": monitor_timestamp(),
          "info.last_sync_hash": hash,
          "info.last_sync_message": message,
        }
      },
      None,
    )
    .await
    {
      warn!(
        "failed to update resource sync {} info after sync | {e:#}",
        sync.name
      )
    }
    // Recompute the sync's pending diff now that changes were applied.
    if let Err(e) = State
      .resolve(
        RefreshResourceSyncPending { sync: sync.id },
        sync_user().to_owned(),
      )
      .await
    {
      warn!("failed to refresh sync {} after run | {e:#}", sync.name);
      update.push_error_log(
        "refresh sync",
        format_serror(
          &e.context("failed to refresh sync pending after run")
            .into(),
        ),
      );
    }
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db.updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_resource_sync_state_cache().await;
    }
    update_update(update.clone()).await?;
    Ok(update)
  }
}
/// Append `log` to `logs` when it is present.
fn maybe_extend(logs: &mut Vec<Log>, log: Option<Log>) {
  // Option<T> iterates over zero or one items, so extend
  // covers both the Some and None cases.
  logs.extend(log);
}

5
bin/core/src/api/mod.rs Normal file
View File

@@ -0,0 +1,5 @@
pub mod auth;
pub mod execute;
pub mod read;
pub mod user;
pub mod write;

View File

@@ -0,0 +1,84 @@
use anyhow::Context;
use monitor_client::{
api::read::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
entities::{update::ResourceTargetVariant, user::User},
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
state::{db_client, State},
};
// Page size for the alert listing below.
const NUM_ALERTS_PER_PAGE: u64 = 100;
impl Resolve<ListAlerts, User> for State {
  /// List alerts, newest first, paged by NUM_ALERTS_PER_PAGE.
  /// Non-admin users (without transparent mode) only see alerts for
  /// servers / deployments they have access to.
  async fn resolve(
    &self,
    ListAlerts { query, page }: ListAlerts,
    user: User,
  ) -> anyhow::Result<ListAlertsResponse> {
    let mut query = query.unwrap_or_default();
    if !user.admin && !core_config().transparent_mode {
      // Restrict to the user's accessible servers / deployments.
      let server_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Server,
      )
      .await?;
      let deployment_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Deployment,
      )
      .await?;
      query.extend(doc! {
        "$or": [
          { "target.type": "Server", "target.id": { "$in": &server_ids } },
          { "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
        ]
      });
    }
    let alerts = find_collect(
      &db_client().await.alerts,
      query,
      FindOptions::builder()
        .sort(doc! { "ts": -1 })
        .limit(NUM_ALERTS_PER_PAGE as i64)
        .skip(page * NUM_ALERTS_PER_PAGE)
        .build(),
    )
    .await
    .context("failed to get alerts from db")?;
    // A short page means we've reached the end of the results.
    let next_page = if alerts.len() < NUM_ALERTS_PER_PAGE as usize {
      None
    } else {
      Some((page + 1) as i64)
    };
    let res = ListAlertsResponse { next_page, alerts };
    Ok(res)
  }
}
impl Resolve<GetAlert, User> for State {
  /// Fetch a single alert by id.
  // NOTE(review): no permission filtering is applied here — any
  // authenticated user can fetch any alert by id. Confirm intended.
  async fn resolve(
    &self,
    GetAlert { id }: GetAlert,
    _: User,
  ) -> anyhow::Result<GetAlertResponse> {
    // Distinguish a query failure from the alert simply not existing.
    let alert = find_one_by_id(&db_client().await.alerts, &id)
      .await
      .context("failed to query db for alert")?;
    alert.context("no alert found with given id")
  }
}

View File

@@ -0,0 +1,91 @@
use std::str::FromStr;
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
alerter::{Alerter, AlerterListItem},
permission::PermissionLevel,
update::ResourceTargetVariant,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
impl Resolve<GetAlerter, User> for State {
  /// Fetch a single alerter, enforcing Read permission for the user.
  async fn resolve(
    &self,
    GetAlerter { alerter }: GetAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    // Permission check and fetch happen in one helper call.
    let alerter = resource::get_check_permissions::<Alerter>(
      &alerter,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(alerter)
  }
}
impl Resolve<ListAlerters, User> for State {
  /// List alerters visible to the user as list items.
  async fn resolve(
    &self,
    ListAlerters { query }: ListAlerters,
    user: User,
  ) -> anyhow::Result<Vec<AlerterListItem>> {
    // Shared listing helper applies the query + user permissions.
    let alerters =
      resource::list_for_user::<Alerter>(query, &user).await?;
    Ok(alerters)
  }
}
impl Resolve<ListFullAlerters, User> for State {
  /// List alerters visible to the user as full resources
  /// (not just list items).
  async fn resolve(
    &self,
    ListFullAlerters { query }: ListFullAlerters,
    user: User,
  ) -> anyhow::Result<ListFullAlertersResponse> {
    resource::list_full_for_user::<Alerter>(query, &user).await
  }
}
impl Resolve<GetAlertersSummary, User> for State {
  /// Count the alerters visible to the user. Admins (and transparent
  /// mode) count everything; otherwise count only the user's
  /// accessible alerter ids.
  async fn resolve(
    &self,
    GetAlertersSummary {}: GetAlertersSummary,
    user: User,
  ) -> anyhow::Result<GetAlertersSummaryResponse> {
    let query = if user.admin || core_config().transparent_mode {
      None
    } else {
      let ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Alerter,
      )
      .await?
      .into_iter()
      // flat_map silently drops ids that fail to parse as ObjectIds.
      .flat_map(|id| ObjectId::from_str(&id))
      .collect::<Vec<_>>();
      let query = doc! {
        "_id": { "$in": ids }
      };
      Some(query)
    };
    let total = db_client()
      .await
      .alerters
      .count_documents(query, None)
      .await
      .context("failed to count all alerter documents")?;
    let res = GetAlertersSummaryResponse {
      total: total as u32,
    };
    Ok(res)
  }
}

View File

@@ -0,0 +1,312 @@
use std::{
collections::{HashMap, HashSet},
sync::OnceLock,
};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
use futures::TryStreamExt;
use monitor_client::{
api::read::*,
entities::{
build::{Build, BuildActionState, BuildListItem, BuildState},
permission::PermissionLevel,
update::UpdateStatus,
user::User,
Operation,
},
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::{Resolve, ResolveToString};
use crate::{
config::core_config,
resource,
state::{action_states, build_state_cache, db_client, State},
};
impl Resolve<GetBuild, User> for State {
  /// Fetch a single build, enforcing Read permission for the user.
  async fn resolve(
    &self,
    GetBuild { build }: GetBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    // Permission check and fetch happen in one helper call.
    let build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(build)
  }
}
impl Resolve<ListBuilds, User> for State {
  /// List builds visible to the user as list items.
  async fn resolve(
    &self,
    ListBuilds { query }: ListBuilds,
    user: User,
  ) -> anyhow::Result<Vec<BuildListItem>> {
    // Shared listing helper applies the query + user permissions.
    let builds =
      resource::list_for_user::<Build>(query, &user).await?;
    Ok(builds)
  }
}
impl Resolve<ListFullBuilds, User> for State {
  /// List builds visible to the user as full resources
  /// (not just list items).
  async fn resolve(
    &self,
    ListFullBuilds { query }: ListFullBuilds,
    user: User,
  ) -> anyhow::Result<ListFullBuildsResponse> {
    resource::list_full_for_user::<Build>(query, &user).await
  }
}
impl Resolve<GetBuildActionState, User> for State {
  /// Fetch the in-memory action state (e.g. whether the build is
  /// currently running) for a build the user can read.
  async fn resolve(
    &self,
    GetBuildActionState { build }: GetBuildActionState,
    user: User,
  ) -> anyhow::Result<BuildActionState> {
    let build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A missing cache entry falls back to the default (idle) state.
    let state = action_states()
      .build
      .get(&build.id)
      .await
      .unwrap_or_default()
      .get()?;
    Ok(state)
  }
}
impl Resolve<GetBuildsSummary, User> for State {
  /// Tally the builds visible to `user` into ok / failed / building /
  /// unknown buckets, combining the cached build state with the live
  /// action state (which tracks in-progress builds).
  async fn resolve(
    &self,
    GetBuildsSummary {}: GetBuildsSummary,
    user: User,
  ) -> anyhow::Result<GetBuildsSummaryResponse> {
    let builds = resource::list_full_for_user::<Build>(
      Default::default(),
      &user,
    )
    .await
    .context("failed to get all builds")?;
    let mut res = GetBuildsSummaryResponse::default();
    let cache = build_state_cache();
    let action_states = action_states();
    for build in builds {
      res.total += 1;
      match (
        // Missing cache entries fall back to the default state.
        cache.get(&build.id).await.unwrap_or_default(),
        action_states
          .build
          .get(&build.id)
          .await
          .unwrap_or_default()
          .get()?,
      ) {
        // An active build in the action state wins over the cache.
        (_, action_states) if action_states.building => {
          res.building += 1;
        }
        (BuildState::Ok, _) => res.ok += 1,
        (BuildState::Failed, _) => res.failed += 1,
        (BuildState::Unknown, _) => res.unknown += 1,
        // will never come off the cache in the building state, since that comes from action states
        (BuildState::Building, _) => unreachable!(),
      }
    }
    Ok(res)
  }
}
/// Milliseconds in one day (24 * 60 * 60 * 1000).
const ONE_DAY_MS: i64 = 86400000;
impl Resolve<GetBuildMonthlyStats, User> for State {
  /// Aggregate per-day build counts and total build time (in hours)
  /// over a 30-day window. `page` 0 is the most recent window; each
  /// increment moves the window 30 days further into the past.
  async fn resolve(
    &self,
    GetBuildMonthlyStats { page }: GetBuildMonthlyStats,
    _: User,
  ) -> anyhow::Result<GetBuildMonthlyStatsResponse> {
    let curr_ts = unix_timestamp_ms() as i64;
    // Start of the next UTC day, so the current partial day is included.
    let next_day = curr_ts - curr_ts % ONE_DAY_MS + ONE_DAY_MS;
    let close_ts = next_day - page as i64 * 30 * ONE_DAY_MS;
    let open_ts = close_ts - 30 * ONE_DAY_MS;
    let mut build_updates = db_client()
      .await
      .updates
      .find(
        doc! {
          "start_ts": {
            "$gte": open_ts,
            "$lt": close_ts
          },
          "operation": Operation::RunBuild.to_string(),
        },
        None,
      )
      .await
      .context("failed to get updates cursor")?;
    // Pre-seed every day of the window so days without any build still
    // appear (zeroed) in the response.
    let mut days = HashMap::<i64, BuildStatsDay>::with_capacity(32);
    let mut curr = open_ts;
    while curr < close_ts {
      let stats = BuildStatsDay {
        ts: curr as f64,
        ..Default::default()
      };
      days.insert(curr, stats);
      curr += ONE_DAY_MS;
    }
    while let Some(update) = build_updates.try_next().await? {
      // Only finished builds (those with an end_ts) are counted.
      if let Some(end_ts) = update.end_ts {
        // Bucket by the day the build started.
        let day = update.start_ts - update.start_ts % ONE_DAY_MS;
        let entry = days.entry(day).or_default();
        entry.count += 1.0;
        entry.time += ms_to_hour(end_ts - update.start_ts);
      }
    }
    Ok(GetBuildMonthlyStatsResponse::new(
      days.into_values().collect(),
    ))
  }
}
/// Milliseconds per hour, as f64, for duration conversion.
const MS_TO_HOUR_DIVISOR: f64 = 1000.0 * 60.0 * 60.0;

/// Convert a duration in milliseconds to fractional hours.
fn ms_to_hour(duration: i64) -> f64 {
  (duration as f64) / MS_TO_HOUR_DIVISOR
}
impl Resolve<GetBuildVersions, User> for State {
  /// List versions produced by successful, completed runs of a build,
  /// newest first, optionally narrowed on major/minor/patch and capped
  /// at `limit` results.
  async fn resolve(
    &self,
    GetBuildVersions {
      build,
      major,
      minor,
      patch,
      limit,
    }: GetBuildVersions,
    user: User,
  ) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
    let build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // Base query: completed + successful RunBuild updates of this build.
    let mut query = doc! {
      "target": {
        "type": "Build",
        "id": build.id
      },
      "operation": Operation::RunBuild.to_string(),
      "status": UpdateStatus::Complete.to_string(),
      "success": true
    };
    // Narrow on any version components the caller specified.
    if let Some(major) = major {
      query.insert("version.major", major);
    }
    if let Some(minor) = minor {
      query.insert("version.minor", minor);
    }
    if let Some(patch) = patch {
      query.insert("version.patch", patch);
    }
    let options = FindOptions::builder()
      .sort(doc! { "_id": -1 })
      .limit(limit)
      .build();
    let updates =
      find_collect(&db_client().await.updates, query, options)
        .await
        .context("failed to pull versions from mongo")?;
    let versions = updates
      .into_iter()
      // Drop updates that did not record a version.
      .filter(|u| !u.version.is_none())
      .map(|u| BuildVersionResponseItem {
        version: u.version,
        ts: u.start_ts,
      })
      .collect();
    Ok(versions)
  }
}
/// Serialize the configured github organizations to JSON once and
/// cache the string for the life of the process.
fn github_organizations() -> &'static String {
  static CACHE: OnceLock<String> = OnceLock::new();
  CACHE.get_or_init(|| {
    serde_json::to_string(&core_config().github_organizations)
      .expect("failed to serialize github organizations")
  })
}
impl ResolveToString<ListGithubOrganizations, User> for State {
  /// Return the cached, pre-serialized JSON list of github organizations.
  async fn resolve_to_string(
    &self,
    _: ListGithubOrganizations,
    _: User,
  ) -> anyhow::Result<String> {
    Ok(github_organizations().to_owned())
  }
}
/// Serialize the configured docker organizations to JSON once and
/// cache the string for the life of the process.
fn docker_organizations() -> &'static String {
  static CACHE: OnceLock<String> = OnceLock::new();
  CACHE.get_or_init(|| {
    serde_json::to_string(&core_config().docker_organizations)
      .expect("failed to serialize docker organizations")
  })
}
impl ResolveToString<ListDockerOrganizations, User> for State {
  /// Return the cached, pre-serialized JSON list of docker organizations.
  async fn resolve_to_string(
    &self,
    _: ListDockerOrganizations,
    _: User,
  ) -> anyhow::Result<String> {
    Ok(docker_organizations().to_owned())
  }
}
impl Resolve<ListCommonBuildExtraArgs, User> for State {
  /// Collect the distinct `extra_args` across all builds matching the
  /// query (and visible to `user`), sorted alphabetically.
  async fn resolve(
    &self,
    ListCommonBuildExtraArgs { query }: ListCommonBuildExtraArgs,
    user: User,
  ) -> anyhow::Result<ListCommonBuildExtraArgsResponse> {
    let builds = resource::list_full_for_user::<Build>(query, &user)
      .await
      .context("failed to get resources matching query")?;
    // Dedupe via a set before sorting into a stable order.
    let mut unique = HashSet::<String>::new();
    for build in builds {
      unique.extend(build.config.extra_args);
    }
    let mut res = unique.into_iter().collect::<Vec<_>>();
    res.sort();
    Ok(res)
  }
}

View File

@@ -0,0 +1,140 @@
use std::{collections::HashSet, str::FromStr};
use anyhow::Context;
use monitor_client::{
api::read::{self, *},
entities::{
builder::{Builder, BuilderConfig, BuilderListItem},
permission::PermissionLevel,
update::ResourceTargetVariant,
user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
impl Resolve<GetBuilder, User> for State {
  /// Fetch a single builder, enforcing read permission for the caller.
  async fn resolve(
    &self,
    req: GetBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    let builder = resource::get_check_permissions::<Builder>(
      &req.builder,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(builder)
  }
}
impl Resolve<ListBuilders, User> for State {
  /// List builders matching the query as list items, filtered to what
  /// `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListBuilders,
    user: User,
  ) -> anyhow::Result<Vec<BuilderListItem>> {
    resource::list_for_user::<Builder>(req.query, &user).await
  }
}
impl Resolve<ListFullBuilders, User> for State {
  /// List builders matching the query as full resource documents,
  /// filtered to what `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListFullBuilders,
    user: User,
  ) -> anyhow::Result<ListFullBuildersResponse> {
    resource::list_full_for_user::<Builder>(req.query, &user).await
  }
}
impl Resolve<GetBuildersSummary, User> for State {
  /// Count the builders visible to `user`. Admins (and all users when
  /// transparent mode is on) count every builder; other users only
  /// count builders they hold a permission on.
  async fn resolve(
    &self,
    GetBuildersSummary {}: GetBuildersSummary,
    user: User,
  ) -> anyhow::Result<GetBuildersSummaryResponse> {
    let filter = if user.admin || core_config().transparent_mode {
      // No filter: count every builder document.
      None
    } else {
      let object_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Builder,
      )
      .await?
      .into_iter()
      // Ids that fail to parse as ObjectId are silently skipped.
      .flat_map(|id| ObjectId::from_str(&id))
      .collect::<Vec<_>>();
      Some(doc! { "_id": { "$in": object_ids } })
    };
    let total = db_client()
      .await
      .builders
      .count_documents(filter, None)
      .await
      .context("failed to count all builder documents")?;
    Ok(GetBuildersSummaryResponse {
      total: total as u32,
    })
  }
}
impl Resolve<GetBuilderAvailableAccounts, User> for State {
  /// Collect the github / docker account names usable by a builder:
  /// accounts configured on the core merged with accounts from the
  /// builder itself (its AWS config, or queried from its server).
  async fn resolve(
    &self,
    GetBuilderAvailableAccounts { builder }: GetBuilderAvailableAccounts,
    user: User,
  ) -> anyhow::Result<GetBuilderAvailableAccountsResponse> {
    let builder = resource::get_check_permissions::<Builder>(
      &builder,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let (github_accounts, docker_accounts) = match builder.config {
      BuilderConfig::Aws(config) => {
        (config.github_accounts, config.docker_accounts)
      }
      BuilderConfig::Server(config) => {
        // Server-backed builders report the accounts on that server.
        let res = self
          .resolve(
            read::GetAvailableAccounts {
              server: Some(config.server_id),
            },
            user,
          )
          .await?;
        (res.github, res.docker)
      }
    };
    // Merge with core-configured accounts, dedupe, and sort.
    let dedupe_sorted = |extra: Vec<String>, core: Vec<String>| {
      let mut set = HashSet::<String>::new();
      set.extend(core);
      set.extend(extra);
      let mut list = set.into_iter().collect::<Vec<_>>();
      list.sort();
      list
    };
    let github = dedupe_sorted(
      github_accounts,
      core_config().github_accounts.keys().cloned().collect(),
    );
    let docker = dedupe_sorted(
      docker_accounts,
      core_config().docker_accounts.keys().cloned().collect(),
    );
    Ok(GetBuilderAvailableAccountsResponse { github, docker })
  }
}

View File

@@ -0,0 +1,264 @@
use std::{cmp, collections::HashSet};
use anyhow::{anyhow, Context};
use monitor_client::{
api::read::*,
entities::{
deployment::{
Deployment, DeploymentActionState, DeploymentConfig,
DeploymentListItem, DeploymentState, DockerContainerStats,
},
permission::PermissionLevel,
server::Server,
update::Log,
user::User,
},
};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::periphery_client,
resource,
state::{action_states, deployment_status_cache, State},
};
impl Resolve<GetDeployment, User> for State {
  /// Fetch a single deployment, enforcing read permission for the caller.
  async fn resolve(
    &self,
    req: GetDeployment,
    user: User,
  ) -> anyhow::Result<Deployment> {
    let deployment = resource::get_check_permissions::<Deployment>(
      &req.deployment,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(deployment)
  }
}
impl Resolve<ListDeployments, User> for State {
  /// List deployments matching the query as list items, filtered to
  /// what `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListDeployments,
    user: User,
  ) -> anyhow::Result<Vec<DeploymentListItem>> {
    resource::list_for_user::<Deployment>(req.query, &user).await
  }
}
impl Resolve<ListFullDeployments, User> for State {
  /// List deployments matching the query as full resource documents,
  /// filtered to what `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListFullDeployments,
    user: User,
  ) -> anyhow::Result<ListFullDeploymentsResponse> {
    resource::list_full_for_user::<Deployment>(req.query, &user).await
  }
}
impl Resolve<GetDeploymentContainer, User> for State {
  /// Return the cached container state and summary for a deployment.
  async fn resolve(
    &self,
    GetDeploymentContainer { deployment }: GetDeploymentContainer,
    user: User,
  ) -> anyhow::Result<GetDeploymentContainerResponse> {
    let deployment = resource::get_check_permissions::<Deployment>(
      &deployment,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A missing cache entry falls back to the default status.
    let status = deployment_status_cache()
      .get(&deployment.id)
      .await
      .unwrap_or_default();
    Ok(GetDeploymentContainerResponse {
      state: status.curr.state,
      container: status.curr.container.clone(),
    })
  }
}
/// Maximum number of log lines that may be requested from periphery.
const MAX_LOG_LENGTH: u64 = 5000;
impl Resolve<GetLog, User> for State {
  /// Fetch container logs for a deployment from its server's periphery
  /// agent. Returns an empty log when no server is attached; `tail` is
  /// capped at `MAX_LOG_LENGTH` lines.
  async fn resolve(
    &self,
    GetLog { deployment, tail }: GetLog,
    user: User,
  ) -> anyhow::Result<Log> {
    let Deployment { name, config, .. } =
      resource::get_check_permissions::<Deployment>(
        &deployment,
        &user,
        PermissionLevel::Read,
      )
      .await?;
    if config.server_id.is_empty() {
      // No server attached: nothing to fetch.
      return Ok(Log::default());
    }
    let server = resource::get::<Server>(&config.server_id).await?;
    let request = api::container::GetContainerLog {
      name,
      tail: cmp::min(tail, MAX_LOG_LENGTH),
    };
    periphery_client(&server)?
      .request(request)
      .await
      .context("failed at call to periphery")
  }
}
impl Resolve<SearchLog, User> for State {
  /// Search a deployment's container logs on the periphery agent using
  /// the given terms / combinator / invert flags. Returns an empty log
  /// when no server is attached.
  async fn resolve(
    &self,
    SearchLog {
      deployment,
      terms,
      combinator,
      invert,
    }: SearchLog,
    user: User,
  ) -> anyhow::Result<Log> {
    let Deployment { name, config, .. } =
      resource::get_check_permissions::<Deployment>(
        &deployment,
        &user,
        PermissionLevel::Read,
      )
      .await?;
    if config.server_id.is_empty() {
      // No server attached: nothing to search.
      return Ok(Log::default());
    }
    let server = resource::get::<Server>(&config.server_id).await?;
    let request = api::container::GetContainerLogSearch {
      name,
      terms,
      combinator,
      invert,
    };
    periphery_client(&server)?
      .request(request)
      .await
      .context("failed at call to periphery")
  }
}
impl Resolve<GetDeploymentStats, User> for State {
  /// Fetch live docker container stats for a deployment from the
  /// periphery agent. Errors when the deployment has no server attached.
  async fn resolve(
    &self,
    GetDeploymentStats { deployment }: GetDeploymentStats,
    user: User,
  ) -> anyhow::Result<DockerContainerStats> {
    let Deployment { name, config, .. } =
      resource::get_check_permissions::<Deployment>(
        &deployment,
        &user,
        PermissionLevel::Read,
      )
      .await?;
    if config.server_id.is_empty() {
      return Err(anyhow!("deployment has no server attached"));
    }
    let server = resource::get::<Server>(&config.server_id).await?;
    periphery_client(&server)?
      .request(api::container::GetContainerStats { name })
      .await
      .context("failed to get stats from periphery")
  }
}
impl Resolve<GetDeploymentActionState, User> for State {
  /// Fetch the in-memory action state for a deployment the user can read.
  async fn resolve(
    &self,
    GetDeploymentActionState { deployment }: GetDeploymentActionState,
    user: User,
  ) -> anyhow::Result<DeploymentActionState> {
    let deployment = resource::get_check_permissions::<Deployment>(
      &deployment,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A missing cache entry falls back to the default (idle) state.
    let state = action_states()
      .deployment
      .get(&deployment.id)
      .await
      .unwrap_or_default()
      .get()?;
    Ok(state)
  }
}
impl Resolve<GetDeploymentsSummary, User> for State {
  /// Tally deployments visible to `user` into running / stopped /
  /// not-deployed / unknown buckets using the cached status.
  async fn resolve(
    &self,
    GetDeploymentsSummary {}: GetDeploymentsSummary,
    user: User,
  ) -> anyhow::Result<GetDeploymentsSummaryResponse> {
    let deployments = resource::list_full_for_user::<Deployment>(
      Default::default(),
      &user,
    )
    .await
    .context("failed to get deployments from db")?;
    let status_cache = deployment_status_cache();
    let mut res = GetDeploymentsSummaryResponse::default();
    for deployment in deployments {
      res.total += 1;
      let status =
        status_cache.get(&deployment.id).await.unwrap_or_default();
      // Any state other than the three named ones counts as stopped.
      match status.curr.state {
        DeploymentState::Running => res.running += 1,
        DeploymentState::Unknown => res.unknown += 1,
        DeploymentState::NotDeployed => res.not_deployed += 1,
        _ => res.stopped += 1,
      }
    }
    Ok(res)
  }
}
impl Resolve<ListCommonDeploymentExtraArgs, User> for State {
  /// Collect the distinct `extra_args` across all deployments matching
  /// the query (and visible to `user`), sorted alphabetically.
  async fn resolve(
    &self,
    ListCommonDeploymentExtraArgs { query }: ListCommonDeploymentExtraArgs,
    user: User,
  ) -> anyhow::Result<ListCommonDeploymentExtraArgsResponse> {
    let deployments =
      resource::list_full_for_user::<Deployment>(query, &user)
        .await
        .context("failed to get resources matching query")?;
    // Dedupe via a set before sorting into a stable order.
    let mut unique = HashSet::<String>::new();
    for deployment in deployments {
      unique.extend(deployment.config.extra_args);
    }
    let mut res = unique.into_iter().collect::<Vec<_>>();
    res.sort();
    Ok(res)
  }
}

View File

@@ -0,0 +1,244 @@
use std::time::Instant;
use anyhow::anyhow;
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::read::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{auth::auth_request, config::core_config, state::State};
mod alert;
mod alerter;
mod build;
mod builder;
mod deployment;
mod permission;
mod procedure;
mod repo;
mod search;
mod server;
mod server_template;
mod sync;
mod tag;
mod toml;
mod update;
mod user;
mod user_group;
mod variable;
/// All requests served by the `/read` endpoint. Incoming JSON is tagged
/// `{ "type": ..., "params": ... }` and dispatched to the matching
/// `Resolve` impl on `State`. Variants marked `#[to_string_resolver]`
/// are resolved via `ResolveToString`, returning an already-serialized
/// JSON string directly.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum ReadRequest {
  GetVersion(GetVersion),
  GetCoreInfo(GetCoreInfo),
  GetAvailableAwsEcrLabels(GetAvailableAwsEcrLabels),
  // ==== USER ====
  ListUsers(ListUsers),
  GetUsername(GetUsername),
  ListApiKeys(ListApiKeys),
  ListApiKeysForServiceUser(ListApiKeysForServiceUser),
  ListPermissions(ListPermissions),
  GetPermissionLevel(GetPermissionLevel),
  ListUserTargetPermissions(ListUserTargetPermissions),
  // ==== USER GROUP ====
  GetUserGroup(GetUserGroup),
  ListUserGroups(ListUserGroups),
  // ==== SEARCH ====
  FindResources(FindResources),
  // ==== PROCEDURE ====
  GetProceduresSummary(GetProceduresSummary),
  GetProcedure(GetProcedure),
  GetProcedureActionState(GetProcedureActionState),
  ListProcedures(ListProcedures),
  ListFullProcedures(ListFullProcedures),
  // ==== SERVER TEMPLATE ====
  GetServerTemplate(GetServerTemplate),
  ListServerTemplates(ListServerTemplates),
  ListFullServerTemplates(ListFullServerTemplates),
  GetServerTemplatesSummary(GetServerTemplatesSummary),
  // ==== SERVER ====
  GetServersSummary(GetServersSummary),
  GetServer(GetServer),
  ListServers(ListServers),
  ListFullServers(ListFullServers),
  GetServerState(GetServerState),
  GetPeripheryVersion(GetPeripheryVersion),
  GetDockerContainers(GetDockerContainers),
  GetDockerImages(GetDockerImages),
  GetDockerNetworks(GetDockerNetworks),
  GetServerActionState(GetServerActionState),
  GetHistoricalServerStats(GetHistoricalServerStats),
  GetAvailableAccounts(GetAvailableAccounts),
  GetAvailableSecrets(GetAvailableSecrets),
  // ==== DEPLOYMENT ====
  GetDeploymentsSummary(GetDeploymentsSummary),
  GetDeployment(GetDeployment),
  ListDeployments(ListDeployments),
  ListFullDeployments(ListFullDeployments),
  GetDeploymentContainer(GetDeploymentContainer),
  GetDeploymentActionState(GetDeploymentActionState),
  GetDeploymentStats(GetDeploymentStats),
  GetLog(GetLog),
  SearchLog(SearchLog),
  ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
  // ==== BUILD ====
  GetBuildsSummary(GetBuildsSummary),
  GetBuild(GetBuild),
  ListBuilds(ListBuilds),
  ListFullBuilds(ListFullBuilds),
  GetBuildActionState(GetBuildActionState),
  GetBuildMonthlyStats(GetBuildMonthlyStats),
  GetBuildVersions(GetBuildVersions),
  ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
  #[to_string_resolver]
  ListGithubOrganizations(ListGithubOrganizations),
  #[to_string_resolver]
  ListDockerOrganizations(ListDockerOrganizations),
  // ==== REPO ====
  GetReposSummary(GetReposSummary),
  GetRepo(GetRepo),
  ListRepos(ListRepos),
  ListFullRepos(ListFullRepos),
  GetRepoActionState(GetRepoActionState),
  // ==== SYNC ====
  GetResourceSyncsSummary(GetResourceSyncsSummary),
  GetResourceSync(GetResourceSync),
  ListResourceSyncs(ListResourceSyncs),
  ListFullResourceSyncs(ListFullResourceSyncs),
  GetResourceSyncActionState(GetResourceSyncActionState),
  // ==== BUILDER ====
  GetBuildersSummary(GetBuildersSummary),
  GetBuilder(GetBuilder),
  ListBuilders(ListBuilders),
  ListFullBuilders(ListFullBuilders),
  GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
  // ==== ALERTER ====
  GetAlertersSummary(GetAlertersSummary),
  GetAlerter(GetAlerter),
  ListAlerters(ListAlerters),
  ListFullAlerters(ListFullAlerters),
  // ==== TOML ====
  ExportAllResourcesToToml(ExportAllResourcesToToml),
  ExportResourcesToToml(ExportResourcesToToml),
  // ==== TAG ====
  GetTag(GetTag),
  ListTags(ListTags),
  // ==== UPDATE ====
  GetUpdate(GetUpdate),
  ListUpdates(ListUpdates),
  // ==== ALERT ====
  ListAlerts(ListAlerts),
  GetAlert(GetAlert),
  // ==== SERVER STATS ====
  #[to_string_resolver]
  GetSystemInformation(GetSystemInformation),
  #[to_string_resolver]
  GetSystemStats(GetSystemStats),
  #[to_string_resolver]
  GetSystemProcesses(GetSystemProcesses),
  // ==== VARIABLE ====
  GetVariable(GetVariable),
  ListVariables(ListVariables),
}
/// Build the `/read` router: a single POST handler gated behind the
/// auth middleware, so requests reach `handler` already authenticated.
pub fn router() -> Router {
  Router::new()
    .route("/", post(handler))
    .layer(middleware::from_fn(auth_request))
}
/// Axum handler for `/read`: resolves the deserialized [ReadRequest]
/// against `State` for the authenticated user and returns the response
/// body as a JSON string. Resolve time is logged per request id.
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
async fn handler(
  Extension(user): Extension<User>,
  Json(request): Json<ReadRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
  let timer = Instant::now();
  let req_id = Uuid::new_v4();
  debug!("/read request | user: {}", user.username);
  let res =
    State
      .resolve_request(request, user)
      .await
      .map_err(|e| match e {
        // Surface response-serialization failures with their debug repr.
        resolver_api::Error::Serialization(e) => {
          anyhow!("{e:?}").context("response serialization error")
        }
        // Pass errors from the resolver itself through unchanged.
        resolver_api::Error::Inner(e) => e,
      });
  if let Err(e) = &res {
    debug!("/read request {req_id} error: {e:#}");
  }
  let elapsed = timer.elapsed();
  debug!("/read request {req_id} | resolve time: {elapsed:?}");
  Ok((TypedHeader(ContentType::json()), res?))
}
impl Resolve<GetVersion, User> for State {
  /// Report the core's crate version (compiled in via CARGO_PKG_VERSION).
  #[instrument(name = "GetVersion", level = "debug", skip(self))]
  async fn resolve(
    &self,
    _: GetVersion,
    _: User,
  ) -> anyhow::Result<GetVersionResponse> {
    let version = env!("CARGO_PKG_VERSION").to_string();
    Ok(GetVersionResponse { version })
  }
}
impl Resolve<GetCoreInfo, User> for State {
  /// Expose selected core configuration values to the caller.
  #[instrument(name = "GetCoreInfo", level = "debug", skip(self))]
  async fn resolve(
    &self,
    _: GetCoreInfo,
    _: User,
  ) -> anyhow::Result<GetCoreInfoResponse> {
    let config = core_config();
    // Fall back to the core host when no explicit webhook base url is set.
    let github_webhook_base_url = config
      .github_webhook_base_url
      .clone()
      .unwrap_or_else(|| config.host.clone());
    Ok(GetCoreInfoResponse {
      title: config.title.clone(),
      monitoring_interval: config.monitoring_interval,
      github_webhook_base_url,
      transparent_mode: config.transparent_mode,
      ui_write_disabled: config.ui_write_disabled,
    })
  }
}
impl Resolve<GetAvailableAwsEcrLabels, User> for State {
  /// List the labels of the aws ecr registries configured on the core.
  async fn resolve(
    &self,
    _: GetAvailableAwsEcrLabels,
    _: User,
  ) -> anyhow::Result<GetAvailableAwsEcrLabelsResponse> {
    let labels =
      core_config().aws_ecr_registries.keys().cloned().collect();
    Ok(labels)
  }
}

View File

@@ -0,0 +1,72 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::read::{
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
ListPermissionsResponse, ListUserTargetPermissions,
ListUserTargetPermissionsResponse,
},
entities::{permission::PermissionLevel, user::User},
};
use mungos::{find::find_collect, mongodb::bson::doc};
use resolver_api::Resolve;
use crate::{
helpers::query::get_user_permission_on_resource,
state::{db_client, State},
};
impl Resolve<ListPermissions, User> for State {
  /// List all permissions granted directly to the calling user.
  async fn resolve(
    &self,
    ListPermissions {}: ListPermissions,
    user: User,
  ) -> anyhow::Result<ListPermissionsResponse> {
    let filter = doc! {
      "user_target.type": "User",
      "user_target.id": &user.id
    };
    find_collect(&db_client().await.permissions, filter, None)
      .await
      .context("failed to query db for permissions")
  }
}
impl Resolve<GetPermissionLevel, User> for State {
  /// Get the calling user's permission level on a target resource.
  /// Admins implicitly hold write permission on everything.
  async fn resolve(
    &self,
    GetPermissionLevel { target }: GetPermissionLevel,
    user: User,
  ) -> anyhow::Result<GetPermissionLevelResponse> {
    if !user.admin {
      let (variant, id) = target.extract_variant_id();
      return get_user_permission_on_resource(&user.id, variant, id)
        .await;
    }
    Ok(PermissionLevel::Write)
  }
}
impl Resolve<ListUserTargetPermissions, User> for State {
  /// Admin-only: list every permission held by a given user target
  /// (a user or user group).
  async fn resolve(
    &self,
    ListUserTargetPermissions { user_target }: ListUserTargetPermissions,
    user: User,
  ) -> anyhow::Result<ListUserTargetPermissionsResponse> {
    if !user.admin {
      return Err(anyhow!("this method is admin only"));
    }
    let (variant, id) = user_target.extract_variant_id();
    let filter = doc! {
      "user_target.type": variant.as_ref(),
      "user_target.id": id
    };
    find_collect(&db_client().await.permissions, filter, None)
      .await
      .context("failed to query db for permissions")
  }
}

View File

@@ -0,0 +1,117 @@
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
permission::PermissionLevel,
procedure::{Procedure, ProcedureState},
user::User,
},
};
use resolver_api::Resolve;
use crate::{
resource,
state::{action_states, procedure_state_cache, State},
};
impl Resolve<GetProcedure, User> for State {
  /// Fetch a single procedure, enforcing read permission for the caller.
  async fn resolve(
    &self,
    req: GetProcedure,
    user: User,
  ) -> anyhow::Result<GetProcedureResponse> {
    let procedure = resource::get_check_permissions::<Procedure>(
      &req.procedure,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(procedure)
  }
}
impl Resolve<ListProcedures, User> for State {
  /// List procedures matching the query as list items, filtered to
  /// what `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListProcedures,
    user: User,
  ) -> anyhow::Result<ListProceduresResponse> {
    resource::list_for_user::<Procedure>(req.query, &user).await
  }
}
impl Resolve<ListFullProcedures, User> for State {
  /// List procedures matching the query as full resource documents,
  /// filtered to what `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListFullProcedures,
    user: User,
  ) -> anyhow::Result<ListFullProceduresResponse> {
    resource::list_full_for_user::<Procedure>(req.query, &user).await
  }
}
impl Resolve<GetProceduresSummary, User> for State {
  /// Tally the procedures visible to `user` into ok / failed / running /
  /// unknown buckets, combining the cached procedure state with the live
  /// action state (which tracks in-progress runs).
  async fn resolve(
    &self,
    GetProceduresSummary {}: GetProceduresSummary,
    user: User,
  ) -> anyhow::Result<GetProceduresSummaryResponse> {
    let procedures = resource::list_full_for_user::<Procedure>(
      Default::default(),
      &user,
    )
    .await
    .context("failed to get procedures from db")?;
    let mut res = GetProceduresSummaryResponse::default();
    let cache = procedure_state_cache();
    let action_states = action_states();
    for procedure in procedures {
      res.total += 1;
      match (
        // Missing cache entries fall back to the default state.
        cache.get(&procedure.id).await.unwrap_or_default(),
        action_states
          .procedure
          .get(&procedure.id)
          .await
          .unwrap_or_default()
          .get()?,
      ) {
        // A running procedure in the action state wins over the cache.
        (_, action_states) if action_states.running => {
          res.running += 1;
        }
        (ProcedureState::Ok, _) => res.ok += 1,
        (ProcedureState::Failed, _) => res.failed += 1,
        (ProcedureState::Unknown, _) => res.unknown += 1,
        // will never come off the cache in the running state, since that comes from action states
        (ProcedureState::Running, _) => unreachable!(),
      }
    }
    Ok(res)
  }
}
impl Resolve<GetProcedureActionState, User> for State {
  /// Fetch the in-memory action state for a procedure the user can read.
  async fn resolve(
    &self,
    GetProcedureActionState { procedure }: GetProcedureActionState,
    user: User,
  ) -> anyhow::Result<GetProcedureActionStateResponse> {
    let procedure = resource::get_check_permissions::<Procedure>(
      &procedure,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A missing cache entry falls back to the default (idle) state.
    let state = action_states()
      .procedure
      .get(&procedure.id)
      .await
      .unwrap_or_default()
      .get()?;
    Ok(state)
  }
}

View File

@@ -0,0 +1,120 @@
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
permission::PermissionLevel,
repo::{Repo, RepoActionState, RepoListItem, RepoState},
user::User,
},
};
use resolver_api::Resolve;
use crate::{
resource,
state::{action_states, repo_state_cache, State},
};
impl Resolve<GetRepo, User> for State {
  /// Fetch a single repo, enforcing read permission for the caller.
  async fn resolve(
    &self,
    req: GetRepo,
    user: User,
  ) -> anyhow::Result<Repo> {
    let repo = resource::get_check_permissions::<Repo>(
      &req.repo,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(repo)
  }
}
impl Resolve<ListRepos, User> for State {
  /// List repos matching the query as list items, filtered to what
  /// `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListRepos,
    user: User,
  ) -> anyhow::Result<Vec<RepoListItem>> {
    resource::list_for_user::<Repo>(req.query, &user).await
  }
}
impl Resolve<ListFullRepos, User> for State {
  /// List repos matching the query as full resource documents,
  /// filtered to what `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListFullRepos,
    user: User,
  ) -> anyhow::Result<ListFullReposResponse> {
    resource::list_full_for_user::<Repo>(req.query, &user).await
  }
}
impl Resolve<GetRepoActionState, User> for State {
  /// Fetch the in-memory action state (cloning / pulling flags) for a
  /// repo the user can read.
  async fn resolve(
    &self,
    GetRepoActionState { repo }: GetRepoActionState,
    user: User,
  ) -> anyhow::Result<RepoActionState> {
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // A missing cache entry falls back to the default (idle) state.
    let state = action_states()
      .repo
      .get(&repo.id)
      .await
      .unwrap_or_default()
      .get()?;
    Ok(state)
  }
}
impl Resolve<GetReposSummary, User> for State {
  /// Tally the repos visible to `user` into ok / failed / cloning /
  /// pulling / unknown buckets, combining the cached repo state with
  /// the live action state (which tracks in-progress clones and pulls).
  async fn resolve(
    &self,
    GetReposSummary {}: GetReposSummary,
    user: User,
  ) -> anyhow::Result<GetReposSummaryResponse> {
    let repos =
      resource::list_full_for_user::<Repo>(Default::default(), &user)
        .await
        .context("failed to get repos from db")?;
    let mut res = GetReposSummaryResponse::default();
    let cache = repo_state_cache();
    let action_states = action_states();
    for repo in repos {
      res.total += 1;
      match (
        // Missing cache entries fall back to the default state.
        cache.get(&repo.id).await.unwrap_or_default(),
        action_states
          .repo
          .get(&repo.id)
          .await
          .unwrap_or_default()
          .get()?,
      ) {
        // An active clone / pull in the action state wins over the cache.
        (_, action_states) if action_states.cloning => {
          res.cloning += 1;
        }
        (_, action_states) if action_states.pulling => {
          res.pulling += 1;
        }
        (RepoState::Ok, _) => res.ok += 1,
        (RepoState::Failed, _) => res.failed += 1,
        (RepoState::Unknown, _) => res.unknown += 1,
        // will never come off the cache in the building state, since that comes from action states
        (RepoState::Cloning, _) | (RepoState::Pulling, _) => {
          unreachable!()
        }
      }
    }
    Ok(res)
  }
}

View File

@@ -0,0 +1,83 @@
use monitor_client::{
api::read::{FindResources, FindResourcesResponse},
entities::{
build::Build, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, update::ResourceTargetVariant,
user::User,
},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
/// Resource types searched by `FindResources` when the request does
/// not specify any types.
const FIND_RESOURCE_TYPES: [ResourceTargetVariant; 5] = [
  ResourceTargetVariant::Server,
  ResourceTargetVariant::Build,
  ResourceTargetVariant::Deployment,
  ResourceTargetVariant::Repo,
  ResourceTargetVariant::Procedure,
];
impl Resolve<FindResources, User> for State {
  /// Search multiple resource types with one query document. An empty
  /// `resources` list searches all of `FIND_RESOURCE_TYPES`; otherwise
  /// only the requested types are searched, with System / Builder /
  /// Alerter always filtered out.
  async fn resolve(
    &self,
    FindResources { query, resources }: FindResources,
    user: User,
  ) -> anyhow::Result<FindResourcesResponse> {
    let mut res = FindResourcesResponse::default();
    let resource_types = if resources.is_empty() {
      FIND_RESOURCE_TYPES.to_vec()
    } else {
      resources
        .into_iter()
        .filter(|r| {
          // These variants are never searchable through this endpoint.
          !matches!(
            r,
            ResourceTargetVariant::System
              | ResourceTargetVariant::Builder
              | ResourceTargetVariant::Alerter
          )
        })
        .collect()
    };
    // Each type runs the same query against its own collection and
    // fills the matching field of the response.
    for resource_type in resource_types {
      match resource_type {
        ResourceTargetVariant::Server => {
          res.servers = resource::list_for_user_using_document::<
            Server,
          >(query.clone(), &user)
          .await?;
        }
        ResourceTargetVariant::Deployment => {
          res.deployments = resource::list_for_user_using_document::<
            Deployment,
          >(query.clone(), &user)
          .await?;
        }
        ResourceTargetVariant::Build => {
          res.builds =
            resource::list_for_user_using_document::<Build>(
              query.clone(),
              &user,
            )
            .await?;
        }
        ResourceTargetVariant::Repo => {
          res.repos = resource::list_for_user_using_document::<Repo>(
            query.clone(),
            &user,
          )
          .await?;
        }
        ResourceTargetVariant::Procedure => {
          res.procedures = resource::list_for_user_using_document::<
            Procedure,
          >(query.clone(), &user)
          .await?;
        }
        // Filtered-out variants cannot reach here.
        _ => {}
      }
    }
    Ok(res)
  }
}

View File

@@ -0,0 +1,451 @@
use std::{
collections::{HashMap, HashSet},
sync::{Arc, OnceLock},
};
use anyhow::{anyhow, Context};
use async_timing_util::{
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
};
use monitor_client::{
api::read::*,
entities::{
deployment::ContainerSummary,
permission::PermissionLevel,
server::{
docker_image::ImageSummary, docker_network::DockerNetwork,
Server, ServerActionState, ServerListItem, ServerState,
},
user::User,
},
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use periphery_client::api::{self, GetAccountsResponse};
use resolver_api::{Resolve, ResolveToString};
use tokio::sync::Mutex;
use crate::{
config::core_config,
helpers::periphery_client,
resource,
state::{action_states, db_client, server_status_cache, State},
};
impl Resolve<GetServersSummary, User> for State {
  /// Tally the servers visible to `user` into healthy / unhealthy /
  /// disabled buckets based on their reported state.
  async fn resolve(
    &self,
    GetServersSummary {}: GetServersSummary,
    user: User,
  ) -> anyhow::Result<GetServersSummaryResponse> {
    let servers =
      resource::list_for_user::<Server>(Default::default(), &user)
        .await?;
    let mut res = GetServersSummaryResponse::default();
    for server in servers {
      res.total += 1;
      match server.info.state {
        ServerState::Ok => res.healthy += 1,
        ServerState::NotOk => res.unhealthy += 1,
        ServerState::Disabled => res.disabled += 1,
      }
    }
    Ok(res)
  }
}
impl Resolve<GetPeripheryVersion, User> for State {
async fn resolve(
&self,
req: GetPeripheryVersion,
user: User,
) -> anyhow::Result<GetPeripheryVersionResponse> {
let server = resource::get_check_permissions::<Server>(
&req.server,
&user,
PermissionLevel::Read,
)
.await?;
let version = server_status_cache()
.get(&server.id)
.await
.map(|s| s.version.clone())
.unwrap_or(String::from("unknown"));
Ok(GetPeripheryVersionResponse { version })
}
}
impl Resolve<GetServer, User> for State {
  /// Fetch a single server, enforcing read permission for the caller.
  async fn resolve(
    &self,
    req: GetServer,
    user: User,
  ) -> anyhow::Result<Server> {
    let server = resource::get_check_permissions::<Server>(
      &req.server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(server)
  }
}
impl Resolve<ListServers, User> for State {
  /// List servers matching the query as list items, filtered to what
  /// `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListServers,
    user: User,
  ) -> anyhow::Result<Vec<ServerListItem>> {
    resource::list_for_user::<Server>(req.query, &user).await
  }
}
impl Resolve<ListFullServers, User> for State {
  /// List servers matching the query as full resource documents,
  /// filtered to what `user` is permitted to read.
  async fn resolve(
    &self,
    req: ListFullServers,
    user: User,
  ) -> anyhow::Result<ListFullServersResponse> {
    resource::list_full_for_user::<Server>(req.query, &user).await
  }
}
impl Resolve<GetServerState, User> for State {
  /// Return the cached state for a server; errors when no status has
  /// been cached for it yet.
  async fn resolve(
    &self,
    GetServerState { server }: GetServerState,
    user: User,
  ) -> anyhow::Result<GetServerStateResponse> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let status = server_status_cache()
      .get(&server.id)
      .await
      .context("did not find cached status for server")?;
    Ok(GetServerStateResponse {
      status: status.state,
    })
  }
}
impl Resolve<GetServerActionState, User> for State {
  /// Current action state for a server. Falls back to the default
  /// state when no entry exists in the action state cache.
  async fn resolve(
    &self,
    GetServerActionState { server }: GetServerActionState,
    user: User,
  ) -> anyhow::Result<ServerActionState> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let state = action_states()
      .server
      .get(&server.id)
      .await
      .unwrap_or_default()
      .get()?;
    Ok(state)
  }
}
// This protects the peripheries from spam requests
const SYSTEM_INFO_EXPIRY: u128 = FIFTEEN_SECONDS_MS;

/// Per-server cache of serialized system information, paired with the
/// unix-ms timestamp at which the entry expires.
type SystemInfoCache = Mutex<HashMap<String, Arc<(String, u128)>>>;

/// Global, lazily-initialized [`SystemInfoCache`] instance.
fn system_info_cache() -> &'static SystemInfoCache {
  static CACHE: OnceLock<SystemInfoCache> = OnceLock::new();
  CACHE.get_or_init(SystemInfoCache::default)
}
impl ResolveToString<GetSystemInformation, User> for State {
/// Return the target server's system information serialized as a JSON
/// string. Results are cached per server for SYSTEM_INFO_EXPIRY ms so
/// repeated calls do not spam the periphery agent.
async fn resolve_to_string(
&self,
GetSystemInformation { server }: GetSystemInformation,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
// The (async) lock stays held across the periphery request below,
// so concurrent callers for a cache miss are serialized and only
// one request per expiry window reaches the periphery.
let mut lock = system_info_cache().lock().await;
let res = match lock.get(&server.id) {
// Cache hit that has not expired yet: serve the cached JSON.
Some(cached) if cached.1 > unix_timestamp_ms() => {
cached.0.clone()
}
// Miss or expired entry: fetch from the periphery and refresh
// the cache with a new expiry timestamp.
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemInformation {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
server.id,
(res.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
.into(),
);
res
}
};
Ok(res)
}
}
impl ResolveToString<GetSystemStats, User> for State {
  /// Serialize the latest cached system stats for a server to JSON.
  /// Errors if the server has no cached status or no stats yet.
  async fn resolve_to_string(
    &self,
    GetSystemStats { server }: GetSystemStats,
    user: User,
  ) -> anyhow::Result<String> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let cached = server_status_cache()
      .get(&server.id)
      .await
      .with_context(|| {
        format!("did not find status for server at {}", server.id)
      })?;
    let stats = cached
      .stats
      .as_ref()
      .context("server stats not available")?;
    Ok(serde_json::to_string(stats)?)
  }
}
// This protects the peripheries from spam requests
const PROCESSES_EXPIRY: u128 = FIFTEEN_SECONDS_MS;

/// Per-server cache of the serialized process list, paired with the
/// unix-ms timestamp at which the entry expires.
type ProcessesCache = Mutex<HashMap<String, Arc<(String, u128)>>>;

/// Global, lazily-initialized [`ProcessesCache`] instance.
fn processes_cache() -> &'static ProcessesCache {
  static CACHE: OnceLock<ProcessesCache> = OnceLock::new();
  CACHE.get_or_init(ProcessesCache::default)
}
impl ResolveToString<GetSystemProcesses, User> for State {
/// Return the target server's process list serialized as a JSON
/// string. Results are cached per server for PROCESSES_EXPIRY ms so
/// repeated calls do not spam the periphery agent.
async fn resolve_to_string(
&self,
GetSystemProcesses { server }: GetSystemProcesses,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
// The (async) lock stays held across the periphery request below,
// serializing concurrent callers on a cache miss so only one
// request per expiry window reaches the periphery.
let mut lock = processes_cache().lock().await;
let res = match lock.get(&server.id) {
// Cache hit that has not expired yet: serve the cached JSON.
Some(cached) if cached.1 > unix_timestamp_ms() => {
cached.0.clone()
}
// Miss or expired entry: fetch from the periphery and refresh
// the cache with a new expiry timestamp.
_ => {
let stats = periphery_client(&server)?
.request(api::stats::GetSystemProcesses {})
.await?;
let res = serde_json::to_string(&stats)?;
lock.insert(
server.id,
(res.clone(), unix_timestamp_ms() + PROCESSES_EXPIRY)
.into(),
);
res
}
};
Ok(res)
}
}
const STATS_PER_PAGE: i64 = 500;

impl Resolve<GetHistoricalServerStats, User> for State {
  /// Page through historical stats for a server at the requested
  /// granularity. Page 0 is the most recent window; `next_page` is
  /// `Some` whenever a full page was returned.
  async fn resolve(
    &self,
    GetHistoricalServerStats {
      server,
      granularity,
      page,
    }: GetHistoricalServerStats,
    user: User,
  ) -> anyhow::Result<GetHistoricalServerStatsResponse> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // Round-trips through the string representation to convert
    // between the two timelength types; the parse is expected to be
    // infallible for valid granularity variants (TODO: confirm).
    let granularity =
      get_timelength_in_ms(granularity.to_string().parse().unwrap())
        as i64;
    // Compute the exact timestamps belonging to the requested page,
    // aligned down to the granularity. The page offset is applied
    // here, in the timestamp window itself.
    let mut ts_vec = Vec::<i64>::new();
    let curr_ts = unix_timestamp_ms() as i64;
    let mut curr_ts = curr_ts
      - curr_ts % granularity
      - granularity * STATS_PER_PAGE * page as i64;
    for _ in 0..STATS_PER_PAGE {
      ts_vec.push(curr_ts);
      curr_ts -= granularity;
    }
    // BUGFIX: the previous version also applied
    // `.skip(page * STATS_PER_PAGE)` here. Since `ts_vec` already
    // restricts the query to the requested page's timestamps (at most
    // STATS_PER_PAGE matches), skipping again made every page > 0
    // return empty results. The `$in` filter alone paginates.
    let stats = find_collect(
      &db_client().await.stats,
      doc! {
        "sid": server.id,
        "ts": { "$in": ts_vec },
      },
      FindOptions::builder()
        .sort(doc! { "ts": -1 })
        .limit(STATS_PER_PAGE)
        .build(),
    )
    .await
    .context("failed to pull stats from db")?;
    // A full page implies there may be older stats to fetch.
    let next_page = if stats.len() == STATS_PER_PAGE as usize {
      Some(page + 1)
    } else {
      None
    };
    Ok(GetHistoricalServerStatsResponse { stats, next_page })
  }
}
impl Resolve<GetDockerImages, User> for State {
  /// List docker images on the target server via its periphery agent.
  async fn resolve(
    &self,
    GetDockerImages { server }: GetDockerImages,
    user: User,
  ) -> anyhow::Result<Vec<ImageSummary>> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let periphery = periphery_client(&server)?;
    periphery.request(api::build::GetImageList {}).await
  }
}
impl Resolve<GetDockerNetworks, User> for State {
  /// List docker networks on the target server via its periphery
  /// agent.
  async fn resolve(
    &self,
    GetDockerNetworks { server }: GetDockerNetworks,
    user: User,
  ) -> anyhow::Result<Vec<DockerNetwork>> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let periphery = periphery_client(&server)?;
    periphery.request(api::network::GetNetworkList {}).await
  }
}
impl Resolve<GetDockerContainers, User> for State {
  /// List docker containers on the target server via its periphery
  /// agent.
  async fn resolve(
    &self,
    GetDockerContainers { server }: GetDockerContainers,
    user: User,
  ) -> anyhow::Result<Vec<ContainerSummary>> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let periphery = periphery_client(&server)?;
    periphery.request(api::container::GetContainerList {}).await
  }
}
impl Resolve<GetAvailableAccounts, User> for State {
  /// Merge the accounts configured on core with the accounts reported
  /// by the target periphery (when a server is given), deduplicated
  /// and sorted.
  async fn resolve(
    &self,
    GetAvailableAccounts { server }: GetAvailableAccounts,
    user: User,
  ) -> anyhow::Result<GetAvailableAccountsResponse> {
    // Combine two account sources into a sorted, deduplicated list.
    fn merge(
      configured: impl Iterator<Item = String>,
      from_periphery: Vec<String>,
    ) -> Vec<String> {
      let mut set = HashSet::<String>::new();
      set.extend(configured);
      set.extend(from_periphery);
      let mut list = set.into_iter().collect::<Vec<_>>();
      list.sort();
      list
    }
    let (periphery_github, periphery_docker) = match server {
      Some(server) => {
        let server = resource::get_check_permissions::<Server>(
          &server,
          &user,
          PermissionLevel::Read,
        )
        .await?;
        let GetAccountsResponse { github, docker } =
          periphery_client(&server)?
            .request(api::GetAccounts {})
            .await
            .context("failed to get accounts from periphery")?;
        (github, docker)
      }
      None => Default::default(),
    };
    Ok(GetAvailableAccountsResponse {
      github: merge(
        core_config().github_accounts.keys().cloned(),
        periphery_github,
      ),
      docker: merge(
        core_config().docker_accounts.keys().cloned(),
        periphery_docker,
      ),
    })
  }
}
impl Resolve<GetAvailableSecrets, User> for State {
  /// List the secret names available on the target periphery, sorted.
  async fn resolve(
    &self,
    GetAvailableSecrets { server }: GetAvailableSecrets,
    user: User,
  ) -> anyhow::Result<GetAvailableSecretsResponse> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // Error context previously said "accounts" — copy-paste from the
    // accounts handler; corrected to reflect the secrets request.
    let mut secrets = periphery_client(&server)?
      .request(api::GetSecrets {})
      .await
      .context("failed to get secrets from periphery")?;
    secrets.sort();
    Ok(secrets)
  }
}

View File

@@ -0,0 +1,88 @@
use std::str::FromStr;
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
update::ResourceTargetVariant, user::User,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
impl Resolve<GetServerTemplate, User> for State {
  /// Fetch a server template, enforcing read permission for `user`.
  async fn resolve(
    &self,
    req: GetServerTemplate,
    user: User,
  ) -> anyhow::Result<GetServerTemplateResponse> {
    resource::get_check_permissions::<ServerTemplate>(
      &req.server_template,
      &user,
      PermissionLevel::Read,
    )
    .await
  }
}
impl Resolve<ListServerTemplates, User> for State {
  /// List server templates matching `query`, visible to `user`.
  async fn resolve(
    &self,
    req: ListServerTemplates,
    user: User,
  ) -> anyhow::Result<ListServerTemplatesResponse> {
    resource::list_for_user::<ServerTemplate>(req.query, &user).await
  }
}
impl Resolve<ListFullServerTemplates, User> for State {
  /// List full server template documents matching `query`, visible to
  /// `user`.
  async fn resolve(
    &self,
    req: ListFullServerTemplates,
    user: User,
  ) -> anyhow::Result<ListFullServerTemplatesResponse> {
    resource::list_full_for_user::<ServerTemplate>(req.query, &user)
      .await
  }
}
impl Resolve<GetServerTemplatesSummary, User> for State {
  /// Count the server templates visible to the user. Admins count all
  /// templates; other users only the ones they have permissions on.
  async fn resolve(
    &self,
    GetServerTemplatesSummary {}: GetServerTemplatesSummary,
    user: User,
  ) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
    let query = if user.admin {
      None
    } else {
      // Ids that fail to parse as ObjectIds are silently dropped.
      let ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::ServerTemplate,
      )
      .await?
      .into_iter()
      .filter_map(|id| ObjectId::from_str(&id).ok())
      .collect::<Vec<_>>();
      Some(doc! {
        "_id": { "$in": ids }
      })
    };
    let total = db_client()
      .await
      .server_templates
      .count_documents(query, None)
      .await
      .context("failed to count all server template documents")?;
    Ok(GetServerTemplatesSummaryResponse {
      total: total as u32,
    })
  }
}

View File

@@ -0,0 +1,139 @@
use anyhow::Context;
use monitor_client::{
api::read::*,
entities::{
permission::PermissionLevel,
sync::{
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
ResourceSyncListItem, ResourceSyncState,
},
user::User,
},
};
use resolver_api::Resolve;
use crate::{
resource,
state::{action_states, resource_sync_state_cache, State},
};
impl Resolve<GetResourceSync, User> for State {
  /// Fetch a resource sync, enforcing read permission for `user`.
  async fn resolve(
    &self,
    req: GetResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    resource::get_check_permissions::<ResourceSync>(
      &req.sync,
      &user,
      PermissionLevel::Read,
    )
    .await
  }
}
impl Resolve<ListResourceSyncs, User> for State {
  /// List resource syncs matching `query`, visible to `user`.
  async fn resolve(
    &self,
    req: ListResourceSyncs,
    user: User,
  ) -> anyhow::Result<Vec<ResourceSyncListItem>> {
    resource::list_for_user::<ResourceSync>(req.query, &user).await
  }
}
impl Resolve<ListFullResourceSyncs, User> for State {
  /// List full resource sync documents matching `query`, visible to
  /// `user`.
  async fn resolve(
    &self,
    req: ListFullResourceSyncs,
    user: User,
  ) -> anyhow::Result<ListFullResourceSyncsResponse> {
    resource::list_full_for_user::<ResourceSync>(req.query, &user)
      .await
  }
}
impl Resolve<GetResourceSyncActionState, User> for State {
  /// Current action state for a resource sync. Falls back to the
  /// default state when no entry exists in the action state cache.
  async fn resolve(
    &self,
    GetResourceSyncActionState { sync }: GetResourceSyncActionState,
    user: User,
  ) -> anyhow::Result<ResourceSyncActionState> {
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let state = action_states()
      .resource_sync
      .get(&sync.id)
      .await
      .unwrap_or_default()
      .get()?;
    Ok(state)
  }
}
impl Resolve<GetResourceSyncsSummary, User> for State {
/// Tally the states of every resource sync visible to `user`:
/// total / ok / syncing / pending / failed / unknown counts.
async fn resolve(
&self,
GetResourceSyncsSummary {}: GetResourceSyncsSummary,
user: User,
) -> anyhow::Result<GetResourceSyncsSummaryResponse> {
let resource_syncs =
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user,
)
.await
.context("failed to get resource_syncs from db")?;
let mut res = GetResourceSyncsSummaryResponse::default();
let cache = resource_sync_state_cache();
let action_states = action_states();
for resource_sync in resource_syncs {
res.total += 1;
// Pending / failed-to-compute updates take precedence over the
// cached state: classify and move to the next sync.
match resource_sync.info.pending.data {
PendingSyncUpdatesData::Ok(data) => {
if !data.no_updates() {
res.pending += 1;
continue;
}
}
PendingSyncUpdatesData::Err(_) => {
res.failed += 1;
continue;
}
}
// Otherwise classify by (cached state, action state). An active
// action state (syncing) wins over whatever is cached.
match (
cache.get(&resource_sync.id).await.unwrap_or_default(),
action_states
.resource_sync
.get(&resource_sync.id)
.await
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.syncing => {
res.syncing += 1;
}
(ResourceSyncState::Ok, _) => res.ok += 1,
(ResourceSyncState::Failed, _) => res.failed += 1,
(ResourceSyncState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the building state, since that comes from action states
// NOTE(review): if the cache ever did hold Syncing/Pending, this
// unreachable!() would panic the request handler — confirm the
// cache writer upholds this invariant.
(ResourceSyncState::Syncing, _) => {
unreachable!()
}
(ResourceSyncState::Pending, _) => {
unreachable!()
}
}
}
Ok(res)
}
}

View File

@@ -0,0 +1,39 @@
use anyhow::Context;
use mongo_indexed::doc;
use monitor_client::{
api::read::{GetTag, ListTags},
entities::{tag::Tag, user::User},
};
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
helpers::query::get_tag,
state::{db_client, State},
};
impl Resolve<GetTag, User> for State {
  /// Look up a single tag. The user parameter is unused — no
  /// permission check is performed here.
  async fn resolve(
    &self,
    req: GetTag,
    _: User,
  ) -> anyhow::Result<Tag> {
    get_tag(&req.tag).await
  }
}
impl Resolve<ListTags, User> for State {
  /// List all tags matching `query`, sorted by name ascending. The
  /// user parameter is unused — no permission check is performed.
  async fn resolve(
    &self,
    ListTags { query }: ListTags,
    _: User,
  ) -> anyhow::Result<Vec<Tag>> {
    let options =
      FindOptions::builder().sort(doc! { "name": 1 }).build();
    find_collect(&db_client().await.tags, query, options)
      .await
      .context("failed to get tags from db")
  }
}

View File

@@ -0,0 +1,789 @@
use std::collections::HashMap;
use anyhow::Context;
use monitor_client::{
api::{
execute::Execution,
read::{
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
ExportResourcesToToml, ExportResourcesToTomlResponse,
GetUserGroup, ListUserTargetPermissions,
},
},
entities::{
alerter::Alerter,
build::Build,
builder::{Builder, BuilderConfig},
deployment::{
conversions_to_string, term_signal_labels_to_string,
Deployment, DeploymentImage,
},
environment_vars_to_string,
permission::{PermissionLevel, UserTarget},
procedure::Procedure,
repo::Repo,
resource::{Resource, ResourceQuery},
server::Server,
server_template::ServerTemplate,
sync::ResourceSync,
toml::{
PermissionToml, ResourceToml, ResourcesToml, UserGroupToml,
},
update::ResourceTarget,
user::User,
},
};
use mungos::find::find_collect;
use ordered_hash_map::OrderedHashMap;
use partial_derive2::PartialDiff;
use resolver_api::Resolve;
use serde_json::Value;
use crate::{
helpers::query::get_user_user_group_ids,
resource::{self, MonitorResource},
state::{db_client, State},
};
impl Resolve<ExportAllResourcesToToml, User> for State {
async fn resolve(
&self,
ExportAllResourcesToToml { tags }: ExportAllResourcesToToml,
user: User,
) -> anyhow::Result<ExportAllResourcesToTomlResponse> {
let mut targets = Vec::<ResourceTarget>::new();
targets.extend(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
);
targets.extend(
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
);
targets.extend(
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
);
targets.extend(
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
);
targets.extend(
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
);
targets.extend(
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
&user,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
let user_groups = if user.admin {
find_collect(&db_client().await.user_groups, None, None)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|user_group| user_group.id)
.collect()
} else {
get_user_user_group_ids(&user.id).await?
};
self
.resolve(
ExportResourcesToToml {
targets,
user_groups,
include_variables: true,
},
user,
)
.await
}
}
impl Resolve<ExportResourcesToToml, User> for State {
  /// Export the given resource targets, user groups, and (optionally)
  /// all variables as a toml document. Ids stored on resources
  /// (server / builder / build ids) are translated into resource
  /// names so the export is portable across installs; unknown ids
  /// become empty strings.
  async fn resolve(
    &self,
    ExportResourcesToToml {
      targets,
      user_groups,
      include_variables,
    }: ExportResourcesToToml,
    user: User,
  ) -> anyhow::Result<ExportResourcesToTomlResponse> {
    let mut res = ResourcesToml::default();
    let names = ResourceNames::new()
      .await
      .context("failed to init resource name maps")?;
    for target in targets {
      match target {
        ResourceTarget::Alerter(id) => {
          let alerter = resource::get_check_permissions::<Alerter>(
            &id,
            &user,
            PermissionLevel::Read,
          )
          .await?;
          res
            .alerters
            .push(convert_resource::<Alerter>(alerter, &names.tags))
        }
        ResourceTarget::ResourceSync(id) => {
          let sync = resource::get_check_permissions::<ResourceSync>(
            &id,
            &user,
            PermissionLevel::Read,
          )
          .await?;
          res
            .resource_syncs
            .push(convert_resource::<ResourceSync>(sync, &names.tags))
        }
        ResourceTarget::ServerTemplate(id) => {
          let template = resource::get_check_permissions::<
            ServerTemplate,
          >(
            &id, &user, PermissionLevel::Read
          )
          .await?;
          res.server_templates.push(
            convert_resource::<ServerTemplate>(template, &names.tags),
          )
        }
        ResourceTarget::Server(id) => {
          let server = resource::get_check_permissions::<Server>(
            &id,
            &user,
            PermissionLevel::Read,
          )
          .await?;
          res
            .servers
            .push(convert_resource::<Server>(server, &names.tags))
        }
        ResourceTarget::Builder(id) => {
          let mut builder =
            resource::get_check_permissions::<Builder>(
              &id,
              &user,
              PermissionLevel::Read,
            )
            .await?;
          // Replace the builder's server id with the server's name.
          // BUGFIX: this previously looked up the *builder's* own id
          // (`&id`) in the server name map, which never matched, so
          // the exported server_id was always blanked. Look up the
          // config's server_id instead — matching how the Build /
          // Deployment / Repo arms below handle their id fields.
          if let BuilderConfig::Server(config) = &mut builder.config {
            config.server_id.clone_from(
              names
                .servers
                .get(&config.server_id)
                .unwrap_or(&String::new()),
            )
          }
          res
            .builders
            .push(convert_resource::<Builder>(builder, &names.tags))
        }
        ResourceTarget::Build(id) => {
          let mut build = resource::get_check_permissions::<Build>(
            &id,
            &user,
            PermissionLevel::Read,
          )
          .await?;
          // replace builder id of build with the builder's name
          build.config.builder_id.clone_from(
            names
              .builders
              .get(&build.config.builder_id)
              .unwrap_or(&String::new()),
          );
          res
            .builds
            .push(convert_resource::<Build>(build, &names.tags))
        }
        ResourceTarget::Deployment(id) => {
          let mut deployment = resource::get_check_permissions::<
            Deployment,
          >(
            &id, &user, PermissionLevel::Read
          )
          .await?;
          // replace deployment server id with the server's name
          deployment.config.server_id.clone_from(
            names
              .servers
              .get(&deployment.config.server_id)
              .unwrap_or(&String::new()),
          );
          // replace deployment build id with the build's name
          if let DeploymentImage::Build { build_id, .. } =
            &mut deployment.config.image
          {
            build_id.clone_from(
              names.builds.get(build_id).unwrap_or(&String::new()),
            );
          }
          res.deployments.push(convert_resource::<Deployment>(
            deployment,
            &names.tags,
          ))
        }
        ResourceTarget::Repo(id) => {
          let mut repo = resource::get_check_permissions::<Repo>(
            &id,
            &user,
            PermissionLevel::Read,
          )
          .await?;
          // replace repo server id with the server's name
          repo.config.server_id.clone_from(
            names
              .servers
              .get(&repo.config.server_id)
              .unwrap_or(&String::new()),
          );
          res.repos.push(convert_resource::<Repo>(repo, &names.tags))
        }
        ResourceTarget::Procedure(id) => {
          add_procedure(&id, &mut res, &user, &names)
            .await
            .with_context(|| {
              format!("failed to add procedure {id}")
            })?;
        }
        // The System target is not an exportable resource.
        ResourceTarget::System(_) => continue,
      };
    }
    add_user_groups(user_groups, &mut res, &user)
      .await
      .context("failed to add user groups")?;
    if include_variables {
      res.variables =
        find_collect(&db_client().await.variables, None, None)
          .await
          .context("failed to get variables from db")?;
    }
    let toml = serialize_resources_toml(&res)
      .context("failed to serialize resources to toml")?;
    Ok(ExportResourcesToTomlResponse { toml })
  }
}
async fn add_procedure(
id: &str,
res: &mut ResourcesToml,
user: &User,
names: &ResourceNames,
) -> anyhow::Result<()> {
let mut procedure = resource::get_check_permissions::<Procedure>(
id,
user,
PermissionLevel::Read,
)
.await?;
for stage in &mut procedure.config.stages {
for execution in &mut stage.executions {
match &mut execution.execution {
Execution::RunProcedure(exec) => exec.procedure.clone_from(
names
.procedures
.get(&exec.procedure)
.unwrap_or(&String::new()),
),
Execution::RunBuild(exec) => exec.build.clone_from(
names.builds.get(&exec.build).unwrap_or(&String::new()),
),
Execution::Deploy(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::StartContainer(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::StopContainer(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::RemoveContainer(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::CloneRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::PullRepo(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::StopAllContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneNetworks(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneImages(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::RunSync(exec) => exec.sync.clone_from(
names.syncs.get(&exec.sync).unwrap_or(&String::new()),
),
Execution::Sleep(_) | Execution::None(_) => {}
}
}
}
res
.procedures
.push(convert_resource::<Procedure>(procedure, &names.tags));
Ok(())
}
/// Id -> name lookup maps for every resource type (plus tags), used
/// to translate the ids stored on resources into names for export.
struct ResourceNames {
// tag id -> tag name
tags: HashMap<String, String>,
// server id -> server name
servers: HashMap<String, String>,
// builder id -> builder name
builders: HashMap<String, String>,
// build id -> build name
builds: HashMap<String, String>,
// repo id -> repo name
repos: HashMap<String, String>,
// deployment id -> deployment name
deployments: HashMap<String, String>,
// procedure id -> procedure name
procedures: HashMap<String, String>,
// resource sync id -> resource sync name
syncs: HashMap<String, String>,
}
impl ResourceNames {
async fn new() -> anyhow::Result<ResourceNames> {
let db = db_client().await;
Ok(ResourceNames {
tags: find_collect(&db.tags, None, None)
.await
.context("failed to get all tags")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
servers: find_collect(&db.servers, None, None)
.await
.context("failed to get all servers")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
builders: find_collect(&db.builders, None, None)
.await
.context("failed to get all builders")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
builds: find_collect(&db.builds, None, None)
.await
.context("failed to get all builds")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
repos: find_collect(&db.repos, None, None)
.await
.context("failed to get all repos")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
deployments: find_collect(&db.deployments, None, None)
.await
.context("failed to get all deployments")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
procedures: find_collect(&db.procedures, None, None)
.await
.context("failed to get all procedures")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
syncs: find_collect(&db.resource_syncs, None, None)
.await
.context("failed to get all resource syncs")?
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>(),
})
}
}
/// Append the given user groups (by id) to `res`, resolving member
/// user ids to usernames and including each group's target
/// permissions.
async fn add_user_groups(
  user_groups: Vec<String>,
  res: &mut ResourcesToml,
  user: &User,
) -> anyhow::Result<()> {
  let db = db_client().await;
  // Map user id -> username once up front, to resolve group members.
  let usernames: HashMap<_, _> = find_collect(&db.users, None, None)
    .await?
    .into_iter()
    .map(|user| (user.id, user.username))
    .collect();
  for user_group in user_groups {
    let group = State
      .resolve(GetUserGroup { user_group }, user.clone())
      .await?;
    // ListUserTargetPermissions is admin only, but we already know
    // the user can see this group since the call above succeeded, so
    // resolve it as a synthetic admin user.
    let permissions = State
      .resolve(
        ListUserTargetPermissions {
          user_target: UserTarget::UserGroup(group.id),
        },
        User {
          admin: true,
          ..Default::default()
        },
      )
      .await?
      .into_iter()
      .map(|permission| PermissionToml {
        target: permission.resource_target,
        level: permission.level,
      })
      .collect();
    // Members whose id no longer resolves to a user are dropped.
    let users = group
      .users
      .into_iter()
      .filter_map(|user_id| usernames.get(&user_id).cloned())
      .collect();
    res.user_groups.push(UserGroupToml {
      name: group.name,
      users,
      permissions,
    });
  }
  Ok(())
}
/// Convert a full resource into its toml representation, dropping
/// defaulted config fields and translating tag ids into tag names
/// (unknown tag ids are dropped).
fn convert_resource<R: MonitorResource>(
  resource: Resource<R::Config, R::Info>,
  tag_names: &HashMap<String, String>,
) -> ResourceToml<R::PartialConfig> {
  let tags = resource
    .tags
    .iter()
    .filter_map(|t| tag_names.get(t).cloned())
    .collect();
  // Minimizing the partial config against the default keeps
  // non-necessary (defaulted) fields out of the final toml.
  let partial: R::PartialConfig = resource.config.into();
  ResourceToml {
    name: resource.name,
    tags,
    description: resource.description,
    deploy: false,
    after: Default::default(),
    config: R::Config::default().minimize_partial(partial),
  }
}
/// Serialize a ResourcesToml into the final toml document string.
/// Each resource is emitted as a `[[section]]` array-of-tables entry,
/// separated by `##` comment dividers, in a fixed section order.
/// Deployments, builds, and procedures get special handling to keep
/// certain config fields in their canonical string forms.
fn serialize_resources_toml(
resources: &ResourcesToml,
) -> anyhow::Result<String> {
let mut res = String::new();
// Shared pretty-printer options for every section.
let options = toml_pretty::Options::default()
.tab(" ")
.skip_empty_string(true)
.max_inline_array_length(30);
// [[server]] sections.
for server in &resources.servers {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[server]]\n");
res.push_str(
&toml_pretty::to_string(&server, options)
.context("failed to serialize servers to toml")?,
);
}
// [[deployment]] sections. The deployment is round-tripped through
// JSON so string-form fields can be substituted into the config map
// before toml serialization.
for deployment in &resources.deployments {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[deployment]]\n");
let mut parsed: OrderedHashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&deployment)?)?;
let config = parsed
.get_mut("config")
.context("deployment has no config?")?
.as_object_mut()
.context("config is not object?")?;
// For image-from-build deployments, drop a missing version or
// replace it with its display string.
if let Some(DeploymentImage::Build { version, .. }) =
&deployment.config.image
{
let image = config
.get_mut("image")
.context("deployment has no image")?
.get_mut("params")
.context("deployment image has no params")?
.as_object_mut()
.context("deployment image params is not object")?;
if version.is_none() {
image.remove("version");
} else {
image.insert(
"version".to_string(),
Value::String(version.to_string()),
);
}
}
// Replace structured fields with their canonical string forms.
if let Some(term_signal_labels) =
&deployment.config.term_signal_labels
{
config.insert(
"term_signal_labels".to_string(),
Value::String(term_signal_labels_to_string(
term_signal_labels,
)),
);
}
if let Some(ports) = &deployment.config.ports {
config.insert(
"ports".to_string(),
Value::String(conversions_to_string(ports)),
);
}
if let Some(volumes) = &deployment.config.volumes {
config.insert(
"volumes".to_string(),
Value::String(conversions_to_string(volumes)),
);
}
if let Some(environment) = &deployment.config.environment {
config.insert(
"environment".to_string(),
Value::String(environment_vars_to_string(environment)),
);
}
if let Some(labels) = &deployment.config.labels {
config.insert(
"labels".to_string(),
Value::String(environment_vars_to_string(labels)),
);
}
res.push_str(
&toml_pretty::to_string(&parsed, options)
.context("failed to serialize deployments to toml")?,
);
}
// [[build]] sections — same JSON round-trip technique to replace
// version / build args / labels with their string forms.
for build in &resources.builds {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
let mut parsed: OrderedHashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&build)?)?;
let config = parsed
.get_mut("config")
.context("build has no config?")?
.as_object_mut()
.context("config is not object?")?;
if let Some(version) = &build.config.version {
config.insert(
"version".to_string(),
Value::String(version.to_string()),
);
}
if let Some(build_args) = &build.config.build_args {
config.insert(
"build_args".to_string(),
Value::String(environment_vars_to_string(build_args)),
);
}
if let Some(labels) = &build.config.labels {
config.insert(
"labels".to_string(),
Value::String(environment_vars_to_string(labels)),
);
}
res.push_str("[[build]]\n");
res.push_str(
&toml_pretty::to_string(&parsed, options)
.context("failed to serialize builds to toml")?,
);
}
// [[repo]] sections.
for repo in &resources.repos {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[repo]]\n");
res.push_str(
&toml_pretty::to_string(&repo, options)
.context("failed to serialize repos to toml")?,
);
}
// [[procedure]] sections. Stages are pulled out of the config and
// emitted as separate [[procedure.config.stage]] tables.
for procedure in &resources.procedures {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
let mut parsed: OrderedHashMap<String, Value> =
serde_json::from_str(&serde_json::to_string(&procedure)?)?;
let config = parsed
.get_mut("config")
.context("procedure has no config?")?
.as_object_mut()
.context("config is not object?")?;
let stages = config
.remove("stages")
.context("procedure config has no stages")?;
let stages = stages.as_array().context("stages is not array")?;
res.push_str("[[procedure]]\n");
res.push_str(
&toml_pretty::to_string(&parsed, options)
.context("failed to serialize procedures to toml")?,
);
for stage in stages {
res.push_str("\n\n[[procedure.config.stage]]\n");
res.push_str(
&toml_pretty::to_string(stage, options)
.context("failed to serialize procedures to toml")?,
);
}
}
// [[alerter]] sections.
for alerter in &resources.alerters {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[alerter]]\n");
res.push_str(
&toml_pretty::to_string(&alerter, options)
.context("failed to serialize alerters to toml")?,
);
}
// [[builder]] sections.
for builder in &resources.builders {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[builder]]\n");
res.push_str(
&toml_pretty::to_string(&builder, options)
.context("failed to serialize builders to toml")?,
);
}
// [[server_template]] sections.
for server_template in &resources.server_templates {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[server_template]]\n");
res.push_str(
&toml_pretty::to_string(&server_template, options)
.context("failed to serialize server_templates to toml")?,
);
}
// [[resource_sync]] sections.
for resource_sync in &resources.resource_syncs {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[resource_sync]]\n");
res.push_str(
&toml_pretty::to_string(&resource_sync, options)
.context("failed to serialize resource_syncs to toml")?,
);
}
// [[variable]] sections.
for variable in &resources.variables {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[variable]]\n");
res.push_str(
&toml_pretty::to_string(&variable, options)
.context("failed to serialize variables to toml")?,
);
}
// [[user_group]] sections.
for user_group in &resources.user_groups {
if !res.is_empty() {
res.push_str("\n\n##\n\n");
}
res.push_str("[[user_group]]\n");
res.push_str(
&toml_pretty::to_string(&user_group, options)
.context("failed to serialize user_groups to toml")?,
);
}
Ok(res)
}

View File

@@ -0,0 +1,253 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use monitor_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
sync::ResourceSync,
update::{
ResourceTarget, ResourceTargetVariant, Update, UpdateListItem,
},
user::User,
},
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_resource_ids_for_non_admin,
resource,
state::{db_client, State},
};
const UPDATES_PER_PAGE: i64 = 100;

impl Resolve<ListUpdates, User> for State {
  /// List updates, most recent first, paged by UPDATES_PER_PAGE.
  /// Admins (or servers in transparent mode) see everything; other
  /// users only see updates on resources they have a permission for.
  async fn resolve(
    &self,
    ListUpdates { query, page }: ListUpdates,
    user: User,
  ) -> anyhow::Result<ListUpdatesResponse> {
    let query = if user.admin || core_config().transparent_mode {
      query
    } else {
      // Restrict the query to the resources this user can see: one
      // id list per resource type, or-ed together.
      let server_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Server,
      )
      .await?;
      let deployment_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Deployment,
      )
      .await?;
      let build_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Build,
      )
      .await?;
      let repo_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Repo,
      )
      .await?;
      let procedure_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Procedure,
      )
      .await?;
      let builder_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Builder,
      )
      .await?;
      let alerter_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Alerter,
      )
      .await?;
      let server_template_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::ServerTemplate,
      )
      .await?;
      let mut query = query.unwrap_or_default();
      query.extend(doc! {
        "$or": [
          { "target.type": "Server", "target.id": { "$in": &server_ids } },
          { "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
          { "target.type": "Build", "target.id": { "$in": &build_ids } },
          { "target.type": "Repo", "target.id": { "$in": &repo_ids } },
          { "target.type": "Procedure", "target.id": { "$in": &procedure_ids } },
          { "target.type": "Builder", "target.id": { "$in": &builder_ids } },
          { "target.type": "Alerter", "target.id": { "$in": &alerter_ids } },
          { "target.type": "ServerTemplate", "target.id": { "$in": &server_template_ids } },
        ]
      });
      query.into()
    };
    // Map operator user ids to usernames for display.
    let usernames =
      find_collect(&db_client().await.users, None, None)
        .await
        .context("failed to pull users from db")?
        .into_iter()
        .map(|u| (u.id, u.username))
        .collect::<HashMap<_, _>>();
    let updates = find_collect(
      &db_client().await.updates,
      query,
      FindOptions::builder()
        .sort(doc! { "start_ts": -1 })
        .skip(page as u64 * UPDATES_PER_PAGE as u64)
        .limit(UPDATES_PER_PAGE)
        .build(),
    )
    .await
    .context("failed to pull updates from db")?
    .into_iter()
    .map(|u| {
      // Service users carry their display name directly in
      // `operator`; regular users are resolved via the map above.
      let username = if User::is_service_user(&u.operator) {
        u.operator.clone()
      } else {
        usernames
          .get(&u.operator)
          .cloned()
          // unwrap_or_else defers the fallback allocation to the
          // miss path (unwrap_or allocated it for every update).
          .unwrap_or_else(|| "unknown".to_string())
      };
      UpdateListItem {
        username,
        id: u.id,
        operation: u.operation,
        start_ts: u.start_ts,
        success: u.success,
        operator: u.operator,
        target: u.target,
        status: u.status,
        version: u.version,
        other_data: u.other_data,
      }
    })
    .collect::<Vec<_>>();
    // A full page implies there may be more results.
    let next_page = if updates.len() == UPDATES_PER_PAGE as usize {
      Some(page + 1)
    } else {
      None
    };
    Ok(ListUpdatesResponse { updates, next_page })
  }
}
impl Resolve<GetUpdate, User> for State {
  /// Fetch a single update by id. Non-admin users (outside
  /// transparent mode) must hold at least Read permission on the
  /// update's target resource; System-targeted updates are admin-only.
  async fn resolve(
    &self,
    GetUpdate { id }: GetUpdate,
    user: User,
  ) -> anyhow::Result<Update> {
    let update = find_one_by_id(&db_client().await.updates, &id)
      .await
      .context("failed to query to db")?
      .context("no update exists with given id")?;
    // Admins and transparent mode bypass the permission check.
    if user.admin || core_config().transparent_mode {
      return Ok(update);
    }
    // Dispatch the Read-permission check to the concrete resource
    // type named by the update's target. Any failure propagates as
    // an Err, denying access.
    match &update.target {
      ResourceTarget::System(_) => {
        return Err(anyhow!(
          "user must be admin to view system updates"
        ))
      }
      ResourceTarget::Server(id) => {
        resource::get_check_permissions::<Server>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Deployment(id) => {
        resource::get_check_permissions::<Deployment>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Build(id) => {
        resource::get_check_permissions::<Build>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Repo(id) => {
        resource::get_check_permissions::<Repo>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Builder(id) => {
        resource::get_check_permissions::<Builder>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Alerter(id) => {
        resource::get_check_permissions::<Alerter>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Procedure(id) => {
        resource::get_check_permissions::<Procedure>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::ServerTemplate(id) => {
        resource::get_check_permissions::<ServerTemplate>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::ResourceSync(id) => {
        resource::get_check_permissions::<ResourceSync>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
    }
    Ok(update)
  }
}

View File

@@ -0,0 +1,118 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::read::{
GetUsername, GetUsernameResponse, ListApiKeys,
ListApiKeysForServiceUser, ListApiKeysForServiceUserResponse,
ListApiKeysResponse, ListUsers, ListUsersResponse,
},
entities::user::{User, UserConfig},
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
impl Resolve<GetUsername, User> for State {
async fn resolve(
&self,
GetUsername { user_id }: GetUsername,
_: User,
) -> anyhow::Result<GetUsernameResponse> {
let user = find_one_by_id(&db_client().await.users, &user_id)
.await
.context("failed at mongo query for user")?
.context("no user found with id")?;
let avatar = match user.config {
UserConfig::Github { avatar, .. } => Some(avatar),
UserConfig::Google { avatar, .. } => Some(avatar),
_ => None,
};
Ok(GetUsernameResponse {
username: user.username,
avatar,
})
}
}
impl Resolve<ListUsers, User> for State {
  /// Admin-only: list all user accounts, sanitized.
  async fn resolve(
    &self,
    ListUsers {}: ListUsers,
    user: User,
  ) -> anyhow::Result<ListUsersResponse> {
    if !user.admin {
      return Err(anyhow!("this route is only accessable by admins"));
    }
    // Pull every user, sorted by username for a stable order.
    let mut users = find_collect(
      &db_client().await.users,
      None,
      FindOptions::builder().sort(doc! { "username": 1 }).build(),
    )
    .await
    .context("failed to pull users from db")?;
    // Sanitize each record before it leaves the server.
    for user in &mut users {
      user.sanitize();
    }
    Ok(users)
  }
}
impl Resolve<ListApiKeys, User> for State {
  /// List the calling user's api keys, sanitized, sorted by name.
  async fn resolve(
    &self,
    ListApiKeys {}: ListApiKeys,
    user: User,
  ) -> anyhow::Result<ListApiKeysResponse> {
    let keys = find_collect(
      &db_client().await.api_keys,
      doc! { "user_id": &user.id },
      FindOptions::builder().sort(doc! { "name": 1 }).build(),
    )
    .await
    .context("failed to query db for api keys")?;
    // Sanitize each key before returning it to the caller.
    let api_keys = keys
      .into_iter()
      .map(|mut key| {
        key.sanitize();
        key
      })
      .collect();
    Ok(api_keys)
  }
}
impl Resolve<ListApiKeysForServiceUser, User> for State {
  /// Admin-only: list the api keys attached to a service user.
  async fn resolve(
    &self,
    ListApiKeysForServiceUser { user_id }: ListApiKeysForServiceUser,
    admin: User,
  ) -> anyhow::Result<ListApiKeysForServiceUserResponse> {
    if !admin.admin {
      return Err(anyhow!("This method is admin only."));
    }
    // The target account must exist and actually be a service user.
    let user = find_one_by_id(&db_client().await.users, &user_id)
      .await
      .context("failed to query db for users")?
      .context("user at id not found")?;
    if !matches!(user.config, UserConfig::Service { .. }) {
      return Err(anyhow!("Given user is not service user"));
    }
    // Collect and sanitize the service user's keys.
    let api_keys = find_collect(
      &db_client().await.api_keys,
      doc! { "user_id": user_id },
      None,
    )
    .await
    .context("failed to query db for api keys")?
    .into_iter()
    .map(|mut key| {
      key.sanitize();
      key
    })
    .collect();
    Ok(api_keys)
  }
}

View File

@@ -0,0 +1,65 @@
use std::str::FromStr;
use anyhow::Context;
use monitor_client::{
api::read::{
GetUserGroup, GetUserGroupResponse, ListUserGroups,
ListUserGroupsResponse,
},
entities::user::User,
};
use mungos::{
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId, Document},
options::FindOptions,
},
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
impl Resolve<GetUserGroup, User> for State {
  /// Fetch a user group by id or name. Non-admins may only fetch
  /// groups they belong to.
  async fn resolve(
    &self,
    GetUserGroup { user_group }: GetUserGroup,
    user: User,
  ) -> anyhow::Result<GetUserGroupResponse> {
    // The identifier may be a mongo ObjectId or the group's name.
    let mut filter = if let Ok(id) = ObjectId::from_str(&user_group) {
      doc! { "_id": id }
    } else {
      doc! { "name": &user_group }
    };
    // For non-admins, additionally require the group's `users` array
    // to contain the caller's id.
    if !user.admin {
      filter.insert("users", &user.id);
    }
    db_client()
      .await
      .user_groups
      .find_one(filter, None)
      .await
      .context("failed to query db for user groups")?
      .context("no UserGroup found with given name or id")
  }
}
impl Resolve<ListUserGroups, User> for State {
  /// List user groups, sorted by name. Admins see every group;
  /// everyone else only the groups containing them.
  async fn resolve(
    &self,
    ListUserGroups {}: ListUserGroups,
    user: User,
  ) -> anyhow::Result<ListUserGroupsResponse> {
    let filter = if user.admin {
      Document::new()
    } else {
      doc! { "users": &user.id }
    };
    find_collect(
      &db_client().await.user_groups,
      filter,
      FindOptions::builder().sort(doc! { "name": 1 }).build(),
    )
    .await
    .context("failed to query db for UserGroups")
  }
}

View File

@@ -0,0 +1,47 @@
use anyhow::Context;
use mongo_indexed::doc;
use monitor_client::{
api::read::{
GetVariable, GetVariableResponse, ListVariables,
ListVariablesResponse,
},
entities::user::User,
};
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_variable,
state::{db_client, State},
};
impl Resolve<GetVariable, User> for State {
  /// Fetch a single variable by name. Readable by any authenticated
  /// user (the `User` argument is unused); delegates to the
  /// `get_variable` query helper.
  async fn resolve(
    &self,
    GetVariable { name }: GetVariable,
    _: User,
  ) -> anyhow::Result<GetVariableResponse> {
    get_variable(&name).await
  }
}
impl Resolve<ListVariables, User> for State {
  /// List all variables (sorted by name) plus the *names* of
  /// configured core secrets. Only secret keys are exposed here —
  /// secret values are never returned.
  async fn resolve(
    &self,
    ListVariables {}: ListVariables,
    _: User,
  ) -> anyhow::Result<ListVariablesResponse> {
    let options =
      FindOptions::builder().sort(doc! { "name": 1 }).build();
    let variables =
      find_collect(&db_client().await.variables, None, options)
        .await
        .context("failed to query db for variables")?;
    let secrets = core_config().secrets.keys().cloned().collect();
    Ok(ListVariablesResponse { variables, secrets })
  }
}

225
bin/core/src/api/user.rs Normal file
View File

@@ -0,0 +1,225 @@
use std::{collections::VecDeque, time::Instant};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Json, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use mongo_indexed::doc;
use monitor_client::{
api::user::{
CreateApiKey, CreateApiKeyResponse, DeleteApiKey,
DeleteApiKeyResponse, PushRecentlyViewed,
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse,
},
entities::{
api_key::ApiKey, monitor_timestamp, update::ResourceTarget,
user::User,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::{auth_request, random_string},
helpers::query::get_user,
state::{db_client, State},
};
/// Requests any authenticated user may make about their own account.
/// Serialized over the wire as `{ "type": ..., "params": ... }`.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum UserRequest {
  /// Record a resource in the user's recently-viewed lists.
  PushRecentlyViewed(PushRecentlyViewed),
  /// Stamp `last_update_view` with the current time.
  SetLastSeenUpdate(SetLastSeenUpdate),
  /// Create an api key for the calling user.
  CreateApiKey(CreateApiKey),
  /// Delete one of the calling user's api keys.
  DeleteApiKey(DeleteApiKey),
}
/// Router for `/user` requests. All routes are authenticated via the
/// `auth_request` middleware, which makes the `User` extension
/// available to the handler.
pub fn router() -> Router {
  Router::new()
    .route("/", post(handler))
    .layer(middleware::from_fn(auth_request))
}
#[instrument(name = "UserHandler", level = "debug", skip(user))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<UserRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
debug!(
"/user request {req_id} | user: {} ({})",
user.username, user.id
);
let res =
State
.resolve_request(request, user)
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/user request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/user request {req_id} | resolve time: {elapsed:?}");
Ok((TypedHeader(ContentType::json()), res?))
}
/// Cap on the number of entries kept in each of the user's
/// recently-viewed lists.
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<PushRecentlyViewed, User> for State {
  /// Push a resource to the front of the user's recently-viewed list
  /// for that resource type, deduplicating and capping the length.
  #[instrument(
    name = "PushRecentlyViewed",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    PushRecentlyViewed { resource }: PushRecentlyViewed,
    user: User,
  ) -> anyhow::Result<PushRecentlyViewedResponse> {
    // Reload the user for their current recents lists.
    let user = get_user(&user.id).await?;
    // Select the list + db field for this resource type. Types
    // without a recents list are accepted but ignored.
    let (recents, id, field) = match resource {
      ResourceTarget::Server(id) => {
        (user.recent_servers, id, "recent_servers")
      }
      ResourceTarget::Deployment(id) => {
        (user.recent_deployments, id, "recent_deployments")
      }
      ResourceTarget::Build(id) => {
        (user.recent_builds, id, "recent_builds")
      }
      ResourceTarget::Repo(id) => {
        (user.recent_repos, id, "recent_repos")
      }
      ResourceTarget::Procedure(id) => {
        (user.recent_procedures, id, "recent_procedures")
      }
      _ => return Ok(PushRecentlyViewedResponse {}),
    };
    // Drop any existing occurrence of the id, cap the list at
    // RECENTLY_VIEWED_MAX, then put the new id at the front.
    let mut recents = recents
      .into_iter()
      .filter(|existing| existing != &id)
      .take(RECENTLY_VIEWED_MAX - 1)
      .collect::<VecDeque<_>>();
    recents.push_front(id);
    update_one_by_id(
      &db_client().await.users,
      &user.id,
      mungos::update::Update::Set(
        doc! { field: to_bson(&recents)? },
      ),
      None,
    )
    .await
    .with_context(|| format!("failed to update {field}"))?;
    Ok(PushRecentlyViewedResponse {})
  }
}
impl Resolve<SetLastSeenUpdate, User> for State {
  /// Stamp the user's `last_update_view` with the current timestamp
  /// (presumably consumed by clients to mark updates as seen —
  /// verify against the frontend).
  #[instrument(
    name = "SetLastSeenUpdate",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    SetLastSeenUpdate {}: SetLastSeenUpdate,
    user: User,
  ) -> anyhow::Result<SetLastSeenUpdateResponse> {
    let update = mungos::update::Update::Set(doc! {
      "last_update_view": monitor_timestamp()
    });
    update_one_by_id(&db_client().await.users, &user.id, update, None)
      .await
      .context("failed to update user last_update_view")?;
    Ok(SetLastSeenUpdateResponse {})
  }
}
/// Length of the random portion of generated api keys and secrets.
const SECRET_LENGTH: usize = 40;
/// bcrypt cost factor used when hashing api key secrets.
const BCRYPT_COST: u32 = 10;
impl Resolve<CreateApiKey, User> for State {
  /// Mint a new api key for the calling user. The plaintext secret is
  /// returned exactly once; only its bcrypt hash is persisted.
  #[instrument(
    name = "CreateApiKey",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    CreateApiKey { name, expires }: CreateApiKey,
    user: User,
  ) -> anyhow::Result<CreateApiKeyResponse> {
    let user = get_user(&user.id).await?;
    // "K-" / "S-" prefixes distinguish key from secret.
    let key = format!("K-{}", random_string(SECRET_LENGTH));
    let secret = format!("S-{}", random_string(SECRET_LENGTH));
    let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
      .context("failed at hashing secret string")?;
    db_client()
      .await
      .api_keys
      .insert_one(
        ApiKey {
          name,
          key: key.clone(),
          secret: secret_hash,
          user_id: user.id.clone(),
          created_at: monitor_timestamp(),
          expires,
        },
        None,
      )
      .await
      .context("failed to create api key on db")?;
    Ok(CreateApiKeyResponse { key, secret })
  }
}
impl Resolve<DeleteApiKey, User> for State {
  /// Delete one of the calling user's api keys by key string.
  #[instrument(
    name = "DeleteApiKey",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    DeleteApiKey { key }: DeleteApiKey,
    user: User,
  ) -> anyhow::Result<DeleteApiKeyResponse> {
    let client = db_client().await;
    // Fetch first so ownership can be verified before deleting.
    let api_key = client
      .api_keys
      .find_one(doc! { "key": &key }, None)
      .await
      .context("failed at db query")?
      .context("no api key with key found")?;
    // Users may only delete keys they own.
    if api_key.user_id != user.id {
      return Err(anyhow!("api key does not belong to user"));
    }
    client
      .api_keys
      .delete_one(doc! { "key": api_key.key }, None)
      .await
      .context("failed to delete api key from db")?;
    Ok(DeleteApiKeyResponse {})
  }
}

View File

@@ -0,0 +1,61 @@
use monitor_client::{
api::write::{
CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter,
},
entities::{
alerter::Alerter, permission::PermissionLevel, user::User,
},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<CreateAlerter, User> for State {
  /// Create a new alerter from name + partial config. Thin wrapper
  /// delegating to the generic `resource::create`.
  #[instrument(name = "CreateAlerter", skip(self, user))]
  async fn resolve(
    &self,
    CreateAlerter { name, config }: CreateAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    resource::create::<Alerter>(&name, config, &user).await
  }
}
impl Resolve<CopyAlerter, User> for State {
  /// Create a new alerter under `name` with the config of an
  /// existing one. Requires Write permission on the source.
  #[instrument(name = "CopyAlerter", skip(self, user))]
  async fn resolve(
    &self,
    CopyAlerter { name, id }: CopyAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    let source = resource::get_check_permissions::<Alerter>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<Alerter>(&name, source.config.into(), &user)
      .await
  }
}
impl Resolve<DeleteAlerter, User> for State {
  /// Delete an alerter by id. Thin wrapper delegating to the generic
  /// `resource::delete`.
  #[instrument(name = "DeleteAlerter", skip(self, user))]
  async fn resolve(
    &self,
    DeleteAlerter { id }: DeleteAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    resource::delete::<Alerter>(&id, &user).await
  }
}
impl Resolve<UpdateAlerter, User> for State {
  /// Apply a partial config update to an alerter. Thin wrapper
  /// delegating to the generic `resource::update`.
  #[instrument(name = "UpdateAlerter", skip(self, user))]
  async fn resolve(
    &self,
    UpdateAlerter { id, config }: UpdateAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    resource::update::<Alerter>(&id, config, &user).await
  }
}

View File

@@ -0,0 +1,58 @@
use monitor_client::{
api::write::*,
entities::{build::Build, permission::PermissionLevel, user::User},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<CreateBuild, User> for State {
  /// Create a new build from name + partial config. Thin wrapper
  /// delegating to the generic `resource::create`.
  #[instrument(name = "CreateBuild", skip(self, user))]
  async fn resolve(
    &self,
    CreateBuild { name, config }: CreateBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    resource::create::<Build>(&name, config, &user).await
  }
}
impl Resolve<CopyBuild, User> for State {
  /// Create a new build under `name` with the config of an existing
  /// one. Requires Write permission on the source build.
  #[instrument(name = "CopyBuild", skip(self, user))]
  async fn resolve(
    &self,
    CopyBuild { name, id }: CopyBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    let source = resource::get_check_permissions::<Build>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<Build>(&name, source.config.into(), &user)
      .await
  }
}
impl Resolve<DeleteBuild, User> for State {
  /// Delete a build by id. Thin wrapper delegating to the generic
  /// `resource::delete`.
  #[instrument(name = "DeleteBuild", skip(self, user))]
  async fn resolve(
    &self,
    DeleteBuild { id }: DeleteBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    resource::delete::<Build>(&id, &user).await
  }
}
impl Resolve<UpdateBuild, User> for State {
  /// Apply a partial config update to a build. Thin wrapper
  /// delegating to the generic `resource::update`.
  #[instrument(name = "UpdateBuild", skip(self, user))]
  async fn resolve(
    &self,
    UpdateBuild { id, config }: UpdateBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    resource::update::<Build>(&id, config, &user).await
  }
}

View File

@@ -0,0 +1,59 @@
use monitor_client::{
api::write::*,
entities::{
builder::Builder, permission::PermissionLevel, user::User,
},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<CreateBuilder, User> for State {
  /// Create a new builder from name + partial config. Thin wrapper
  /// delegating to the generic `resource::create`.
  #[instrument(name = "CreateBuilder", skip(self, user))]
  async fn resolve(
    &self,
    CreateBuilder { name, config }: CreateBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    resource::create::<Builder>(&name, config, &user).await
  }
}
impl Resolve<CopyBuilder, User> for State {
  /// Create a new builder under `name` with the config of an
  /// existing one. Requires Write permission on the source.
  #[instrument(name = "CopyBuilder", skip(self, user))]
  async fn resolve(
    &self,
    CopyBuilder { name, id }: CopyBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    let source = resource::get_check_permissions::<Builder>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<Builder>(&name, source.config.into(), &user)
      .await
  }
}
impl Resolve<DeleteBuilder, User> for State {
  /// Delete a builder by id. Thin wrapper delegating to the generic
  /// `resource::delete`.
  #[instrument(name = "DeleteBuilder", skip(self, user))]
  async fn resolve(
    &self,
    DeleteBuilder { id }: DeleteBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    resource::delete::<Builder>(&id, &user).await
  }
}
impl Resolve<UpdateBuilder, User> for State {
  /// Apply a partial config update to a builder. Thin wrapper
  /// delegating to the generic `resource::update`.
  #[instrument(name = "UpdateBuilder", skip(self, user))]
  async fn resolve(
    &self,
    UpdateBuilder { id, config }: UpdateBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    resource::update::<Builder>(&id, config, &user).await
  }
}

View File

@@ -0,0 +1,155 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::*,
entities::{
deployment::{Deployment, DeploymentState},
monitor_timestamp,
permission::PermissionLevel,
server::Server,
to_monitor_name,
update::Update,
user::User,
Operation,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client,
query::get_deployment_state,
update::{add_update, make_update},
},
resource,
state::{action_states, db_client, State},
};
impl Resolve<CreateDeployment, User> for State {
  /// Create a new deployment from name + partial config. Thin
  /// wrapper delegating to the generic `resource::create`.
  #[instrument(name = "CreateDeployment", skip(self, user))]
  async fn resolve(
    &self,
    CreateDeployment { name, config }: CreateDeployment,
    user: User,
  ) -> anyhow::Result<Deployment> {
    resource::create::<Deployment>(&name, config, &user).await
  }
}
impl Resolve<CopyDeployment, User> for State {
  /// Create a new deployment under `name` with the config of an
  /// existing one. Requires Write permission on the source.
  #[instrument(name = "CopyDeployment", skip(self, user))]
  async fn resolve(
    &self,
    CopyDeployment { name, id }: CopyDeployment,
    user: User,
  ) -> anyhow::Result<Deployment> {
    let source = resource::get_check_permissions::<Deployment>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<Deployment>(&name, source.config.into(), &user)
      .await
  }
}
impl Resolve<DeleteDeployment, User> for State {
  /// Delete a deployment by id. Thin wrapper delegating to the
  /// generic `resource::delete`.
  #[instrument(name = "DeleteDeployment", skip(self, user))]
  async fn resolve(
    &self,
    DeleteDeployment { id }: DeleteDeployment,
    user: User,
  ) -> anyhow::Result<Deployment> {
    resource::delete::<Deployment>(&id, &user).await
  }
}
impl Resolve<UpdateDeployment, User> for State {
  /// Apply a partial config update to a deployment. Thin wrapper
  /// delegating to the generic `resource::update`.
  #[instrument(name = "UpdateDeployment", skip(self, user))]
  async fn resolve(
    &self,
    UpdateDeployment { id, config }: UpdateDeployment,
    user: User,
  ) -> anyhow::Result<Deployment> {
    resource::update::<Deployment>(&id, config, &user).await
  }
}
impl Resolve<RenameDeployment, User> for State {
  /// Rename a deployment, and rename its container on the target
  /// server when one is deployed. Requires Write permission. Guarded
  /// by the deployment's action state so concurrent actions on the
  /// same deployment are rejected.
  #[instrument(name = "RenameDeployment", skip(self, user))]
  async fn resolve(
    &self,
    RenameDeployment { id, name }: RenameDeployment,
    user: User,
  ) -> anyhow::Result<Update> {
    let deployment = resource::get_check_permissions::<Deployment>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // get the action state for the deployment (or insert default).
    let action_state = action_states()
      .deployment
      .get_or_insert_default(&deployment.id)
      .await;
    // Will check to ensure deployment not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.renaming = true)?;
    // Normalize the requested name to the allowed format.
    let name = to_monitor_name(&name);
    // The container state determines whether a container rename on
    // the periphery server is also needed.
    let container_state = get_deployment_state(&deployment).await?;
    if container_state == DeploymentState::Unknown {
      return Err(anyhow!(
        "cannot rename deployment when container status is unknown"
      ));
    }
    let mut update =
      make_update(&deployment, Operation::RenameDeployment, &user);
    // Rename the db document first; the container rename below only
    // happens if this succeeds.
    update_one_by_id(
      &db_client().await.deployments,
      &deployment.id,
      mungos::update::Update::Set(
        doc! { "name": &name, "updated_at": monitor_timestamp() },
      ),
      None,
    )
    .await
    .context("failed to update deployment name on db")?;
    // If a container exists for this deployment, rename it on the
    // periphery server to keep it in sync with the new name.
    if container_state != DeploymentState::NotDeployed {
      let server =
        resource::get::<Server>(&deployment.config.server_id).await?;
      let log = periphery_client(&server)?
        .request(api::container::RenameContainer {
          curr_name: deployment.name.clone(),
          new_name: name.clone(),
        })
        .await
        .context("failed to rename container on server")?;
      update.logs.push(log);
    }
    update.push_simple_log(
      "rename deployment",
      format!(
        "renamed deployment from {} to {}",
        deployment.name, name
      ),
    );
    update.finalize();
    // Persist the update (audit log entry) and return it.
    add_update(update.clone()).await?;
    Ok(update)
  }
}

View File

@@ -0,0 +1,106 @@
use anyhow::anyhow;
use monitor_client::{
api::write::{UpdateDescription, UpdateDescriptionResponse},
entities::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate,
sync::ResourceSync, update::ResourceTarget, user::User,
},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<UpdateDescription, User> for State {
  /// Update the description text of any resource. Dispatches on the
  /// target's resource type to the generic
  /// `resource::update_description`, which handles the permission
  /// check for each concrete type. System targets are rejected.
  #[instrument(name = "UpdateDescription", skip(self, user))]
  async fn resolve(
    &self,
    UpdateDescription {
      target,
      description,
    }: UpdateDescription,
    user: User,
  ) -> anyhow::Result<UpdateDescriptionResponse> {
    match target {
      ResourceTarget::System(_) => {
        return Err(anyhow!(
          "cannot update description of System resource target"
        ))
      }
      ResourceTarget::Server(id) => {
        resource::update_description::<Server>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::Deployment(id) => {
        resource::update_description::<Deployment>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::Build(id) => {
        resource::update_description::<Build>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::Repo(id) => {
        resource::update_description::<Repo>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::Builder(id) => {
        resource::update_description::<Builder>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::Alerter(id) => {
        resource::update_description::<Alerter>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::Procedure(id) => {
        resource::update_description::<Procedure>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::ServerTemplate(id) => {
        resource::update_description::<ServerTemplate>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
      ResourceTarget::ResourceSync(id) => {
        resource::update_description::<ResourceSync>(
          &id,
          &description,
          &user,
        )
        .await?;
      }
    }
    Ok(UpdateDescriptionResponse {})
  }
}

View File

@@ -0,0 +1,181 @@
use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::write::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{auth::auth_request, state::State};
mod alerter;
mod build;
mod builder;
mod deployment;
mod description;
mod permissions;
mod procedure;
mod repo;
mod server;
mod server_template;
mod service_user;
mod sync;
mod tag;
mod user_group;
mod variable;
/// The full set of mutating API requests served under `/write`.
/// Serialized over the wire as `{ "type": ..., "params": ... }`;
/// each variant resolves against `State` with the authenticated
/// `User` as argument.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
pub enum WriteRequest {
  // ==== SERVICE USER ====
  CreateServiceUser(CreateServiceUser),
  UpdateServiceUserDescription(UpdateServiceUserDescription),
  CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
  DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
  // ==== USER GROUP ====
  CreateUserGroup(CreateUserGroup),
  RenameUserGroup(RenameUserGroup),
  DeleteUserGroup(DeleteUserGroup),
  AddUserToUserGroup(AddUserToUserGroup),
  RemoveUserFromUserGroup(RemoveUserFromUserGroup),
  SetUsersInUserGroup(SetUsersInUserGroup),
  // ==== PERMISSIONS ====
  UpdateUserBasePermissions(UpdateUserBasePermissions),
  UpdatePermissionOnTarget(UpdatePermissionOnTarget),
  // ==== DESCRIPTION ====
  UpdateDescription(UpdateDescription),
  // ==== SERVER ====
  CreateServer(CreateServer),
  DeleteServer(DeleteServer),
  UpdateServer(UpdateServer),
  RenameServer(RenameServer),
  CreateNetwork(CreateNetwork),
  DeleteNetwork(DeleteNetwork),
  // ==== DEPLOYMENT ====
  CreateDeployment(CreateDeployment),
  CopyDeployment(CopyDeployment),
  DeleteDeployment(DeleteDeployment),
  UpdateDeployment(UpdateDeployment),
  RenameDeployment(RenameDeployment),
  // ==== BUILD ====
  CreateBuild(CreateBuild),
  CopyBuild(CopyBuild),
  DeleteBuild(DeleteBuild),
  UpdateBuild(UpdateBuild),
  // ==== BUILDER ====
  CreateBuilder(CreateBuilder),
  CopyBuilder(CopyBuilder),
  DeleteBuilder(DeleteBuilder),
  UpdateBuilder(UpdateBuilder),
  // ==== SERVER TEMPLATE ====
  CreateServerTemplate(CreateServerTemplate),
  CopyServerTemplate(CopyServerTemplate),
  DeleteServerTemplate(DeleteServerTemplate),
  UpdateServerTemplate(UpdateServerTemplate),
  // ==== REPO ====
  CreateRepo(CreateRepo),
  CopyRepo(CopyRepo),
  DeleteRepo(DeleteRepo),
  UpdateRepo(UpdateRepo),
  // ==== ALERTER ====
  CreateAlerter(CreateAlerter),
  CopyAlerter(CopyAlerter),
  DeleteAlerter(DeleteAlerter),
  UpdateAlerter(UpdateAlerter),
  // ==== PROCEDURE ====
  CreateProcedure(CreateProcedure),
  CopyProcedure(CopyProcedure),
  DeleteProcedure(DeleteProcedure),
  UpdateProcedure(UpdateProcedure),
  // ==== SYNC ====
  CreateResourceSync(CreateResourceSync),
  CopyResourceSync(CopyResourceSync),
  DeleteResourceSync(DeleteResourceSync),
  UpdateResourceSync(UpdateResourceSync),
  RefreshResourceSyncPending(RefreshResourceSyncPending),
  // ==== TAG ====
  CreateTag(CreateTag),
  DeleteTag(DeleteTag),
  RenameTag(RenameTag),
  UpdateTagsOnResource(UpdateTagsOnResource),
  // ==== VARIABLE ====
  CreateVariable(CreateVariable),
  UpdateVariableValue(UpdateVariableValue),
  UpdateVariableDescription(UpdateVariableDescription),
  DeleteVariable(DeleteVariable),
}
/// Router for `/write` requests. All routes are authenticated via
/// the `auth_request` middleware, which makes the `User` extension
/// available to the handler.
pub fn router() -> Router {
  Router::new()
    .route("/", post(handler))
    .layer(middleware::from_fn(auth_request))
}
/// Entry point for `/write` requests. The actual resolution runs in
/// a spawned task (see `task`), so a join failure there surfaces
/// here as an error rather than tearing down the connection handler.
async fn handler(
  Extension(user): Extension<User>,
  Json(request): Json<WriteRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
  let req_id = Uuid::new_v4();
  let joined = tokio::spawn(task(req_id, request, user)).await;
  let res = joined.context("failure in spawned task");
  if let Err(e) = &res {
    warn!("/write request {req_id} spawn error: {e:#}");
  }
  let body = res??;
  Ok((TypedHeader(ContentType::json()), body))
}
#[instrument(name = "WriteRequest", skip(user), fields(user_id = user.id))]
async fn task(
req_id: Uuid,
request: WriteRequest,
user: User,
) -> anyhow::Result<String> {
info!("/write request | user: {}", user.username);
let timer = Instant::now();
let res =
State
.resolve_request(request, user)
.await
.map_err(|e| match e {
resolver_api::Error::Serialization(e) => {
anyhow!("{e:?}").context("response serialization error")
}
resolver_api::Error::Inner(e) => e,
});
if let Err(e) = &res {
warn!("/write request {req_id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/write request {req_id} | resolve time: {elapsed:?}");
res
}

View File

@@ -0,0 +1,323 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
UpdatePermissionOnTarget, UpdatePermissionOnTargetResponse,
UpdateUserBasePermissions, UpdateUserBasePermissionsResponse,
},
entities::{
permission::{UserTarget, UserTargetVariant},
update::{ResourceTarget, ResourceTargetVariant},
user::User,
},
};
use mungos::{
by_id::{find_one_by_id, update_one_by_id},
mongodb::{
bson::{doc, oid::ObjectId, Document},
options::UpdateOptions,
},
};
use resolver_api::Resolve;
use crate::{
helpers::query::get_user,
state::{db_client, State},
};
impl Resolve<UpdateUserBasePermissions, User> for State {
  /// Admin-only: toggle a non-admin user's enabled flag and their
  /// base create-server / create-build permissions. Only fields
  /// provided as `Some` are written.
  #[instrument(name = "UpdateUserBasePermissions", skip(self, admin))]
  async fn resolve(
    &self,
    UpdateUserBasePermissions {
      user_id,
      enabled,
      create_servers,
      create_builds,
    }: UpdateUserBasePermissions,
    admin: User,
  ) -> anyhow::Result<UpdateUserBasePermissionsResponse> {
    if !admin.admin {
      return Err(anyhow!("this method is admin only"));
    }
    let user = find_one_by_id(&db_client().await.users, &user_id)
      .await
      .context("failed to query mongo for user")?
      .context("did not find user with given id")?;
    // Other admins cannot be modified through this endpoint.
    if user.admin {
      return Err(anyhow!(
        "cannot use this method to update other admins permissions"
      ));
    }
    // Build a $set doc containing only the provided fields.
    let mut update_doc = Document::new();
    if let Some(enabled) = enabled {
      update_doc.insert("enabled", enabled);
    }
    if let Some(create_servers) = create_servers {
      update_doc.insert("create_server_permissions", create_servers);
    }
    if let Some(create_builds) = create_builds {
      update_doc.insert("create_build_permissions", create_builds);
    }
    update_one_by_id(
      &db_client().await.users,
      &user_id,
      mungos::update::Update::Set(update_doc),
      None,
    )
    .await?;
    Ok(UpdateUserBasePermissionsResponse {})
  }
}
impl Resolve<UpdatePermissionOnTarget, User> for State {
  /// Admin-only: set the permission level a user or user group holds
  /// on a specific resource. Upserts the permission document, so a
  /// missing permission record is created on first grant.
  #[instrument(name = "UpdatePermissionOnTarget", skip(self, admin))]
  async fn resolve(
    &self,
    UpdatePermissionOnTarget {
      user_target,
      resource_target,
      permission,
    }: UpdatePermissionOnTarget,
    admin: User,
  ) -> anyhow::Result<UpdatePermissionOnTargetResponse> {
    if !admin.admin {
      return Err(anyhow!("this method is admin only"));
    }
    // Some extra checks if user target is an actual User
    if let UserTarget::User(user_id) = &user_target {
      let user = get_user(user_id).await?;
      if user.admin {
        return Err(anyhow!(
          "cannot use this method to update other admins permissions"
        ));
      }
      if !user.enabled {
        return Err(anyhow!("user not enabled"));
      }
    }
    // Normalize both targets: names are replaced by canonical ids.
    let (user_target_variant, user_target_id) =
      extract_user_target_with_validation(&user_target).await?;
    let (resource_variant, resource_id) =
      extract_resource_target_with_validation(&resource_target)
        .await?;
    let (user_target_variant, resource_variant) =
      (user_target_variant.as_ref(), resource_variant.as_ref());
    // Upsert the (user target, resource target) permission record
    // with the new level.
    db_client()
      .await
      .permissions
      .update_one(
        doc! {
          "user_target.type": user_target_variant,
          "user_target.id": &user_target_id,
          "resource_target.type": resource_variant,
          "resource_target.id": &resource_id
        },
        doc! {
          "$set": {
            "user_target.type": user_target_variant,
            "user_target.id": user_target_id,
            "resource_target.type": resource_variant,
            "resource_target.id": resource_id,
            "level": permission.as_ref(),
          }
        },
        UpdateOptions::builder().upsert(true).build(),
      )
      .await?;
    Ok(UpdatePermissionOnTargetResponse {})
  }
}
/// Resolves a [UserTarget] to its variant + database id.
///
/// The inner string may be either an ObjectId or a human readable
/// identifier (username for users, name for user groups); in the
/// latter case it is swapped for the matching document's id.
async fn extract_user_target_with_validation(
  user_target: &UserTarget,
) -> anyhow::Result<(UserTargetVariant, String)> {
  match user_target {
    UserTarget::User(ident) => {
      // Accept either a raw ObjectId or a username.
      let filter = ObjectId::from_str(ident)
        .map(|id| doc! { "_id": id })
        .unwrap_or_else(|_| doc! { "username": ident });
      let user = db_client()
        .await
        .users
        .find_one(filter, None)
        .await
        .context("failed to query db for users")?
        .context("no matching user found")?;
      Ok((UserTargetVariant::User, user.id))
    }
    UserTarget::UserGroup(ident) => {
      // Accept either a raw ObjectId or a group name.
      let filter = ObjectId::from_str(ident)
        .map(|id| doc! { "_id": id })
        .unwrap_or_else(|_| doc! { "name": ident });
      let group = db_client()
        .await
        .user_groups
        .find_one(filter, None)
        .await
        .context("failed to query db for user_groups")?
        .context("no matching user_group found")?;
      Ok((UserTargetVariant::UserGroup, group.id))
    }
  }
}
/// checks if inner id is actually a `name`, and replaces it with id if so.
async fn extract_resource_target_with_validation(
resource_target: &ResourceTarget,
) -> anyhow::Result<(ResourceTargetVariant, String)> {
match resource_target {
ResourceTarget::System(_) => {
let res = resource_target.extract_variant_id();
Ok((res.0, res.1.clone()))
}
ResourceTarget::Build(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.builds
.find_one(filter, None)
.await
.context("failed to query db for builds")?
.context("no matching build found")?
.id;
Ok((ResourceTargetVariant::Build, id))
}
ResourceTarget::Builder(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.builders
.find_one(filter, None)
.await
.context("failed to query db for builders")?
.context("no matching builder found")?
.id;
Ok((ResourceTargetVariant::Builder, id))
}
ResourceTarget::Deployment(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.deployments
.find_one(filter, None)
.await
.context("failed to query db for deployments")?
.context("no matching deployment found")?
.id;
Ok((ResourceTargetVariant::Deployment, id))
}
ResourceTarget::Server(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.servers
.find_one(filter, None)
.await
.context("failed to query db for servers")?
.context("no matching server found")?
.id;
Ok((ResourceTargetVariant::Server, id))
}
ResourceTarget::Repo(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.repos
.find_one(filter, None)
.await
.context("failed to query db for repos")?
.context("no matching repo found")?
.id;
Ok((ResourceTargetVariant::Repo, id))
}
ResourceTarget::Alerter(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.alerters
.find_one(filter, None)
.await
.context("failed to query db for alerters")?
.context("no matching alerter found")?
.id;
Ok((ResourceTargetVariant::Alerter, id))
}
ResourceTarget::Procedure(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.procedures
.find_one(filter, None)
.await
.context("failed to query db for procedures")?
.context("no matching procedure found")?
.id;
Ok((ResourceTargetVariant::Procedure, id))
}
ResourceTarget::ServerTemplate(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.server_templates
.find_one(filter, None)
.await
.context("failed to query db for server templates")?
.context("no matching server template found")?
.id;
Ok((ResourceTargetVariant::ServerTemplate, id))
}
ResourceTarget::ResourceSync(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.await
.resource_syncs
.find_one(filter, None)
.await
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?
.id;
Ok((ResourceTargetVariant::ResourceSync, id))
}
}
}

View File

@@ -0,0 +1,60 @@
use monitor_client::{
api::write::*,
entities::{
permission::PermissionLevel, procedure::Procedure, user::User,
},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<CreateProcedure, User> for State {
  /// Create a new Procedure. Delegates to the generic resource
  /// creation flow.
  #[instrument(name = "CreateProcedure", skip(self, user))]
  async fn resolve(
    &self,
    CreateProcedure { name, config }: CreateProcedure,
    user: User,
  ) -> anyhow::Result<CreateProcedureResponse> {
    let create = resource::create::<Procedure>(&name, config, &user);
    create.await
  }
}
impl Resolve<CopyProcedure, User> for State {
  /// Create a new Procedure using the config of an existing one.
  #[instrument(name = "CopyProcedure", skip(self, user))]
  async fn resolve(
    &self,
    CopyProcedure { name, id }: CopyProcedure,
    user: User,
  ) -> anyhow::Result<CopyProcedureResponse> {
    // Copying requires write access on the source procedure.
    let source = resource::get_check_permissions::<Procedure>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<Procedure>(&name, source.config.into(), &user)
      .await
  }
}
impl Resolve<UpdateProcedure, User> for State {
  /// Apply a partial config update to a Procedure.
  #[instrument(name = "UpdateProcedure", skip(self, user))]
  async fn resolve(
    &self,
    UpdateProcedure { id, config }: UpdateProcedure,
    user: User,
  ) -> anyhow::Result<UpdateProcedureResponse> {
    let update = resource::update::<Procedure>(&id, config, &user);
    update.await
  }
}
impl Resolve<DeleteProcedure, User> for State {
  /// Delete a Procedure by id.
  #[instrument(name = "DeleteProcedure", skip(self, user))]
  async fn resolve(
    &self,
    DeleteProcedure { id }: DeleteProcedure,
    user: User,
  ) -> anyhow::Result<DeleteProcedureResponse> {
    let delete = resource::delete::<Procedure>(&id, &user);
    delete.await
  }
}

View File

@@ -0,0 +1,58 @@
use monitor_client::{
api::write::*,
entities::{permission::PermissionLevel, repo::Repo, user::User},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<CreateRepo, User> for State {
  /// Create a new Repo. Delegates to the generic resource
  /// creation flow.
  #[instrument(name = "CreateRepo", skip(self, user))]
  async fn resolve(
    &self,
    CreateRepo { name, config }: CreateRepo,
    user: User,
  ) -> anyhow::Result<Repo> {
    let create = resource::create::<Repo>(&name, config, &user);
    create.await
  }
}
impl Resolve<CopyRepo, User> for State {
  /// Create a new Repo using the config of an existing one.
  #[instrument(name = "CopyRepo", skip(self, user))]
  async fn resolve(
    &self,
    CopyRepo { name, id }: CopyRepo,
    user: User,
  ) -> anyhow::Result<Repo> {
    // Copying requires write access on the source repo.
    let source = resource::get_check_permissions::<Repo>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<Repo>(&name, source.config.into(), &user)
      .await
  }
}
impl Resolve<DeleteRepo, User> for State {
  /// Delete a Repo by id.
  #[instrument(name = "DeleteRepo", skip(self, user))]
  async fn resolve(
    &self,
    DeleteRepo { id }: DeleteRepo,
    user: User,
  ) -> anyhow::Result<Repo> {
    let delete = resource::delete::<Repo>(&id, &user);
    delete.await
  }
}
impl Resolve<UpdateRepo, User> for State {
  /// Apply a partial config update to a Repo.
  #[instrument(name = "UpdateRepo", skip(self, user))]
  async fn resolve(
    &self,
    UpdateRepo { id, config }: UpdateRepo,
    user: User,
  ) -> anyhow::Result<Repo> {
    let update = resource::update::<Repo>(&id, config, &user);
    update.await
  }
}

View File

@@ -0,0 +1,165 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
monitor_timestamp,
permission::PermissionLevel,
server::Server,
update::{Update, UpdateStatus},
user::User,
Operation,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client,
update::{add_update, make_update, update_update},
},
resource,
state::{db_client, State},
};
impl Resolve<CreateServer, User> for State {
  /// Create a new Server. Delegates to the generic resource
  /// creation flow.
  #[instrument(name = "CreateServer", skip(self, user))]
  async fn resolve(
    &self,
    CreateServer { name, config }: CreateServer,
    user: User,
  ) -> anyhow::Result<Server> {
    let create = resource::create::<Server>(&name, config, &user);
    create.await
  }
}
impl Resolve<DeleteServer, User> for State {
  /// Delete a Server by id.
  #[instrument(name = "DeleteServer", skip(self, user))]
  async fn resolve(
    &self,
    DeleteServer { id }: DeleteServer,
    user: User,
  ) -> anyhow::Result<Server> {
    let delete = resource::delete::<Server>(&id, &user);
    delete.await
  }
}
impl Resolve<UpdateServer, User> for State {
  /// Apply a partial config update to a Server.
  #[instrument(name = "UpdateServer", skip(self, user))]
  async fn resolve(
    &self,
    UpdateServer { id, config }: UpdateServer,
    user: User,
  ) -> anyhow::Result<Server> {
    let update = resource::update::<Server>(&id, config, &user);
    update.await
  }
}
impl Resolve<RenameServer, User> for State {
  /// Rename a server, recording the change as an Update.
  #[instrument(name = "RenameServer", skip(self, user))]
  async fn resolve(
    &self,
    RenameServer { id, name }: RenameServer,
    user: User,
  ) -> anyhow::Result<Update> {
    // Renaming requires write access on the server.
    let server = resource::get_check_permissions::<Server>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    let mut update =
      make_update(&server, Operation::RenameServer, &user);
    update_one_by_id(
      &db_client().await.servers,
      &id,
      mungos::update::Update::Set(doc! {
        "name": &name,
        "updated_at": monitor_timestamp(),
      }),
      None,
    )
    .await
    .context("failed to update server on db. this name may already be taken.")?;
    update.push_simple_log(
      "rename server",
      format!("renamed server {id} from {} to {name}", server.name),
    );
    update.finalize();
    // Persist the update and surface its generated id.
    update.id = add_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<CreateNetwork, User> for State {
  /// Create a docker network on the target server via periphery.
  #[instrument(name = "CreateNetwork", skip(self, user))]
  async fn resolve(
    &self,
    CreateNetwork { server, name }: CreateNetwork,
    user: User,
  ) -> anyhow::Result<Update> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    let periphery = periphery_client(&server)?;
    // Open an in-progress update so the action is visible
    // immediately, then finalize it with the outcome.
    let mut update =
      make_update(&server, Operation::CreateNetwork, &user);
    update.status = UpdateStatus::InProgress;
    update.id = add_update(update.clone()).await?;
    let res = periphery
      .request(api::network::CreateNetwork { name, driver: None })
      .await;
    match res {
      Ok(log) => update.logs.push(log),
      Err(e) => update.push_error_log(
        "create network",
        format_serror(&e.context("failed to create network").into()),
      ),
    };
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<DeleteNetwork, User> for State {
  /// Delete a docker network on the target server via periphery.
  #[instrument(name = "DeleteNetwork", skip(self, user))]
  async fn resolve(
    &self,
    DeleteNetwork { server, name }: DeleteNetwork,
    user: User,
  ) -> anyhow::Result<Update> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    let periphery = periphery_client(&server)?;
    // Open an in-progress update so the action is visible
    // immediately, then finalize it with the outcome.
    let mut update =
      make_update(&server, Operation::DeleteNetwork, &user);
    update.status = UpdateStatus::InProgress;
    update.id = add_update(update.clone()).await?;
    let res = periphery
      .request(api::network::DeleteNetwork { name })
      .await;
    match res {
      Ok(log) => update.logs.push(log),
      Err(e) => update.push_error_log(
        "delete network",
        format_serror(&e.context("failed to delete network").into()),
      ),
    };
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}

View File

@@ -0,0 +1,65 @@
use monitor_client::{
api::write::{
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
UpdateServerTemplate,
},
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
user::User,
},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
impl Resolve<CreateServerTemplate, User> for State {
  /// Create a new ServerTemplate. Delegates to the generic
  /// resource creation flow.
  #[instrument(name = "CreateServerTemplate", skip(self, user))]
  async fn resolve(
    &self,
    CreateServerTemplate { name, config }: CreateServerTemplate,
    user: User,
  ) -> anyhow::Result<ServerTemplate> {
    let create =
      resource::create::<ServerTemplate>(&name, config, &user);
    create.await
  }
}
impl Resolve<CopyServerTemplate, User> for State {
  /// Create a new ServerTemplate using the config of an
  /// existing one.
  #[instrument(name = "CopyServerTemplate", skip(self, user))]
  async fn resolve(
    &self,
    CopyServerTemplate { name, id }: CopyServerTemplate,
    user: User,
  ) -> anyhow::Result<ServerTemplate> {
    // Copying requires write access on the source template.
    let source = resource::get_check_permissions::<ServerTemplate>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<ServerTemplate>(
      &name,
      source.config.into(),
      &user,
    )
    .await
  }
}
impl Resolve<DeleteServerTemplate, User> for State {
  /// Delete a ServerTemplate by id.
  #[instrument(name = "DeleteServerTemplate", skip(self, user))]
  async fn resolve(
    &self,
    DeleteServerTemplate { id }: DeleteServerTemplate,
    user: User,
  ) -> anyhow::Result<ServerTemplate> {
    let delete = resource::delete::<ServerTemplate>(&id, &user);
    delete.await
  }
}
impl Resolve<UpdateServerTemplate, User> for State {
  /// Apply a partial config update to a ServerTemplate.
  #[instrument(name = "UpdateServerTemplate", skip(self, user))]
  async fn resolve(
    &self,
    UpdateServerTemplate { id, config }: UpdateServerTemplate,
    user: User,
  ) -> anyhow::Result<ServerTemplate> {
    let update =
      resource::update::<ServerTemplate>(&id, config, &user);
    update.await
  }
}

View File

@@ -0,0 +1,176 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
api::{
user::CreateApiKey,
write::{
CreateApiKeyForServiceUser, CreateApiKeyForServiceUserResponse,
CreateServiceUser, CreateServiceUserResponse,
DeleteApiKeyForServiceUser, DeleteApiKeyForServiceUserResponse,
UpdateServiceUserDescription,
UpdateServiceUserDescriptionResponse,
},
},
entities::{
monitor_timestamp,
user::{User, UserConfig},
},
};
use mungos::{
by_id::find_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
impl Resolve<CreateServiceUser, User> for State {
  // Create a non-login "service" user (admin only).
  // Returns the created user with its generated id filled in.
  #[instrument(name = "CreateServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    CreateServiceUser {
      username,
      description,
    }: CreateServiceUser,
    user: User,
  ) -> anyhow::Result<CreateServiceUserResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    // Usernames are used interchangeably with ids in lookups
    // elsewhere, so they must not parse as ObjectIds.
    if ObjectId::from_str(&username).is_ok() {
      return Err(anyhow!("username cannot be valid ObjectId"));
    }
    let config = UserConfig::Service { description };
    // Service users are enabled but have no elevated permissions.
    let mut user = User {
      id: Default::default(),
      username,
      config,
      enabled: true,
      admin: false,
      create_server_permissions: false,
      create_build_permissions: false,
      last_update_view: 0,
      recent_servers: Vec::new(),
      recent_deployments: Vec::new(),
      recent_builds: Vec::new(),
      recent_repos: Vec::new(),
      recent_procedures: Vec::new(),
      updated_at: monitor_timestamp(),
    };
    // Insert, then back-fill the db-generated id on the returned
    // value.
    user.id = db_client()
      .await
      .users
      .insert_one(&user, None)
      .await
      .context("failed to create service user on db")?
      .inserted_id
      .as_object_id()
      .context("inserted id is not object id")?
      .to_string();
    Ok(user)
  }
}
impl Resolve<UpdateServiceUserDescription, User> for State {
  // Update the description of a service user (admin only).
  // Returns the updated user document.
  #[instrument(
    name = "UpdateServiceUserDescription",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    UpdateServiceUserDescription {
      username,
      description,
    }: UpdateServiceUserDescription,
    user: User,
  ) -> anyhow::Result<UpdateServiceUserDescriptionResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    let db = db_client().await;
    let service_user = db
      .users
      .find_one(doc! { "username": &username }, None)
      .await
      .context("failed to query db for user")?
      .context("no user with given username")?;
    // Guard: this endpoint only manages service users.
    let UserConfig::Service { .. } = &service_user.config else {
      return Err(anyhow!("user is not service user"));
    };
    db.users
      .update_one(
        doc! { "username": &username },
        doc! { "$set": { "config.data.description": description } },
        None,
      )
      .await
      .context("failed to update user on db")?;
    // Re-fetch so the response reflects the stored document.
    db.users
      .find_one(doc! { "username": &username }, None)
      .await
      .context("failed to query db for user")?
      .context("user with username not found")
  }
}
impl Resolve<CreateApiKeyForServiceUser, User> for State {
  // Mint an api key on behalf of a service user (admin only).
  // Delegates to the regular CreateApiKey resolver, resolving it
  // as the service user.
  #[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    CreateApiKeyForServiceUser {
      user_id,
      name,
      expires,
    }: CreateApiKeyForServiceUser,
    user: User,
  ) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    let service_user =
      find_one_by_id(&db_client().await.users, &user_id)
        .await
        .context("failed to query db for user")?
        .context("no user found with id")?;
    // Guard: keys may only be minted this way for service users.
    let UserConfig::Service { .. } = &service_user.config else {
      return Err(anyhow!("user is not service user"));
    };
    self
      .resolve(CreateApiKey { name, expires }, service_user)
      .await
  }
}
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
  /// Delete an api key belonging to a service user. Admin only.
  #[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
    user: User,
  ) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    let db = db_client().await;
    let api_key = db
      .api_keys
      .find_one(doc! { "key": &key }, None)
      .await
      .context("failed to query db for api key")?
      .context("did not find matching api key")?;
    // Reuse the db handle bound above (previously db_client() was
    // awaited a second time here).
    let service_user = find_one_by_id(&db.users, &api_key.user_id)
      .await
      .context("failed to query db for user")?
      .context("no user found with id")?;
    // Guard: this endpoint only manages keys of service users.
    let UserConfig::Service { .. } = &service_user.config else {
      return Err(anyhow!("user is not service user"));
    };
    db.api_keys
      .delete_one(doc! { "key": key }, None)
      .await
      .context("failed to delete api key on db")?;
    Ok(DeleteApiKeyForServiceUserResponse {})
  }
}

View File

@@ -0,0 +1,325 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
api::write::*,
entities::{
self,
alert::{Alert, AlertData},
alerter::Alerter,
build::Build,
builder::Builder,
monitor_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::{stats::SeverityLevel, Server},
server_template::ServerTemplate,
sync::{
PendingSyncUpdates, PendingSyncUpdatesData,
PendingSyncUpdatesDataErr, PendingSyncUpdatesDataOk,
ResourceSync,
},
update::ResourceTarget,
user::User,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::{
alert::send_alerts,
query::get_id_to_tags,
sync::{
deployment,
resource::{get_updates_for_view, AllResourcesById},
},
},
resource,
state::{db_client, State},
};
impl Resolve<CreateResourceSync, User> for State {
  /// Create a new ResourceSync. Delegates to the generic
  /// resource creation flow.
  #[instrument(name = "CreateResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    CreateResourceSync { name, config }: CreateResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    let create =
      resource::create::<ResourceSync>(&name, config, &user);
    create.await
  }
}
impl Resolve<CopyResourceSync, User> for State {
  /// Create a new ResourceSync using the config of an
  /// existing one.
  #[instrument(name = "CopyResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    CopyResourceSync { name, id }: CopyResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    // Copying requires write access on the source sync.
    let source = resource::get_check_permissions::<ResourceSync>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    resource::create::<ResourceSync>(
      &name,
      source.config.into(),
      &user,
    )
    .await
  }
}
impl Resolve<DeleteResourceSync, User> for State {
  /// Delete a ResourceSync by id.
  #[instrument(name = "DeleteResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    DeleteResourceSync { id }: DeleteResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    let delete = resource::delete::<ResourceSync>(&id, &user);
    delete.await
  }
}
impl Resolve<UpdateResourceSync, User> for State {
  /// Apply a partial config update to a ResourceSync.
  #[instrument(name = "UpdateResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    UpdateResourceSync { id, config }: UpdateResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    let update = resource::update::<ResourceSync>(&id, config, &user);
    update.await
  }
}
impl Resolve<RefreshResourceSyncPending, User> for State {
async fn resolve(
&self,
RefreshResourceSyncPending { sync }: RefreshResourceSyncPending,
user: User,
) -> anyhow::Result<ResourceSync> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// sync should be able to do this.
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&sync, &user, PermissionLevel::Execute)
.await?;
if sync.config.repo.is_empty() {
return Err(anyhow!("resource sync repo not configured"));
}
let res = async {
let (res, _, hash, message) =
crate::helpers::sync::remote::get_remote_resources(&sync)
.await
.context("failed to get remote resources")?;
let resources = res?;
let all_resources = AllResourcesById::load().await?;
let id_to_tags = get_id_to_tags(None).await?;
let data = PendingSyncUpdatesDataOk {
server_updates: get_updates_for_view::<Server>(
resources.servers,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get server updates")?,
deployment_updates: deployment::get_updates_for_view(
resources.deployments,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get deployment updates")?,
build_updates: get_updates_for_view::<Build>(
resources.builds,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get build updates")?,
repo_updates: get_updates_for_view::<Repo>(
resources.repos,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get repo updates")?,
procedure_updates: get_updates_for_view::<Procedure>(
resources.procedures,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get procedure updates")?,
alerter_updates: get_updates_for_view::<Alerter>(
resources.alerters,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get alerter updates")?,
builder_updates: get_updates_for_view::<Builder>(
resources.builders,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get builder updates")?,
server_template_updates:
get_updates_for_view::<ServerTemplate>(
resources.server_templates,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get server template updates")?,
resource_sync_updates: get_updates_for_view::<
entities::sync::ResourceSync,
>(
resources.resource_syncs,
sync.config.delete,
&all_resources,
&id_to_tags,
)
.await
.context("failed to get resource sync updates")?,
variable_updates:
crate::helpers::sync::variables::get_updates_for_view(
resources.variables,
sync.config.delete,
)
.await
.context("failed to get variable updates")?,
user_group_updates:
crate::helpers::sync::user_groups::get_updates_for_view(
resources.user_groups,
sync.config.delete,
&all_resources,
)
.await
.context("failed to get user group updates")?,
};
anyhow::Ok((hash, message, data))
}
.await;
let (pending, has_updates) = match res {
Ok((hash, message, data)) => {
let has_updates = !data.no_updates();
(
PendingSyncUpdates {
hash: Some(hash),
message: Some(message),
data: PendingSyncUpdatesData::Ok(data),
},
has_updates,
)
}
Err(e) => (
PendingSyncUpdates {
hash: None,
message: None,
data: PendingSyncUpdatesData::Err(
PendingSyncUpdatesDataErr {
message: format_serror(&e.into()),
},
),
},
false,
),
};
let pending = to_document(&pending)
.context("failed to serialize pending to document")?;
update_one_by_id(
&db_client().await.resource_syncs,
&sync.id,
doc! { "$set": { "info.pending": pending } },
None,
)
.await?;
// check to update alert
let id = sync.id.clone();
let name = sync.name.clone();
tokio::task::spawn(async move {
let db = db_client().await;
let Some(existing) = db_client()
.await
.alerts
.find_one(
doc! {
"resolved": false,
"target.type": "ResourceSync",
"target.id": &id,
},
None,
)
.await
.context("failed to query db for alert")
.inspect_err(|e| warn!("{e:#}"))
.ok()
else {
return;
};
match (existing, has_updates) {
// OPEN A NEW ALERT
(None, true) => {
let alert = Alert {
id: Default::default(),
ts: monitor_timestamp(),
resolved: false,
level: SeverityLevel::Ok,
target: ResourceTarget::ResourceSync(id.clone()),
data: AlertData::ResourceSyncPendingUpdates { id, name },
resolved_ts: None,
};
db.alerts
.insert_one(&alert, None)
.await
.context("failed to open existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
.ok();
send_alerts(&[alert]).await;
}
// CLOSE ALERT
(Some(existing), false) => {
update_one_by_id(
&db.alerts,
&existing.id,
doc! {
"$set": {
"resolved": true,
"resolved_ts": monitor_timestamp()
}
},
None,
)
.await
.context("failed to close existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
.ok();
}
// NOTHING TO DO
_ => {}
}
});
crate::resource::get::<ResourceSync>(&sync.id).await
}
}

View File

@@ -0,0 +1,206 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
UpdateTagsOnResourceResponse,
},
entities::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, sync::ResourceSync, tag::Tag,
update::ResourceTarget, user::User,
},
};
use mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::{
helpers::query::{get_tag, get_tag_check_owner},
resource,
state::{db_client, State},
};
impl Resolve<CreateTag, User> for State {
  /// Create a tag owned by the calling user.
  #[instrument(name = "CreateTag", skip(self, user))]
  async fn resolve(
    &self,
    CreateTag { name }: CreateTag,
    user: User,
  ) -> anyhow::Result<Tag> {
    // Tag lookups accept either id or name, so a name must not be
    // mistakable for an ObjectId.
    if ObjectId::from_str(&name).is_ok() {
      return Err(anyhow!("tag name cannot be ObjectId"));
    }
    let mut tag = Tag {
      id: Default::default(),
      name,
      owner: user.id.clone(),
    };
    // Insert, then back-fill the generated id on the return value.
    let inserted = db_client()
      .await
      .tags
      .insert_one(&tag, None)
      .await
      .context("failed to create tag on db")?;
    tag.id = inserted
      .inserted_id
      .as_object_id()
      .context("inserted_id is not ObjectId")?
      .to_string();
    Ok(tag)
  }
}
impl Resolve<RenameTag, User> for State {
async fn resolve(
&self,
RenameTag { id, name }: RenameTag,
user: User,
) -> anyhow::Result<Tag> {
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("tag name cannot be ObjectId"));
}
get_tag_check_owner(&id, &user).await?;
update_one_by_id(
&db_client().await.tags,
&id,
doc! { "$set": { "name": name } },
None,
)
.await
.context("failed to rename tag on db")?;
get_tag(&id).await
}
}
impl Resolve<DeleteTag, User> for State {
  /// Delete a tag, removing it from all resources that carry it.
  #[instrument(name = "DeleteTag", skip(self, user))]
  async fn resolve(
    &self,
    DeleteTag { id }: DeleteTag,
    user: User,
  ) -> anyhow::Result<Tag> {
    let tag = get_tag_check_owner(&id, &user).await?;
    // Strip the tag from every taggable resource type before
    // deleting the tag itself. ResourceSync was previously missing
    // here even though it is taggable (see UpdateTagsOnResource),
    // which left dangling tag ids on resource syncs.
    tokio::try_join!(
      resource::remove_tag_from_all::<Server>(&id),
      resource::remove_tag_from_all::<Deployment>(&id),
      resource::remove_tag_from_all::<Build>(&id),
      resource::remove_tag_from_all::<Repo>(&id),
      resource::remove_tag_from_all::<Builder>(&id),
      resource::remove_tag_from_all::<Alerter>(&id),
      resource::remove_tag_from_all::<Procedure>(&id),
      resource::remove_tag_from_all::<ServerTemplate>(&id),
      resource::remove_tag_from_all::<ResourceSync>(&id),
    )?;
    delete_one_by_id(&db_client().await.tags, &id, None).await?;
    Ok(tag)
  }
}
impl Resolve<UpdateTagsOnResource, User> for State {
  /// Replace the tag set on the target resource. Requires write
  /// permission on the resource.
  #[instrument(name = "UpdateTagsOnResource", skip(self, user))]
  async fn resolve(
    &self,
    UpdateTagsOnResource { target, tags }: UpdateTagsOnResource,
    user: User,
  ) -> anyhow::Result<UpdateTagsOnResourceResponse> {
    match target {
      // Previously returned anyhow!("") — an empty error message,
      // useless in logs and responses.
      ResourceTarget::System(_) => {
        return Err(anyhow!(
          "cannot update tags on System resource target"
        ))
      }
      ResourceTarget::Build(id) => {
        resource::get_check_permissions::<Build>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<Build>(&id, tags, user).await?
      }
      ResourceTarget::Builder(id) => {
        resource::get_check_permissions::<Builder>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<Builder>(&id, tags, user).await?
      }
      ResourceTarget::Deployment(id) => {
        resource::get_check_permissions::<Deployment>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<Deployment>(&id, tags, user).await?
      }
      ResourceTarget::Server(id) => {
        resource::get_check_permissions::<Server>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<Server>(&id, tags, user).await?
      }
      ResourceTarget::Repo(id) => {
        resource::get_check_permissions::<Repo>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<Repo>(&id, tags, user).await?
      }
      ResourceTarget::Alerter(id) => {
        resource::get_check_permissions::<Alerter>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<Alerter>(&id, tags, user).await?
      }
      ResourceTarget::Procedure(id) => {
        resource::get_check_permissions::<Procedure>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<Procedure>(&id, tags, user).await?
      }
      ResourceTarget::ServerTemplate(id) => {
        resource::get_check_permissions::<ServerTemplate>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<ServerTemplate>(&id, tags, user)
          .await?
      }
      ResourceTarget::ResourceSync(id) => {
        resource::get_check_permissions::<ResourceSync>(
          &id,
          &user,
          PermissionLevel::Write,
        )
        .await?;
        resource::update_tags::<ResourceSync>(&id, tags, user).await?
      }
    };
    Ok(UpdateTagsOnResourceResponse {})
  }
}

View File

@@ -0,0 +1,245 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,
RemoveUserFromUserGroup, RenameUserGroup, SetUsersInUserGroup,
},
entities::{monitor_timestamp, user::User, user_group::UserGroup},
};
use mungos::{
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
find::find_collect,
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::state::{db_client, State};
impl Resolve<CreateUserGroup, User> for State {
  /// Create an empty UserGroup. Admin only.
  async fn resolve(
    &self,
    CreateUserGroup { name }: CreateUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    let db = db_client().await;
    let user_group = UserGroup {
      id: Default::default(),
      users: Default::default(),
      updated_at: monitor_timestamp(),
      name,
    };
    let id = db
      .user_groups
      .insert_one(user_group, None)
      .await
      .context("failed to create UserGroup on db")?
      .inserted_id
      .as_object_id()
      .context("inserted id is not ObjectId")?
      .to_string();
    // Read the group back so the response reflects the stored doc.
    find_one_by_id(&db.user_groups, &id)
      .await
      .context("failed to query db for user groups")?
      .context("user group at id not found")
  }
}
impl Resolve<RenameUserGroup, User> for State {
  /// Rename a UserGroup. Admin only.
  async fn resolve(
    &self,
    RenameUserGroup { id, name }: RenameUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    let db = db_client().await;
    let update = doc! { "$set": { "name": name } };
    update_one_by_id(&db.user_groups, &id, update, None)
      .await
      .context("failed to rename UserGroup on db")?;
    // Read the group back so the response reflects the stored doc.
    find_one_by_id(&db.user_groups, &id)
      .await
      .context("failed to query db for UserGroups")?
      .context("no user group with given id")
  }
}
impl Resolve<DeleteUserGroup, User> for State {
  // Delete a UserGroup (admin only) and clean up the permission
  // documents that reference it. Returns the deleted group.
  async fn resolve(
    &self,
    DeleteUserGroup { id }: DeleteUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    let db = db_client().await;
    // Fetch first so the deleted group can be returned to the
    // caller.
    let ug = find_one_by_id(&db.user_groups, &id)
      .await
      .context("failed to query db for UserGroups")?
      .context("no UserGroup found with given id")?;
    delete_one_by_id(&db.user_groups, &id, None)
      .await
      .context("failed to delete UserGroup from db")?;
    // Remove permission docs targeting the deleted group. If this
    // fails, the group itself is already gone (see error message).
    db.permissions
      .delete_many(doc! {
        "user_target.type": "UserGroup",
        "user_target.id": id,
      }, None)
      .await
      .context("failed to clean up UserGroups permissions. User Group has been deleted")?;
    Ok(ug)
  }
}
impl Resolve<AddUserToUserGroup, User> for State {
  // Add a user to a UserGroup (admin only). Both the user and the
  // group may be addressed by ObjectId or by username / name.
  async fn resolve(
    &self,
    AddUserToUserGroup { user_group, user }: AddUserToUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    let db = db_client().await;
    // Resolve the user by id or username.
    let filter = match ObjectId::from_str(&user) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "username": &user },
    };
    let user = db
      .users
      .find_one(filter, None)
      .await
      .context("failed to query mongo for users")?
      .context("no matching user found")?;
    // Resolve the group by id or name.
    let filter = match ObjectId::from_str(&user_group) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "name": &user_group },
    };
    // $addToSet keeps membership unique even on repeat calls.
    db.user_groups
      .update_one(
        filter.clone(),
        doc! { "$addToSet": { "users": &user.id } },
        None,
      )
      .await
      .context("failed to add user to group on db")?;
    // Return the updated group.
    db.user_groups
      .find_one(filter, None)
      .await
      .context("failed to query db for UserGroups")?
      .context("no user group with given id")
  }
}
impl Resolve<RemoveUserFromUserGroup, User> for State {
  /// Remove a user from a UserGroup. Admin only. Both the user and
  /// the group may be addressed by ObjectId or by username / name.
  async fn resolve(
    &self,
    RemoveUserFromUserGroup {
      user_group,
      user,
    }: RemoveUserFromUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    let db = db_client().await;
    // Resolve the user by id or username.
    let filter = match ObjectId::from_str(&user) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "username": &user },
    };
    let user = db
      .users
      .find_one(filter, None)
      .await
      .context("failed to query mongo for users")?
      .context("no matching user found")?;
    // Resolve the group by id or name.
    let filter = match ObjectId::from_str(&user_group) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "name": &user_group },
    };
    db.user_groups
      .update_one(
        filter.clone(),
        doc! { "$pull": { "users": &user.id } },
        None,
      )
      .await
      // Fixed copy-pasted message that said "add user to group".
      .context("failed to remove user from group on db")?;
    // Return the updated group.
    db.user_groups
      .find_one(filter, None)
      .await
      .context("failed to query db for UserGroups")?
      .context("no user group with given id")
  }
}
impl Resolve<SetUsersInUserGroup, User> for State {
  /// Replace the full membership list of a user group. Admin-only.
  /// Entries in `users` may be user ids or usernames; unknown
  /// usernames are silently dropped.
  async fn resolve(
    &self,
    SetUsersInUserGroup { user_group, users }: SetUsersInUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    let db = db_client().await;
    let all_users = find_collect(&db.users, None, None)
      .await
      .context("failed to query db for users")?
      .into_iter()
      .map(|u| (u.username, u.id))
      .collect::<HashMap<_, _>>();
    // Make sure all users are user ids. Usernames are mapped to ids;
    // names with no matching user are filtered out.
    let users = users
      .into_iter()
      .filter_map(|user| match ObjectId::from_str(&user) {
        Ok(_) => Some(user),
        Err(_) => all_users.get(&user).cloned(),
      })
      .collect::<Vec<_>>();
    // Accept either a group id or a group name.
    let filter = match ObjectId::from_str(&user_group) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "name": &user_group },
    };
    db.user_groups
      .update_one(
        filter.clone(),
        doc! { "$set": { "users": users } },
        None,
      )
      .await
      // Fixed: this sets the full list ($set) - the context previously
      // said "failed to add user to group on db" (copy-paste).
      .context("failed to set users in group on db")?;
    // Return the updated group document.
    db.user_groups
      .find_one(filter, None)
      .await
      .context("failed to query db for UserGroups")?
      .context("no user group with given id")
  }
}

View File

@@ -0,0 +1,169 @@
use anyhow::{anyhow, Context};
use monitor_client::{
api::write::{
CreateVariable, CreateVariableResponse, DeleteVariable,
DeleteVariableResponse, UpdateVariableDescription,
UpdateVariableDescriptionResponse, UpdateVariableValue,
UpdateVariableValueResponse,
},
entities::{
update::ResourceTarget, user::User, variable::Variable, Operation,
},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::{
query::get_variable,
update::{add_update, make_update},
},
state::{db_client, State},
};
impl Resolve<CreateVariable, User> for State {
  /// Create a new global variable and record the action on the
  /// update feed. Admin-only.
  async fn resolve(
    &self,
    CreateVariable {
      name,
      value,
      description,
    }: CreateVariable,
    user: User,
  ) -> anyhow::Result<CreateVariableResponse> {
    if !user.admin {
      return Err(anyhow!("only admins can create variables"));
    }
    let variable = Variable {
      name,
      value,
      description,
    };
    let db = db_client().await;
    db.variables
      .insert_one(&variable, None)
      .await
      .context("failed to create variable on db")?;
    // Log the creation to the update feed.
    let mut update = make_update(
      ResourceTarget::system(),
      Operation::CreateVariable,
      &user,
    );
    update
      .push_simple_log("create variable", format!("{variable:#?}"));
    update.finalize();
    add_update(update).await?;
    // Return the variable as stored in the db.
    get_variable(&variable.name).await
  }
}
impl Resolve<UpdateVariableValue, User> for State {
  /// Update the value of an existing variable, logging the old and
  /// new values to the update feed. Admin-only.
  async fn resolve(
    &self,
    UpdateVariableValue { name, value }: UpdateVariableValue,
    user: User,
  ) -> anyhow::Result<UpdateVariableValueResponse> {
    if !user.admin {
      // Fixed: error previously said "create variables" (copy-paste).
      return Err(anyhow!("only admins can update variables"));
    }
    let variable = get_variable(&name).await?;
    // Short circuit when the value is unchanged.
    if value == variable.value {
      return Err(anyhow!("no change"));
    }
    db_client()
      .await
      .variables
      .update_one(
        doc! { "name": &name },
        doc! { "$set": { "value": &value } },
        None,
      )
      .await
      .context("failed to update variable value on db")?;
    // Log the old -> new transition (html spans are rendered by the UI).
    let mut update = make_update(
      ResourceTarget::system(),
      Operation::UpdateVariableValue,
      &user,
    );
    update.push_simple_log(
      "update variable value",
      format!(
        "<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
        variable.value
      ),
    );
    update.finalize();
    add_update(update).await?;
    get_variable(&name).await
  }
}
impl Resolve<UpdateVariableDescription, User> for State {
  /// Update the description of an existing variable. Admin-only.
  /// NOTE(review): unlike the other variable mutations, this one does
  /// not write to the update feed - confirm that is intentional.
  async fn resolve(
    &self,
    UpdateVariableDescription { name, description }: UpdateVariableDescription,
    user: User,
  ) -> anyhow::Result<UpdateVariableDescriptionResponse> {
    if !user.admin {
      // Fixed: error previously said "create variables" (copy-paste).
      return Err(anyhow!("only admins can update variable descriptions"));
    }
    db_client()
      .await
      .variables
      .update_one(
        doc! { "name": &name },
        doc! { "$set": { "description": &description } },
        None,
      )
      .await
      .context("failed to update variable description on db")?;
    get_variable(&name).await
  }
}
impl Resolve<DeleteVariable, User> for State {
  /// Delete a variable by name, logging the deleted contents to the
  /// update feed, and returning the deleted variable. Admin-only.
  async fn resolve(
    &self,
    DeleteVariable { name }: DeleteVariable,
    user: User,
  ) -> anyhow::Result<DeleteVariableResponse> {
    if !user.admin {
      // Fixed: error previously said "create variables" (copy-paste).
      return Err(anyhow!("only admins can delete variables"));
    }
    // Fetch first so the deleted variable can be logged and returned.
    let variable = get_variable(&name).await?;
    db_client()
      .await
      .variables
      .delete_one(doc! { "name": &name }, None)
      .await
      .context("failed to delete variable on db")?;
    let mut update = make_update(
      ResourceTarget::system(),
      Operation::DeleteVariable,
      &user,
    );
    update
      .push_simple_log("delete variable", format!("{variable:#?}"));
    update.finalize();
    add_update(update).await?;
    Ok(variable)
  }
}

View File

@@ -0,0 +1,229 @@
use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use monitor_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio::sync::Mutex;
use crate::{
auth::{random_string, STATE_PREFIX_LENGTH},
config::core_config,
};
/// Lazily-initialized process-wide github oauth client.
/// `None` when github oauth is disabled or misconfigured.
pub fn github_oauth_client() -> &'static Option<GithubOauthClient> {
  static CLIENT: OnceLock<Option<GithubOauthClient>> = OnceLock::new();
  CLIENT.get_or_init(|| GithubOauthClient::new(core_config()))
}
/// Client for the github oauth login flow.
pub struct GithubOauthClient {
  // reqwest client used for token / user requests.
  http: reqwest::Client,
  client_id: String,
  client_secret: String,
  // Built from config.host: "{host}/auth/github/callback".
  redirect_uri: String,
  // Scope string placed in the authorize url.
  scopes: String,
  // Outstanding oauth states, consumed on callback (CSRF protection).
  states: Mutex<Vec<String>>,
  // Sent as the User-Agent header on all requests.
  user_agent: String,
}
impl GithubOauthClient {
  /// Build the client from the core config.
  /// Returns `None` when github oauth is disabled or misconfigured.
  pub fn new(
    CoreConfig {
      github_oauth:
        OauthCredentials {
          enabled,
          id,
          secret,
        },
      host,
      ..
    }: &CoreConfig,
  ) -> Option<GithubOauthClient> {
    if !enabled {
      return None;
    }
    if host.is_empty() {
      warn!("github oauth is enabled, but 'config.host' is not configured");
      return None;
    }
    if id.is_empty() {
      warn!("github oauth is enabled, but 'config.github_oauth.id' is not configured");
      return None;
    }
    if secret.is_empty() {
      warn!("github oauth is enabled, but 'config.github_oauth.secret' is not configured");
      return None;
    }
    GithubOauthClient {
      http: reqwest::Client::new(),
      client_id: id.clone(),
      client_secret: secret.clone(),
      redirect_uri: format!("{host}/auth/github/callback"),
      // Fixed: the github api rejects requests without a User-Agent
      // header; previously this defaulted to an empty string. Use the
      // same agent string as the google client.
      user_agent: String::from("monitor"),
      scopes: Default::default(),
      states: Default::default(),
    }
    .into()
  }
  /// Produce the github authorize url for the login flow, storing the
  /// generated state for later verification by `check_state`.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_login_redirect_url(
    &self,
    redirect: Option<String>,
  ) -> String {
    let state_prefix = random_string(STATE_PREFIX_LENGTH);
    // The post-login redirect target rides along inside the state param.
    let state = match redirect {
      Some(redirect) => format!("{state_prefix}{redirect}"),
      None => state_prefix,
    };
    let redirect_url = format!(
      "https://github.com/login/oauth/authorize?state={state}&client_id={}&redirect_uri={}&scope={}",
      self.client_id, self.redirect_uri, self.scopes
    );
    let mut states = self.states.lock().await;
    states.push(state);
    redirect_url
  }
  /// Check that a state was issued by this instance, consuming it so
  /// it can only be used once.
  #[instrument(level = "debug", skip(self))]
  pub async fn check_state(&self, state: &str) -> bool {
    let mut contained = false;
    self.states.lock().await.retain(|s| {
      if s.as_str() == state {
        contained = true;
        false
      } else {
        true
      }
    });
    contained
  }
  /// Exchange the oauth callback code for an access token.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_access_token(
    &self,
    code: &str,
  ) -> anyhow::Result<AccessTokenResponse> {
    self
      .post::<(), _>(
        "https://github.com/login/oauth/access_token",
        &[
          ("client_id", self.client_id.as_str()),
          ("client_secret", self.client_secret.as_str()),
          ("redirect_uri", self.redirect_uri.as_str()),
          ("code", code),
        ],
        None,
        None,
      )
      .await
      .context("failed to get github access token using code")
  }
  /// Fetch the authenticated github user for an access token.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_github_user(
    &self,
    token: &str,
  ) -> anyhow::Result<GithubUserResponse> {
    self
      .get("https://api.github.com/user", &[], Some(token))
      .await
      .context("failed to get github user using access token")
  }
  #[instrument(level = "debug", skip(self))]
  async fn get<R: DeserializeOwned>(
    &self,
    endpoint: &str,
    query: &[(&str, &str)],
    bearer_token: Option<&str>,
  ) -> anyhow::Result<R> {
    let mut req = self
      .http
      .get(endpoint)
      .query(query)
      .header("User-Agent", &self.user_agent);
    if let Some(bearer_token) = bearer_token {
      req =
        req.header("Authorization", format!("Bearer {bearer_token}"));
    }
    let res = req.send().await.context("failed to reach github")?;
    let status = res.status();
    if status == StatusCode::OK {
      let body = res
        .json()
        .await
        .context("failed to parse body into expected type")?;
      Ok(body)
    } else {
      // with_context: only build the message on the error path
      // (previously an eager `.context(format!(..))`, which allocated
      // on every call). Matches the style used in `post` below.
      let text = res.text().await.with_context(|| format!(
        "status: {status} | failed to get response text"
      ))?;
      Err(anyhow!("status: {status} | text: {text}"))
    }
  }
  async fn post<B: Serialize, R: DeserializeOwned>(
    &self,
    endpoint: &str,
    query: &[(&str, &str)],
    body: Option<&B>,
    bearer_token: Option<&str>,
  ) -> anyhow::Result<R> {
    let mut req = self
      .http
      .post(endpoint)
      .query(query)
      .header("Accept", "application/json")
      .header("User-Agent", &self.user_agent);
    if let Some(body) = body {
      req = req.json(body);
    }
    if let Some(bearer_token) = bearer_token {
      req =
        req.header("Authorization", format!("Bearer {bearer_token}"));
    }
    let res = req.send().await.context("failed to reach github")?;
    let status = res.status();
    if status == StatusCode::OK {
      let body = res
        .json()
        .await
        .context("failed to parse POST body into expected type")?;
      Ok(body)
    } else {
      let text = res.text().await.with_context(|| format!(
        "method: POST | status: {status} | failed to get response text"
      ))?;
      Err(anyhow!("method: POST | status: {status} | text: {text}"))
    }
  }
}
/// Response body of github's access-token endpoint.
/// Only the fields used by this module are deserialized.
#[derive(Deserialize)]
pub struct AccessTokenResponse {
  pub access_token: String,
  // pub scope: String,
  // pub token_type: String,
}
/// Subset of github's /user response used for account creation.
#[derive(Deserialize)]
pub struct GithubUserResponse {
  pub login: String,
  pub id: u128,
  pub avatar_url: String,
  // pub email: Option<String>,
}

View File

@@ -0,0 +1,122 @@
use anyhow::{anyhow, Context};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use monitor_client::entities::{
monitor_timestamp,
user::{User, UserConfig},
};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
use serde::Deserialize;
use serror::AddStatusCode;
use crate::{
config::core_config,
state::{db_client, jwt_client},
};
use self::client::github_oauth_client;
use super::{RedirectQuery, STATE_PREFIX_LENGTH};
pub mod client;
/// Router for the github oauth flow: /login redirects to github,
/// /callback completes the login.
pub fn router() -> Router {
  Router::new()
    .route(
      "/login",
      get(|Query(query): Query<RedirectQuery>| async {
        Redirect::to(
          &github_oauth_client()
            .as_ref()
            // OK: the router is only mounted in case that the client is populated
            .unwrap()
            .get_login_redirect_url(query.redirect)
            .await,
        )
      }),
    )
    .route(
      "/callback",
      get(|query| async {
        // Any callback failure surfaces as 401 UNAUTHORIZED.
        callback(query).await.status_code(StatusCode::UNAUTHORIZED)
      }),
    )
}
/// Query params github sends to the oauth callback.
#[derive(Debug, Deserialize)]
struct CallbackQuery {
  // State issued by get_login_redirect_url (prefix + optional redirect).
  state: String,
  // One-time code to exchange for an access token.
  code: String,
}
/// Github oauth callback: verifies state, exchanges the code for an
/// access token, looks up (or creates) the matching user, and
/// redirects back to the UI with a one-time exchange token attached.
#[instrument(name = "GithubCallback", level = "debug")]
async fn callback(
  Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
  // OK: this handler is only mounted when the client is populated.
  let client = github_oauth_client().as_ref().unwrap();
  // Reject states this instance did not hand out (CSRF protection).
  if !client.check_state(&query.state).await {
    return Err(anyhow!("state mismatch"));
  }
  let token = client.get_access_token(&query.code).await?;
  let github_user =
    client.get_github_user(&token.access_token).await?;
  let github_id = github_user.id.to_string();
  let db_client = db_client().await;
  let user = db_client
    .users
    .find_one(doc! { "config.data.github_id": &github_id }, None)
    .await
    .context("failed at find user query from mongo")?;
  let jwt = match user {
    Some(user) => jwt_client()
      .generate(user.id)
      .context("failed to generate jwt")?,
    None => {
      // First-time login: create the user. The very first user is
      // auto-enabled with admin / create permissions.
      // NOTE(review): this existence check races with concurrent
      // first signups - confirm that is acceptable.
      let ts = monitor_timestamp();
      let no_users_exist =
        db_client.users.find_one(None, None).await?.is_none();
      let user = User {
        id: Default::default(),
        username: github_user.login,
        enabled: no_users_exist,
        admin: no_users_exist,
        create_server_permissions: no_users_exist,
        create_build_permissions: no_users_exist,
        updated_at: ts,
        last_update_view: 0,
        recent_servers: Vec::new(),
        recent_deployments: Vec::new(),
        recent_builds: Vec::new(),
        recent_repos: Vec::new(),
        recent_procedures: Vec::new(),
        config: UserConfig::Github {
          github_id,
          avatar: github_user.avatar_url,
        },
      };
      let user_id = db_client
        .users
        .insert_one(user, None)
        .await
        .context("failed to create user on mongo")?
        .inserted_id
        .as_object_id()
        .context("inserted_id is not ObjectId")?
        .to_string();
      jwt_client()
        .generate(user_id)
        .context("failed to generate jwt")?
    }
  };
  let exchange_token = jwt_client().create_exchange_token(jwt).await;
  // The state is the random prefix plus the optional redirect target.
  let redirect = &query.state[STATE_PREFIX_LENGTH..];
  let redirect_url = if redirect.is_empty() {
    format!("{}?token={exchange_token}", core_config().host)
  } else {
    // Append with '&' when the redirect already has a query string.
    let splitter = if redirect.contains('?') { '&' } else { '?' };
    format!("{}{splitter}token={exchange_token}", redirect)
  };
  Ok(Redirect::to(&redirect_url))
}

View File

@@ -0,0 +1,200 @@
use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use jwt::Token;
use monitor_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{de::DeserializeOwned, Deserialize};
use serde_json::Value;
use tokio::sync::Mutex;
use crate::{
auth::{random_string, STATE_PREFIX_LENGTH},
config::core_config,
};
/// Lazily-initialized process-wide google oauth client.
/// `None` when google oauth is disabled or misconfigured.
pub fn google_oauth_client() -> &'static Option<GoogleOauthClient> {
  static CLIENT: OnceLock<Option<GoogleOauthClient>> = OnceLock::new();
  CLIENT.get_or_init(|| GoogleOauthClient::new(core_config()))
}
/// Client for the google oauth login flow.
pub struct GoogleOauthClient {
  // reqwest client used for token requests.
  http: reqwest::Client,
  client_id: String,
  client_secret: String,
  // Built from config.host: "{host}/auth/google/callback".
  redirect_uri: String,
  // Url-encoded scope string placed in the authorize url.
  scopes: String,
  // Outstanding oauth states, consumed on callback (CSRF protection).
  states: Mutex<Vec<String>>,
  // Sent as the User-Agent header on all requests.
  user_agent: String,
}
impl GoogleOauthClient {
  /// Build the client from the core config.
  /// Returns `None` when google oauth is disabled or misconfigured.
  pub fn new(
    CoreConfig {
      google_oauth:
        OauthCredentials {
          enabled,
          id,
          secret,
        },
      host,
      ..
    }: &CoreConfig,
  ) -> Option<GoogleOauthClient> {
    if !enabled {
      return None;
    }
    if host.is_empty() {
      warn!("google oauth is enabled, but 'config.host' is not configured");
      return None;
    }
    if id.is_empty() {
      warn!("google oauth is enabled, but 'config.google_oauth.id' is not configured");
      return None;
    }
    if secret.is_empty() {
      warn!("google oauth is enabled, but 'config.google_oauth.secret' is not configured");
      return None;
    }
    // Scopes must be url-encoded into the authorize url.
    let scopes = urlencoding::encode(
      &[
        "https://www.googleapis.com/auth/userinfo.profile",
        "https://www.googleapis.com/auth/userinfo.email",
      ]
      .join(" "),
    )
    .to_string();
    GoogleOauthClient {
      http: Default::default(),
      client_id: id.clone(),
      client_secret: secret.clone(),
      redirect_uri: format!("{host}/auth/google/callback"),
      user_agent: String::from("monitor"),
      states: Default::default(),
      scopes,
    }
    .into()
  }
  /// Produce the google authorize url for the login flow, storing the
  /// generated state for later verification by `check_state`.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_login_redirect_url(
    &self,
    redirect: Option<String>,
  ) -> String {
    let state_prefix = random_string(STATE_PREFIX_LENGTH);
    // The post-login redirect target rides along inside the state param.
    let state = match redirect {
      Some(redirect) => format!("{state_prefix}{redirect}"),
      None => state_prefix,
    };
    let redirect_url = format!(
      "https://accounts.google.com/o/oauth2/v2/auth?response_type=code&state={state}&client_id={}&redirect_uri={}&scope={}",
      self.client_id, self.redirect_uri, self.scopes
    );
    let mut states = self.states.lock().await;
    states.push(state);
    redirect_url
  }
  /// Check that a state was issued by this instance, consuming it so
  /// it can only be used once.
  #[instrument(level = "debug", skip(self))]
  pub async fn check_state(&self, state: &str) -> bool {
    let mut contained = false;
    self.states.lock().await.retain(|s| {
      if s.as_str() == state {
        contained = true;
        false
      } else {
        true
      }
    });
    contained
  }
  /// Exchange the oauth callback code for a token response.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_access_token(
    &self,
    code: &str,
  ) -> anyhow::Result<AccessTokenResponse> {
    self
      .post::<_>(
        "https://oauth2.googleapis.com/token",
        &[
          ("client_id", self.client_id.as_str()),
          ("client_secret", self.client_secret.as_str()),
          ("redirect_uri", self.redirect_uri.as_str()),
          ("code", code),
          ("grant_type", "authorization_code"),
        ],
        None,
      )
      .await
      .context("failed to get google access token using code")
  }
  /// Extract the google user from the id_token claims.
  /// NOTE(review): the id_token signature is not verified here - the
  /// token was just received directly from google over https. Confirm
  /// this trust model is intended.
  #[instrument(level = "debug", skip(self))]
  pub fn get_google_user(
    &self,
    id_token: &str,
  ) -> anyhow::Result<GoogleUser> {
    let t: Token<Value, GoogleUser, jwt::Unverified> =
      Token::parse_unverified(id_token)
        .context("failed to parse id_token")?;
    Ok(t.claims().to_owned())
  }
  #[instrument(level = "debug", skip(self))]
  async fn post<R: DeserializeOwned>(
    &self,
    endpoint: &str,
    body: &[(&str, &str)],
    bearer_token: Option<&str>,
  ) -> anyhow::Result<R> {
    let mut req = self
      .http
      .post(endpoint)
      .form(body)
      .header("Accept", "application/json")
      .header("User-Agent", &self.user_agent);
    if let Some(bearer_token) = bearer_token {
      req =
        req.header("Authorization", format!("Bearer {bearer_token}"));
    }
    let res = req.send().await.context("failed to reach google")?;
    let status = res.status();
    if status == StatusCode::OK {
      let body = res
        .json()
        .await
        .context("failed to parse POST body into expected type")?;
      Ok(body)
    } else {
      // with_context: only build the message on the error path
      // (previously an eager `.context(format!(..))`, which allocated
      // on every call, success or not).
      let text = res.text().await.with_context(|| format!(
        "method: POST | status: {status} | failed to get response text"
      ))?;
      Err(anyhow!("method: POST | status: {status} | text: {text}"))
    }
  }
}
/// Response body of google's token endpoint.
/// Only the id_token is used (claims carry the user identity).
#[derive(Deserialize)]
pub struct AccessTokenResponse {
  // pub access_token: String,
  pub id_token: String,
  // pub scope: String,
  // pub token_type: String,
}
/// Claims extracted from google's id_token.
#[derive(Deserialize, Clone)]
pub struct GoogleUser {
  // Google's stable subject identifier for the account.
  #[serde(rename = "sub")]
  pub id: String,
  pub email: String,
  pub picture: String,
}

View File

@@ -0,0 +1,137 @@
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use monitor_client::entities::user::{User, UserConfig};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
use serde::Deserialize;
use serror::AddStatusCode;
use crate::{
config::core_config,
state::{db_client, jwt_client},
};
use self::client::google_oauth_client;
use super::{RedirectQuery, STATE_PREFIX_LENGTH};
pub mod client;
/// Router for the google oauth flow: /login redirects to google,
/// /callback completes the login.
pub fn router() -> Router {
  Router::new()
    .route(
      "/login",
      get(|Query(query): Query<RedirectQuery>| async move {
        Redirect::to(
          &google_oauth_client()
            .as_ref()
            // OK: its not mounted unless the client is populated
            .unwrap()
            .get_login_redirect_url(query.redirect)
            .await,
        )
      }),
    )
    .route(
      "/callback",
      get(|query| async {
        // Any callback failure surfaces as 401 UNAUTHORIZED.
        callback(query).await.status_code(StatusCode::UNAUTHORIZED)
      }),
    )
}
/// Query params google sends to the oauth callback.
/// All optional: google sends `error` instead of state/code on failure.
#[derive(Debug, Deserialize)]
struct CallbackQuery {
  state: Option<String>,
  code: Option<String>,
  error: Option<String>,
}
/// Google oauth callback: verifies state, exchanges the code for an
/// id_token, looks up (or creates) the matching user, and redirects
/// back to the UI with a one-time exchange token attached.
#[instrument(name = "GoogleCallback", level = "debug")]
async fn callback(
  Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
  // Safe: the method is only called after the client is_some
  let client = google_oauth_client().as_ref().unwrap();
  if let Some(error) = query.error {
    return Err(anyhow!("auth error from google: {error}"));
  }
  let state = query
    .state
    .context("callback query does not contain state")?;
  // Reject states this instance did not hand out (CSRF protection).
  if !client.check_state(&state).await {
    return Err(anyhow!("state mismatch"));
  }
  let token = client
    .get_access_token(
      &query.code.context("callback query does not contain code")?,
    )
    .await?;
  let google_user = client.get_google_user(&token.id_token)?;
  // id is already a String; clone instead of the previous redundant
  // `.to_string()` round trip through Display.
  let google_id = google_user.id.clone();
  let db_client = db_client().await;
  let user = db_client
    .users
    .find_one(doc! { "config.data.google_id": &google_id }, None)
    .await
    .context("failed at find user query from mongo")?;
  let jwt = match user {
    Some(user) => jwt_client()
      .generate(user.id)
      .context("failed to generate jwt")?,
    None => {
      // First-time login: create the user. The very first user is
      // auto-enabled with admin / create permissions.
      let ts = unix_timestamp_ms() as i64;
      let no_users_exist =
        db_client.users.find_one(None, None).await?.is_none();
      let user = User {
        id: Default::default(),
        // Username is the local part of the email address.
        // split always yields at least one item, so next() is Some.
        // (Replaces a needless collect::<Vec<_>>() + first().)
        username: google_user
          .email
          .split('@')
          .next()
          .expect("split always yields at least one item")
          .to_string(),
        enabled: no_users_exist,
        admin: no_users_exist,
        create_server_permissions: no_users_exist,
        create_build_permissions: no_users_exist,
        updated_at: ts,
        last_update_view: 0,
        recent_servers: Vec::new(),
        recent_deployments: Vec::new(),
        recent_builds: Vec::new(),
        recent_repos: Vec::new(),
        recent_procedures: Vec::new(),
        config: UserConfig::Google {
          google_id,
          avatar: google_user.picture,
        },
      };
      let user_id = db_client
        .users
        .insert_one(user, None)
        .await
        .context("failed to create user on mongo")?
        .inserted_id
        .as_object_id()
        .context("inserted_id is not ObjectId")?
        .to_string();
      jwt_client()
        .generate(user_id)
        .context("failed to generate jwt")?
    }
  };
  let exchange_token = jwt_client().create_exchange_token(jwt).await;
  // The state is the random prefix plus the optional redirect target.
  let redirect = &state[STATE_PREFIX_LENGTH..];
  let redirect_url = if redirect.is_empty() {
    format!("{}?token={exchange_token}", core_config().host)
  } else {
    // Append with '&' when the redirect already has a query string.
    let splitter = if redirect.contains('?') { '&' } else { '?' };
    format!("{}{splitter}token={exchange_token}", redirect)
  };
  Ok(Redirect::to(&redirect_url))
}

89
bin/core/src/auth/jwt.rs Normal file
View File

@@ -0,0 +1,89 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use async_timing_util::{
get_timelength_in_ms, unix_timestamp_ms, Timelength,
};
use hmac::{Hmac, Mac};
use jwt::SignWithKey;
use monitor_client::entities::config::core::CoreConfig;
use mungos::mongodb::bson::doc;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::sync::Mutex;
use super::random_string;
// Map of exchange token -> (jwt, expiry in unix ms).
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
/// Claims carried in monitor-issued jwts.
#[derive(Serialize, Deserialize)]
pub struct JwtClaims {
  // User id the token authenticates.
  pub id: String,
  // Issued-at, unix ms.
  pub iat: u128,
  // Expiry, unix ms.
  pub exp: u128,
}
/// Signs / verifies monitor jwts and manages one-time exchange tokens.
pub struct JwtClient {
  // HMAC-SHA256 signing key, random per process.
  pub key: Hmac<Sha256>,
  // Token lifetime derived from config.jwt_valid_for.
  valid_for_ms: u128,
  // One-time tokens used to hand jwts through oauth redirects.
  exchange_tokens: ExchangeTokenMap,
}
impl JwtClient {
  /// Build the client, generating a fresh random signing key.
  /// NOTE(review): the key is random per process, so all outstanding
  /// jwts are invalidated on restart - confirm this is intended.
  pub fn new(config: &CoreConfig) -> JwtClient {
    let key = Hmac::new_from_slice(random_string(40).as_bytes())
      .expect("failed at taking HmacSha256 of jwt secret");
    JwtClient {
      key,
      // jwt_valid_for is round-tripped through its string form,
      // presumably converting between two Timelength types; the parse
      // is assumed infallible for valid config. TODO confirm.
      valid_for_ms: get_timelength_in_ms(
        config.jwt_valid_for.to_string().parse().unwrap(),
      ),
      exchange_tokens: Default::default(),
    }
  }
  /// Sign a jwt for the given user id, valid for `valid_for_ms`.
  pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
    let iat = unix_timestamp_ms();
    let exp = iat + self.valid_for_ms;
    let claims = JwtClaims {
      id: user_id,
      iat,
      exp,
    };
    let jwt = claims
      .sign_with_key(&self.key)
      .context("failed at signing claim")?;
    Ok(jwt)
  }
  /// Store a jwt under a random one-time token, valid for one minute.
  /// Used to pass the jwt through oauth redirect query params.
  /// NOTE(review): tokens that are never redeemed are never removed
  /// from the map - consider periodic cleanup.
  #[instrument(level = "debug", skip_all)]
  pub async fn create_exchange_token(&self, jwt: String) -> String {
    let exchange_token = random_string(40);
    self.exchange_tokens.lock().await.insert(
      exchange_token.clone(),
      (
        jwt,
        unix_timestamp_ms()
          + get_timelength_in_ms(Timelength::OneMinute),
      ),
    );
    exchange_token
  }
  /// Redeem (and consume) an exchange token for the stored jwt.
  /// Errors if the token is unknown or has expired.
  #[instrument(level = "debug", skip(self))]
  pub async fn redeem_exchange_token(
    &self,
    exchange_token: &str,
  ) -> anyhow::Result<String> {
    let (jwt, valid_until) = self
      .exchange_tokens
      .lock()
      .await
      .remove(exchange_token)
      .context("invalid exchange token: unrecognized")?;
    if unix_timestamp_ms() < valid_until {
      Ok(jwt)
    } else {
      Err(anyhow!("invalid exchange token: expired"))
    }
  }
}

134
bin/core/src/auth/local.rs Normal file
View File

@@ -0,0 +1,134 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::http::HeaderMap;
use monitor_client::{
api::auth::{
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
LoginLocalUserResponse,
},
entities::user::{User, UserConfig},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
config::core_config,
state::State,
state::{db_client, jwt_client},
};
const BCRYPT_COST: u32 = 10;
impl Resolve<CreateLocalUser, HeaderMap> for State {
  /// Create a username/password user and return a login jwt.
  /// Only available when local auth is enabled in config.
  #[instrument(name = "CreateLocalUser", skip(self))]
  async fn resolve(
    &self,
    CreateLocalUser { username, password }: CreateLocalUser,
    _: HeaderMap,
  ) -> anyhow::Result<CreateLocalUserResponse> {
    if !core_config().local_auth {
      return Err(anyhow!("local auth is not enabled"));
    }
    if username.is_empty() {
      return Err(anyhow!("username cannot be empty string"));
    }
    // ObjectId-shaped usernames would be ambiguous with user ids in
    // apis that accept either (eg AddUserToUserGroup).
    if ObjectId::from_str(&username).is_ok() {
      return Err(anyhow!("username cannot be valid ObjectId"));
    }
    let password = bcrypt::hash(password, BCRYPT_COST)
      .context("failed to hash password")?;
    // The very first user is auto-enabled with admin / create perms.
    // NOTE(review): this existence check races with concurrent first
    // signups - confirm that is acceptable.
    let no_users_exist = db_client()
      .await
      .users
      .find_one(None, None)
      .await?
      .is_none();
    let ts = unix_timestamp_ms() as i64;
    let user = User {
      id: Default::default(),
      username,
      enabled: no_users_exist,
      admin: no_users_exist,
      create_server_permissions: no_users_exist,
      create_build_permissions: no_users_exist,
      updated_at: ts,
      last_update_view: 0,
      recent_servers: Vec::new(),
      recent_deployments: Vec::new(),
      recent_builds: Vec::new(),
      recent_repos: Vec::new(),
      recent_procedures: Vec::new(),
      config: UserConfig::Local { password },
    };
    let user_id = db_client()
      .await
      .users
      .insert_one(user, None)
      .await
      .context("failed to create user")?
      .inserted_id
      .as_object_id()
      .context("inserted_id is not ObjectId")?
      .to_string();
    let jwt = jwt_client()
      .generate(user_id)
      .context("failed to generate jwt for user")?;
    Ok(CreateLocalUserResponse { jwt })
  }
}
impl Resolve<LoginLocalUser, HeaderMap> for State {
  /// Verify a username/password pair and return a login jwt.
  /// Only available when local auth is enabled in config.
  #[instrument(name = "LoginLocalUser", level = "debug", skip(self))]
  async fn resolve(
    &self,
    LoginLocalUser { username, password }: LoginLocalUser,
    _: HeaderMap,
  ) -> anyhow::Result<LoginLocalUserResponse> {
    if !core_config().local_auth {
      return Err(anyhow!("local auth is not enabled"));
    }
    // NOTE(review): the not-found error reveals whether a username
    // exists (user enumeration). Consider a uniform "invalid
    // credentials" response if this is a concern.
    let user = db_client()
      .await
      .users
      .find_one(doc! { "username": &username }, None)
      .await
      .context("failed at db query for users")?
      .with_context(|| {
        format!("did not find user with username {username}")
      })?;
    // Only local-auth users carry a password hash.
    let UserConfig::Local {
      password: user_pw_hash,
    } = user.config
    else {
      return Err(anyhow!(
        "non-local auth users can not log in with a password"
      ));
    };
    let verified = bcrypt::verify(password, &user_pw_hash)
      .context("failed at verify password")?;
    if !verified {
      return Err(anyhow!("invalid credentials"));
    }
    let jwt = jwt_client()
      .generate(user.id)
      .context("failed at generating jwt for user")?;
    Ok(LoginLocalUserResponse { jwt })
  }
}

165
bin/core/src/auth/mod.rs Normal file
View File

@@ -0,0 +1,165 @@
use ::jwt::VerifyWithKey;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Request, http::HeaderMap, middleware::Next,
response::Response,
};
use monitor_client::entities::{monitor_timestamp, user::User};
use mungos::mongodb::bson::doc;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use reqwest::StatusCode;
use serde::Deserialize;
use serror::AddStatusCode;
use crate::{
helpers::query::get_user,
state::{db_client, jwt_client},
};
use self::jwt::JwtClaims;
pub mod github;
pub mod google;
pub mod jwt;
mod local;
// Length of the random prefix prepended to oauth state strings;
// the remainder of the state carries the post-login redirect target.
const STATE_PREFIX_LENGTH: usize = 20;
/// Query params accepted by the oauth /login routes.
#[derive(Deserialize)]
pub struct RedirectQuery {
  // Where to send the browser after a successful login.
  pub redirect: Option<String>,
}
/// Axum middleware: authenticate the request (jwt or api key),
/// require the user to be enabled, and attach the [User] to request
/// extensions for downstream handlers. Fails with 401 otherwise.
#[instrument(level = "debug")]
pub async fn auth_request(
  headers: HeaderMap,
  mut req: Request,
  next: Next,
) -> serror::Result<Response> {
  let user = authenticate_check_enabled(&headers)
    .await
    .status_code(StatusCode::UNAUTHORIZED)?;
  req.extensions_mut().insert(user);
  Ok(next.run(req).await)
}
/// Generate a random alphanumeric string of the given length.
pub fn random_string(length: usize) -> String {
  let mut rng = thread_rng();
  (0..length)
    .map(|_| char::from(rng.sample(Alphanumeric)))
    .collect()
}
/// Resolve the requesting user id from headers.
/// A jwt in `authorization` takes precedence; otherwise
/// `x-api-key` + `x-api-secret` are used.
#[instrument(level = "debug")]
pub async fn get_user_id_from_headers(
  headers: &HeaderMap,
) -> anyhow::Result<String> {
  match (
    headers.get("authorization"),
    headers.get("x-api-key"),
    headers.get("x-api-secret"),
  ) {
    (Some(jwt), _, _) => {
      // USE JWT
      let jwt = jwt.to_str().context("jwt is not str")?;
      auth_jwt_get_user_id(jwt)
        .await
        .context("failed to authenticate jwt")
    }
    (None, Some(key), Some(secret)) => {
      // USE API KEY / SECRET
      let key = key.to_str().context("key is not str")?;
      let secret = secret.to_str().context("secret is not str")?;
      auth_api_key_get_user_id(key, secret)
        .await
        .context("failed to authenticate api key")
    }
    _ => {
      // AUTH FAIL
      Err(anyhow!("must attach either AUTHORIZATION header with jwt OR pass X-API-KEY and X-API-SECRET"))
    }
  }
}
/// Authenticate via headers and require the user to be enabled.
// NOTE(review): duplicates the body of `check_enabled` below -
// could delegate to it.
#[instrument(level = "debug")]
pub async fn authenticate_check_enabled(
  headers: &HeaderMap,
) -> anyhow::Result<User> {
  let user_id = get_user_id_from_headers(headers).await?;
  let user = get_user(&user_id).await?;
  if user.enabled {
    Ok(user)
  } else {
    Err(anyhow!("user not enabled"))
  }
}
/// Verify a jwt's signature and expiry, returning the user id claim.
#[instrument(level = "debug")]
pub async fn auth_jwt_get_user_id(
  jwt: &str,
) -> anyhow::Result<String> {
  let claims: JwtClaims = jwt
    .verify_with_key(&jwt_client().key)
    .context("failed to verify claims")?;
  // exp is unix ms; token is valid strictly before expiry.
  if claims.exp > unix_timestamp_ms() {
    Ok(claims.id)
  } else {
    Err(anyhow!("token has expired"))
  }
}
/// Verify a jwt and require the matching user to be enabled.
#[instrument(level = "debug")]
pub async fn auth_jwt_check_enabled(
  jwt: &str,
) -> anyhow::Result<User> {
  let user_id = auth_jwt_get_user_id(jwt).await?;
  check_enabled(user_id).await
}
/// Authenticate an api key/secret pair, returning the owning user id.
/// The secret is stored as a bcrypt hash.
#[instrument(level = "debug")]
pub async fn auth_api_key_get_user_id(
  key: &str,
  secret: &str,
) -> anyhow::Result<String> {
  let key = db_client()
    .await
    .api_keys
    .find_one(doc! { "key": key }, None)
    .await
    .context("failed to query db")?
    .context("no api key matching key")?;
  // expires == 0 means the key never expires.
  if key.expires != 0 && key.expires < monitor_timestamp() {
    return Err(anyhow!("api key expired"));
  }
  if bcrypt::verify(secret, &key.secret)
    .context("failed to verify secret hash")?
  {
    // secret matches
    Ok(key.user_id)
  } else {
    // secret mismatch
    Err(anyhow!("invalid api secret"))
  }
}
/// Authenticate an api key/secret pair and require the owning user
/// to be enabled.
#[instrument(level = "debug")]
pub async fn auth_api_key_check_enabled(
  key: &str,
  secret: &str,
) -> anyhow::Result<User> {
  let user_id = auth_api_key_get_user_id(key, secret).await?;
  check_enabled(user_id).await
}
/// Fetch the user by id and require their account to be enabled.
#[instrument(level = "debug")]
async fn check_enabled(user_id: String) -> anyhow::Result<User> {
  let user = get_user(&user_id).await?;
  if !user.enabled {
    return Err(anyhow!("user not enabled"));
  }
  Ok(user)
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,82 @@
use anyhow::{anyhow, Context};
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ecr::Client as EcrClient;
use run_command::async_run_command;
/// Build an ECR client for the given region using the given
/// static credentials.
// NOTE(review): credentials are injected via process-wide env vars,
// which mutates global state and can race with concurrent callers
// using different credentials - consider a credentials provider.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
async fn make_ecr_client(
  region: String,
  access_key_id: &str,
  secret_access_key: &str,
) -> EcrClient {
  std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
  std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
  let region = Region::new(region);
  let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
    .region(region)
    .load()
    .await;
  EcrClient::new(&config)
}
/// Create the ECR repository if it does not already exist.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn maybe_create_repo(
  repo: &str,
  region: String,
  access_key_id: &str,
  secret_access_key: &str,
) -> anyhow::Result<()> {
  let client =
    make_ecr_client(region, access_key_id, secret_access_key).await;
  // NOTE(review): describe_repositories is paginated; accounts with
  // more repositories than one page may not see `repo` here and will
  // attempt a duplicate create. Consider paginating.
  let existing = client
    .describe_repositories()
    .send()
    .await
    .context("failed to describe existing repositories")?
    .repositories
    .unwrap_or_default();
  // Nothing to do when the repository already exists.
  // (Option compare replaces the previous if-let / bool closure.)
  if existing
    .iter()
    .any(|r| r.repository_name() == Some(repo))
  {
    return Ok(());
  }
  client
    .create_repository()
    .repository_name(repo)
    .send()
    .await
    .context("failed to create repository")?;
  Ok(())
}
/// Gets a token for docker login.
///
/// Requires the aws cli be installed on the host.
// NOTE(review): the credentials are interpolated into a shell command
// line; confirm they cannot leak via logs or process listings. The
// returned stdout presumably includes a trailing newline - verify
// callers handle that.
#[tracing::instrument(skip(access_key_id, secret_access_key))]
pub async fn get_ecr_token(
  region: &str,
  access_key_id: &str,
  secret_access_key: &str,
) -> anyhow::Result<String> {
  let log = async_run_command(&format!(
    "AWS_ACCESS_KEY_ID={access_key_id} AWS_SECRET_ACCESS_KEY={secret_access_key} aws ecr get-login-password --region {region}"
  ))
  .await;
  if log.success() {
    Ok(log.stdout)
  } else {
    Err(
      anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr)
        .context("failed to get aws ecr login token"),
    )
  }
}

View File

@@ -0,0 +1,2 @@
pub mod ec2;
pub mod ecr;

View File

@@ -0,0 +1,157 @@
use anyhow::{anyhow, Context};
use axum::http::{HeaderName, HeaderValue};
use reqwest::{RequestBuilder, StatusCode};
use serde::{de::DeserializeOwned, Serialize};
use super::{
common::{
HetznerActionResponse, HetznerDatacenterResponse,
HetznerServerResponse, HetznerVolumeResponse,
},
create_server::{CreateServerBody, CreateServerResponse},
create_volume::{CreateVolumeBody, CreateVolumeResponse},
};
// Root of the Hetzner Cloud API.
const BASE_URL: &str = "https://api.hetzner.cloud/v1";
/// Thin wrapper over reqwest with the Hetzner auth header preset.
pub struct HetznerClient(reqwest::Client);
impl HetznerClient {
  /// Builds a client with the bearer `token` installed as a default
  /// Authorization header on every request.
  ///
  /// Panics if the token produces an invalid header value or the
  /// reqwest client fails to build.
  pub fn new(token: &str) -> HetznerClient {
    HetznerClient(
      reqwest::ClientBuilder::new()
        .default_headers(
          [(
            HeaderName::from_static("authorization"),
            HeaderValue::from_str(&format!("Bearer {token}"))
              .unwrap(),
          )]
          .into_iter()
          .collect(),
        )
        .build()
        .context("failed to build Hetzner request client")
        .unwrap(),
    )
  }

  /// GET /servers/{id}
  pub async fn get_server(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerServerResponse> {
    self.get(&format!("/servers/{id}")).await
  }

  /// POST /servers
  pub async fn create_server(
    &self,
    body: &CreateServerBody,
  ) -> anyhow::Result<CreateServerResponse> {
    self.post("/servers", body).await
  }

  /// DELETE /servers/{id}
  #[allow(unused)]
  pub async fn delete_server(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerActionResponse> {
    self.delete(&format!("/servers/{id}")).await
  }

  /// GET /volumes/{id}
  pub async fn get_volume(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerVolumeResponse> {
    self.get(&format!("/volumes/{id}")).await
  }

  /// POST /volumes
  pub async fn create_volume(
    &self,
    body: &CreateVolumeBody,
  ) -> anyhow::Result<CreateVolumeResponse> {
    self.post("/volumes", body).await
  }

  /// DELETE /volumes/{id}
  ///
  /// Implemented inline (not via the shared `delete` helper) because
  /// success is signalled by 204 No Content — there is no json body
  /// to deserialize.
  #[allow(unused)]
  pub async fn delete_volume(&self, id: i64) -> anyhow::Result<()> {
    let res = self
      .0
      .delete(format!("{BASE_URL}/volumes/{id}"))
      .send()
      .await
      .context("failed at request to delete volume")?;
    let status = res.status();
    if status == StatusCode::NO_CONTENT {
      Ok(())
    } else {
      let text = res
        .text()
        .await
        .context("failed to get response body as text")?;
      Err(anyhow!("{status} | {text}"))
    }
  }

  /// GET /datacenters
  #[allow(unused)]
  pub async fn list_datacenters(
    &self,
  ) -> anyhow::Result<HetznerDatacenterResponse> {
    self.get("/datacenters").await
  }

  // Shared GET helper: prefixes path with BASE_URL and parses the
  // json response body into Res.
  async fn get<Res: DeserializeOwned>(
    &self,
    path: &str,
  ) -> anyhow::Result<Res> {
    let req = self.0.get(format!("{BASE_URL}{path}"));
    handle_req(req).await.with_context(|| {
      format!("failed at GET request to Hetzner | path: {path}")
    })
  }

  // Shared POST helper with a json request body.
  async fn post<Body: Serialize, Res: DeserializeOwned>(
    &self,
    path: &str,
    body: &Body,
  ) -> anyhow::Result<Res> {
    let req = self.0.post(format!("{BASE_URL}{path}")).json(&body);
    handle_req(req).await.with_context(|| {
      format!("failed at POST request to Hetzner | path: {path}")
    })
  }

  // Shared DELETE helper for endpoints that do return a json body.
  #[allow(unused)]
  async fn delete<Res: DeserializeOwned>(
    &self,
    path: &str,
  ) -> anyhow::Result<Res> {
    let req = self.0.delete(format!("{BASE_URL}{path}"));
    handle_req(req).await.with_context(|| {
      format!("failed at DELETE request to Hetzner | path: {path}")
    })
  }
}
/// Sends `req` and deserializes a successful response body as json.
///
/// On a non-success status, the body is read as text; when it parses
/// as json it is included Debug-formatted, otherwise raw, in the
/// returned error alongside the status code.
async fn handle_req<Res: DeserializeOwned>(
  req: RequestBuilder,
) -> anyhow::Result<Res> {
  let res = req.send().await?;
  let status = res.status();
  if !status.is_success() {
    let text = res
      .text()
      .await
      .context("failed to get response body as text")?;
    let err = match serde_json::from_str::<serde_json::Value>(&text) {
      Ok(json_error) => anyhow!("{status} | {json_error:?}"),
      Err(_) => anyhow!("{status} | {text}"),
    };
    return Err(err);
  }
  res.json().await.context("failed to parse response to json")
}

View File

@@ -0,0 +1,277 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
/// Wrapper for the `GET /servers/{id}` response body.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerResponse {
  pub server: HetznerServer,
}

/// Subset of the Hetzner server object this application reads.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServer {
  pub id: i64,
  pub name: String,
  // Primary disk size — presumably GB; confirm against the API docs.
  pub primary_disk_size: f64,
  pub image: Option<HetznerImage>,
  pub private_net: Vec<HetznerPrivateNet>,
  pub public_net: HetznerPublicNet,
  pub server_type: HetznerServerTypeDetails,
  pub status: HetznerServerStatus,
  // Ids of attached volumes; defaults to empty when absent.
  #[serde(default)]
  pub volumes: Vec<i64>,
}
/// Server type (plan) details embedded in the server response.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerTypeDetails {
  pub architecture: String,
  pub cores: i64,
  pub cpu_type: String,
  pub description: String,
  pub disk: f64,
  pub id: i64,
  pub memory: f64,
  pub name: String,
}

/// A server's attachment to a private network.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPrivateNet {
  pub alias_ips: Vec<String>,
  pub ip: String,
  pub mac_address: String,
  pub network: i64,
}

/// A server's public network configuration.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPublicNet {
  #[serde(default)]
  pub firewalls: Vec<HetznerFirewall>,
  pub floating_ips: Vec<i64>,
  // None when the server has no public ipv4 attached.
  pub ipv4: Option<HetznerIpv4>,
}

/// A firewall applied to the server's public interface.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerFirewall {
  pub id: i64,
  pub status: String,
}

/// Public ipv4 address details.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerIpv4 {
  pub id: Option<i64>,
  pub blocked: bool,
  pub dns_ptr: String,
  pub ip: String,
}
/// Image details embedded in the server response.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerImage {
  pub id: i64,
  pub description: String,
  pub name: Option<String>,
  pub os_flavor: String,
  pub os_version: Option<String>,
  pub rapid_deploy: Option<bool>,
}

/// Wrapper for endpoints which return a single action object.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerActionResponse {
  pub action: HetznerAction,
}

/// An asynchronous Hetzner action (e.g. a server delete).
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerAction {
  pub command: String,
  // Presumably set only when the action failed — confirm against docs.
  pub error: Option<HetznerError>,
  // Presumably None while the action is still running.
  pub finished: Option<String>,
  pub id: i64,
  pub progress: i32,
  pub resources: Vec<HetznerResource>,
  pub started: String,
  pub status: HetznerActionStatus,
}

/// Error details attached to a failed action.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerError {
  pub code: String,
  pub message: String,
}

/// A resource (server, volume, ...) referenced by an action.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerResource {
  pub id: i64,
  // The API field is `type`, which is a Rust keyword.
  #[serde(rename = "type")]
  pub ty: String,
}
/// Wrapper for the `GET /volumes/{id}` response body.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolumeResponse {
  pub volume: HetznerVolume,
}

/// A Hetzner volume as returned by the API.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolume {
  /// Name of the Resource. Must be unique per Project.
  pub name: String,
  /// Point in time when the Resource was created (in ISO-8601 format).
  pub created: String,
  /// Filesystem of the Volume if formatted on creation, null if not formatted on creation
  pub format: Option<HetznerVolumeFormat>,
  /// ID of the Volume.
  pub id: i64,
  /// User-defined labels ( key/value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Device path on the file system for the Volume
  pub linux_device: String,
  /// Protection configuration for the Resource.
  pub protection: HetznerProtection,
  /// ID of the Server the Volume is attached to, null if it is not attached at all
  pub server: Option<i64>,
  /// Size in GB of the Volume
  pub size: i64,
  /// Current status of the Volume. Allowed: `creating`, `available`
  pub status: HetznerVolumeStatus,
}

/// Deletion protection settings for a resource.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerProtection {
  /// Prevent the Resource from being deleted.
  pub delete: bool,
}

/// Wrapper for the `GET /datacenters` response body.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterResponse {
  pub datacenters: Vec<HetznerDatacenterDetails>,
}

/// A datacenter entry from the datacenters listing.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterDetails {
  pub id: i64,
  pub name: String,
  // Kept as an untyped json map rather than a dedicated struct.
  pub location: serde_json::Map<String, serde_json::Value>,
}
/// Hetzner locations, serialized as the API's short codes
/// (e.g. "nbg1").
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum HetznerLocation {
  #[serde(rename = "nbg1")]
  Nuremberg1,
  #[serde(rename = "hel1")]
  Helsinki1,
  #[serde(rename = "fsn1")]
  Falkenstein1,
  #[serde(rename = "ash")]
  Ashburn,
  #[serde(rename = "hil")]
  Hillsboro,
}

/// Hetzner datacenters (a specific facility within a location),
/// serialized as the API's codes (e.g. "nbg1-dc3").
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum HetznerDatacenter {
  #[serde(rename = "nbg1-dc3")]
  Nuremberg1Dc3,
  #[serde(rename = "hel1-dc2")]
  Helsinki1Dc2,
  #[serde(rename = "fsn1-dc14")]
  Falkenstein1Dc14,
  #[serde(rename = "ash-dc1")]
  AshburnDc1,
  #[serde(rename = "hil-dc1")]
  HillsboroDc1,
}
impl From<HetznerDatacenter> for HetznerLocation {
fn from(value: HetznerDatacenter) -> Self {
match value {
HetznerDatacenter::Nuremberg1Dc3 => HetznerLocation::Nuremberg1,
HetznerDatacenter::Helsinki1Dc2 => HetznerLocation::Helsinki1,
HetznerDatacenter::Falkenstein1Dc14 => {
HetznerLocation::Falkenstein1
}
HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn,
HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro,
}
}
}
/// Filesystem a volume can be formatted with on creation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeFormat {
  Xfs,
  Ext4,
}

/// Volume lifecycle status; volumes are usable once `Available`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeStatus {
  Creating,
  Available,
}

/// Server lifecycle status as reported by the API.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerServerStatus {
  Running,
  Initializing,
  Starting,
  Stopping,
  Off,
  Deleting,
  Migrating,
  Rebuilding,
  Unknown,
}

/// Status of an asynchronous Hetzner action.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerActionStatus {
  Running,
  Success,
  Error,
}
/// Hetzner server plans, serialized as the API's plan codes
/// (e.g. "cx11", "ccx13").
///
/// Variant names encode shared/dedicated vCPU, vendor/arch, core
/// count, RAM (GB) and disk (GB).
// The container-level `#[serde(rename_all = "UPPERCASE")]` was
// removed: every variant has an explicit lowercase `rename`, which
// always overrides `rename_all`, so the attribute had no effect and
// was misleading. Wire format is unchanged.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(clippy::enum_variant_names)]
pub enum HetznerServerType {
  // Shared
  #[serde(rename = "cx11")]
  SharedIntel1Core2Ram20Disk,
  #[serde(rename = "cpx11")]
  SharedAmd2Core2Ram40Disk,
  #[serde(rename = "cax11")]
  SharedArm2Core4Ram40Disk,
  #[serde(rename = "cx21")]
  SharedIntel2Core4Ram40Disk,
  #[serde(rename = "cpx21")]
  SharedAmd3Core4Ram80Disk,
  #[serde(rename = "cax21")]
  SharedArm4Core8Ram80Disk,
  #[serde(rename = "cx31")]
  SharedIntel2Core8Ram80Disk,
  #[serde(rename = "cpx31")]
  SharedAmd4Core8Ram160Disk,
  #[serde(rename = "cax31")]
  SharedArm8Core16Ram160Disk,
  #[serde(rename = "cx41")]
  SharedIntel4Core16Ram160Disk,
  #[serde(rename = "cpx41")]
  SharedAmd8Core16Ram240Disk,
  #[serde(rename = "cax41")]
  SharedArm16Core32Ram320Disk,
  #[serde(rename = "cx51")]
  SharedIntel8Core32Ram240Disk,
  #[serde(rename = "cpx51")]
  SharedAmd16Core32Ram360Disk,
  // Dedicated
  #[serde(rename = "ccx13")]
  DedicatedAmd2Core8Ram80Disk,
  #[serde(rename = "ccx23")]
  DedicatedAmd4Core16Ram160Disk,
  #[serde(rename = "ccx33")]
  DedicatedAmd8Core32Ram240Disk,
  #[serde(rename = "ccx43")]
  DedicatedAmd16Core64Ram360Disk,
  #[serde(rename = "ccx53")]
  DedicatedAmd32Core128Ram600Disk,
  #[serde(rename = "ccx63")]
  DedicatedAmd48Core192Ram960Disk,
}

View File

@@ -0,0 +1,76 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerDatacenter, HetznerLocation, HetznerServer,
HetznerServerType,
};
/// Request body for `POST /servers`.
#[derive(Debug, Clone, Serialize)]
pub struct CreateServerBody {
  /// Name of the Server to create (must be unique per Project and a valid hostname as per RFC 1123)
  pub name: String,
  /// Auto-mount Volumes after attach
  #[serde(skip_serializing_if = "Option::is_none")]
  pub automount: Option<bool>,
  /// ID or name of Datacenter to create Server in (must not be used together with location)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub datacenter: Option<HetznerDatacenter>,
  /// ID or name of Location to create Server in (must not be used together with datacenter)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub location: Option<HetznerLocation>,
  /// Firewalls which should be applied on the Server's public network interface at creation time
  pub firewalls: Vec<Firewall>,
  /// ID or name of the Image the Server is created from
  pub image: String,
  /// User-defined labels (key-value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Network IDs which should be attached to the Server private network interface at the creation time
  pub networks: Vec<i64>,
  /// ID of the Placement Group the server should be in
  #[serde(skip_serializing_if = "Option::is_none")]
  pub placement_group: Option<i64>,
  /// Public Network options
  #[serde(skip_serializing_if = "Option::is_none")]
  pub public_net: Option<PublicNet>,
  /// ID or name of the Server type this Server should be created with
  pub server_type: HetznerServerType,
  /// SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time
  pub ssh_keys: Vec<String>,
  /// This automatically triggers a Power on a Server-Server Action after the creation is finished and is returned in the next_actions response object.
  pub start_after_create: bool,
  /// Cloud-Init user data to use during Server creation. This field is limited to 32KiB.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub user_data: Option<String>,
  /// Volume IDs which should be attached to the Server at the creation time. Volumes must be in the same Location.
  pub volumes: Vec<i64>,
}
/// Reference to a firewall to apply at server creation.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct Firewall {
  /// ID of the Firewall
  pub firewall: i64,
}

/// Public network options for server creation.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct PublicNet {
  /// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached.
  pub enable_ipv4: bool,
  /// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached.
  pub enable_ipv6: bool,
  /// ID of the ipv4 Primary IP to use. If omitted and enable_ipv4 is true, a new ipv4 Primary IP will automatically be created.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub ipv4: Option<i64>,
  /// ID of the ipv6 Primary IP to use. If omitted and enable_ipv6 is true, a new ipv6 Primary IP will automatically be created.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub ipv6: Option<i64>,
}

/// Response body for `POST /servers`.
#[derive(Debug, Clone, Deserialize)]
pub struct CreateServerResponse {
  pub action: HetznerAction,
  pub next_actions: Vec<HetznerAction>,
  // Presumably only set when no ssh keys were provided — confirm
  // against the API docs.
  pub root_password: Option<String>,
  pub server: HetznerServer,
}

View File

@@ -0,0 +1,36 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerLocation, HetznerVolume, HetznerVolumeFormat,
};
/// Request body for `POST /volumes`.
#[derive(Debug, Clone, Serialize)]
pub struct CreateVolumeBody {
  /// Name of the volume
  pub name: String,
  /// Auto-mount Volume after attach. server must be provided.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub automount: Option<bool>,
  /// Format Volume after creation. One of: xfs, ext4
  #[serde(skip_serializing_if = "Option::is_none")]
  pub format: Option<HetznerVolumeFormat>,
  /// User-defined labels (key-value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Location to create the Volume in (can be omitted if Server is specified)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub location: Option<HetznerLocation>,
  /// Server to which to attach the Volume once it's created (Volume will be created in the same Location as the server)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub server: Option<i64>,
  /// Size of the Volume in GB
  pub size: i64,
}

/// Response body for `POST /volumes`.
#[derive(Debug, Clone, Deserialize)]
pub struct CreateVolumeResponse {
  pub action: HetznerAction,
  pub next_actions: Vec<HetznerAction>,
  pub volume: HetznerVolume,
}

View File

@@ -0,0 +1,282 @@
use std::{
sync::{Arc, Mutex, OnceLock},
time::Duration,
};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use monitor_client::entities::server_template::hetzner::{
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,
HetznerVolumeFormat,
};
use crate::{
cloud::hetzner::{
common::HetznerServerStatus, create_server::CreateServerBody,
create_volume::CreateVolumeBody,
},
config::core_config,
};
use self::{client::HetznerClient, common::HetznerVolumeStatus};
mod client;
mod common;
mod create_server;
mod create_volume;
/// Lazily-initialized global Hetzner client.
///
/// Built on first access from the core config's hetzner token;
/// returns None when no token is configured.
fn hetzner() -> Option<&'static HetznerClient> {
  static HETZNER_CLIENT: OnceLock<Option<HetznerClient>> =
    OnceLock::new();
  let client = HETZNER_CLIENT.get_or_init(|| {
    let token = &core_config().hetzner.token;
    if token.is_empty() {
      None
    } else {
      Some(HetznerClient::new(token))
    }
  });
  client.as_ref()
}
/// Minimal server info returned to callers after a launch.
pub struct HetznerServerMinimal {
  pub id: i64,
  // The ip used to reach the server — public or first private,
  // depending on the template's `use_public_ip`.
  pub ip: String,
}

/// Seconds between status polls while waiting on volumes / server.
const POLL_RATE_SECS: u64 = 3;
/// Max poll attempts before giving up (~5 minutes at 3s per try).
const MAX_POLL_TRIES: usize = 100;
/// Launches a Hetzner server from the given template config.
///
/// Creates any configured volumes first and polls until they are
/// `Available`, then creates the server (attaching the volumes) and
/// polls until it reports `Running`. Returns the server id together
/// with the ip chosen per `use_public_ip`.
///
/// Errors if volume / server creation fails, or if readiness is not
/// reached within MAX_POLL_TRIES polls.
#[instrument]
pub async fn launch_hetzner_server(
  name: &str,
  config: HetznerServerTemplateConfig,
) -> anyhow::Result<HetznerServerMinimal> {
  let hetzner =
    *hetzner().as_ref().context("Hetzner token not configured")?;
  let HetznerServerTemplateConfig {
    image,
    datacenter,
    private_network_ids,
    placement_group,
    enable_public_ipv4,
    enable_public_ipv6,
    firewall_ids,
    server_type,
    ssh_keys,
    user_data,
    use_public_ip,
    labels,
    volumes,
    port: _,
  } = config;
  let datacenter = hetzner_datacenter(datacenter);
  // Create volumes and get their ids
  let mut volume_ids = Vec::new();
  for volume in volumes {
    let body = CreateVolumeBody {
      name: volume.name,
      format: Some(hetzner_format(volume.format)),
      // Volumes are created in the same location as the server.
      location: Some(datacenter.into()),
      labels: volume.labels,
      size: volume.size_gb,
      automount: None,
      server: None,
    };
    let id = hetzner
      .create_volume(&body)
      .await
      .context("failed to create hetzner volume")?
      .volume
      .id;
    volume_ids.push(id);
  }
  // Make sure volumes are available before continue.
  // Ids are removed from this shared list as each volume becomes
  // Available; the loop exits early once the list is drained.
  let vol_ids_poll = Arc::new(Mutex::new(volume_ids.clone()));
  for _ in 0..MAX_POLL_TRIES {
    if vol_ids_poll.lock().unwrap().is_empty() {
      break;
    }
    tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
    let ids = vol_ids_poll.lock().unwrap().clone();
    // Poll all remaining volumes concurrently. A failed status
    // request is ignored here; that volume is retried next pass.
    let futures = ids.into_iter().map(|id| {
      let vol_ids = vol_ids_poll.clone();
      async move {
        let Ok(res) = hetzner.get_volume(id).await else {
          return;
        };
        if matches!(res.volume.status, HetznerVolumeStatus::Available)
        {
          vol_ids.lock().unwrap().retain(|_id| *_id != id);
        }
      }
    });
    join_all(futures).await;
  }
  if !vol_ids_poll.lock().unwrap().is_empty() {
    return Err(anyhow!("Volumes not ready after poll"));
  }
  let body = CreateServerBody {
    name: name.to_string(),
    automount: None,
    datacenter: Some(datacenter),
    location: None,
    firewalls: firewall_ids
      .into_iter()
      .map(|firewall| create_server::Firewall { firewall })
      .collect(),
    image,
    labels,
    networks: private_network_ids,
    // A non-positive placement_group means "no placement group".
    placement_group: (placement_group > 0).then_some(placement_group),
    public_net: (enable_public_ipv4 || enable_public_ipv6).then_some(
      create_server::PublicNet {
        enable_ipv4: enable_public_ipv4,
        enable_ipv6: enable_public_ipv6,
        ipv4: None,
        ipv6: None,
      },
    ),
    server_type: hetzner_server_type(server_type),
    ssh_keys,
    start_after_create: true,
    user_data: (!user_data.is_empty()).then_some(user_data),
    volumes: volume_ids,
  };
  let server_id = hetzner
    .create_server(&body)
    .await
    // fixed typo in error message: was "hetnzer"
    .context("failed to create hetzner server")?
    .server
    .id;
  // Poll until the server is Running, then resolve the ip to return.
  for _ in 0..MAX_POLL_TRIES {
    tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
    let Ok(res) = hetzner.get_server(server_id).await else {
      continue;
    };
    if matches!(res.server.status, HetznerServerStatus::Running) {
      let ip = if use_public_ip {
        res
          .server
          .public_net
          .ipv4
          .context("instance does not have public ipv4 attached")?
          .ip
      } else {
        res
          .server
          .private_net
          .first()
          .context("no private networks attached")?
          .ip
          .to_string()
      };
      let server = HetznerServerMinimal { id: server_id, ip };
      return Ok(server);
    }
  }
  Err(anyhow!(
    "failed to verify server running after polling status"
  ))
}
fn hetzner_format(
format: HetznerVolumeFormat,
) -> common::HetznerVolumeFormat {
match format {
HetznerVolumeFormat::Xfs => common::HetznerVolumeFormat::Xfs,
HetznerVolumeFormat::Ext4 => common::HetznerVolumeFormat::Ext4,
}
}
fn hetzner_datacenter(
datacenter: HetznerDatacenter,
) -> common::HetznerDatacenter {
match datacenter {
HetznerDatacenter::Nuremberg1Dc3 => {
common::HetznerDatacenter::Nuremberg1Dc3
}
HetznerDatacenter::Helsinki1Dc2 => {
common::HetznerDatacenter::Helsinki1Dc2
}
HetznerDatacenter::Falkenstein1Dc14 => {
common::HetznerDatacenter::Falkenstein1Dc14
}
HetznerDatacenter::AshburnDc1 => {
common::HetznerDatacenter::AshburnDc1
}
HetznerDatacenter::HillsboroDc1 => {
common::HetznerDatacenter::HillsboroDc1
}
}
}
fn hetzner_server_type(
server_type: HetznerServerType,
) -> common::HetznerServerType {
match server_type {
HetznerServerType::SharedIntel1Core2Ram20Disk => {
common::HetznerServerType::SharedIntel1Core2Ram20Disk
}
HetznerServerType::SharedAmd2Core2Ram40Disk => {
common::HetznerServerType::SharedAmd2Core2Ram40Disk
}
HetznerServerType::SharedArm2Core4Ram40Disk => {
common::HetznerServerType::SharedArm2Core4Ram40Disk
}
HetznerServerType::SharedIntel2Core4Ram40Disk => {
common::HetznerServerType::SharedIntel2Core4Ram40Disk
}
HetznerServerType::SharedAmd3Core4Ram80Disk => {
common::HetznerServerType::SharedAmd3Core4Ram80Disk
}
HetznerServerType::SharedArm4Core8Ram80Disk => {
common::HetznerServerType::SharedArm4Core8Ram80Disk
}
HetznerServerType::SharedIntel2Core8Ram80Disk => {
common::HetznerServerType::SharedIntel2Core8Ram80Disk
}
HetznerServerType::SharedAmd4Core8Ram160Disk => {
common::HetznerServerType::SharedAmd4Core8Ram160Disk
}
HetznerServerType::SharedArm8Core16Ram160Disk => {
common::HetznerServerType::SharedArm8Core16Ram160Disk
}
HetznerServerType::SharedIntel4Core16Ram160Disk => {
common::HetznerServerType::SharedIntel4Core16Ram160Disk
}
HetznerServerType::SharedAmd8Core16Ram240Disk => {
common::HetznerServerType::SharedAmd8Core16Ram240Disk
}
HetznerServerType::SharedArm16Core32Ram320Disk => {
common::HetznerServerType::SharedArm16Core32Ram320Disk
}
HetznerServerType::SharedIntel8Core32Ram240Disk => {
common::HetznerServerType::SharedIntel8Core32Ram240Disk
}
HetznerServerType::SharedAmd16Core32Ram360Disk => {
common::HetznerServerType::SharedAmd16Core32Ram360Disk
}
HetznerServerType::DedicatedAmd2Core8Ram80Disk => {
common::HetznerServerType::DedicatedAmd2Core8Ram80Disk
}
HetznerServerType::DedicatedAmd4Core16Ram160Disk => {
common::HetznerServerType::DedicatedAmd4Core16Ram160Disk
}
HetznerServerType::DedicatedAmd8Core32Ram240Disk => {
common::HetznerServerType::DedicatedAmd8Core32Ram240Disk
}
HetznerServerType::DedicatedAmd16Core64Ram360Disk => {
common::HetznerServerType::DedicatedAmd16Core64Ram360Disk
}
HetznerServerType::DedicatedAmd32Core128Ram600Disk => {
common::HetznerServerType::DedicatedAmd32Core128Ram600Disk
}
HetznerServerType::DedicatedAmd48Core192Ram960Disk => {
common::HetznerServerType::DedicatedAmd48Core192Ram960Disk
}
}
}

10
bin/core/src/cloud/mod.rs Normal file
View File

@@ -0,0 +1,10 @@
// Cloud provider integrations.
pub mod aws;
#[allow(unused)]
pub mod hetzner;

/// Identifies where a build ran, with the data needed afterwards.
// NOTE(review): presumably consumed to clean up build resources
// (repo on a connected server, or an ephemeral AWS instance) —
// confirm at the call sites.
#[derive(Debug)]
pub enum BuildCleanupData {
  Server { repo_name: String },
  Aws { instance_id: String, region: String },
}

Some files were not shown because too many files have changed in this diff Show More