Compare commits

..

376 Commits

Author SHA1 Message Date
mbecker20
f0697e812a shift + N open new variable dialog 2025-11-11 14:22:56 -08:00
mbecker20
78766463d6 create variable Enter submit 2025-11-11 14:18:28 -08:00
mbecker20
0fa1edba2c deploy 2.0.0-dev-90 2025-11-11 14:13:55 -08:00
mbecker20
bbd968cac3 bump toml pretty with fix syncing procedure executions with multiline batch patterns 2025-11-11 14:13:25 -08:00
mbecker20
5f24fc1be3 deploy 2.0.0-dev-89 2025-11-11 00:44:49 -08:00
mbecker20
7ecd2b0b0b improve cmd wrapper with comment removal support 2025-11-11 00:43:54 -08:00
mbecker20
7bf44d2e04 fix some broken tabs 2025-11-11 00:35:24 -08:00
mbecker20
24e0672384 dashboard resets page title 2025-11-11 00:18:16 -08:00
mbecker20
04f081631f deploy 2.0.0-dev-88 2025-11-11 00:16:07 -08:00
mbecker20
b1af956b63 fix dashboard pie chart code splitting issue 2025-11-11 00:05:32 -08:00
mbecker20
370712b29f gen served client types 2025-11-11 00:05:02 -08:00
mbecker20
2b6c552964 caniuse lite update 2025-11-11 00:04:52 -08:00
mbecker20
434a1d8ea9 clippy lint 2025-11-11 00:04:39 -08:00
ChanningHe
0b7f28360f Add optional command wrapper for Docker Compose in StackConfig (#973) 2025-11-10 23:59:09 -08:00
ChanningHe
3c8ef0ab29 Add track option for Additional Env Files (#955) 2025-11-10 23:47:07 -08:00
mbecker20
930b2423c3 deploy 2.0.0-dev-87 2025-11-07 10:33:23 -08:00
mbecker20
546747b5f2 add timeout to dns ip resolve, only use ipv4 2025-11-07 10:32:55 -08:00
mbecker20
c6df866755 better aws builder config organization 2025-11-07 10:04:45 -08:00
mbecker20
ea5e684915 better useUserTargetPermissions 2025-11-06 22:18:31 -08:00
mbecker20
64db8933de RefreshBuildCache after build 2025-11-04 00:27:34 -08:00
mbecker20
7a5580de57 builder uppercase login 2025-11-04 00:06:46 -08:00
mbecker20
b1656bb174 log about enabling user linger 2025-11-03 10:29:50 -08:00
Badal Singh
559ce103da Update setup-periphery.py (#958) 2025-11-03 09:57:49 -08:00
mbecker20
75e278a57b builder fix partial_default 2025-10-30 00:41:27 -07:00
mbecker20
430f3ddc34 fix omni search container double select on same name 2025-10-29 00:02:32 -07:00
mbecker20
6c30c202e9 add Terminals to omni search 2025-10-28 23:59:41 -07:00
mbecker20
c5401de1c5 tweak user level tab view 2025-10-28 11:42:29 -07:00
mbecker20
7a3d9e0ef6 tweak description 2025-10-28 00:32:39 -07:00
mbecker20
595e3ece42 deploy 2.0.0-dev-86 2025-10-27 21:05:13 -07:00
mbecker20
a3bc895755 fix terminal disconnect 2025-10-27 21:04:46 -07:00
mbecker20
3e3def03ec terminal init properly lexes init command 2025-10-27 21:01:15 -07:00
mbecker20
bc672d9649 deploy 2.0.0-dev-85 2025-10-27 20:01:18 -07:00
mbecker20
ea6dee4d51 clippy lint 2025-10-27 19:13:43 -07:00
mbecker20
b985f18c74 deploy 2.0.0-dev-84 2025-10-27 19:12:54 -07:00
mbecker20
45909b2f04 pid1 reaper doesn't work, init: true should be required in compose 2025-10-27 19:06:50 -07:00
mbecker20
2b5a54ce89 deploy 2.0.0-dev-83 2025-10-27 18:31:56 -07:00
mbecker20
a18f33b95e formalize the terminal message variants 2025-10-27 18:31:30 -07:00
mbecker20
f35b00ea95 bump clap dependency 2025-10-27 16:18:30 -07:00
mbecker20
70fab08520 clean up terminal modules 2025-10-27 16:17:20 -07:00
mbecker20
0331780a5f rename variables shell -> command 2025-10-27 11:08:57 -07:00
mbecker20
06cdfd2bbc Terminal -> Terminals tabs 2025-10-27 02:53:06 -07:00
mbecker20
1555202569 Create Terminal don't auto set request after changed 2025-10-27 02:42:06 -07:00
mbecker20
5139622aad deploy 2.0.0-dev-82 2025-10-27 02:28:48 -07:00
mbecker20
61ce2ee3db improve new terminal 2025-10-27 02:04:15 -07:00
mbecker20
3171c14f2b comment on spawn process reaper 2025-10-27 01:41:06 -07:00
mbecker20
521db748d8 deploy 2.0.0-dev-81 2025-10-27 01:27:42 -07:00
mbecker20
35bf224080 deploy 2.0.0-dev-80 2025-10-27 01:21:44 -07:00
mbecker20
e0b31cfe51 CreateTerminal only shows resources which are actually available to connect to 2025-10-27 00:44:56 -07:00
mbecker20
0a890078b0 deploy 2.0.0-dev-79 2025-10-27 00:38:08 -07:00
mbecker20
df97ced7a4 deploy 2.0.0-dev-78 2025-10-27 00:03:26 -07:00
mbecker20
d4e5e2e6d8 add execute_<>_terminal convenience methods 2025-10-26 23:35:17 -07:00
mbecker20
19aa60dcb5 deploy 2.0.0-dev-77 2025-10-26 23:21:15 -07:00
mbecker20
fc19c53e6f deploy 2.0.0-dev-76 2025-10-26 23:00:59 -07:00
mbecker20
4f0af960db Big Terminal refactor + most commands run directly / bypass 'sh -c "..."' 2025-10-26 23:00:35 -07:00
mbecker20
e2ec5258fb add "New" kb shortcut 2025-10-23 23:55:24 -07:00
mbecker20
49b6545a02 reorder cli command list 2025-10-23 23:53:10 -07:00
mbecker20
0aabaa9e62 deploy 2.0.0-dev-75 2025-10-23 12:23:10 -07:00
mbecker20
dc65986eab binaries still built with bullseye for compat, but final images use trixie 2025-10-23 12:22:50 -07:00
mbecker20
1d8f28437d km attach <CONTAINER> 2025-10-23 12:22:02 -07:00
mbecker20
c1502e89c2 deploy 2.0.0-dev-74 2025-10-23 11:51:40 -07:00
mbecker20
0bd15fc442 ResourceQuery.names supports names or ids 2025-10-23 11:23:37 -07:00
mbecker20
5a3621b02e km exec 2025-10-23 01:55:50 -07:00
mbecker20
38192e2dac deploy 2.0.0-dev-73 2025-10-23 00:56:15 -07:00
mbecker20
5d271d5547 use Ping timeout to handle reconnect if for some reason network cuts but ws doesn't receive Close 2025-10-23 00:55:51 -07:00
mbecker20
11fb67a35b ssh use cancel token so stdout.write_all isn't cancelled mid-write, which leads to undefined behavior 2025-10-23 00:14:17 -07:00
mbecker20
a80499dcc4 improve stack config files responsive 2025-10-22 19:02:30 -07:00
mbecker20
8c76b8487f alert responsive, better Server terminal disabled 2025-10-22 13:48:08 -07:00
mbecker20
2b32d9042a deploy 2.0.0-dev-72 2025-10-22 01:00:19 -07:00
mbecker20
dc48f1f2ca deploy 2.0.0-dev-71 2025-10-22 00:50:02 -07:00
mbecker20
8e7b7bdcf1 deploy 2.0.0-dev-70 2025-10-22 00:44:54 -07:00
mbecker20
f11d64f72e add 'init' param to make 'execute_terminal' in single call possible 2025-10-22 00:44:33 -07:00
mbecker20
2ffae85180 dashboard table section headers link to resources page 2025-10-22 00:03:12 -07:00
mbecker20
bd79d0f1e0 km ssh <SERVER> [COMMAND] -n [NAME] 2025-10-21 23:55:36 -07:00
mbecker20
e890b1f675 deploy 2.0.0-dev-69 2025-10-21 23:32:18 -07:00
mbecker20
3b7de25c30 Shift + X - Terminals, Shift + N - New (Resource, Terminal) 2025-10-21 16:11:27 -07:00
mbecker20
793bb99f31 nav to terminal on create 2025-10-21 16:00:50 -07:00
mbecker20
d465c9f273 deploy 2.0.0-dev-68 2025-10-21 15:51:38 -07:00
mbecker20
ce641a8974 terminal page 2025-10-21 15:51:18 -07:00
mbecker20
1b89ceb122 deploy 2.0.0-dev-67 2025-10-21 02:50:21 -07:00
mbecker20
2dbc011d26 remove unneeded log on client terminal disconnect 2025-10-21 02:33:19 -07:00
mbecker20
246da88ae1 deploy 2.0.0-dev-66 2025-10-21 02:29:12 -07:00
mbecker20
a8c16f64b1 km ssh 2025-10-21 02:28:42 -07:00
mbecker20
a5b711a348 stack tabs localstorage increment 2025-10-20 20:35:08 -07:00
mbecker20
9666e9ad83 Fix monitoring table with proper server version component 2025-10-20 03:01:07 -07:00
mbecker20
7479640c73 add hover information for mysterious server header icons 2025-10-20 02:53:18 -07:00
mbecker20
4823825035 give websocket indicator info on hover 2025-10-20 02:35:12 -07:00
mbecker20
23897a7acf clippy 2025-10-20 02:16:52 -07:00
mbecker20
20d5588b5c deploy 2.0.0-dev-65 2025-10-20 02:15:15 -07:00
mbecker20
f7e15ccde5 progress on terminals page 2025-10-20 02:14:51 -07:00
mbecker20
cf7623b1fc combine all resources / table view into dashboard 2025-10-20 01:40:27 -07:00
mbecker20
d3c464c05d start Terminals management page 2025-10-20 00:42:45 -07:00
mbecker20
5c9d416aa4 prog on docs update 2025-10-19 23:33:41 -07:00
mbecker20
aabcd88312 update connect-servers docs 2025-10-19 23:07:50 -07:00
mbecker20
9d2624c6bc clarify root directory in periphery config file 2025-10-19 23:07:19 -07:00
mbecker20
ee11fb0b6c clean up setup script 2025-10-19 23:07:02 -07:00
mbecker20
45adfbddd0 mounting custom CA 2025-10-19 23:06:48 -07:00
mbecker20
d26d035dc6 clean up docs intro 2025-10-19 22:03:17 -07:00
mbecker20
e673ba0adf deploy 2.0.0-dev-64 2025-10-19 21:48:15 -07:00
mbecker20
f876facfa7 improve git status message / failure propagation 2025-10-19 21:47:29 -07:00
mbecker20
3a47d57478 container class px-[1.2rem] 2025-10-19 20:31:40 -07:00
mbecker20
a707028277 responsive tweaks 2025-10-19 20:07:30 -07:00
mbecker20
0c6276c677 fix Resources / Containers mobile 2025-10-19 19:51:28 -07:00
mbecker20
fc9c6706f1 keep more descriptive settings header mobile 2025-10-19 13:24:44 -07:00
mbecker20
7674269ce9 fix user dropdown not showing username mobile 2025-10-19 13:11:34 -07:00
mbecker20
3b511c5adc improve server terminal mobile responsiveness 2025-10-19 13:00:30 -07:00
mbecker20
87221a10e9 fix mobile ContainerTerminal responsiveness 2025-10-19 12:56:11 -07:00
mbecker20
450cb6a148 fix stack config files mobile responsiveness 2025-10-19 12:46:51 -07:00
mbecker20
f252cefb21 responsive server docker tab 2025-10-19 12:37:26 -07:00
mbecker20
7855e9d688 run dkf 2025-10-19 12:30:59 -07:00
mbecker20
feb263c15f more type safe tabs 2025-10-19 12:27:55 -07:00
mbecker20
4f8d1c22cc rest of tabs also use mobile friendly 2025-10-19 12:11:11 -07:00
mbecker20
60bd47834e deploy 2.0.0-dev-63 2025-10-19 11:48:09 -07:00
mbecker20
4d632a6b61 improve resources mobile tabs responsiveness 2025-10-19 11:47:47 -07:00
mbecker20
381dd76723 deploy 2.0.0-dev-62 2025-10-19 01:37:10 -07:00
mbecker20
077e28a5fe fix ConfigList too wide on mobile 2025-10-19 01:36:50 -07:00
mbecker20
6b02aaed7d hide core pubkey copy if origin not https 2025-10-19 01:28:45 -07:00
mbecker20
e466944c05 improve mobile settings view 2025-10-19 01:24:41 -07:00
mbecker20
8ff94b7465 deploy 2.0.0-dev-61 2025-10-19 00:35:26 -07:00
mbecker20
b17df5ed7b show host public ip 2025-10-19 00:34:52 -07:00
mbecker20
207dc30206 cli is distroless, no shell / update-ca-certificates 2025-10-18 22:12:44 -07:00
mbecker20
c3eb386bdb fix copy entrypoint 2025-10-18 22:07:16 -07:00
mbecker20
4279e46892 deploy 2.0.0-dev-60 2025-10-18 12:59:19 -07:00
mbecker20
8d3d2fee12 use entrypoint scripts to make update-ca-certificates consistent when using custom CMD 2025-10-18 12:58:55 -07:00
mbecker20
1df36c4266 deploy 2.0.0-dev-59 2025-10-18 11:36:07 -07:00
mbecker20
36f7ad33c7 core and periphery images auto run update-ca-certificates on start, only need to mount in. 2025-10-18 11:35:45 -07:00
mbecker20
ec34b2c139 deploy 2.0.0-dev-58 2025-10-18 11:02:11 -07:00
mbecker20
d14c28d1f2 new otel instrumentation 2025-10-18 11:01:47 -07:00
mbecker20
68f7a0e9ce all info menu to top of settings 2025-10-18 00:45:59 -07:00
mbecker20
50f0376f0a Add Core title and public key to top of Settings 2025-10-18 00:01:41 -07:00
mbecker20
bbd53747ad fix km ps -h description 2025-10-17 17:17:18 -07:00
mbecker20
6a2adf1f83 tweak logs 2025-10-16 01:06:37 -07:00
mbecker20
128b15b94f deploy 2.0.0-dev-57 2025-10-16 00:59:46 -07:00
mbecker20
8d74b377b7 more otel refinements 2025-10-16 00:59:20 -07:00
mbecker20
d7e972e5c6 stack ui doesn't show project missing when deploying 2025-10-15 23:49:26 -07:00
mbecker20
e5cb4aac5a Fix: Webhook triggered checks linked repo branch for build, stack, sync 2025-10-15 18:06:43 -07:00
mbecker20
d0f62f8326 rework tracing events / improve opentelemetry output 2025-10-15 01:41:18 -07:00
mbecker20
47c4091a4b onboarding key uses recognizable key 2025-10-14 16:57:35 -07:00
mbecker20
973480e2b3 remove all the unnecessary instrument debug 2025-10-14 00:33:53 -07:00
mbecker20
b9e1cc87d2 remove instrument from validate_cancel_repo_build 2025-10-13 23:52:55 -07:00
mbecker20
05d20c8603 deploy 2.0.0-dev-56 2025-10-13 22:05:07 -07:00
mbecker20
fe2d68a001 fix config loading 2025-10-13 22:04:42 -07:00
mbecker20
26fd5b2a6d deploy 2.0.0-dev-55 2025-10-13 20:30:40 -07:00
mbecker20
76457bcb61 apply env / shell interpolation as *final* config loading stage, to include env vars. 2025-10-13 20:26:13 -07:00
mbecker20
ebd2c2238d bump deps 2025-10-13 19:51:05 -07:00
mbecker20
b7fc1bef7b refine default env 2025-10-13 13:53:12 -07:00
mbecker20
50b9f2e1bf deploy 2.0.0-dev-54 2025-10-13 13:06:23 -07:00
mbecker20
41ce86f6ab deploy 2.0.0-dev-53 2025-10-12 20:00:47 -07:00
mbecker20
7a21c01e52 tweak 2025-10-12 19:59:09 -07:00
mbecker20
e63e282510 small clean up 2025-10-12 19:56:15 -07:00
mbecker20
5456b36c18 deploy 2.0.0-dev-52 2025-10-12 13:55:39 -07:00
mbecker20
fcfb58a7e9 periphery with server disabled can initialize core public key file 2025-10-12 13:55:15 -07:00
mbecker20
2203004a74 move periphery in memory state to state.rs 2025-10-12 13:15:52 -07:00
mbecker20
996fb49823 periphery server_enabled /version route 2025-10-12 12:56:29 -07:00
mbecker20
35d22c77a2 Core add non authed /version route 2025-10-12 12:55:14 -07:00
mbecker20
44ab89600f Simplify Option + Result into one encoding layer 2025-10-12 03:24:00 -07:00
mbecker20
0900e48cb8 ntfy / pushover url interpolation 2025-10-12 01:34:07 -07:00
mbecker20
c530a46a27 deploy 2.0.0-dev-51 2025-10-12 01:09:35 -07:00
mbecker20
f69c8db3ea pass through whether Periphery docker daemon connection is ok 2025-10-12 01:08:45 -07:00
mbecker20
48f2f651e1 periphery runs with logs if couldn't connect to docker daemon 2025-10-12 01:05:20 -07:00
mbecker20
bdb5b4185e rename some websocket fields 2025-10-12 00:28:55 -07:00
mbecker20
42a7b8c19b move connection queries to periphery_client 2025-10-12 00:08:59 -07:00
mbecker20
ded17e4840 more encoding refine 2025-10-12 00:05:16 -07:00
mbecker20
80fb1e6889 more on encoding 2025-10-11 14:11:07 -07:00
mbecker20
1dc861f538 fix periphery keys init when config.private_key is not explicitly defined. 2025-10-11 12:00:29 -07:00
mbecker20
3da63395fd fix EncodedOption docs 2025-10-10 00:32:05 -07:00
mbecker20
c40cbc4d77 deploy 2.0.0-dev-50 2025-10-09 23:40:42 -07:00
mbecker20
05e352e88c attach working 2025-10-09 23:28:27 -07:00
mbecker20
5884c09fb8 fix fe 2025-10-09 22:05:36 -07:00
mbecker20
f8add38043 backend for container attach 2025-10-09 21:53:26 -07:00
mbecker20
501f734e8b deploy 2.0.0-dev-49 2025-10-09 20:13:17 -07:00
mbecker20
de62732ac8 bump jwt lib 2025-10-09 20:12:51 -07:00
mbecker20
bfa61058cd remove github only managed repo webhooks feature. Not well implemented or documented, also provider specific. 2025-10-09 20:06:06 -07:00
mbecker20
72ca6d9910 deploy 2.0.0-dev-48 2025-10-09 19:36:47 -07:00
mbecker20
4d1ac32ad3 clippy 2025-10-09 18:24:50 -07:00
mbecker20
927e5959fa move encoded message schemas between core / periphery into periphery_client 2025-10-09 18:23:44 -07:00
mbecker20
37ccc6e1ef isolate encoding out of transport 2025-10-09 18:11:49 -07:00
mbecker20
deaa8754f3 slowly better ergonomics 2025-10-09 17:29:05 -07:00
mbecker20
dd8ac67c72 clippy and fmt 2025-10-09 16:26:06 -07:00
mbecker20
be4457c9cf deploy 2.0.0-dev-47 2025-10-09 15:41:06 -07:00
mbecker20
1868421815 strictly typed transport bytes encoding 2025-10-09 15:40:42 -07:00
mbecker20
366f7a12b4 Enumerated transport message types 2025-10-08 20:36:16 -07:00
mbecker20
75119370df standardize key rotation with wrapper 2025-10-08 17:45:45 -07:00
mbecker20
9e85b9d4c8 deploy 2.0.0-dev-46 2025-10-08 03:59:22 -07:00
mbecker20
8afbbf23dc deploy 2.0.0-dev-45 2025-10-08 02:48:24 -07:00
mbecker20
770a1116a1 fix: RotateCoreKeys also needs to store the new keys in mem 2025-10-08 02:48:01 -07:00
mbecker20
0b4aebbc24 periphery refresh panics if server_enabled, and core public key fails to parse. 2025-10-08 02:32:52 -07:00
mbecker20
f1696e26e4 deploy 2.0.0-dev-44 2025-10-08 02:11:38 -07:00
mbecker20
1a7b682301 RotateCoreKeys api 2025-10-08 02:11:17 -07:00
mbecker20
b0110b05aa deploy 2.0.0-dev-43 2025-10-08 00:02:34 -07:00
mbecker20
561b490f26 write files potentially containing secrets as 0600 2025-10-07 23:59:53 -07:00
mbecker20
cac1f0b42e align server standard and monitoring tables 2025-10-07 19:41:57 -07:00
mbecker20
28886fb304 fix typos 2025-10-07 14:32:00 -07:00
mbecker20
fb84d4cf7d deploy 2.0.0-dev-42 2025-10-07 01:59:04 -07:00
mbecker20
31e9624556 auto_rotate_keys config 2025-10-07 01:58:13 -07:00
mbecker20
3864bb7115 onboarding key expiry view 2025-10-07 01:16:28 -07:00
mbecker20
cea8601246 remove deleted server from onboarding key copy server 2025-10-07 00:44:36 -07:00
mbecker20
a546364bf3 deploy 2.0.0-dev-41 2025-10-07 00:29:33 -07:00
mbecker20
c8c62ea562 core public keys improve refresh 2025-10-07 00:27:47 -07:00
mbecker20
845e8780c7 improve server stats UI 2025-10-06 23:45:26 -07:00
mbecker20
db60347566 deploy 2.0.0-dev-40 2025-10-06 22:12:40 -07:00
mbecker20
c3ea0239d6 fix passkey support 2025-10-06 22:12:15 -07:00
mbecker20
e9d13449bf improve ws trait ergonomics 2025-10-06 20:02:06 -07:00
mbecker20
2daa92a639 working with safer transport message api 2025-10-06 19:16:03 -07:00
mbecker20
6473080078 deploy 2.0.0-dev-39 2025-10-06 03:14:15 -07:00
mbecker20
d3957f65dc schedule alert send before not after 2025-10-06 03:13:48 -07:00
mbecker20
cb34969f1e move skip label to be built into images 2025-10-06 03:07:11 -07:00
mbecker20
55a0a8cd05 deploy 2.0.0-dev-38 2025-10-06 02:38:42 -07:00
mbecker20
89f08372c6 CloseAlert 2025-10-06 02:38:16 -07:00
mbecker20
6a3ce2d426 config log some errors 2025-10-06 02:13:36 -07:00
mbecker20
4928378d46 fmt 2025-10-06 02:03:03 -07:00
mbecker20
eea222cfba simplify periphery config by removing option 2025-10-06 02:02:31 -07:00
mbecker20
6e9cc2dc77 deploy 2.0.0-dev-37 2025-10-06 01:40:24 -07:00
mbecker20
55d45084d0 comment 2025-10-06 01:40:00 -07:00
mbecker20
9657a44049 Improve config toml / yaml / json interpolation support 2025-10-06 01:38:48 -07:00
mbecker20
51fa9ae3c2 update setup script readme 2025-10-06 00:29:35 -07:00
mbecker20
5fd256444e improve the setup script 2025-10-05 23:53:21 -07:00
mbecker20
059716f178 deploy 2.0.0-dev-36 2025-10-05 18:23:43 -07:00
mbecker20
0bee1fe2c5 fix: connect and connect insecure are swapped 2025-10-05 18:20:29 -07:00
mbecker20
1e58c1a958 deploy 2.0.0-dev-35 2025-10-05 17:45:16 -07:00
mbecker20
ed1431db0a improve v1 downgrade 2025-10-05 17:43:46 -07:00
mbecker20
dc769ff159 document periphery_public_key 2025-10-05 17:39:11 -07:00
mbecker20
098f23ac4c configure Core -> Periphery insecure_tls 2025-10-05 17:36:07 -07:00
mbecker20
03f577d22f forgiving periphery public key parsing 2025-10-05 17:02:08 -07:00
mbecker20
95ca217362 deploy 2.0.0-dev-34 2025-10-05 16:52:33 -07:00
mbecker20
6d61045764 support KOMODO_PERIPHERY_PUBLIC_KEY 2025-10-05 16:52:00 -07:00
mbecker20
34e075eaf3 periphery support core_tls_insecure_skip_verify 2025-10-05 16:12:04 -07:00
mbecker20
232dc0bb4e deploy 2.0.0-dev-33 2025-10-05 14:59:23 -07:00
mbecker20
0cc0ee2aab load periphery_public_key 2025-10-05 14:58:52 -07:00
mbecker20
edebe925ff add km maintenance tasks aliases 2025-10-05 14:50:20 -07:00
mbecker20
5fd45bbc7b deploy 2.0.0-dev-32 2025-10-05 14:39:09 -07:00
mbecker20
0a490dadb2 rotation maintenance execution doesn't make individual updates 2025-10-05 14:38:45 -07:00
mbecker20
23847c15bc deploy 2.0.0-dev-31 2025-10-05 14:21:18 -07:00
mbecker20
0d238aee4f onboarding create_builder 2025-10-05 14:20:58 -07:00
mbecker20
98ad6cf5fa create update use uppercase 2025-10-05 13:41:42 -07:00
mbecker20
e35b81630b deploy 2.0.0-dev-30 2025-10-05 13:30:47 -07:00
mbecker20
1215852fe4 onboarding set Server tags 2025-10-05 13:24:38 -07:00
mbecker20
4164b76ff5 onboarded server needs to be enabled 2025-10-05 12:36:04 -07:00
mbecker20
26a9daffeb deploy 2.0.0-dev-29 2025-10-05 05:49:29 -07:00
mbecker20
8bb9f16e9b onboarding save copy server selection 2025-10-05 05:47:51 -07:00
mbecker20
b6eaf76497 Include templates in onboarding selector 2025-10-05 05:44:58 -07:00
mbecker20
073893da0e deploy 2.0.0-dev-28 2025-10-05 05:18:48 -07:00
mbecker20
e71547f1c2 configure server onboarding key 2025-10-05 05:17:56 -07:00
mbecker20
1991627990 move periphery public key to Server info (keep it out of resource sync) 2025-10-05 04:18:59 -07:00
mbecker20
3434d827a3 deploy 2.0.0-dev-27 2025-10-05 02:57:58 -07:00
mbecker20
1ef8b9878a rotate all server keys task 2025-10-05 02:57:27 -07:00
mbecker20
07ddaa8377 tweak 2025-10-05 01:41:44 -07:00
mbecker20
142c08cde4 deploy 2.0.0-dev-26 2025-10-05 01:19:21 -07:00
mbecker20
1aa1422faa periphery private key rotation 2025-10-05 01:18:56 -07:00
mbecker20
1394e8a6b1 Rotate Server private keys 2025-10-05 00:54:56 -07:00
mbecker20
420ee10211 tweaks 2025-10-04 23:59:14 -07:00
mbecker20
e918461dc5 refine onboarding key 2025-10-04 23:36:37 -07:00
mbecker20
4dc9ca27be refactor Periphery onboarding 2025-10-04 16:43:02 -07:00
mbecker20
f49b186f2f consolidate periphery docker apis into single mod 2025-10-04 16:17:32 -07:00
mbecker20
6e039b41f1 deploy 2.0.0-dev-25 2025-10-03 17:51:46 -07:00
mbecker20
e7cd77b022 tweaks 2025-10-03 17:06:14 -07:00
mbecker20
556cbd04c7 server onboarding flow using onboarding key 2025-10-03 17:01:58 -07:00
mbecker20
4e3d181466 default documented setup now uses Periphery -> Core setup 2025-10-03 12:55:06 -07:00
mbecker20
5d4326f46f NOT_FOUND if server not found 2025-10-03 03:17:37 -07:00
mbecker20
4bb486ad0a deploy 2.0.0-dev-24 2025-10-03 02:30:20 -07:00
mbecker20
d29c5112d8 Confirm server public key flow 2025-10-03 02:29:53 -07:00
mbecker20
d41315b8a4 don't navigate to /login for network errors 2025-10-03 01:58:23 -07:00
mbecker20
847404388c deploy 2.0.0-dev-23 2025-10-03 00:48:11 -07:00
mbecker20
eef8ec59b8 deploy 2.0.0-dev-22 2025-10-03 00:19:43 -07:00
mbecker20
9eb32f9ff5 store attempted public keys 2025-10-03 00:13:55 -07:00
mbecker20
859bfe67ef Improve Core side connection handling and fix Periphery -> Core error report 2025-10-02 23:03:58 -07:00
mbecker20
21ea469cd4 add login message 2 sec timeout 2025-10-02 16:00:45 -07:00
mbecker20
7fb902b892 deploy 2.0.0-dev-21 2025-10-02 03:12:59 -07:00
mbecker20
c9c4ac47ee fix clippy 2025-10-02 02:34:23 -07:00
mbecker20
f228cd31f3 deploy 2.0.0-dev-20 2025-10-02 02:33:33 -07:00
mbecker20
4feecb4b97 write key pem files by default when not otherwise provided. 2025-10-02 02:32:13 -07:00
mbecker20
e2680d0942 fix deploy 2025-10-01 21:35:27 -07:00
mbecker20
7422c0730d deploy 2.0.0-dev-19 2025-10-01 21:27:59 -07:00
mbecker20
37ac0dc7e3 update deploy 2025-10-01 21:17:43 -07:00
mbecker20
dccaca1df4 make sure not a config file before include as compose file 2025-10-01 20:32:52 -07:00
mbecker20
886aea4c36 deploy 2.0.0-dev-18 2025-10-01 19:48:40 -07:00
mbecker20
cbca070bae load keys from files 2025-10-01 19:41:32 -07:00
mbecker20
b4bdd401f6 fix unneeded base64 prefix 2025-10-01 02:36:26 -07:00
mbecker20
e546166240 use pkcs8 and spki for private / public key encoding, matching openssl 2025-10-01 02:25:41 -07:00
mbecker20
21689ce0ad periphery support same key gen functions 2025-09-29 23:32:47 -07:00
mbecker20
941787db64 slack client 0.5.0 2025-09-29 12:38:39 -07:00
mbecker20
d4b1aacac3 comment out 2025-09-29 02:21:07 -07:00
mbecker20
30f89461bf deploy 2.0.0-dev-17 2025-09-29 00:57:19 -07:00
mbecker20
a42d1397e9 back to bullseye (for max GLIBC compatibility) 2025-09-29 00:56:19 -07:00
mbecker20
b29313c28f deploy 2.0.0-dev-16 2025-09-29 00:47:17 -07:00
mbecker20
08a246a90c bullseye -> trixie 2025-09-29 00:46:51 -07:00
mbecker20
1a08df28d0 docs and config clean up 2025-09-29 00:06:35 -07:00
mbecker20
a226ffc256 fix json config load from interpolated 2025-09-28 23:20:04 -07:00
mbecker20
b385ee5ec3 start on docs update 2025-09-28 22:59:58 -07:00
mbecker20
c78c34357d remove unnecessary connected to core websocket log 2025-09-28 18:14:57 -07:00
mbecker20
4b7c692f00 deploy 2.0.0-dev-15 2025-09-28 18:02:18 -07:00
mbecker20
1ac98a096e bump async timing util to 1.1.0 to support for timelengths 2025-09-28 17:57:12 -07:00
mbecker20
281a2dc1ce first server configuration works with Periphery -> Core 2025-09-28 14:39:11 -07:00
mbecker20
0fe91378a6 tweak key gen output 2025-09-28 14:12:41 -07:00
mbecker20
11e76d1cf2 deploy 2.0.0-dev-14 2025-09-28 13:10:00 -07:00
mbecker20
a3bcd71105 simplify cache refresh with single periphery call 2025-09-28 13:05:45 -07:00
mbecker20
3ecc56dd76 clean up crypto provider install 2025-09-27 21:40:20 -07:00
mbecker20
7239cbb19b remove extra install crypto provider 2025-09-27 19:37:50 -07:00
mbecker20
a0540f7011 deploy 2.0.0-dev-13 2025-09-27 16:54:00 -07:00
mbecker20
37aea7605e gen types 2025-09-27 14:33:14 -07:00
mbecker20
78be913541 fix stuff after main rebase 2025-09-27 14:26:58 -07:00
mbecker20
c34f5ebf49 update config and compose envs 2025-09-27 14:23:49 -07:00
mbecker20
e5822cefb8 clean up socket handling 2025-09-27 14:23:49 -07:00
mbecker20
4baab194cf centralize the terminal stuff 2025-09-27 14:23:49 -07:00
mbecker20
a896583da6 deploy 2.0.0-dev-12 2025-09-27 14:23:49 -07:00
mbecker20
7b2674c38b deploy 2.0.0-dev-11 2025-09-27 14:23:42 -07:00
mbecker20
d1e32989e3 allow any number of simultaneous inbound / outbound connections (to different Cores) 2025-09-27 14:23:36 -07:00
mbecker20
e802bb3882 periphery support multiple core_public_keys 2025-09-27 14:23:36 -07:00
mbecker20
27a38b1bf5 periphery support multiple simultaneous core connections 2025-09-27 14:23:36 -07:00
mbecker20
2bc8a754be clean up passkey login 2025-09-27 14:23:36 -07:00
mbecker20
7a2a54bec1 dev-10 2025-09-27 14:23:36 -07:00
mbecker20
6a15150d59 don't cleanup server type builders 2025-09-27 14:23:31 -07:00
mbecker20
1b1dca76da deploy 2.0.0-dev-9 2025-09-27 14:23:31 -07:00
mbecker20
a032f0f4ff move system info to server cache 2025-09-27 14:23:25 -07:00
mbecker20
2749d49435 Core -> Periphery connection prefers noise handshake if 'core_public_key' is set 2025-09-27 14:23:25 -07:00
mbecker20
d88e42ef2d add specific server passkey support back 2025-09-27 14:23:25 -07:00
mbecker20
a370e7d121 support passkey auth for Core -> Periphery connection to remove the breaking change 2025-09-27 14:23:25 -07:00
mbecker20
d139ad2b3d always fallback to core config 'periphery_public_key' 2025-09-27 14:23:25 -07:00
mbecker20
8d2d180398 deploy 2.0.0-dev-8 2025-09-27 14:22:48 -07:00
mbecker20
37ca4ca986 fix server update hang 2025-09-27 14:22:42 -07:00
mbecker20
33e73b8543 use warn log 2025-09-27 14:22:42 -07:00
mbecker20
cf6e36e90c periphery server avoid auth fail log spam 2025-09-27 14:22:42 -07:00
mbecker20
9eb8b32f4a create and delete connections on demand 2025-09-27 14:22:42 -07:00
mbecker20
b400add6f1 deploy 2.0.0-dev-7 2025-09-27 14:22:41 -07:00
mbecker20
24adb89d25 execute container exec waits a bit for terminal to init before sending command 2025-09-27 14:22:36 -07:00
mbecker20
4674b2badb deploy 2.0.0-dev-6 2025-09-27 14:22:36 -07:00
mbecker20
65d1a69cb9 Mount ExecuteContainerExec periphery api 2025-09-27 14:22:27 -07:00
mbecker20
0da5718991 store connection channels under the connection 2025-09-27 14:22:27 -07:00
mbecker20
6b26cd120c simplify most of periphery client into bin/core 2025-09-27 14:22:27 -07:00
mbecker20
28e1bb19a4 deploy 2.0.0-dev-5 2025-09-27 14:22:27 -07:00
mbecker20
166107ac07 bail_if_not_connected 2025-09-27 14:22:21 -07:00
mbecker20
d77201880f dashboard Active include GlobalAutoUpdate 2025-09-27 14:22:21 -07:00
mbecker20
1d7629e9b2 Update server address description and placeholders 2025-09-27 14:22:21 -07:00
mbecker20
198f690ca5 Got invalid public key: {public_key} 2025-09-27 14:22:21 -07:00
mbecker20
531c79a144 deploy 2.0.0-dev-4 2025-09-27 14:22:21 -07:00
mbecker20
d685862713 improve Core - Periphery auth error messages 2025-09-27 14:22:09 -07:00
mbecker20
af0f245b5b deploy 2.0.0-dev-3 2025-09-27 14:22:09 -07:00
mbecker20
cba36861b7 deploy 2.0.0-dev-2 2025-09-27 14:22:02 -07:00
mbecker20
2c2c1d47b4 dev-2 2025-09-27 14:22:02 -07:00
mbecker20
3a6b997241 Json and JsonPretty formatting 2025-09-27 14:21:54 -07:00
mbecker20
7122f79b9d add -f json option to key utils (for use with jquery etc.) 2025-09-27 14:21:54 -07:00
mbecker20
9bcee8122b tweak 2025-09-27 14:21:54 -07:00
mbecker20
a49c98946e add copy pubkeys 2025-09-27 14:21:54 -07:00
mbecker20
7d222a7241 dev-1 2025-09-27 14:21:54 -07:00
mbecker20
33501dac3e fix Core -> Periphery reconnection 2025-09-27 14:21:44 -07:00
mbecker20
4675dfa736 improve the logging 2025-09-27 14:21:44 -07:00
mbecker20
0be51dc784 move core connection handlers into core binary 2025-09-27 14:21:44 -07:00
mbecker20
52453d1320 set default allowed periphery public key 2025-09-27 14:21:44 -07:00
mbecker20
25da97ac1a basic configure auth 2025-09-27 14:21:44 -07:00
mbecker20
02db5a11d3 pipe through core side private / public key handling 2025-09-27 14:21:44 -07:00
mbecker20
89a5272246 rename passkey -> private_key 2025-09-27 14:21:44 -07:00
mbecker20
ae51ea1ad6 Copy core public key 2025-09-27 14:21:44 -07:00
mbecker20
3bdb4bea16 Core includes public key in CoreInfo 2025-09-27 14:21:44 -07:00
mbecker20
677bb14b5d auth forward error 2025-09-27 14:21:44 -07:00
mbecker20
6700700a80 clean up websocket handlers with many params 2025-09-27 14:21:44 -07:00
mbecker20
996d4aa129 standardize server size header identifier extraction 2025-09-27 14:21:44 -07:00
mbecker20
75894a7282 wire through private keys 2025-09-27 14:21:44 -07:00
mbecker20
2a065edcf1 avoid looping periphery client error 2025-09-27 14:21:44 -07:00
mbecker20
6f3703acfb periphery client makes more sense 2025-09-27 14:21:44 -07:00
mbecker20
59e989ecdf noise library and cli key utilities 2025-09-27 14:21:44 -07:00
mbecker20
951ff34a9e abstract websocket handling implementations on both sides 2025-09-27 14:21:12 -07:00
mbecker20
2d83105500 clean up 2025-09-27 14:21:12 -07:00
mbecker20
3d455f5142 implement noise auth basic 2025-09-27 14:21:12 -07:00
mbecker20
01de8c4a9b use standardized websocket wrappers / traits 2025-09-27 14:21:12 -07:00
mbecker20
d5de338561 outbound connection mode working 2025-09-27 14:21:12 -07:00
mbecker20
58c1afb8ef add login draft for transport 2025-09-27 14:21:12 -07:00
mbecker20
230f357b5a everything over ws working 2025-09-27 14:21:12 -07:00
mbecker20
991c95fff0 execute basically working, still need to clear the response channel upon completion 2025-09-27 14:21:12 -07:00
mbecker20
f6243fe6b1 more cleanup 2025-09-27 14:21:12 -07:00
mbecker20
9feeccba6e container terminal over connection 2025-09-27 14:21:12 -07:00
mbecker20
673c7f3a6b multiplex requests + terminal over single WS 2025-09-27 14:21:12 -07:00
mbecker20
39f900d651 standardize and consolidate logic in transport lib 2025-09-27 14:21:12 -07:00
mbecker20
8a06a0d6ce is work 2025-09-27 14:21:12 -07:00
mbecker20
7789ee4f4a prog 2025-09-27 14:21:12 -07:00
mbecker20
0472b6a7f7 fix after 1.19.4 2025-09-27 14:21:12 -07:00
mbecker20
d1d2227d36 prog 2025-09-27 14:21:11 -07:00
mbecker20
cea7c5fc5e prog on ws connect 2025-09-27 14:21:11 -07:00
1019 changed files with 54740 additions and 78906 deletions

View File

@@ -1,6 +1,6 @@
services:
dev:
image: mcr.microsoft.com/devcontainers/rust:1-bookworm
image: mcr.microsoft.com/devcontainers/rust:1-1-bullseye
volumes:
# Mount the root folder that contains .git
- ../:/workspace:cached
@@ -10,19 +10,13 @@ services:
- stacks:/etc/komodo/stacks
command: sleep infinity
ports:
- "9120:9120"
- "9121:9121"
environment:
KOMODO_HOST: http://localhost:9120
KOMODO_FIRST_SERVER: http://localhost:8120
KOMODO_DATABASE_ADDRESS: db
KOMODO_ENABLE_NEW_USERS: true
KOMODO_LOCAL_AUTH: true
KOMODO_JWT_SECRET: a_random_secret
VITE_KOMODO_HOST: http://localhost:9120
KOMODO_CORS_ALLOWED_ORIGINS: http://localhost:5173
KOMODO_CORS_ALLOW_CREDENTIALS: true
PERIPHERY_SSL_ENABLED: false
KOMODO_SESSION_ALLOW_CROSS_SITE: true
links:
- db
# ...
@@ -36,4 +30,4 @@ volumes:
data:
repo-cache:
repos:
stacks:
stacks:

View File

@@ -10,7 +10,7 @@
// Features to add to the dev container. More info: https://containers.dev/features.
"features": {
"ghcr.io/devcontainers/features/node:1": {
"version": "22.12.0"
"version": "20.12.2"
},
"ghcr.io/devcontainers-community/features/deno:1": {
@@ -28,8 +28,7 @@
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [
5173,
9120
9121
],
// Use 'postCreateCommand' to run commands after the container is created.
@@ -37,16 +36,11 @@
"runServices": [
"db"
],
]
// Configure tool-specific properties.
// "customizations": {},
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
"remoteEnv": {
// Avoid out of memory error when running `yarn build`
"NODE_OPTIONS": "--max-old-space-size=4096"
}
}

View File

@@ -1,9 +1,3 @@
#!/bin/sh
cargo install typeshare-cli
sudo mkdir -p /etc/komodo/keys
sudo chown -R $(whoami) /etc/komodo
sudo mkdir -p /config/keys
sudo chown -R $(whoami) /config
cargo install typeshare-cli

View File

@@ -1,56 +0,0 @@
name: CI
on:
push:
branches: [main]
pull_request:
branches: [main]
env:
CARGO_TERM_COLOR: always
jobs:
build:
name: Build & Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cache cargo registry
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
- name: Build
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose
fmt:
name: Format
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
components: rustfmt
- name: Check formatting
run: cargo fmt --all -- --check

4
.gitignore vendored
View File

@@ -6,4 +6,8 @@ deno.lock
.env.development
.DS_Store
.idea
/frontend/build
/lib/ts_client/build
.dev

28
.vscode/tasks.json vendored
View File

@@ -106,62 +106,62 @@
"problemMatcher": []
},
{
"label": "Init UI Client",
"label": "Init Frontend Client",
"type": "shell",
"command": "yarn link komodo_client && yarn install",
"options": {
"cwd": "${workspaceFolder}/ui",
"cwd": "${workspaceFolder}/frontend",
},
"problemMatcher": []
},
{
"label": "Init UI",
"label": "Init Frontend",
"dependsOn": [
"Build TS Client Types",
"Init TS Client",
"Init UI Client"
"Init Frontend Client"
],
"dependsOrder": "sequence",
"problemMatcher": []
},
{
"label": "Build UI",
"label": "Build Frontend",
"type": "shell",
"command": "yarn build",
"options": {
"cwd": "${workspaceFolder}/ui",
"cwd": "${workspaceFolder}/frontend",
},
"problemMatcher": []
},
{
"label": "Prepare UI For Run",
"label": "Prepare Frontend For Run",
"type": "shell",
"command": "cp -r ./client/core/ts/dist/. ui/public/client/.",
"command": "cp -r ./client/core/ts/dist/. frontend/public/client/.",
"options": {
"cwd": "${workspaceFolder}",
},
"dependsOn": [
"Build TS Client Types",
"Build UI"
"Build Frontend"
],
"dependsOrder": "sequence",
"problemMatcher": []
},
{
"label": "Run UI",
"label": "Run Frontend",
"type": "shell",
"command": "yarn dev",
"options": {
"cwd": "${workspaceFolder}/ui",
"cwd": "${workspaceFolder}/frontend",
},
"dependsOn": ["Prepare UI For Run"],
"dependsOn": ["Prepare Frontend For Run"],
"problemMatcher": []
},
{
"label": "Init",
"dependsOn": [
"Build Backend",
"Init UI"
"Init Frontend"
],
"dependsOrder": "sequence",
"problemMatcher": []
@@ -171,7 +171,7 @@
"dependsOn": [
"Run Core",
"Run Periphery",
"Run UI"
"Run Frontend"
],
"problemMatcher": []
},

2842
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -8,7 +8,7 @@ members = [
]
[workspace.package]
version = "2.0.1-dev-2"
version = "2.0.0-dev-90"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -22,117 +22,128 @@ strip = "debuginfo"
# LOCAL
komodo_client = { path = "client/core/rs" }
periphery_client = { path = "client/periphery/rs" }
environment_file = { path = "lib/environment_file" }
environment = { path = "lib/environment" }
interpolate = { path = "lib/interpolate" }
secret_file = { path = "lib/secret_file" }
formatting = { path = "lib/formatting" }
transport = { path = "lib/transport" }
database = { path = "lib/database" }
encoding = { path = "lib/encoding" }
response = { path = "lib/response" }
command = { path = "lib/command" }
config = { path = "lib/config" }
logger = { path = "lib/logger" }
cache = { path = "lib/cache" }
noise = { path = "lib/noise" }
git = { path = "lib/git" }
# MOGH
serror = { version = "0.5.3", default-features = false }
slack = { version = "2.0.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
mogh_error = { version = "1.0.3", default-features = false }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
async_timing_util = "1.1.0"
mogh_auth_client = "1.2.2"
mogh_auth_server = "1.2.13"
mogh_secret_file = "1.0.1"
mogh_validations = "1.0.1"
mogh_rate_limit = "1.0.1"
partial_derive2 = "0.4.5"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "2.0.2"
mogh_resolver = "1.0.0"
mogh_config = "1.0.4"
mogh_logger = "1.3.2"
mogh_server = "1.4.5"
resolver_api = "3.0.0"
toml_pretty = "2.0.0"
mogh_cache = "1.1.1"
mogh_pki = "1.1.3"
mungos = "3.2.2"
svi = "1.2.0"
# ASYNC
reqwest = { version = "0.13.2", default-features = false, features = ["json", "stream", "form", "query", "rustls"] }
tokio = { version = "1.50.0", features = ["full"] }
tokio-util = { version = "0.7.18", features = ["io", "codec"] }
tokio-stream = { version = "0.1.18", features = ["sync"] }
pin-project-lite = "0.2.17"
futures-util = "0.3.32"
arc-swap = "1.9.0"
reqwest = { version = "0.12.24", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
tokio = { version = "1.48.0", features = ["full"] }
tokio-util = { version = "0.7.17", features = ["io", "codec"] }
tokio-stream = { version = "0.1.17", features = ["sync"] }
pin-project-lite = "0.2.16"
futures-util = "0.3.31"
arc-swap = "1.7.1"
# SERVER
tokio-tungstenite = { version = "0.29.0", features = ["rustls-tls-native-roots"] }
axum = { version = "0.8.8", features = ["ws", "json", "macros"] }
axum-extra = { version = "0.12.5", features = ["typed-header"] }
# OPENAPI
utoipa-scalar = { version = "0.3.0", features = ["axum"] }
utoipa = "5.4.0"
tokio-tungstenite = { version = "0.28.0", features = ["rustls-tls-native-roots"] }
axum-extra = { version = "0.12.1", features = ["typed-header"] }
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
axum = { version = "0.8.6", features = ["ws", "json", "macros"] }
# SER/DE
ipnetwork = { version = "0.21.1", features = ["serde"] }
indexmap = { version = "2.13.0", features = ["serde"] }
indexmap = { version = "2.12.0", features = ["serde"] }
serde = { version = "1.0.227", features = ["derive"] }
strum = { version = "0.28.0", features = ["derive"] }
strum = { version = "0.27.2", features = ["derive"] }
bson = { version = "2.15.0" } # must keep in sync with mongodb version
toml = "1.1.0"
serde_yaml_ng = "0.10.0"
serde_json = "1.0.149"
serde_qs = "1.1.0"
url = "2.5.8"
serde_json = "1.0.145"
serde_qs = "0.15.0"
toml = "0.9.8"
url = "2.5.7"
# ERROR
anyhow = "1.0.102"
thiserror = "2.0.18"
anyhow = "1.0.100"
thiserror = "2.0.17"
# LOGGING
tracing = "0.1.44"
opentelemetry-otlp = { version = "0.31.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.20", features = ["json"] }
opentelemetry-semantic-conventions = "0.31.0"
tracing-opentelemetry = "0.32.0"
opentelemetry = "0.31.0"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.60", features = ["derive"] }
clap = { version = "4.5.51", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.21.0", features = ["v4", "fast-rng", "serde"] }
rustls = { version = "0.23.37", features = ["aws-lc-rs"] }
data-encoding = "2.10.0"
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] } # locked back with octorust
rustls = { version = "0.23.35", features = ["aws-lc-rs"] }
pem-rfc7468 = { version = "1.0.0", features = ["alloc"] }
openidconnect = "4.0.1"
urlencoding = "2.1.3"
bcrypt = "0.19.0"
bcrypt = "0.17.1"
base64 = "0.22.1"
pkcs8 = "0.10.2"
snow = "0.10.0"
hmac = "0.12.1"
sha1 = "0.10.6"
sha2 = "0.10.9"
rand = "0.10.0"
rand = "0.9.2"
hex = "0.4.3"
spki = "0.7.3"
der = "0.7.10"
# SYSTEM
hickory-resolver = "0.25.2"
portable-pty = "0.9.0"
shell-escape = "0.1.5"
crossterm = "0.29.0"
bollard = "0.20.2"
sysinfo = "0.38.4"
bollard = "0.19.4"
sysinfo = "0.37.1"
shlex = "1.3.0"
# CLOUD
aws-config = "1.8.15"
aws-sdk-ec2 = "1.220.0"
aws-credential-types = "1.2.14"
aws-config = "1.8.10"
aws-sdk-ec2 = "1.184.0"
aws-credential-types = "1.2.9"
## CRON
english-to-cron = "0.1.7"
english-to-cron = "0.1.6"
chrono-tz = "0.10.4"
chrono = "0.4.44"
chrono = "0.4.42"
croner = "3.0.1"
# MISC
async-compression = { version = "0.4.41", features = ["tokio", "gzip"] }
async-compression = { version = "0.4.33", features = ["tokio", "gzip"] }
derive_builder = "0.20.2"
comfy-table = "7.2.2"
typeshare = "1.0.5"
comfy-table = "7.2.1"
typeshare = "1.0.4"
dashmap = "6.1.0"
wildcard = "0.3.0"
colored = "3.1.1"
bytes = "1.11.1"
regex = "1.12.3"
colored = "3.0.0"
bytes = "1.10.1"
regex = "1.12.2"

View File

@@ -1,7 +1,7 @@
## Builds the Komodo Core, Periphery, and Util binaries
## for a specific architecture. Requires OpenSSL 3 or later.
## for a specific architecture.
FROM rust:1.94.0-bookworm AS builder
FROM rust:1.90.0-bullseye AS builder
RUN cargo install cargo-strip
WORKDIR /builder

View File

@@ -1,9 +1,9 @@
## Builds the Komodo Core, Periphery, and Util binaries
## for a specific architecture. Requires OpenSSL 3 or later.
## for a specific architecture.
## Uses chef for dependency caching to help speed up back-to-back builds.
FROM lukemathwalker/cargo-chef:latest-rust-1.94.0-bookworm AS chef
FROM lukemathwalker/cargo-chef:latest-rust-1.90.0-bullseye AS chef
WORKDIR /builder
# Plan just the RECIPE to see if things have changed

View File

@@ -14,12 +14,12 @@ path = "src/main.rs"
[dependencies]
# local
komodo_client = { workspace = true, features = ["cli"] }
mogh_secret_file.workspace = true
mogh_pki.workspace = true
environment_file.workspace = true
komodo_client.workspace = true
database.workspace = true
mogh_config.workspace = true
mogh_logger.workspace = true
config.workspace = true
logger.workspace = true
noise.workspace = true
# external
futures-util.workspace = true
comfy-table.workspace = true
@@ -32,9 +32,7 @@ tracing.workspace = true
colored.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
bcrypt.workspace = true
chrono.workspace = true
rustls.workspace = true
tokio.workspace = true
serde.workspace = true
clap.workspace = true

View File

@@ -1,4 +1,4 @@
FROM rust:1.94.0-bullseye AS builder
FROM rust:1.90.0-bullseye AS builder
RUN cargo install cargo-strip
WORKDIR /builder

View File

@@ -1,7 +1,7 @@
## Assumes the latest binaries for x86_64 and aarch64 are already built (by binaries.Dockerfile).
## Since theres no heavy build here, QEMU multi-arch builds are fine for this image.
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:2
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
ARG X86_64_BINARIES=${BINARIES_IMAGE}-x86_64
ARG AARCH64_BINARIES=${BINARIES_IMAGE}-aarch64

View File

@@ -1,6 +1,6 @@
## Assumes the latest binaries for the required arch are already built (by binaries.Dockerfile).
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:2
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
# This is required to work with COPY --from
FROM ${BINARIES_IMAGE} AS binaries

View File

@@ -1,13 +0,0 @@
use anyhow::Context as _;
use komodo_client::api::read::GetCoreInfo;
pub async fn handle() -> anyhow::Result<()> {
let client = super::komodo_client().await?;
let info = client.read(GetCoreInfo {}).await?;
println!(
"{}",
serde_json::to_string_pretty(&info)
.context("Failed to serialize core info to JSON")?
);
Ok(())
}

View File

@@ -1,101 +0,0 @@
use anyhow::Context as _;
use database::bson::doc;
use komodo_client::{
api::read::FindUser,
entities::{
api_key::ApiKey, config::cli::args::create::CreateApiKey,
komodo_timestamp, random_string,
},
};
use serde_json::json;
use crate::config::cli_config;
pub async fn create(
CreateApiKey {
name,
for_user,
expires,
use_api,
}: &CreateApiKey,
) -> anyhow::Result<()> {
let expires = if let Some(expires_days) = expires {
// now + expires in ms
komodo_timestamp() + expires_days * 24 * 60 * 60 * 1000
} else {
0
};
if *use_api {
// USE API
let client = crate::command::komodo_client().await?;
let keys = if let Some(username) = for_user {
// For service user
let user = client
.read(FindUser {
user: username.to_string(),
})
.await?;
client
.write(
komodo_client::api::write::CreateApiKeyForServiceUser {
user_id: user.id,
name: name.clone().unwrap_or_default(),
expires,
},
)
.await?
} else {
// For self
client
.auth_manage(komodo_client::api::auth::manage::CreateApiKey {
name: name.clone().unwrap_or_default(),
expires: expires as u64,
})
.await?
};
println!(
"{}",
serde_json::to_string_pretty(&keys)
.context("Failed to serialize api keys to JSON")?
);
} else {
// USE DATABASE
let db = database::Client::new(&cli_config().database).await?;
let user = db
.users
.find_one(doc! { "username": for_user })
.await
.context("Failed to query database for user")?
.context("No user found with given username")?;
let key = format!("K_{}_K", random_string(40));
let secret = format!("S_{}_S", random_string(40));
let hashed_secret = bcrypt::hash(&secret, 10)
.context("Failed at hashing secret string")?;
db.api_keys
.insert_one(&ApiKey {
name: name.clone().unwrap_or_default(),
user_id: user.id,
key: key.clone(),
secret: hashed_secret.clone(),
created_at: komodo_timestamp(),
expires,
})
.await?;
println!(
"{}",
serde_json::to_string_pretty(
&json!({ "key": key, "secret": secret })
)
.context("Failed to serialize api keys to JSON")?
);
}
Ok(())
}

View File

@@ -1,13 +0,0 @@
use komodo_client::entities::config::cli::args::create::CreateCommand;
mod api_key;
mod onboarding_key;
pub async fn handle(command: &CreateCommand) -> anyhow::Result<()> {
match command {
CreateCommand::ApiKey(api_key) => api_key::create(api_key).await,
CreateCommand::OnboardingKey(onboarding_key) => {
onboarding_key::create(onboarding_key).await
}
}
}

View File

@@ -1,46 +0,0 @@
use anyhow::Context as _;
use komodo_client::entities::{
config::cli::args::create::CreateOnboardingKey, komodo_timestamp,
};
pub async fn create(
CreateOnboardingKey {
name,
expires,
private_key,
tags,
privileged,
copy_server,
create_builder,
}: &CreateOnboardingKey,
) -> anyhow::Result<()> {
let expires = if let Some(expires_days) = expires {
// now + expires in ms
komodo_timestamp() + expires_days * 24 * 60 * 60 * 1000
} else {
0
};
// USE API
let client = crate::command::komodo_client().await?;
let key = client
.write(komodo_client::api::write::CreateOnboardingKey {
name: name.clone().unwrap_or_default(),
expires,
private_key: private_key.clone(),
tags: tags.clone(),
privileged: *privileged,
copy_server: copy_server.clone().unwrap_or_default(),
create_builder: *create_builder,
})
.await?;
println!(
"{}",
serde_json::to_string_pretty(&key)
.context("Failed to serialize onboarding key to JSON")?
);
Ok(())
}

View File

@@ -358,11 +358,6 @@ async fn v1_downgrade(yes: bool) -> anyhow::Result<()> {
.await
.context("Failed to downgrade Server schema")?;
db.collection::<Document>("Deployment")
.update_many(doc! {}, doc! { "$set": { "info": null } })
.await
.context("Failed to downgrade Deployment schema")?;
info!(
"V1 Downgrade complete. Ready to downgrade to komodo-core:1 ✅"
);

View File

@@ -221,33 +221,6 @@ pub async fn handle(
Execution::SendAlert(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RemoveSwarmNodes(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RemoveSwarmStacks(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RemoveSwarmServices(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CreateSwarmConfig(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RotateSwarmConfig(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RemoveSwarmConfigs(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CreateSwarmSecret(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RotateSwarmSecret(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RemoveSwarmSecrets(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::ClearRepoCache(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -515,42 +488,6 @@ pub async fn handle(
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RemoveSwarmNodes(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RemoveSwarmStacks(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RemoveSwarmServices(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::CreateSwarmConfig(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RotateSwarmConfig(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RemoveSwarmConfigs(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::CreateSwarmSecret(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RotateSwarmSecret(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RemoveSwarmSecrets(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::ClearRepoCache(request) => client
.execute(request)
.await

View File

@@ -15,8 +15,6 @@ use wildcard::Wildcard;
use crate::config::cli_config;
pub mod container;
pub mod core_info;
pub mod create;
pub mod database;
pub mod execute;
pub mod list;

View File

@@ -2,7 +2,7 @@ use anyhow::{Context, anyhow};
use colored::Colorize;
use komodo_client::{
api::{
read::{GetServer, ListAllDockerContainers, ListServers},
read::{ListAllDockerContainers, ListServers},
terminal::InitTerminal,
},
entities::{
@@ -26,7 +26,7 @@ pub async fn handle_connect(
recreate,
}: &Connect,
) -> anyhow::Result<()> {
handle_terminal_forwarding(server, async {
handle_terminal_forwarding(async {
super::komodo_client()
.await?
.connect_server_terminal(
@@ -56,28 +56,25 @@ pub async fn handle_exec(
}: &Exec,
) -> anyhow::Result<()> {
let server = get_server(server.clone(), container).await?;
handle_terminal_forwarding(
&format!("{server}/{container}"),
async {
super::komodo_client()
.await?
.connect_container_terminal(
server,
container.to_string(),
None,
Some(InitTerminal {
command: Some(shell.to_string()),
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
mode: Some(ContainerTerminalMode::Exec),
}),
)
.await
},
)
handle_terminal_forwarding(async {
super::komodo_client()
.await?
.connect_container_terminal(
server,
container.to_string(),
None,
Some(InitTerminal {
command: Some(shell.to_string()),
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
mode: Some(ContainerTerminalMode::Exec),
}),
)
.await
})
.await
}
@@ -89,28 +86,25 @@ pub async fn handle_attach(
}: &Attach,
) -> anyhow::Result<()> {
let server = get_server(server.clone(), container).await?;
handle_terminal_forwarding(
&format!("{server}/{container}-attach"),
async {
super::komodo_client()
.await?
.connect_container_terminal(
server,
container.to_string(),
None,
Some(InitTerminal {
command: None,
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
mode: Some(ContainerTerminalMode::Attach),
}),
)
.await
},
)
handle_terminal_forwarding(async {
super::komodo_client()
.await?
.connect_container_terminal(
server,
container.to_string(),
None,
Some(InitTerminal {
command: None,
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
mode: Some(ContainerTerminalMode::Attach),
}),
)
.await
})
.await
}
@@ -138,14 +132,11 @@ async fn get_server(
}
if containers.len() == 1 {
let server_id = containers
return containers
.pop()
.context("Shouldn't happen")?
.server_id
.context("Container doesn't have server_id")?;
let server_name =
client.read(GetServer { server: server_id }).await?.name;
return Ok(server_name);
.context("Container doesn't have server_id");
}
let servers = containers
@@ -172,7 +163,6 @@ async fn get_server(
async fn handle_terminal_forwarding<
C: Future<Output = anyhow::Result<TerminalWebsocket>>,
>(
label: &str,
connect: C,
) -> anyhow::Result<()> {
// Need to forward multiple sources into ws write
@@ -257,16 +247,6 @@ async fn handle_terminal_forwarding<
let forward_read = async {
let mut stdout = tokio::io::stdout();
// Write connection message
if let Err(e) = write_connection_message(&mut stdout, label)
.await
.context("Failed to write text to stdout")
{
cancel.cancel();
return Some(e);
}
while let Some(msg) =
future_or_cancel(ws_read.receive_stdout(), &cancel).await
{
@@ -317,31 +297,6 @@ async fn handle_terminal_forwarding<
std::process::exit(0)
}
async fn write_connection_message(
stdout: &mut tokio::io::Stdout,
label: &str,
) -> anyhow::Result<()> {
// Use message without ansi for correct length
let message_clean = format!("# Connected to {label} (km) #");
let bounder = "=".repeat(message_clean.chars().count());
let message = format!(
"# {} to {} {} #",
"Connected".green().bold(),
label.bold(),
"(km)".dimmed()
);
stdout
.write_all(
format!("\n{bounder}\r\n{message}\r\n{bounder}\r\n").as_bytes(),
)
.await?;
let _ = stdout.flush().await;
Ok(())
}
fn resize_message() -> anyhow::Result<TerminalStdinMessage> {
let (cols, rows) = crossterm::terminal::size()
.context("Failed to get terminal size")?;

View File

@@ -26,9 +26,6 @@ pub async fn update(
UpdateUserCommand::SuperAdmin { enabled, yes } => {
update_super_admin(username, *enabled, *yes).await
}
UpdateUserCommand::Clear2fa { yes } => {
clear_2fa(username, *yes).await
}
}
}
@@ -123,20 +120,3 @@ async fn update_super_admin(
Ok(())
}
async fn clear_2fa(username: &str, yes: bool) -> anyhow::Result<()> {
println!("\n{}: Clear 2FA Methods\n", "Mode".dimmed());
println!(" - {}: {username}", "Username".dimmed());
crate::command::wait_for_enter("clear user 2FA methods", yes)?;
info!("Clearing 2FA methods...");
let db = database::Client::new(&cli_config().database).await?;
db.clear_user_2fa_methods(username).await?;
info!("2FA methods cleared ✅");
Ok(())
}

View File

@@ -3,6 +3,7 @@ use std::{path::PathBuf, sync::OnceLock};
use anyhow::Context;
use clap::Parser;
use colored::Colorize;
use environment_file::maybe_read_item_from_file;
use komodo_client::entities::{
config::{
DatabaseConfig,
@@ -13,7 +14,6 @@ use komodo_client::entities::{
},
logger::LogConfig,
};
use mogh_secret_file::maybe_read_item_from_file;
pub fn cli_args() -> &'static CliArgs {
static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
@@ -74,7 +74,7 @@ pub fn cli_config() -> &'static CliConfig {
"Config File Keywords".dimmed(),
);
}
let mut unparsed_config = (mogh_config::ConfigLoader {
let mut unparsed_config = (config::ConfigLoader {
paths: &config_paths
.iter()
.map(PathBuf::as_path)
@@ -166,7 +166,7 @@ pub fn cli_config() -> &'static CliConfig {
else {
panic!("Profile config is not Object type.");
};
mogh_config::merge_config(
config::merge_config(
unparsed_config,
profile_config.clone(),
env.komodo_cli_merge_nested_config,

View File

@@ -12,10 +12,7 @@ mod config;
async fn app() -> anyhow::Result<()> {
dotenvy::dotenv().ok();
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.expect("Failed to install default crypto provider");
mogh_logger::init(&config::cli_config().cli_logging)?;
logger::init(&config::cli_config().cli_logging)?;
let args = config::cli_args();
let env = config::cli_env();
let debug_load =
@@ -45,7 +42,6 @@ async fn app() -> anyhow::Result<()> {
}
Ok(())
}
args::Command::CoreInfo => command::core_info::handle().await,
args::Command::Container(container) => {
command::container::handle(container).await
}
@@ -56,9 +52,6 @@ async fn app() -> anyhow::Result<()> {
args::Command::Execute(args) => {
command::execute::handle(&args.execution, args.yes).await
}
args::Command::Create { command } => {
command::create::handle(command).await
}
args::Command::Update { command } => {
command::update::handle(command).await
}
@@ -72,7 +65,7 @@ async fn app() -> anyhow::Result<()> {
command::terminal::handle_attach(attach).await
}
args::Command::Key { command } => {
mogh_pki::cli::handle(command, mogh_pki::PkiKind::Mutual).await
noise::key::command::handle(command).await
}
args::Command::Database { command } => {
command::database::handle(command).await

View File

@@ -15,62 +15,63 @@ path = "src/main.rs"
[dependencies]
# local
komodo_client = { workspace = true, features = ["core"] }
komodo_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
mogh_validations.workspace = true
environment_file.workspace = true
interpolate.workspace = true
mogh_secret_file.workspace = true
secret_file.workspace = true
formatting.workspace = true
mogh_rate_limit.workspace = true
transport.workspace = true
database.workspace = true
encoding.workspace = true
response.workspace = true
command.workspace = true
mogh_config.workspace = true
mogh_logger.workspace = true
mogh_cache.workspace = true
mogh_pki.workspace = true
config.workspace = true
logger.workspace = true
cache.workspace = true
noise.workspace = true
git.workspace = true
# mogh
mogh_error = { workspace = true, features = ["axum"] }
mogh_auth_client = { workspace = true, features = ["utoipa"] }
mogh_auth_server.workspace = true
serror = { workspace = true, features = ["axum"] }
async_timing_util.workspace = true
partial_derive2.workspace = true
mogh_resolver.workspace = true
mogh_server.workspace = true
derive_variants.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
slack.workspace = true
svi.workspace = true
# external
aws-credential-types.workspace = true
english-to-cron.workspace = true
data-encoding.workspace = true
serde_yaml_ng.workspace = true
utoipa-scalar.workspace = true
openidconnect.workspace = true
jsonwebtoken.workspace = true
futures-util.workspace = true
aws-sdk-ec2.workspace = true
axum-server.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-config.workspace = true
tokio-util.workspace = true
axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
serde_yaml_ng.workspace = true
serde_qs.workspace = true
typeshare.workspace = true
chrono-tz.workspace = true
indexmap.workspace = true
wildcard.workspace = true
arc-swap.workspace = true
serde_qs.workspace = true
colored.workspace = true
dashmap.workspace = true
tracing.workspace = true
reqwest.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
bcrypt.workspace = true
croner.workspace = true
chrono.workspace = true
bcrypt.workspace = true
base64.workspace = true
rustls.workspace = true
utoipa.workspace = true
bytes.workspace = true
tokio.workspace = true
serde.workspace = true

View File

@@ -1,7 +1,7 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.94.0-trixie AS core-builder
FROM rust:1.90.0-trixie AS core-builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -17,13 +17,13 @@ RUN cargo build -p komodo_core --release && \
cargo build -p komodo_cli --release && \
cargo strip
# Build UI
FROM node:22.12-alpine AS ui-builder
# Build Frontend
FROM node:20.12-alpine AS frontend-builder
WORKDIR /builder
COPY ./ui ./ui
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd ui && yarn link komodo_client && yarn && yarn build
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM debian:trixie-slim
@@ -37,7 +37,7 @@ WORKDIR /app
# Copy
COPY ./config/core.config.toml /config/.default.config.toml
COPY --from=ui-builder /builder/ui/dist /app/ui
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=core-builder /builder/target/release/core /usr/local/bin/core
COPY --from=core-builder /builder/target/release/km /usr/local/bin/km
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno

View File

@@ -2,15 +2,15 @@
## Sets up the necessary runtime container dependencies for Komodo Core.
## Since theres no heavy build here, QEMU multi-arch builds are fine for this image.
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:2
ARG UI_IMAGE=ghcr.io/moghtech/komodo-ui:2
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
ARG FRONTEND_IMAGE=ghcr.io/moghtech/komodo-frontend:latest
ARG X86_64_BINARIES=${BINARIES_IMAGE}-x86_64
ARG AARCH64_BINARIES=${BINARIES_IMAGE}-aarch64
# This is required to work with COPY --from
FROM ${X86_64_BINARIES} AS x86_64
FROM ${AARCH64_BINARIES} AS aarch64
FROM ${UI_IMAGE} AS ui
FROM ${FRONTEND_IMAGE} AS frontend
# Final Image
FROM debian:trixie-slim
@@ -33,9 +33,9 @@ COPY --from=x86_64 /km /app/km/linux/amd64
COPY --from=aarch64 /km /app/km/linux/arm64
RUN mv /app/km/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/km
# Copy default config / static ui / deno binary
# Copy default config / static frontend / deno binary
COPY ./config/core.config.toml /config/.default.config.toml
COPY --from=ui /ui /app/ui
COPY --from=frontend /frontend /app/frontend
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
# Set $DENO_DIR and preload external Deno deps

View File

@@ -1,18 +1,18 @@
## Assumes the latest binaries for the required arch are already built (by binaries.Dockerfile).
## Sets up the necessary runtime container dependencies for Komodo Core.
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:2
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
# This is required to work with COPY --from
FROM ${BINARIES_IMAGE} AS binaries
# Build UI
FROM node:22.12-alpine AS ui-builder
# Build Frontend
FROM node:20.12-alpine AS frontend-builder
WORKDIR /builder
COPY ./ui ./ui
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd ui && yarn link komodo_client && yarn && yarn build
RUN cd frontend && yarn link komodo_client && yarn && yarn build
FROM debian:trixie-slim
@@ -22,7 +22,7 @@ RUN sh ./debian-deps.sh && rm ./debian-deps.sh
# Copy
COPY ./config/core.config.toml /config/.default.config.toml
COPY --from=ui-builder /builder/ui/dist /app/ui
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=binaries /core /usr/local/bin/core
COPY --from=binaries /km /usr/local/bin/km
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno

View File

@@ -16,26 +16,6 @@ pub async fn send_alert(
"{level} | If you see this message, then Alerter **{name}** is **working**\n{link}"
)
}
AlertData::SwarmUnhealthy { id, name, err } => {
let link = resource_link(ResourceTargetVariant::Swarm, id);
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | Swarm **{name}** is now **healthy**\n{link}"
)
}
SeverityLevel::Critical => {
let err = err
.as_ref()
.map(|e| format!("\n**error**: {e}"))
.unwrap_or_default();
format!(
"{level} | Swarm **{name}** is **unhealthy** ❌\n{link}{err}"
)
}
_ => unreachable!(),
}
}
AlertData::ServerVersionMismatch {
id,
name,
@@ -128,8 +108,6 @@ pub async fn send_alert(
AlertData::ContainerStateChange {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
from,
@@ -137,64 +115,37 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to = fmt_docker_container_state(to);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: **{swarm}**")
} else if let Some(server) = server_name {
format!("\nserver: **{server}**")
} else {
String::new()
};
format!(
"📦 Deployment **{name}** is now **{to}**{target}\nprevious: **{from}**\n{link}"
"📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: **{swarm}**")
} else if let Some(server) = server_name {
format!("\nserver: **{server}**")
} else {
String::new()
};
format!(
"⬆ Deployment **{name}** has an update available{target}\nimage: **{image}**\n{link}"
"⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}"
)
}
AlertData::DeploymentAutoUpdated {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: **{swarm}**")
} else if let Some(server) = server_name {
format!("\nserver: **{server}**")
} else {
String::new()
};
format!(
"⬆ Deployment **{name}** was updated automatically ⏫{target}\nimage: **{image}**\n{link}"
"⬆ Deployment **{name}** was updated automatically ⏫\nserver: **{server_name}**\nimage: **{image}**\n{link}"
)
}
AlertData::StackStateChange {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
from,
@@ -202,44 +153,26 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to = fmt_stack_state(to);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: **{swarm}**")
} else if let Some(server) = server_name {
format!("\nserver: **{server}**")
} else {
String::new()
};
format!(
"🥞 Stack **{name}** is now {to}{target}\nprevious: **{from}**\n{link}"
"🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
)
}
AlertData::StackImageUpdateAvailable {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: **{swarm}**")
} else if let Some(server) = server_name {
format!("\nserver: **{server}**")
} else {
String::new()
};
format!(
"⬆ Stack **{name}** has an update available{target}\nservice: **{service}**\nimage: **{image}**\n{link}"
"⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}"
)
}
AlertData::StackAutoUpdated {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
images,
@@ -248,15 +181,8 @@ pub async fn send_alert(
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images = images.join(", ");
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: **{swarm}**")
} else if let Some(server) = server_name {
format!("\nserver: **{server}**")
} else {
String::new()
};
format!(
"⬆ Stack **{name}** was updated automatically ⏫{target}\n{images_label}: **{images}**\n{link}"
"⬆ Stack **{name}** was updated automatically ⏫\nserver: **{server_name}**\n{images_label}: **{images}**\n{link}"
)
}
AlertData::AwsBuilderTerminationFailed {
@@ -306,9 +232,9 @@ pub async fn send_alert(
format!(
"{level} | {message}{}",
if details.is_empty() {
String::new()
format_args!("")
} else {
format!("\n{details}")
format_args!("\n{details}")
}
)
}
@@ -338,7 +264,7 @@ pub async fn send_alert(
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with request to Discord: {sanitized_error}"
"Error with slack request: {sanitized_error}"
))
})
}

View File

@@ -1,5 +1,6 @@
use anyhow::{Context, anyhow};
use database::mungos::{find::find_collect, mongodb::bson::doc};
use derive_variants::ExtractVariant;
use futures_util::future::join_all;
use interpolate::Interpolator;
use komodo_client::entities::{
@@ -80,14 +81,14 @@ pub async fn send_alert_to_alerter(
return Ok(());
}
let alert_variant: AlertDataVariant = (&alert.data).into();
let alert_type = alert.data.extract_variant();
// In the test case, we don't want the filters inside this
// block to stop the test from being sent to the alerting endpoint.
if alert_variant != AlertDataVariant::Test {
if alert_type != AlertDataVariant::Test {
// Don't send if alert type not configured on the alerter
if !alerter.config.alert_types.is_empty()
&& !alerter.config.alert_types.contains(&alert_variant)
&& !alerter.config.alert_types.contains(&alert_type)
{
return Ok(());
}
@@ -251,24 +252,6 @@ fn standard_alert_content(alert: &Alert) -> String {
"{level} | If you see this message, then Alerter {name} is working\n{link}",
)
}
AlertData::SwarmUnhealthy { id, name, err } => {
let link = resource_link(ResourceTargetVariant::Swarm, id);
match alert.level {
SeverityLevel::Ok => {
format!("{level} | Swarm {name} is now healthy\n{link}")
}
SeverityLevel::Critical => {
let err = err
.as_ref()
.map(|e| format!("\nerror: {e}"))
.unwrap_or_default();
format!(
"{level} | Swarm {name} is unhealthy ❌\n{link}{err}"
)
}
_ => unreachable!(),
}
}
AlertData::ServerVersionMismatch {
id,
name,
@@ -359,8 +342,6 @@ fn standard_alert_content(alert: &Alert) -> String {
AlertData::ContainerStateChange {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
from,
@@ -368,64 +349,37 @@ fn standard_alert_content(alert: &Alert) -> String {
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to_state = fmt_docker_container_state(to);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: {swarm}")
} else if let Some(server) = server_name {
format!("\nserver: {server}")
} else {
String::new()
};
format!(
"📦Deployment {name} is now {to_state}{target}\nprevious: {from}\n{link}",
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: {swarm}")
} else if let Some(server) = server_name {
format!("\nserver: {server}")
} else {
String::new()
};
format!(
"⬆ Deployment {name} has an update available{target}\nimage: {image}\n{link}",
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
)
}
AlertData::DeploymentAutoUpdated {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: {swarm}")
} else if let Some(server) = server_name {
format!("\nserver: {server}")
} else {
String::new()
};
format!(
"⬆ Deployment {name} was updated automatically{target}\nimage: {image}\n{link}",
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
)
}
AlertData::StackStateChange {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
from,
@@ -433,44 +387,26 @@ fn standard_alert_content(alert: &Alert) -> String {
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to_state = fmt_stack_state(to);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: {swarm}")
} else if let Some(server) = server_name {
format!("\nserver: {server}")
} else {
String::new()
};
format!(
"🥞 Stack {name} is now {to_state}{target}\nprevious: {from}\n{link}",
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
)
}
AlertData::StackImageUpdateAvailable {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: {swarm}")
} else if let Some(server) = server_name {
format!("\nserver: {server}")
} else {
String::new()
};
format!(
"⬆ Stack {name} has an update available{target}\nservice: {service}\nimage: {image}\n{link}",
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
)
}
AlertData::StackAutoUpdated {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
images,
@@ -479,15 +415,8 @@ fn standard_alert_content(alert: &Alert) -> String {
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images_str = images.join(", ");
let target = if let Some(swarm) = swarm_name {
format!("\nswarm: {swarm}")
} else if let Some(server) = server_name {
format!("\nserver: {server}")
} else {
String::new()
};
format!(
"⬆ Stack {name} was updated automatically ⏫{target}\n{images_label}: {images_str}\n{link}",
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
)
}
AlertData::AwsBuilderTerminationFailed {
@@ -537,9 +466,9 @@ fn standard_alert_content(alert: &Alert) -> String {
format!(
"{level} | {message}{}",
if details.is_empty() {
String::new()
format_args!("")
} else {
format!("\n{details}")
format_args!("\n{details}")
}
)
}

View File

@@ -31,7 +31,7 @@ pub async fn send_alert(
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with request to Ntfy: {sanitized_error}"
"Error with slack request: {sanitized_error}"
))
})
}

View File

@@ -28,7 +28,7 @@ pub async fn send_alert(
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with request to Pushover: {sanitized_error}"
"Error with slack request: {sanitized_error}"
))
})
}

View File

@@ -24,41 +24,6 @@ pub async fn send_alert(
];
(text, blocks.into())
}
AlertData::SwarmUnhealthy { id, name, err } => {
match alert.level {
SeverityLevel::Ok => {
let text =
format!("{level} | Swarm *{name}* is now *healthy*");
let blocks = vec![
Block::header(level),
Block::section(format!(
"Swarm *{name}* is now *healthy*"
)),
];
(text, blocks.into())
}
SeverityLevel::Critical => {
let text =
format!("{level} | Swarm *{name}* is *unhealthy* ❌");
let err = err
.as_ref()
.map(|e| format!("\nerror: {e}"))
.unwrap_or_default();
let blocks = vec![
Block::header(level),
Block::section(format!(
"Swarm *{name}* is *unhealthy* ❌{err}"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => unreachable!(),
}
}
AlertData::ServerVersionMismatch {
id,
name,
@@ -102,7 +67,7 @@ pub async fn send_alert(
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} is now *connected*"
"*{name}*{region} is now *connnected*"
)),
];
(text, blocks.into())
@@ -274,26 +239,19 @@ pub async fn send_alert(
}
AlertData::ContainerStateChange {
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
from,
to,
id,
..
} => {
let to = fmt_docker_container_state(to);
let text = format!("📦 Container *{name}* is now *{to}*");
let target = if let Some(swarm) = swarm_name {
format!("swarm: *{swarm}*\n")
} else if let Some(server) = server_name {
format!("server: *{server}*\n")
} else {
String::new()
};
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("{target}previous: {from}",)),
Block::section(format!(
"server: {server_name}\nprevious: {from}",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
@@ -304,24 +262,17 @@ pub async fn send_alert(
AlertData::DeploymentImageUpdateAvailable {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
server_id: _server_id,
image,
} => {
let text =
format!("⬆ Deployment *{name}* has an update available");
let target = if let Some(swarm) = swarm_name {
format!("swarm: *{swarm}*\n")
} else if let Some(server) = server_name {
format!("server: *{server}*\n")
} else {
String::new()
};
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("{target}image: *{image}*",)),
Block::section(format!(
"server: *{server_name}*\nimage: *{image}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
@@ -332,24 +283,17 @@ pub async fn send_alert(
AlertData::DeploymentAutoUpdated {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
server_id: _server_id,
image,
} => {
let text =
format!("⬆ Deployment *{name}* was updated automatically ⏫");
let target = if let Some(swarm) = swarm_name {
format!("swarm: *{swarm}*\n")
} else if let Some(server) = server_name {
format!("server: *{server}*\n")
} else {
String::new()
};
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("{target}image: *{image}*",)),
Block::section(format!(
"server: *{server_name}*\nimage: *{image}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Deployment,
id,
@@ -359,26 +303,19 @@ pub async fn send_alert(
}
AlertData::StackStateChange {
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
from,
to,
id,
..
} => {
let to = fmt_stack_state(to);
let text = format!("🥞 Stack *{name}* is now *{to}*");
let target = if let Some(swarm) = swarm_name {
format!("swarm: *{swarm}*\n")
} else if let Some(server) = server_name {
format!("server: *{server}*\n")
} else {
String::new()
};
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("{target}previous: *{from}*",)),
Block::section(format!(
"server: *{server_name}*\nprevious: *{from}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Stack,
id,
@@ -389,25 +326,16 @@ pub async fn send_alert(
AlertData::StackImageUpdateAvailable {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
server_id: _server_id,
service,
image,
} => {
let text = format!("⬆ Stack *{name}* has an update available");
let target = if let Some(swarm) = swarm_name {
format!("swarm: *{swarm}*\n")
} else if let Some(server) = server_name {
format!("server: *{server}*\n")
} else {
String::new()
};
let blocks = vec![
Block::header(text.clone()),
Block::section(format!(
"{target}service: *{service}*\nimage: *{image}*",
"server: *{server_name}*\nservice: *{service}*\nimage: *{image}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Stack,
@@ -419,10 +347,8 @@ pub async fn send_alert(
AlertData::StackAutoUpdated {
id,
name,
swarm_id: _swarm_id,
swarm_name,
server_id: _server_id,
server_name,
server_id: _server_id,
images,
} => {
let text =
@@ -430,18 +356,11 @@ pub async fn send_alert(
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images = images.join(", ");
let target = if let Some(swarm) = swarm_name {
format!("swarm: *{swarm}*\n")
} else if let Some(server) = server_name {
format!("server: *{server}*\n")
} else {
String::new()
};
let blocks = vec![
Block::header(text.clone()),
Block::section(
format!("{target}{images_label}: *{images}*",),
),
Block::section(format!(
"server: *{server_name}*\n{images_label}: *{images}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Stack,
id,
@@ -572,7 +491,7 @@ pub async fn send_alert(
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with request to Slack: {sanitized_error}"
"Error with slack request: {sanitized_error}"
))
})?;
Ok(())

159
bin/core/src/api/auth.rs Normal file
View File

@@ -0,0 +1,159 @@
use std::{sync::OnceLock, time::Instant};
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::auth::*, entities::user::User};
use reqwest::StatusCode;
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::{AddStatusCode, Json};
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::{
get_user_id_from_headers,
github::{self, client::github_oauth_client},
google::{self, client::google_oauth_client},
oidc::{self, client::oidc_client},
},
config::core_config,
helpers::query::get_user,
state::jwt_client,
};
use super::Variant;
/// Shared arguments available to every `/auth` request resolver.
#[derive(Default)]
pub struct AuthArgs {
  /// Incoming HTTP request headers. Used by resolvers that need to
  /// identify the caller (eg. `GetUser` extracts the user id from these).
  pub headers: HeaderMap,
}
/// The tagged union of all requests served under `/auth`.
///
/// Serialized as `{ "type": <variant>, "params": <inner> }` (see the
/// `serde(tag, content)` attribute), which is also the shape
/// reconstructed by `variant_handler` from the URL path.
#[typeshare]
#[derive(
  Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[args(AuthArgs)]
#[response(Response)]
#[error(serror::Error)]
#[variant_derive(Debug)]
#[serde(tag = "type", content = "params")]
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
pub enum AuthRequest {
  // Query which login methods are enabled on this instance.
  GetLoginOptions(GetLoginOptions),
  // Local (username/password) account registration.
  SignUpLocalUser(SignUpLocalUser),
  // Local (username/password) login.
  LoginLocalUser(LoginLocalUser),
  // Trade an exchange token for a JWT.
  ExchangeForJwt(ExchangeForJwt),
  // Fetch the authenticated caller's user record.
  GetUser(GetUser),
}
/// Builds the `/auth` router.
///
/// The base routes accept the tagged `AuthRequest` body directly (`/`)
/// or the variant name as a path segment (`/{variant}`). OAuth / OIDC
/// sub-routers are mounted only when the matching client is configured,
/// with a startup log line for each enabled login method.
pub fn router() -> Router {
  let mut app = Router::new()
    .route("/", post(handler))
    .route("/{variant}", post(variant_handler));
  // Local auth needs no extra routes, only the startup notice.
  if core_config().local_auth {
    info!("🔑 Local Login Enabled");
  }
  if github_oauth_client().is_some() {
    info!("🔑 Github Login Enabled");
    app = app.nest("/github", github::router());
  }
  if google_oauth_client().is_some() {
    info!("🔑 Google Login Enabled");
    app = app.nest("/google", google::router());
  }
  if core_config().oidc_enabled {
    info!("🔑 OIDC Login Enabled");
    app = app.nest("/oidc", oidc::router());
  }
  app
}
/// Adapter for `/auth/{variant}`: rebuilds the internally-tagged
/// `AuthRequest` JSON shape from the path segment plus the raw params
/// body, then delegates to the main `handler`.
async fn variant_handler(
  headers: HeaderMap,
  Path(Variant { variant }): Path<Variant>,
  Json(params): Json<serde_json::Value>,
) -> serror::Result<axum::response::Response> {
  // Reconstruct the { "type": ..., "params": ... } envelope that
  // AuthRequest's serde(tag, content) attributes expect.
  let tagged = json!({
    "type": variant,
    "params": params,
  });
  let request: AuthRequest = serde_json::from_value(tagged)?;
  handler(headers, Json(request)).await
}
/// Main `/auth` request handler: resolves the request against
/// `AuthArgs`, logging method, errors, and resolve time at debug level
/// under a per-request uuid.
async fn handler(
  headers: HeaderMap,
  Json(request): Json<AuthRequest>,
) -> serror::Result<axum::response::Response> {
  let start = Instant::now();
  // Correlates the three debug lines below for a single request.
  let req_id = Uuid::new_v4();
  debug!(
    "/auth request {req_id} | METHOD: {:?}",
    request.extract_variant()
  );
  let result = request.resolve(&AuthArgs { headers }).await;
  if let Err(e) = &result {
    debug!("/auth request {req_id} | error: {:#}", e.error);
  }
  let elapsed = start.elapsed();
  debug!("/auth request {req_id} | resolve time: {elapsed:?}");
  result.map(|inner| inner.0)
}
/// Lazily computes and caches the login options advertised to clients.
/// Config cannot change at runtime, so computing this once is safe.
///
/// NOTE(review): the name is misspelled ("reponse"); kept as-is so the
/// caller in this file keeps compiling.
fn login_options_reponse() -> &'static GetLoginOptionsResponse {
  static RESPONSE: OnceLock<GetLoginOptionsResponse> =
    OnceLock::new();
  RESPONSE.get_or_init(|| {
    let config = core_config();
    GetLoginOptionsResponse {
      registration_disabled: config.disable_user_registration,
      local: config.local_auth,
      // OAuth / OIDC methods are "enabled" when their client exists.
      github: github_oauth_client().is_some(),
      google: google_oauth_client().is_some(),
      oidc: oidc_client().load().is_some(),
    }
  })
}
impl Resolve<AuthArgs> for GetLoginOptions {
  /// Returns the cached login options. Infallible; requires no auth.
  async fn resolve(
    self,
    _: &AuthArgs,
  ) -> serror::Result<GetLoginOptionsResponse> {
    // Response type is Copy-able here — deref the cached static.
    let options = login_options_reponse();
    Ok(*options)
  }
}
impl Resolve<AuthArgs> for ExchangeForJwt {
  /// Redeems a one-time exchange token for a JWT via the jwt client.
  /// Errors from redemption convert into `serror::Error` via `?`.
  async fn resolve(
    self,
    _: &AuthArgs,
  ) -> serror::Result<ExchangeForJwtResponse> {
    let response =
      jwt_client().redeem_exchange_token(&self.token).await?;
    Ok(response)
  }
}
impl Resolve<AuthArgs> for GetUser {
  /// Identifies the caller from the request headers, then loads the
  /// matching user record. Both steps map failure to 401 Unauthorized.
  async fn resolve(
    self,
    AuthArgs { headers }: &AuthArgs,
  ) -> serror::Result<User> {
    let id_lookup = get_user_id_from_headers(headers).await;
    let user_id =
      id_lookup.status_code(StatusCode::UNAUTHORIZED)?;
    let user_lookup = get_user(&user_id).await;
    user_lookup.status_code(StatusCode::UNAUTHORIZED)
  }
}

View File

@@ -6,12 +6,16 @@ use std::{
use anyhow::Context;
use command::run_komodo_standard_command;
use config::merge_objects;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_document,
};
use interpolate::Interpolator;
use komodo_client::{
api::execute::{BatchExecutionResponse, BatchRunAction, RunAction},
api::{
execute::{BatchExecutionResponse, BatchRunAction, RunAction},
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
},
entities::{
FileFormat, JsonObject,
action::Action,
@@ -25,20 +29,12 @@ use komodo_client::{
},
parsers::parse_key_value_list,
};
use mogh_auth_client::api::manage::{
CreateApiKey, CreateApiKeyResponse,
};
use mogh_auth_server::api::manage::api_key::{
create_api_key, delete_api_key,
};
use mogh_config::merge_objects;
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use tokio::fs;
use crate::{
alert::send_alerts,
api::execute::ExecuteRequest,
auth::KomodoAuthImpl,
api::{execute::ExecuteRequest, user::UserArgs},
config::core_config,
helpers::{
query::{VariablesAndSecrets, get_variables_and_secrets},
@@ -66,15 +62,15 @@ impl Resolve<ExecuteArgs> for BatchRunAction {
"BatchRunAction",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchRunAction>(&self.pattern, user)
.await?,
@@ -87,7 +83,7 @@ impl Resolve<ExecuteArgs> for RunAction {
"RunAction",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
action = self.action,
@@ -95,12 +91,8 @@ impl Resolve<ExecuteArgs> for RunAction {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut action = get_check_permissions::<Action>(
&self.action,
user,
@@ -143,87 +135,72 @@ impl Resolve<ExecuteArgs> for RunAction {
let args = serde_json::to_string(&args)
.context("Failed to serialize action run arguments")?;
let CreateApiKeyResponse { key, secret } = create_api_key(
&KomodoAuthImpl,
action_user().id.clone(),
CreateApiKey {
name: update.id.clone(),
expires: 0,
},
)
let CreateApiKeyResponse { key, secret } = CreateApiKey {
name: update.id.clone(),
expires: 0,
}
.resolve(&UserArgs {
user: action_user().to_owned(),
})
.await?;
// Do next steps in seperate error handling block,
// and delete the API key before unwrapping the error.
// If Komodo shuts down during these steps, there will
// be a dangling api key in the DB with user_id: "000000000000000000000002".
// These need to be
let res = async {
let contents = &mut action.config.file_contents;
let contents = &mut action.config.file_contents;
// Wrap the file contents in the execution context.
*contents = full_contents(contents, &args, &key, &secret);
// Wrap the file contents in the execution context.
*contents = full_contents(contents, &args, &key, &secret);
let replacers = interpolate(
contents,
&mut update,
key.clone(),
secret.clone(),
)
.await?
.into_iter()
.collect::<Vec<_>>();
let replacers =
interpolate(contents, &mut update, key.clone(), secret.clone())
.await?
.into_iter()
.collect::<Vec<_>>();
let file = format!("{}.ts", random_string(10));
let path = core_config().action_directory.join(&file);
let file = format!("{}.ts", random_string(10));
let path = core_config().action_directory.join(&file);
mogh_secret_file::write_async(&path, contents)
.await
.with_context(|| {
format!("Failed to write action file to {path:?}")
})?;
secret_file::write_async(&path, contents)
.await
.with_context(|| {
format!("Failed to write action file to {path:?}")
})?;
let CoreConfig { ssl_enabled, .. } = core_config();
let CoreConfig { ssl_enabled, .. } = core_config();
let https_cert_flag = if *ssl_enabled {
" --unsafely-ignore-certificate-errors=localhost"
} else {
""
};
let https_cert_flag = if *ssl_enabled {
" --unsafely-ignore-certificate-errors=localhost"
} else {
""
};
let reload = if action.config.reload_deno_deps {
" --reload"
} else {
""
};
let reload = if action.config.reload_deno_deps {
" --reload"
} else {
""
};
let mut res = run_komodo_standard_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
format!(
"deno run --allow-all{https_cert_flag}{reload} {}",
path.display()
),
)
.await;
res.stdout = svi::replace_in_string(&res.stdout, &replacers)
.replace(&key, "<ACTION_API_KEY>");
res.stderr = svi::replace_in_string(&res.stderr, &replacers)
.replace(&secret, "<ACTION_API_SECRET>");
cleanup_run(file + ".js", &path).await;
update.logs.push(res);
update.finalize();
mogh_error::Ok(update)
}
let mut res = run_komodo_standard_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
format!(
"deno run --allow-all{https_cert_flag}{reload} {}",
path.display()
),
)
.await;
if let Err(e) =
delete_api_key(&KomodoAuthImpl, &action_user().id, key).await
res.stdout = svi::replace_in_string(&res.stdout, &replacers)
.replace(&key, "<ACTION_API_KEY>");
res.stderr = svi::replace_in_string(&res.stderr, &replacers)
.replace(&secret, "<ACTION_API_SECRET>");
cleanup_run(file + ".js", &path).await;
if let Err(e) = (DeleteApiKey { key })
.resolve(&UserArgs {
user: action_user().to_owned(),
})
.await
{
warn!(
"Failed to delete API key after action execution | {:#}",
@@ -231,7 +208,8 @@ impl Resolve<ExecuteArgs> for RunAction {
);
};
let update = res?;
update.logs.push(res);
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with update_update.
@@ -279,7 +257,7 @@ async fn interpolate(
update: &mut Update,
key: String,
secret: String,
) -> mogh_error::Result<HashSet<(String, String)>> {
) -> serror::Result<HashSet<(String, String)>> {
let VariablesAndSecrets {
variables,
mut secrets,

View File

@@ -12,9 +12,9 @@ use komodo_client::{
permission::PermissionLevel,
},
};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
alert::send_alert_to_alerter, helpers::update::update_update,
@@ -28,7 +28,7 @@ impl Resolve<ExecuteArgs> for TestAlerter {
"TestAlerter",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
alerter = self.alerter,
@@ -36,11 +36,7 @@ impl Resolve<ExecuteArgs> for TestAlerter {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let alerter = get_check_permissions::<Alerter>(
&self.alerter,
@@ -98,7 +94,7 @@ impl Resolve<ExecuteArgs> for SendAlert {
"SendAlert",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
request = format!("{self:?}"),
@@ -106,11 +102,7 @@ impl Resolve<ExecuteArgs> for SendAlert {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let alerters = list_full_for_user::<Alerter>(
Default::default(),

View File

@@ -37,8 +37,8 @@ use komodo_client::{
user::auto_redeploy_user,
},
};
use mogh_resolver::Resolve;
use periphery_client::api;
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
use uuid::Uuid;
@@ -75,15 +75,15 @@ impl Resolve<ExecuteArgs> for BatchRunBuild {
"BatchRunBuild",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchRunBuild>(&self.pattern, user)
.await?,
@@ -96,7 +96,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
"RunBuild",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
build = self.build,
@@ -104,12 +104,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut build = get_check_permissions::<Build>(
&self.build,
user,
@@ -435,7 +431,7 @@ async fn handle_early_return(
build_id: String,
build_name: String,
is_cancel: bool,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
@@ -526,7 +522,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
"CancelBuild",
skip(user, update),
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
build = self.build,
@@ -534,12 +530,8 @@ impl Resolve<ExecuteArgs> for CancelBuild {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let build = get_check_permissions::<Build>(
&self.build,
user,
@@ -634,7 +626,7 @@ async fn handle_post_build_redeploy(build_id: &str) {
.resolve(&ExecuteArgs {
user,
update,
task_id: Uuid::new_v4(),
id: Uuid::new_v4(),
})
.await
}
@@ -668,7 +660,7 @@ async fn validate_account_extract_registry_tokens(
..
}: &Build,
// Maps (domain, account) -> token
) -> mogh_error::Result<Vec<(String, String, String)>> {
) -> serror::Result<Vec<(String, String, String)>> {
let mut res = HashMap::with_capacity(image_registry.capacity());
for (domain, account) in image_registry

View File

@@ -1,39 +1,37 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use cache::TimeoutCache;
use formatting::format_serror;
use interpolate::Interpolator;
use komodo_client::{
api::execute::*,
entities::{
SwarmOrServer, Version,
Version,
build::{Build, ImageRegistryConfig},
deployment::{
Deployment, DeploymentImage, DeploymentInfo,
extract_registry_domain,
Deployment, DeploymentImage, extract_registry_domain,
},
komodo_timestamp, optional_string,
permission::PermissionLevel,
server::Server,
update::{Log, Update},
user::User,
},
};
use mogh_cache::TimeoutCache;
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use periphery_client::api;
use reqwest::StatusCode;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client,
query::{VariablesAndSecrets, get_variables_and_secrets},
registry_token,
swarm::swarm_request,
update::update_update,
},
monitor::{refresh_server_cache, refresh_swarm_cache},
resource::{self, setup_deployment_execution},
monitor::update_cache_for_server,
permission::get_check_permissions,
resource,
state::action_states,
};
@@ -55,15 +53,15 @@ impl Resolve<ExecuteArgs> for BatchDeploy {
"BatchDeploy",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDeploy>(&self.pattern, user)
.await?,
@@ -71,12 +69,38 @@ impl Resolve<ExecuteArgs> for BatchDeploy {
}
}
/// Common pre-flight for deployment executions: checks the caller has
/// Execute permission on the Deployment, then resolves and validates
/// its attached Server.
///
/// Errors if the caller lacks permission, the Deployment has no
/// `server_id` configured, or the attached Server is disabled.
#[instrument("SetupDeploy", skip_all)]
async fn setup_deployment_execution(
  deployment: &str,
  user: &User,
) -> anyhow::Result<(Deployment, Server)> {
  // Permission check doubles as the lookup — returns the Deployment.
  let deployment = get_check_permissions::<Deployment>(
    deployment,
    user,
    PermissionLevel::Execute.into(),
  )
  .await?;
  // A deployment can exist without a server attached; refuse early.
  if deployment.config.server_id.is_empty() {
    return Err(anyhow!("Deployment has no Server configured"));
  }
  let server =
    resource::get::<Server>(&deployment.config.server_id).await?;
  // Disabled servers must not receive execution traffic.
  if !server.config.enabled {
    return Err(anyhow!("Attached Server is not enabled"));
  }
  Ok((deployment, server))
}
impl Resolve<ExecuteArgs> for Deploy {
#[instrument(
"Deploy",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -86,21 +110,10 @@ impl Resolve<ExecuteArgs> for Deploy {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (mut deployment, swarm_or_server) =
setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
swarm_or_server.verify_has_target()?;
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (mut deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -115,7 +128,7 @@ impl Resolve<ExecuteArgs> for Deploy {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
// This block resolves the attached Build to an actual versioned image
@@ -210,72 +223,27 @@ impl Resolve<ExecuteArgs> for Deploy {
update.version = version;
update_update(update.clone()).await?;
let deployment_id = deployment.id.clone();
match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => {
match swarm_request(
&swarm.config.server_ids,
api::swarm::CreateSwarmService {
deployment,
registry_token,
replacers: secret_replacers.into_iter().collect(),
},
)
.await
{
Ok(logs) => {
refresh_swarm_cache(&swarm, true).await;
update.logs.extend(logs)
}
Err(e) => {
update.push_error_log(
"Create Swarm Service",
format_serror(&e.into()),
);
}
};
}
SwarmOrServer::Server(server) => {
match periphery_client(&server)
.await?
.request(api::container::RunContainer {
deployment,
stop_signal: self.stop_signal,
stop_time: self.stop_time,
registry_token,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
Ok(log) => {
refresh_server_cache(&server, true).await;
update.logs.push(log)
}
Err(e) => {
update.push_error_log(
"Deploy Container",
format_serror(&e.into()),
);
}
};
}
}
if let Err(e) = resource::update_info::<Deployment>(
&deployment_id,
&DeploymentInfo {
latest_image_digest: Default::default(),
},
)
.await
match periphery_client(&server)
.await?
.request(api::container::Deploy {
deployment,
stop_signal: self.stop_signal,
stop_time: self.stop_time,
registry_token,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
warn!(
"Failed to update deployment {} info after deploy | {e:#}",
deployment_id
);
}
Ok(log) => update.logs.push(log),
Err(e) => {
update.push_error_log(
"Deploy Container",
format_serror(&e.into()),
);
}
};
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -405,7 +373,7 @@ pub async fn pull_deployment_inner(
Err(e) => Log::error("Pull image", format_serror(&e.into())),
};
refresh_server_cache(server, true).await;
update_cache_for_server(server, true).await;
anyhow::Ok(log)
}
.await;
@@ -422,7 +390,7 @@ impl Resolve<ExecuteArgs> for PullDeployment {
"PullDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -430,25 +398,10 @@ impl Resolve<ExecuteArgs> for PullDeployment {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!("PullDeployment should not be called for Deployment in Swarm Mode")
.status_code(StatusCode::BAD_REQUEST),
);
};
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -462,7 +415,7 @@ impl Resolve<ExecuteArgs> for PullDeployment {
action_state.update(|state| state.pulling = true)?;
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = pull_deployment_inner(deployment, &server).await?;
@@ -480,7 +433,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
"StartDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -488,25 +441,10 @@ impl Resolve<ExecuteArgs> for StartDeployment {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!("StartDeployment should not be called for Deployment in Swarm Mode")
.status_code(StatusCode::BAD_REQUEST),
);
};
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -521,7 +459,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)
@@ -539,7 +477,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -552,7 +490,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
"RestartDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -560,25 +498,10 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!("RestartDeployment should not be called for Deployment in Swarm Mode")
.status_code(StatusCode::BAD_REQUEST),
);
};
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -593,7 +516,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)
@@ -613,7 +536,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -626,7 +549,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
"PauseDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -634,25 +557,10 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!("PauseDeployment should not be called for Deployment in Swarm Mode")
.status_code(StatusCode::BAD_REQUEST),
);
};
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -667,7 +575,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)
@@ -685,7 +593,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -698,7 +606,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
"UnpauseDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -706,25 +614,10 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!("UnpauseDeployment should not be called for Deployment in Swarm Mode")
.status_code(StatusCode::BAD_REQUEST),
);
};
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -739,7 +632,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)
@@ -759,7 +652,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -772,7 +665,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
"StopDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -782,25 +675,10 @@ impl Resolve<ExecuteArgs> for StopDeployment {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!("StopDeployment should not be called for Deployment in Swarm Mode")
.status_code(StatusCode::BAD_REQUEST),
);
};
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -815,7 +693,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)
@@ -841,7 +719,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -865,15 +743,15 @@ impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
"BatchDestroyDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDestroyDeployment>(
&self.pattern,
@@ -889,7 +767,7 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
"DestroyDeployment",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
@@ -899,20 +777,10 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
swarm_or_server.verify_has_target()?;
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -927,65 +795,34 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => {
match swarm_request(
&swarm.config.server_ids,
api::swarm::RemoveSwarmServices {
services: vec![deployment.name],
},
)
.await
{
Ok(log) => {
refresh_swarm_cache(&swarm, true).await;
log
}
Err(e) => Log::error(
"Remove Swarm Service",
format_serror(
&e.context("Failed to remove swarm service").into(),
),
),
}
}
SwarmOrServer::Server(server) => {
match periphery_client(&server)
.await?
.request(api::container::RemoveContainer {
name: deployment.name,
signal: self
.signal
.unwrap_or(deployment.config.termination_signal)
.into(),
time: self
.time
.unwrap_or(deployment.config.termination_timeout)
.into(),
})
.await
{
Ok(log) => {
refresh_server_cache(&server, true).await;
log
}
Err(e) => Log::error(
"Destroy Container",
format_serror(
&e.context("Failed to destroy container").into(),
),
),
}
}
let log = match periphery_client(&server)
.await?
.request(api::container::RemoveContainer {
name: deployment.name,
signal: self
.signal
.unwrap_or(deployment.config.termination_signal)
.into(),
time: self
.time
.unwrap_or(deployment.config.termination_timeout)
.into(),
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
update.finalize();
update_cache_for_server(&server, true).await;
update_update(update.clone()).await?;
Ok(update)

View File

@@ -14,28 +14,22 @@ use komodo_client::{
RotateAllServerKeys, RotateCoreKeys,
},
entities::{
SwarmOrServer, deployment::DeploymentState, server::ServerState,
deployment::DeploymentState, server::ServerState,
stack::StackState,
},
};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use periphery_client::api;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use tokio::sync::Mutex;
use crate::{
api::{
execute::ExecuteArgs,
write::{
check_deployment_for_update_inner, check_stack_for_update_inner,
},
api::execute::{
ExecuteArgs, pull_deployment_inner, pull_stack_inner,
},
config::{core_config, core_keys},
helpers::{
periphery_client, query::find_swarm_or_server,
update::update_update,
},
helpers::{periphery_client, update::update_update},
resource::rotate_server_keys,
state::{
db_client, deployment_status_cache, server_status_cache,
@@ -54,18 +48,14 @@ impl Resolve<ExecuteArgs> for ClearRepoCache {
"ClearRepoCache",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -137,18 +127,14 @@ impl Resolve<ExecuteArgs> for BackupCoreDatabase {
"BackupCoreDatabase",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -194,18 +180,14 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
"GlobalAutoUpdate",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -227,9 +209,6 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
let servers = find_collect(&db_client().servers, None, None)
.await
.context("Failed to query for servers from database")?;
let swarms = find_collect(&db_client().swarms, None, None)
.await
.context("Failed to query for swarms from database")?;
let query = doc! {
"$or": [
@@ -238,10 +217,11 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
]
};
let stacks =
find_collect(&db_client().stacks, query.clone(), None)
.await
.context("Failed to query for stacks from database")?;
let (stacks, repos) = tokio::try_join!(
find_collect(&db_client().stacks, query.clone(), None),
find_collect(&db_client().repos, None, None)
)
.context("Failed to query for resources from database")?;
let server_status_cache = server_status_cache();
let stack_status_cache = stack_status_cache();
@@ -254,23 +234,10 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
else {
continue;
};
// Only pull running stacks.
if !matches!(status.curr.state, StackState::Running) {
continue;
}
let swarm_or_server = find_swarm_or_server(
&stack.config.swarm_id,
&swarms,
&stack.config.server_id,
&servers,
)?;
if let SwarmOrServer::None = &swarm_or_server {
continue;
}
if let Some(server) =
servers.iter().find(|s| s.id == stack.config.server_id)
// This check is probably redundant along with running check
@@ -281,28 +248,39 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
.map(|s| matches!(s.state, ServerState::Ok))
.unwrap_or_default()
{
if let Err(e) = check_stack_for_update_inner(
stack.id,
&swarm_or_server,
self.skip_auto_update,
true,
false,
)
.await
let name = stack.name.clone();
let repo = if stack.config.linked_repo.is_empty() {
None
} else {
let Some(repo) =
repos.iter().find(|r| r.id == stack.config.linked_repo)
else {
update.push_error_log(
&format!("Pull Stack {name}"),
format!(
"Did not find any Repo matching {}",
stack.config.linked_repo
),
);
continue;
};
Some(repo.clone())
};
if let Err(e) =
pull_stack_inner(stack, Vec::new(), server, repo, None)
.await
{
update.push_error_log(
&format!("Check Stack {}", stack.name),
&format!("Pull Stack {name}"),
format_serror(&e.into()),
);
} else {
if !update.logs[0].stdout.is_empty() {
update.logs[0].stdout.push('\n');
}
update.logs[0].stdout.push_str(&format!(
"Checked Stack {}",
bold(&stack.name)
));
update.logs[0]
.stdout
.push_str(&format!("Pulled Stack {}", bold(name)));
}
}
}
@@ -312,51 +290,43 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
find_collect(&db_client().deployments, query, None)
.await
.context("Failed to query for deployments from database")?;
for deployment in deployments {
let Some(status) =
deployment_status_cache.get(&deployment.id).await
else {
continue;
};
// Only pull running deployments.
if !matches!(status.curr.state, DeploymentState::Running) {
continue;
}
let swarm_or_server = find_swarm_or_server(
&deployment.config.swarm_id,
&swarms,
&deployment.config.server_id,
&servers,
)?;
if let SwarmOrServer::None = &swarm_or_server {
continue;
}
let name = deployment.name.clone();
if let Err(e) = check_deployment_for_update_inner(
deployment,
&swarm_or_server,
self.skip_auto_update,
true,
)
.await
if let Some(server) =
servers.iter().find(|s| s.id == deployment.config.server_id)
// This check is probably redundant along with running check
// but shouldn't hurt
&& server_status_cache
.get(&server.id)
.await
.map(|s| matches!(s.state, ServerState::Ok))
.unwrap_or_default()
{
update.push_error_log(
&format!("Check Deployment {name}"),
format_serror(&e.into()),
);
} else {
if !update.logs[0].stdout.is_empty() {
update.logs[0].stdout.push('\n');
let name = deployment.name.clone();
if let Err(e) =
pull_deployment_inner(deployment, server).await
{
update.push_error_log(
&format!("Pull Deployment {name}"),
format_serror(&e.into()),
);
} else {
if !update.logs[0].stdout.is_empty() {
update.logs[0].stdout.push('\n');
}
update.logs[0].stdout.push_str(&format!(
"Pulled Deployment {}",
bold(name)
));
}
update.logs[0]
.stdout
.push_str(&format!("Checked Deployment {}", bold(name)));
}
}
@@ -380,18 +350,14 @@ impl Resolve<ExecuteArgs> for RotateAllServerKeys {
"RotateAllServerKeys",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -498,7 +464,7 @@ impl Resolve<ExecuteArgs> for RotateCoreKeys {
"RotateCoreKeys",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
force = self.force,
@@ -506,11 +472,7 @@ impl Resolve<ExecuteArgs> for RotateCoreKeys {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -561,10 +523,7 @@ impl Resolve<ExecuteArgs> for RotateCoreKeys {
);
}
let public_key = core_keys
.rotate(mogh_pki::PkiKind::Mutual)
.await?
.into_inner();
let public_key = core_keys.rotate().await?.into_inner();
info!("New Public Key: {public_key}");

View File

@@ -6,6 +6,7 @@ use axum::{
};
use axum_extra::{TypedHeader, headers::ContentType};
use database::mungos::by_id::find_one_by_id;
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures_util::future::join_all;
use komodo_client::{
@@ -17,18 +18,17 @@ use komodo_client::{
user::User,
},
};
use mogh_auth_server::middleware::authenticate_request;
use mogh_error::Json;
use mogh_error::JsonString;
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use response::JsonString;
use serde::{Deserialize, Serialize};
use serde_json::json;
use strum::{Display, EnumDiscriminants};
use serror::Json;
use strum::Display;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::KomodoAuthImpl,
auth::auth_request,
helpers::update::{init_execution_update, update_update},
resource::{KomodoResource, list_full_for_user_using_pattern},
state::db_client,
@@ -43,29 +43,55 @@ mod procedure;
mod repo;
mod server;
mod stack;
mod swarm;
mod sync;
use super::Variant;
pub use {
deployment::pull_deployment_inner, stack::pull_stack_inner,
};
pub struct ExecuteArgs {
/// The task id.
/// Unique for every '/execute' call.
pub task_id: Uuid,
/// The execution id.
/// Unique for every /execute call.
pub id: Uuid,
pub user: User,
pub update: Update,
}
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumDiscriminants,
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[strum_discriminants(name(ExecuteRequestMethod), derive(Display))]
#[variant_derive(Debug, Display)]
#[args(ExecuteArgs)]
#[response(JsonString)]
#[error(mogh_error::Error)]
#[error(serror::Error)]
#[serde(tag = "type", content = "params")]
pub enum ExecuteRequest {
// ==== SERVER ====
StartContainer(StartContainer),
RestartContainer(RestartContainer),
PauseContainer(PauseContainer),
UnpauseContainer(UnpauseContainer),
StopContainer(StopContainer),
DestroyContainer(DestroyContainer),
StartAllContainers(StartAllContainers),
RestartAllContainers(RestartAllContainers),
PauseAllContainers(PauseAllContainers),
UnpauseAllContainers(UnpauseAllContainers),
StopAllContainers(StopAllContainers),
PruneContainers(PruneContainers),
DeleteNetwork(DeleteNetwork),
PruneNetworks(PruneNetworks),
DeleteImage(DeleteImage),
PruneImages(PruneImages),
DeleteVolume(DeleteVolume),
PruneVolumes(PruneVolumes),
PruneDockerBuilders(PruneDockerBuilders),
PruneBuildx(PruneBuildx),
PruneSystem(PruneSystem),
// ==== STACK ====
DeployStack(DeployStack),
BatchDeployStack(BatchDeployStack),
@@ -116,46 +142,12 @@ pub enum ExecuteRequest {
RunAction(RunAction),
BatchRunAction(BatchRunAction),
// ==== SYNC ====
RunSync(RunSync),
// ==== ALERTER ====
TestAlerter(TestAlerter),
SendAlert(SendAlert),
// ==== SERVER ====
StartContainer(StartContainer),
RestartContainer(RestartContainer),
PauseContainer(PauseContainer),
UnpauseContainer(UnpauseContainer),
StopContainer(StopContainer),
DestroyContainer(DestroyContainer),
StartAllContainers(StartAllContainers),
RestartAllContainers(RestartAllContainers),
PauseAllContainers(PauseAllContainers),
UnpauseAllContainers(UnpauseAllContainers),
StopAllContainers(StopAllContainers),
PruneContainers(PruneContainers),
DeleteNetwork(DeleteNetwork),
PruneNetworks(PruneNetworks),
DeleteImage(DeleteImage),
PruneImages(PruneImages),
DeleteVolume(DeleteVolume),
PruneVolumes(PruneVolumes),
PruneDockerBuilders(PruneDockerBuilders),
PruneBuildx(PruneBuildx),
PruneSystem(PruneSystem),
// ==== SWARM ====
RemoveSwarmNodes(RemoveSwarmNodes),
RemoveSwarmStacks(RemoveSwarmStacks),
RemoveSwarmServices(RemoveSwarmServices),
CreateSwarmConfig(CreateSwarmConfig),
RotateSwarmConfig(RotateSwarmConfig),
RemoveSwarmConfigs(RemoveSwarmConfigs),
CreateSwarmSecret(CreateSwarmSecret),
RotateSwarmSecret(RotateSwarmSecret),
RemoveSwarmSecrets(RemoveSwarmSecrets),
// ==== SYNC ====
RunSync(RunSync),
// ==== MAINTENANCE ====
ClearRepoCache(ClearRepoCache),
@@ -169,16 +161,14 @@ pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(
authenticate_request::<KomodoAuthImpl, true>,
))
.layer(middleware::from_fn(auth_request))
}
async fn variant_handler(
user: Extension<User>,
Path(Variant { variant }): Path<Variant>,
Json(params): Json<serde_json::Value>,
) -> mogh_error::Result<(TypedHeader<ContentType>, String)> {
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let req: ExecuteRequest = serde_json::from_value(json!({
"type": variant,
"params": params,
@@ -189,7 +179,7 @@ async fn variant_handler(
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ExecuteRequest>,
) -> mogh_error::Result<(TypedHeader<ContentType>, String)> {
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let res = match inner_handler(request, user).await? {
ExecutionResult::Single(update) => serde_json::to_string(&update)
.context("Failed to serialize Update")?,
@@ -249,19 +239,11 @@ pub fn inner_handler(
async move {
let log = match handle.await {
Ok(Err(e)) => {
warn!(
api = "Execute",
task_id = task_id.to_string(),
"/execute request task error: {e:#}",
);
warn!("/execute request {task_id} task error: {e:#}",);
Log::error("Task Error", format_serror(&e.into()))
}
Err(e) => {
warn!(
api = "Execute",
task_id = task_id.to_string(),
"/execute request spawn error: {e:?}",
);
warn!("/execute request {task_id} spawn error: {e:?}",);
Log::error("Spawn Error", format!("{e:#?}"))
}
_ => return,
@@ -275,8 +257,8 @@ pub fn inner_handler(
let mut update =
find_one_by_id(&db_client().updates, &update_id)
.await
.context("Failed to query to db")?
.context("No Update exists with given id")?;
.context("failed to query to db")?
.context("no update exists with given id")?;
update.logs.push(log);
update.finalize();
update_update(update).await
@@ -285,10 +267,7 @@ pub fn inner_handler(
if let Err(e) = res {
warn!(
api = "Execute",
task_id = task_id.to_string(),
update_id,
"Failed to modify Update with task error log | {e:#}"
"failed to update update with task error log | {e:#}"
);
}
}
@@ -304,44 +283,25 @@ async fn task(
user: User,
update: Update,
) -> anyhow::Result<String> {
let method: ExecuteRequestMethod = (&request).into();
let user_id = user.id.clone();
let username = user.username.clone();
let variant = request.extract_variant();
info!(
api = "Execute",
task_id = id.to_string(),
method = method.to_string(),
user_id,
username,
"EXECUTE REQUEST",
"/execute request {id} | {variant} | user: {}",
user.username
);
let res = match request
.resolve(&ExecuteArgs {
user,
update,
task_id: id,
})
.await
{
Err(e) => Err(e.error),
Ok(JsonString::Err(e)) => Err(
anyhow::Error::from(e).context("failed to serialize response"),
),
Ok(JsonString::Ok(res)) => Ok(res),
};
let res =
match request.resolve(&ExecuteArgs { user, update, id }).await {
Err(e) => Err(e.error),
Ok(JsonString::Err(e)) => Err(
anyhow::Error::from(e)
.context("failed to serialize response"),
),
Ok(JsonString::Ok(res)) => Ok(res),
};
if let Err(e) = &res {
warn!(
api = "Execute",
task_id = id.to_string(),
method = method.to_string(),
user_id,
username,
"EXECUTE REQUEST | ERROR: {e:#}"
);
warn!("/execute request {id} error: {e:#}");
}
res

View File

@@ -17,7 +17,7 @@ use komodo_client::{
user::User,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use tokio::sync::Mutex;
use crate::{
@@ -46,7 +46,7 @@ impl Resolve<ExecuteArgs> for BatchRunProcedure {
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchRunProcedure>(&self.pattern, user)
.await?,
@@ -59,7 +59,7 @@ impl Resolve<ExecuteArgs> for RunProcedure {
"RunProcedure",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
procedure = self.procedure,
@@ -67,12 +67,8 @@ impl Resolve<ExecuteArgs> for RunProcedure {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
Ok(
resolve_inner(self.procedure, user.clone(), update.clone())
.await?,

View File

@@ -22,8 +22,8 @@ use komodo_client::{
update::{Log, Update},
},
};
use mogh_resolver::Resolve;
use periphery_client::api;
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
use crate::{
@@ -55,15 +55,15 @@ impl Resolve<ExecuteArgs> for BatchCloneRepo {
"BatchCloneRepo",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchCloneRepo>(&self.pattern, user)
.await?,
@@ -76,7 +76,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
"CloneRepo",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
@@ -84,12 +84,8 @@ impl Resolve<ExecuteArgs> for CloneRepo {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
user,
@@ -190,15 +186,15 @@ impl Resolve<ExecuteArgs> for BatchPullRepo {
"BatchPullRepo",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
.await?,
@@ -211,7 +207,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
"PullRepo",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
@@ -219,12 +215,8 @@ impl Resolve<ExecuteArgs> for PullRepo {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
user,
@@ -324,7 +316,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
)]
async fn handle_repo_update_return(
update: Update,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
@@ -371,15 +363,15 @@ impl Resolve<ExecuteArgs> for BatchBuildRepo {
"BatchBuildRepo",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchBuildRepo>(&self.pattern, user)
.await?,
@@ -392,7 +384,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
"BuildRepo",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
@@ -400,12 +392,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
user,
@@ -626,7 +614,7 @@ async fn handle_builder_early_return(
repo_id: String,
repo_name: String,
is_cancel: bool,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
@@ -717,7 +705,7 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
"CancelRepoBuild",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
@@ -725,12 +713,8 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let repo = get_check_permissions::<Repo>(
&self.repo,
user,

View File

@@ -9,12 +9,12 @@ use komodo_client::{
update::{Log, Update},
},
};
use mogh_resolver::Resolve;
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{periphery_client, update::update_update},
monitor::refresh_server_cache,
monitor::update_cache_for_server,
permission::get_check_permissions,
state::action_states,
};
@@ -26,7 +26,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
"StartContainer",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -35,12 +35,8 @@ impl Resolve<ExecuteArgs> for StartContainer {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -61,7 +57,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server).await?;
@@ -74,13 +70,13 @@ impl Resolve<ExecuteArgs> for StartContainer {
{
Ok(log) => log,
Err(e) => Log::error(
"Start Container",
format_serror(&e.context("Failed to start container").into()),
"start container",
format_serror(&e.context("failed to start container").into()),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -94,7 +90,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
"RestartContainer",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -103,12 +99,8 @@ impl Resolve<ExecuteArgs> for RestartContainer {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -129,7 +121,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server).await?;
@@ -142,15 +134,15 @@ impl Resolve<ExecuteArgs> for RestartContainer {
{
Ok(log) => log,
Err(e) => Log::error(
"Restart Container",
"restart container",
format_serror(
&e.context("Failed to restart container").into(),
&e.context("failed to restart container").into(),
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -164,7 +156,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
"PauseContainer",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -173,12 +165,8 @@ impl Resolve<ExecuteArgs> for PauseContainer {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -199,7 +187,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server).await?;
@@ -212,13 +200,13 @@ impl Resolve<ExecuteArgs> for PauseContainer {
{
Ok(log) => log,
Err(e) => Log::error(
"Pause Container",
format_serror(&e.context("Failed to pause container").into()),
"pause container",
format_serror(&e.context("failed to pause container").into()),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -232,7 +220,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
"UnpauseContainer",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -241,12 +229,8 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -267,7 +251,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server).await?;
@@ -280,15 +264,15 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
{
Ok(log) => log,
Err(e) => Log::error(
"Unpause Container",
"unpause container",
format_serror(
&e.context("Failed to unpause container").into(),
&e.context("failed to unpause container").into(),
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -302,7 +286,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
"StopContainer",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -313,12 +297,8 @@ impl Resolve<ExecuteArgs> for StopContainer {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -339,7 +319,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server).await?;
@@ -354,13 +334,13 @@ impl Resolve<ExecuteArgs> for StopContainer {
{
Ok(log) => log,
Err(e) => Log::error(
"Stop Container",
format_serror(&e.context("Failed to stop container").into()),
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -374,7 +354,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
"DestroyContainer",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -385,12 +365,8 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let DestroyContainer {
server,
container,
@@ -417,7 +393,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
let mut update = update.clone();
// Send update after setting action state, this way UI gets correct state.
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server).await?;
@@ -432,15 +408,13 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
{
Ok(log) => log,
Err(e) => Log::error(
"Remove Container",
format_serror(
&e.context("Failed to remove container").into(),
),
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -454,7 +428,7 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
"StartAllContainers",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -462,12 +436,8 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -494,18 +464,18 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
.await?
.request(api::container::StartAllContainers {})
.await
.context("Failed to start all containers on host")?;
.context("failed to start all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"Start All Containers",
"start all containers",
String::from("All containers have been started on the host."),
);
}
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -518,7 +488,7 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
"RestartAllContainers",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -526,12 +496,8 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -558,20 +524,20 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
.await?
.request(api::container::RestartAllContainers {})
.await
.context("Failed to restart all containers on host")?;
.context("failed to restart all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"Restart All Containers",
"restart all containers",
String::from(
"All containers have been restarted on the host.",
),
);
}
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -584,7 +550,7 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
"PauseAllContainers",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -592,12 +558,8 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -624,18 +586,18 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
.await?
.request(api::container::PauseAllContainers {})
.await
.context("Failed to pause all containers on host")?;
.context("failed to pause all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"Pause All Containers",
"pause all containers",
String::from("All containers have been paused on the host."),
);
}
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -648,7 +610,7 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
"UnpauseAllContainers",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -656,12 +618,8 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -688,20 +646,20 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
.await?
.request(api::container::UnpauseAllContainers {})
.await
.context("Failed to unpause all containers on host")?;
.context("failed to unpause all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"Unpause All Containers",
"unpause all containers",
String::from(
"All containers have been unpaused on the host.",
),
);
}
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -714,7 +672,7 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
"StopAllContainers",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -722,12 +680,8 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -754,18 +708,18 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
.await?
.request(api::container::StopAllContainers {})
.await
.context("Failed to stop all containers on host")?;
.context("failed to stop all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"Stop All Containers",
"stop all containers",
String::from("All containers have been stopped on the host."),
);
}
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -778,7 +732,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
"PruneContainers",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -786,12 +740,8 @@ impl Resolve<ExecuteArgs> for PruneContainers {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -820,20 +770,20 @@ impl Resolve<ExecuteArgs> for PruneContainers {
.request(api::container::PruneContainers {})
.await
.context(format!(
"Failed to prune containers on server {}",
"failed to prune containers on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"Prune Containers",
"prune containers",
format_serror(
&e.context("Failed to prune containers").into(),
&e.context("failed to prune containers").into(),
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -847,7 +797,7 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
"DeleteNetwork",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -856,12 +806,8 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -881,15 +827,15 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
})
.await
.context(format!(
"Failed to delete network {} on server {}",
"failed to delete network {} on server {}",
self.name, server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"Delete Network",
"delete network",
format_serror(
&e.context(format!(
"Failed to delete network {}",
"failed to delete network {}",
self.name
))
.into(),
@@ -898,7 +844,7 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -912,7 +858,7 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
"PruneNetworks",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -920,12 +866,8 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -954,18 +896,18 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
.request(api::docker::PruneNetworks {})
.await
.context(format!(
"Failed to prune networks on server {}",
"failed to prune networks on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"Prune Networks",
format_serror(&e.context("Failed to prune networks").into()),
"prune networks",
format_serror(&e.context("failed to prune networks").into()),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -979,7 +921,7 @@ impl Resolve<ExecuteArgs> for DeleteImage {
"DeleteImage",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -988,12 +930,8 @@ impl Resolve<ExecuteArgs> for DeleteImage {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -1013,21 +951,21 @@ impl Resolve<ExecuteArgs> for DeleteImage {
})
.await
.context(format!(
"Failed to delete image {} on server {}",
"failed to delete image {} on server {}",
self.name, server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"delete image",
format_serror(
&e.context(format!("Failed to delete image {}", self.name))
&e.context(format!("failed to delete image {}", self.name))
.into(),
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1041,7 +979,7 @@ impl Resolve<ExecuteArgs> for PruneImages {
"PruneImages",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -1049,12 +987,8 @@ impl Resolve<ExecuteArgs> for PruneImages {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -1083,16 +1017,16 @@ impl Resolve<ExecuteArgs> for PruneImages {
match periphery.request(api::docker::PruneImages {}).await {
Ok(log) => log,
Err(e) => Log::error(
"Prune Images",
"prune images",
format!(
"Failed to prune images on server {} | {e:#?}",
"failed to prune images on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1106,7 +1040,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
"DeleteVolume",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -1115,12 +1049,8 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -1140,7 +1070,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
})
.await
.context(format!(
"Failed to delete volume {} on server {}",
"failed to delete volume {} on server {}",
self.name, server.name
)) {
Ok(log) => log,
@@ -1148,7 +1078,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
"delete volume",
format_serror(
&e.context(format!(
"Failed to delete volume {}",
"failed to delete volume {}",
self.name
))
.into(),
@@ -1157,7 +1087,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1171,7 +1101,7 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
"PruneVolumes",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -1179,12 +1109,8 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -1213,16 +1139,16 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
match periphery.request(api::docker::PruneVolumes {}).await {
Ok(log) => log,
Err(e) => Log::error(
"Prune Volumes",
"prune volumes",
format!(
"Failed to prune volumes on server {} | {e:#?}",
"failed to prune volumes on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1236,7 +1162,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
"PruneDockerBuilders",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -1244,12 +1170,8 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -1278,16 +1200,16 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
match periphery.request(api::build::PruneBuilders {}).await {
Ok(log) => log,
Err(e) => Log::error(
"Prune Builders",
"prune builders",
format!(
"Failed to docker builder prune on server {} | {e:#?}",
"failed to docker builder prune on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1301,7 +1223,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
"PruneBuildx",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -1309,12 +1231,8 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -1343,16 +1261,16 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
match periphery.request(api::build::PruneBuildx {}).await {
Ok(log) => log,
Err(e) => Log::error(
"Prune Buildx",
"prune buildx",
format!(
"Failed to docker buildx prune on server {} | {e:#?}",
"failed to docker buildx prune on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1366,7 +1284,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
"PruneSystem",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
@@ -1374,12 +1292,8 @@ impl Resolve<ExecuteArgs> for PruneSystem {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -1407,16 +1321,16 @@ impl Resolve<ExecuteArgs> for PruneSystem {
let log = match periphery.request(api::PruneSystem {}).await {
Ok(log) => log,
Err(e) => Log::error(
"Prune System",
"prune system",
format!(
"Failed to docker system prune on server {} | {e:#?}",
"failed to docker system prune on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
refresh_server_cache(&server, true).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;

View File

@@ -1,6 +1,6 @@
use std::{collections::HashSet, str::FromStr};
use anyhow::{Context, anyhow};
use anyhow::Context;
use database::mungos::mongodb::bson::{
doc, oid::ObjectId, to_bson, to_document,
};
@@ -9,7 +9,7 @@ use interpolate::Interpolator;
use komodo_client::{
api::{execute::*, write::RefreshStackCache},
entities::{
FileContents, SwarmOrServer,
FileContents,
permission::PermissionLevel,
repo::Repo,
server::Server,
@@ -20,12 +20,8 @@ use komodo_client::{
user::User,
},
};
use mogh_error::AddStatusCodeError as _;
use mogh_resolver::Resolve;
use periphery_client::api::{
DeployStackResponse, compose::*, swarm::DeploySwarmStack,
};
use reqwest::StatusCode;
use periphery_client::api::compose::*;
use resolver_api::Resolve;
use uuid::Uuid;
use crate::{
@@ -34,20 +30,14 @@ use crate::{
periphery_client,
query::{VariablesAndSecrets, get_variables_and_secrets},
stack_git_token,
swarm::swarm_request,
update::{
add_update_without_send, init_execution_update, update_update,
},
},
monitor::{refresh_server_cache, refresh_swarm_cache},
monitor::update_cache_for_server,
permission::get_check_permissions,
resource,
stack::{
execute::{
execute_compose, execute_compose_with_stack_and_server,
},
setup_stack_execution,
},
stack::{execute::execute_compose, get_stack_and_server},
state::{action_states, db_client},
};
@@ -69,15 +59,15 @@ impl Resolve<ExecuteArgs> for BatchDeployStack {
"BatchDeployStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDeployStack>(&self.pattern, user)
.await?,
@@ -90,7 +80,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
"DeployStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -100,21 +90,16 @@ impl Resolve<ExecuteArgs> for DeployStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (mut stack, swarm_or_server) = setup_stack_execution(
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (mut stack, server) = get_stack_and_server(
&self.stack,
user,
PermissionLevel::Execute.into(),
true,
)
.await?;
swarm_or_server.verify_has_target()?;
let mut repo = if !stack.config.files_on_host
&& !stack.config.linked_repo.is_empty()
{
@@ -180,45 +165,27 @@ impl Resolve<ExecuteArgs> for DeployStack {
Default::default()
};
let DeployStackResponse {
let ComposeUpResponse {
logs,
deployed,
services,
file_contents,
missing_files,
remote_errors,
merged_config,
compose_config,
commit_hash,
commit_message,
} = match &swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => {
swarm_request(
&swarm.config.server_ids,
DeploySwarmStack {
stack: stack.clone(),
repo,
git_token,
registry_token,
replacers: secret_replacers.into_iter().collect(),
},
)
.await?
}
SwarmOrServer::Server(server) => {
periphery_client(server)
.await?
.request(ComposeUp {
stack: stack.clone(),
services: self.services,
repo,
git_token,
registry_token,
replacers: secret_replacers.into_iter().collect(),
})
.await?
}
};
} = periphery_client(&server)
.await?
.request(ComposeUp {
stack: stack.clone(),
services: self.services,
repo,
git_token,
registry_token,
replacers: secret_replacers.into_iter().collect(),
})
.await?;
update.logs.extend(logs);
@@ -252,7 +219,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
})
.collect(),
),
merged_config,
compose_config,
commit_hash.clone(),
commit_message.clone(),
)
@@ -290,7 +257,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
};
let info = to_document(&info)
.context("Failed to serialize stack info to bson")?;
.context("failed to serialize stack info to bson")?;
db_client()
.stacks
@@ -299,30 +266,22 @@ impl Resolve<ExecuteArgs> for DeployStack {
doc! { "$set": { "info": info } },
)
.await
.context("Failed to update stack info on db")?;
.context("failed to update stack info on db")?;
anyhow::Ok(())
};
// This will be weird with single service deploys. Come back to it.
if let Err(e) = update_info.await {
update.push_error_log(
"Refresh Stack Info",
"refresh stack info",
format_serror(
&e.context("Failed to refresh stack info on db").into(),
&e.context("failed to refresh stack info on db").into(),
),
)
}
// Ensure cached stack state up to date by updating server cache
match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => {
refresh_swarm_cache(&swarm, true).await;
}
SwarmOrServer::Server(server) => {
refresh_server_cache(&server, true).await;
}
}
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -346,15 +305,15 @@ impl Resolve<ExecuteArgs> for BatchDeployStackIfChanged {
"BatchDeployStackIfChanged",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDeployStackIfChanged>(
&self.pattern,
@@ -370,7 +329,7 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
"DeployStackIfChanged",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -379,12 +338,8 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let stack = get_check_permissions::<Stack>(
&self.stack,
user,
@@ -441,7 +396,7 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
.resolve(&ExecuteArgs {
user: user.clone(),
update,
task_id: *task_id,
id: *id,
})
.await
}
@@ -563,7 +518,7 @@ async fn deploy_services(
stack: String,
services: Vec<String>,
user: &User,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
// The existing update is initialized to DeployStack,
// but also has not been created on database.
// Setup a new update here.
@@ -580,7 +535,7 @@ async fn deploy_services(
.resolve(&ExecuteArgs {
user: user.clone(),
update,
task_id: Uuid::new_v4(),
id: Uuid::new_v4(),
})
.await
}
@@ -597,7 +552,7 @@ async fn restart_services(
stack: String,
services: Vec<String>,
user: &User,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
// The existing update is initialized to DeployStack,
// but also has not been created on database.
// Setup a new update here.
@@ -611,7 +566,7 @@ async fn restart_services(
.resolve(&ExecuteArgs {
user: user.clone(),
update,
task_id: Uuid::new_v4(),
id: Uuid::new_v4(),
})
.await
}
@@ -774,15 +729,15 @@ impl Resolve<ExecuteArgs> for BatchPullStack {
"BatchPullStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchPullStack>(&self.pattern, user)
.await?,
@@ -886,7 +841,7 @@ pub async fn pull_stack_inner(
.await?;
// Ensure cached stack state up to date by updating server cache
refresh_server_cache(server, true).await;
update_cache_for_server(server, true).await;
Ok(res)
}
@@ -896,7 +851,7 @@ impl Resolve<ExecuteArgs> for PullStack {
"PullStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -905,28 +860,16 @@ impl Resolve<ExecuteArgs> for PullStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (stack, swarm_or_server) = setup_stack_execution(
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (stack, server) = get_stack_and_server(
&self.stack,
user,
PermissionLevel::Execute.into(),
true,
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!(
"PullStack should not be called for Stack in Swarm Mode"
)
.status_code(StatusCode::BAD_REQUEST),
);
};
let repo = if !stack.config.files_on_host
&& !stack.config.linked_repo.is_empty()
{
@@ -971,7 +914,7 @@ impl Resolve<ExecuteArgs> for StartStack {
"StartStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -980,12 +923,8 @@ impl Resolve<ExecuteArgs> for StartStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<StartStack>(
&self.stack,
self.services,
@@ -1004,7 +943,7 @@ impl Resolve<ExecuteArgs> for RestartStack {
"RestartStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -1013,12 +952,8 @@ impl Resolve<ExecuteArgs> for RestartStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<RestartStack>(
&self.stack,
self.services,
@@ -1039,7 +974,7 @@ impl Resolve<ExecuteArgs> for PauseStack {
"PauseStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -1048,12 +983,8 @@ impl Resolve<ExecuteArgs> for PauseStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<PauseStack>(
&self.stack,
self.services,
@@ -1072,7 +1003,7 @@ impl Resolve<ExecuteArgs> for UnpauseStack {
"UnpauseStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -1081,12 +1012,8 @@ impl Resolve<ExecuteArgs> for UnpauseStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<UnpauseStack>(
&self.stack,
self.services,
@@ -1105,7 +1032,7 @@ impl Resolve<ExecuteArgs> for StopStack {
"StopStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -1114,12 +1041,8 @@ impl Resolve<ExecuteArgs> for StopStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<StopStack>(
&self.stack,
self.services,
@@ -1150,15 +1073,15 @@ impl Resolve<ExecuteArgs> for BatchDestroyStack {
"BatchDestroyStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, task_id, .. }: &ExecuteArgs,
) -> mogh_error::Result<BatchExecutionResponse> {
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDestroyStack>(&self.pattern, user)
.await
.map_err(Into::into)
@@ -1170,7 +1093,7 @@ impl Resolve<ExecuteArgs> for DestroyStack {
"DestroyStack",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -1181,86 +1104,18 @@ impl Resolve<ExecuteArgs> for DestroyStack {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (stack, swarm_or_server) = setup_stack_execution(
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<DestroyStack>(
&self.stack,
self.services,
user,
PermissionLevel::Execute.into(),
|state| state.destroying = true,
update.clone(),
(self.stop_time, self.remove_orphans),
)
.await?;
swarm_or_server.verify_has_target()?;
match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => {
if !self.services.is_empty() {
return Err(
anyhow!("Cannot destroy specific Stack services when in Swarm mode.")
.status_code(StatusCode::BAD_REQUEST)
);
}
// get the action state for the stack (or insert default).
let action_state = action_states()
.stack
.get_or_insert_default(&stack.id)
.await;
// Will check to ensure stack not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.destroying = true)?;
let mut update = update.clone();
// Send update here for UI to recheck action state
update_update(update.clone()).await?;
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RemoveSwarmStacks {
stacks: vec![stack.project_name(false)],
detach: false,
},
)
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"Destroy Stack",
format_serror(
&e.context("Failed to 'docker stack rm' on swarm")
.into(),
),
),
}
// Ensure cached stack state up to date by updating swarm cache
refresh_swarm_cache(&swarm, true).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
SwarmOrServer::Server(server) => {
execute_compose_with_stack_and_server::<DestroyStack>(
stack,
server,
self.services,
|state| state.destroying = true,
update.clone(),
(self.stop_time, self.remove_orphans),
)
.await
.map_err(Into::into)
}
}
.await
.map_err(Into::into)
}
}
@@ -1269,7 +1124,7 @@ impl Resolve<ExecuteArgs> for RunStackService {
"RunStackService",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
@@ -1279,28 +1134,16 @@ impl Resolve<ExecuteArgs> for RunStackService {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
let (mut stack, swarm_or_server) = setup_stack_execution(
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (mut stack, server) = get_stack_and_server(
&self.stack,
user,
PermissionLevel::Execute.into(),
true,
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
return Err(
anyhow!(
"RunStackService should not be called for Stack in Swarm Mode"
)
.status_code(StatusCode::BAD_REQUEST),
);
};
let mut repo = if !stack.config.files_on_host
&& !stack.config.linked_repo.is_empty()
{

View File

@@ -1,552 +0,0 @@
use formatting::format_serror;
use komodo_client::{
api::execute::{
CreateSwarmConfig, CreateSwarmSecret, RemoveSwarmConfigs,
RemoveSwarmNodes, RemoveSwarmSecrets, RemoveSwarmServices,
RemoveSwarmStacks, RotateSwarmConfig, RotateSwarmSecret,
},
entities::{permission::PermissionLevel, swarm::Swarm},
};
use mogh_resolver::Resolve;
use crate::{
api::execute::ExecuteArgs,
helpers::{swarm::swarm_request, update::update_update},
monitor::refresh_swarm_cache,
permission::get_check_permissions,
};
impl Resolve<ExecuteArgs> for RemoveSwarmNodes {
#[instrument(
"RemoveSwarmNodes",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
nodes = serde_json::to_string(&self.nodes).unwrap_or_else(|e| e.to_string()),
force = self.force,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RemoveSwarmNodes {
nodes: self.nodes,
force: self.force,
},
)
.await
{
Ok(log) => {
update.logs.push(log);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Remove Swarm Nodes",
format_serror(
&e.context("Failed to remove swarm nodes").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for RemoveSwarmStacks {
#[instrument(
"RemoveSwarmStacks",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
stacks = serde_json::to_string(&self.stacks).unwrap_or_else(|e| e.to_string()),
detach = self.detach,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RemoveSwarmStacks {
stacks: self.stacks,
detach: self.detach,
},
)
.await
{
Ok(log) => {
update.logs.push(log);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Remove Swarm Stacks",
format_serror(
&e.context("Failed to remove swarm stacks").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for RemoveSwarmServices {
#[instrument(
"RemoveSwarmServices",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
services = serde_json::to_string(&self.services).unwrap_or_else(|e| e.to_string()),
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RemoveSwarmServices {
services: self.services,
},
)
.await
{
Ok(log) => {
update.logs.push(log);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Remove Swarm Services",
format_serror(
&e.context("Failed to remove swarm services").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for CreateSwarmConfig {
#[instrument(
"CreateSwarmConfig",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
config = self.name,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::CreateSwarmConfig {
name: self.name,
data: self.data,
labels: self.labels,
template_driver: self.template_driver,
},
)
.await
{
Ok(log) => {
update.logs.push(log);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Create Swarm Config",
format_serror(
&e.context("Failed to create swarm config").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for RotateSwarmConfig {
#[instrument(
"RotateSwarmConfig",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
config = self.config,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RotateSwarmConfig {
config: self.config,
data: self.data,
},
)
.await
{
Ok(logs) => {
update.logs.extend(logs);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Rotate Swarm Config",
format_serror(
&e.context("Failed to rotate swarm config").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for RemoveSwarmConfigs {
#[instrument(
"RemoveSwarmConfigs",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
configs = serde_json::to_string(&self.configs).unwrap_or_else(|e| e.to_string()),
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RemoveSwarmConfigs {
configs: self.configs,
},
)
.await
{
Ok(log) => {
update.logs.push(log);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Remove Swarm Configs",
format_serror(
&e.context("Failed to remove swarm configs").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for CreateSwarmSecret {
#[instrument(
"CreateSwarmSecret",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
secret = self.name,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::CreateSwarmSecret {
name: self.name,
data: self.data,
driver: self.driver,
labels: self.labels,
template_driver: self.template_driver,
},
)
.await
{
Ok(log) => {
update.logs.push(log);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Create Swarm Secret",
format_serror(
&e.context("Failed to create swarm secret").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for RotateSwarmSecret {
#[instrument(
"RotateSwarmSecret",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
secret = self.secret,
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RotateSwarmSecret {
secret: self.secret,
data: self.data,
},
)
.await
{
Ok(logs) => {
update.logs.extend(logs);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Rotate Swarm Secret",
format_serror(
&e.context("Failed to rotate swarm secret").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<ExecuteArgs> for RemoveSwarmSecrets {
#[instrument(
"RemoveSwarmSecrets",
skip_all,
fields(
task_id = task_id.to_string(),
operator = user.id,
update_id = update.id,
swarm = self.swarm,
secrets = serde_json::to_string(&self.secrets).unwrap_or_else(|e| e.to_string()),
)
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Execute.into(),
)
.await?;
update_update(update.clone()).await?;
let mut update = update.clone();
match swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::RemoveSwarmSecrets {
secrets: self.secrets,
},
)
.await
{
Ok(log) => {
update.logs.push(log);
refresh_swarm_cache(&swarm, true).await;
}
Err(e) => update.push_error_log(
"Remove Swarm Secrets",
format_serror(
&e.context("Failed to remove swarm secrets").into(),
),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -21,13 +21,12 @@ use komodo_client::{
repo::Repo,
server::Server,
stack::Stack,
swarm::Swarm,
sync::ResourceSync,
update::{Log, Update},
user::sync_user,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
api::write::WriteArgs,
@@ -54,7 +53,7 @@ impl Resolve<ExecuteArgs> for RunSync {
"RunSync",
skip_all,
fields(
task_id = task_id.to_string(),
id = id.to_string(),
operator = user.id,
update_id = update.id,
sync = self.sync,
@@ -64,12 +63,8 @@ impl Resolve<ExecuteArgs> for RunSync {
)]
async fn resolve(
self,
ExecuteArgs {
user,
update,
task_id,
}: &ExecuteArgs,
) -> mogh_error::Result<Update> {
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let RunSync {
sync,
resource_type: match_resource_type,
@@ -139,36 +134,52 @@ impl Resolve<ExecuteArgs> for RunSync {
let Some(resource_type) = match_resource_type else {
return Some(name_or_id);
};
macro_rules! resolve_id_to_name {
($(($Variant:ident, $field:ident)),* $(,)?) => {
match ObjectId::from_str(&name_or_id) {
Ok(_) => match resource_type {
$(
ResourceTargetVariant::$Variant => all_resources
.$field
.get(&name_or_id)
.map(|r| r.name.clone()),
)*
ResourceTargetVariant::System => None,
},
Err(_) => Some(name_or_id),
}
};
match ObjectId::from_str(&name_or_id) {
Ok(_) => match resource_type {
ResourceTargetVariant::Alerter => all_resources
.alerters
.get(&name_or_id)
.map(|a| a.name.clone()),
ResourceTargetVariant::Build => all_resources
.builds
.get(&name_or_id)
.map(|b| b.name.clone()),
ResourceTargetVariant::Builder => all_resources
.builders
.get(&name_or_id)
.map(|b| b.name.clone()),
ResourceTargetVariant::Deployment => all_resources
.deployments
.get(&name_or_id)
.map(|d| d.name.clone()),
ResourceTargetVariant::Procedure => all_resources
.procedures
.get(&name_or_id)
.map(|p| p.name.clone()),
ResourceTargetVariant::Action => all_resources
.actions
.get(&name_or_id)
.map(|p| p.name.clone()),
ResourceTargetVariant::Repo => all_resources
.repos
.get(&name_or_id)
.map(|r| r.name.clone()),
ResourceTargetVariant::Server => all_resources
.servers
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::Stack => all_resources
.stacks
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::ResourceSync => all_resources
.syncs
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::System => None,
},
Err(_) => Some(name_or_id),
}
// New resource types need to be added here manually.
resolve_id_to_name!(
(Server, servers),
(Swarm, swarms),
(Stack, stacks),
(Deployment, deployments),
(Build, builds),
(Repo, repos),
(Procedure, procedures),
(Action, actions),
(ResourceSync, syncs),
(Builder, builders),
(Alerter, alerters),
)
})
.collect::<Vec<_>>()
});
@@ -216,39 +227,136 @@ impl Resolve<ExecuteArgs> for RunSync {
let delete = sync.config.managed || sync.config.delete;
macro_rules! get_deltas {
($(($var:ident, $Type:ident, $field:ident)),* $(,)?) => {
$(
let $var = if sync.config.include_resources {
get_updates_for_execution::<$Type>(
resources.$field,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
)*
};
}
// New resource types need to be added here manually.
get_deltas!(
(server_deltas, Server, servers),
(swarm_deltas, Swarm, swarms),
(stack_deltas, Stack, stacks),
(deployment_deltas, Deployment, deployments),
(build_deltas, Build, builds),
(repo_deltas, Repo, repos),
(procedure_deltas, Procedure, procedures),
(action_deltas, Action, actions),
(builder_deltas, Builder, builders),
(alerter_deltas, Alerter, alerters),
(resource_sync_deltas, ResourceSync, resource_syncs),
);
let server_deltas = if sync.config.include_resources {
get_updates_for_execution::<Server>(
resources.servers,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let stack_deltas = if sync.config.include_resources {
get_updates_for_execution::<Stack>(
resources.stacks,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let deployment_deltas = if sync.config.include_resources {
get_updates_for_execution::<Deployment>(
resources.deployments,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let build_deltas = if sync.config.include_resources {
get_updates_for_execution::<Build>(
resources.builds,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let repo_deltas = if sync.config.include_resources {
get_updates_for_execution::<Repo>(
resources.repos,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let procedure_deltas = if sync.config.include_resources {
get_updates_for_execution::<Procedure>(
resources.procedures,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let action_deltas = if sync.config.include_resources {
get_updates_for_execution::<Action>(
resources.actions,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let builder_deltas = if sync.config.include_resources {
get_updates_for_execution::<Builder>(
resources.builders,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let alerter_deltas = if sync.config.include_resources {
get_updates_for_execution::<Alerter>(
resources.alerters,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let resource_sync_deltas = if sync.config.include_resources {
get_updates_for_execution::<entities::sync::ResourceSync>(
resources.resource_syncs,
delete,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let (
variables_to_create,
@@ -286,7 +394,6 @@ impl Resolve<ExecuteArgs> for RunSync {
if deploy_cache.is_empty()
&& resource_sync_deltas.no_changes()
&& server_deltas.no_changes()
&& swarm_deltas.no_changes()
&& deployment_deltas.no_changes()
&& stack_deltas.no_changes()
&& build_deltas.no_changes()
@@ -314,11 +421,7 @@ impl Resolve<ExecuteArgs> for RunSync {
return Ok(update);
}
// =====================================================
// The ordering these are executed does matter, since
// latter resources may depend on prior synced resources
// already being updated with the declared state.
// =====================================================
// =================
// No deps
maybe_extend(
@@ -357,10 +460,6 @@ impl Resolve<ExecuteArgs> for RunSync {
);
// Dependent on server
maybe_extend(
&mut update.logs,
Swarm::execute_sync_updates(swarm_deltas).await,
);
maybe_extend(
&mut update.logs,
Builder::execute_sync_updates(builder_deltas).await,

View File

@@ -1,4 +0,0 @@
pub mod github;
pub mod gitlab;
use super::{ExtractBranch, VerifySecret};

View File

@@ -1,57 +1,11 @@
use axum::{Extension, Router, routing::get};
use komodo_client::entities::user::User;
use mogh_auth_server::middleware::authenticate_request;
use mogh_error::Json;
use mogh_server::{
cors::cors_layer, session::memory_session_layer,
ui::serve_static_ui,
};
use crate::{auth::KomodoAuthImpl, config::core_config, ts_client};
pub mod auth;
pub mod execute;
pub mod read;
pub mod terminal;
pub mod user;
pub mod write;
mod listener;
mod openapi;
mod terminal;
mod ws;
#[derive(serde::Deserialize)]
struct Variant {
variant: String,
}
pub fn app() -> Router {
let config = core_config();
Router::new()
.merge(openapi::serve_docs())
.route("/version", get(|| async { env!("CARGO_PKG_VERSION") }))
.nest("/auth", mogh_auth_server::api::router::<KomodoAuthImpl>())
.nest("/user", user_router())
.nest("/read", read::router())
.nest("/write", write::router())
.nest("/execute", execute::router())
.nest("/terminal", terminal::router())
.nest("/listener", listener::router())
.nest("/ws", ws::router())
.nest("/client", ts_client::router())
.layer(memory_session_layer(config))
.fallback_service(serve_static_ui(
&config.ui_path,
config.ui_index_force_no_cache,
))
.layer(cors_layer(config))
}
fn user_router() -> Router {
Router::new()
.route(
"/",
get(|Extension(user): Extension<User>| async { Json(user) }),
)
.layer(axum::middleware::from_fn(
authenticate_request::<KomodoAuthImpl, false>,
))
}

View File

@@ -1,18 +0,0 @@
<!doctype html>
<html>
<head>
<title>Komodo API Docs</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
</head>
<body>
<script id="api-reference" type="application/json">
$spec
</script>
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
</body>
</html>

View File

@@ -1,8 +0,0 @@
use komodo_client::openapi::KomodoApi;
use utoipa::OpenApi as _;
use utoipa_scalar::{Scalar, Servable as _};
pub fn serve_docs() -> Scalar<utoipa::openapi::OpenApi> {
Scalar::with_url("/docs", KomodoApi::openapi())
.custom_html(include_str!("docs.html"))
}

View File

@@ -8,7 +8,7 @@ use komodo_client::{
permission::PermissionLevel,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
@@ -23,7 +23,7 @@ impl Resolve<ReadArgs> for GetAction {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Action> {
) -> serror::Result<Action> {
Ok(
get_check_permissions::<Action>(
&self.action,
@@ -39,7 +39,7 @@ impl Resolve<ReadArgs> for ListActions {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<ActionListItem>> {
) -> serror::Result<Vec<ActionListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -61,7 +61,7 @@ impl Resolve<ReadArgs> for ListFullActions {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullActionsResponse> {
) -> serror::Result<ListFullActionsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -83,7 +83,7 @@ impl Resolve<ReadArgs> for GetActionActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ActionActionState> {
) -> serror::Result<ActionActionState> {
let action = get_check_permissions::<Action>(
&self.action,
user,
@@ -104,7 +104,7 @@ impl Resolve<ReadArgs> for GetActionsSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetActionsSummaryResponse> {
) -> serror::Result<GetActionsSummaryResponse> {
let actions = resource::list_full_for_user::<Action>(
Default::default(),
user,

View File

@@ -8,15 +8,15 @@ use komodo_client::{
api::read::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
entities::permission::PermissionLevel,
entities::{
deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, sync::ResourceSync,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
config::core_config,
permission::{
check_user_target_access, user_resource_target_query,
},
config::core_config, permission::list_resource_ids_for_user,
state::db_client,
};
@@ -28,11 +28,41 @@ impl Resolve<ReadArgs> for ListAlerts {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListAlertsResponse> {
// Alerts
let query = user_resource_target_query(user, self.query)
.await?
.unwrap_or_default();
) -> serror::Result<ListAlertsResponse> {
let mut query = self.query.unwrap_or_default();
if !user.admin && !core_config().transparent_mode {
let (server_ids, stack_ids, deployment_ids, sync_ids) = tokio::try_join!(
list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
)?;
// All of the vecs will be non-none if !admin and !transparent mode.
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
{ "target.type": "Stack", "target.id": { "$in": &stack_ids } },
{ "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
{ "target.type": "ResourceSync", "target.id": { "$in": &sync_ids } },
]
});
}
let alerts = find_collect(
&db_client().alerts,
@@ -61,21 +91,13 @@ impl Resolve<ReadArgs> for ListAlerts {
impl Resolve<ReadArgs> for GetAlert {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetAlertResponse> {
let alert = find_one_by_id(&db_client().alerts, &self.id)
.await
.context("failed to query db for alert")?
.context("no alert found with given id")?;
if user.admin || core_config().transparent_mode {
return Ok(alert);
}
check_user_target_access(
&alert.target,
user,
PermissionLevel::Read.into(),
_: &ReadArgs,
) -> serror::Result<GetAlertResponse> {
Ok(
find_one_by_id(&db_client().alerts, &self.id)
.await
.context("failed to query db for alert")?
.context("no alert found with given id")?,
)
.await?;
Ok(alert)
}
}

View File

@@ -8,7 +8,7 @@ use komodo_client::{
permission::PermissionLevel,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
@@ -23,7 +23,7 @@ impl Resolve<ReadArgs> for GetAlerter {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Alerter> {
) -> serror::Result<Alerter> {
Ok(
get_check_permissions::<Alerter>(
&self.alerter,
@@ -39,7 +39,7 @@ impl Resolve<ReadArgs> for ListAlerters {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<AlerterListItem>> {
) -> serror::Result<Vec<AlerterListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -61,7 +61,7 @@ impl Resolve<ReadArgs> for ListFullAlerters {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullAlertersResponse> {
) -> serror::Result<ListFullAlertersResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -83,7 +83,7 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetAlertersSummaryResponse> {
) -> serror::Result<GetAlertersSummaryResponse> {
let query = match list_resource_ids_for_user::<Alerter>(
None,
user,

View File

@@ -16,7 +16,7 @@ use komodo_client::{
update::UpdateStatus,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
@@ -31,7 +31,7 @@ impl Resolve<ReadArgs> for GetBuild {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Build> {
) -> serror::Result<Build> {
Ok(
get_check_permissions::<Build>(
&self.build,
@@ -47,7 +47,7 @@ impl Resolve<ReadArgs> for ListBuilds {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<BuildListItem>> {
) -> serror::Result<Vec<BuildListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -69,7 +69,7 @@ impl Resolve<ReadArgs> for ListFullBuilds {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullBuildsResponse> {
) -> serror::Result<ListFullBuildsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -91,7 +91,7 @@ impl Resolve<ReadArgs> for GetBuildActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<BuildActionState> {
) -> serror::Result<BuildActionState> {
let build = get_check_permissions::<Build>(
&self.build,
user,
@@ -112,7 +112,7 @@ impl Resolve<ReadArgs> for GetBuildsSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetBuildsSummaryResponse> {
) -> serror::Result<GetBuildsSummaryResponse> {
let builds = resource::list_full_for_user::<Build>(
Default::default(),
user,
@@ -160,7 +160,7 @@ impl Resolve<ReadArgs> for GetBuildMonthlyStats {
async fn resolve(
self,
_: &ReadArgs,
) -> mogh_error::Result<GetBuildMonthlyStatsResponse> {
) -> serror::Result<GetBuildMonthlyStatsResponse> {
let curr_ts = unix_timestamp_ms() as i64;
let next_day = curr_ts - curr_ts % ONE_DAY_MS + ONE_DAY_MS;
@@ -216,7 +216,7 @@ impl Resolve<ReadArgs> for ListBuildVersions {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<BuildVersionResponseItem>> {
) -> serror::Result<Vec<BuildVersionResponseItem>> {
let ListBuildVersions {
build,
major,
@@ -273,7 +273,7 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListCommonBuildExtraArgsResponse> {
) -> serror::Result<ListCommonBuildExtraArgsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {

View File

@@ -8,7 +8,7 @@ use komodo_client::{
permission::PermissionLevel,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
@@ -23,7 +23,7 @@ impl Resolve<ReadArgs> for GetBuilder {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Builder> {
) -> serror::Result<Builder> {
Ok(
get_check_permissions::<Builder>(
&self.builder,
@@ -39,7 +39,7 @@ impl Resolve<ReadArgs> for ListBuilders {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<BuilderListItem>> {
) -> serror::Result<Vec<BuilderListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -61,7 +61,7 @@ impl Resolve<ReadArgs> for ListFullBuilders {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullBuildersResponse> {
) -> serror::Result<ListFullBuildersResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -83,7 +83,7 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetBuildersSummaryResponse> {
) -> serror::Result<GetBuildersSummaryResponse> {
let query = match list_resource_ids_for_user::<Builder>(
None,
user,

View File

@@ -4,31 +4,23 @@ use anyhow::{Context, anyhow};
use komodo_client::{
api::read::*,
entities::{
SwarmOrServer,
deployment::{
Deployment, DeploymentActionState, DeploymentConfig,
DeploymentListItem, DeploymentState,
},
docker::{
container::{Container, ContainerStats},
service::SwarmService,
},
docker::container::{Container, ContainerStats},
permission::PermissionLevel,
server::{Server, ServerState},
update::Log,
},
};
use mogh_error::AddStatusCodeError as _;
use mogh_resolver::Resolve;
use periphery_client::api::{self, container::InspectContainer};
use reqwest::StatusCode;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client, query::get_all_tags, swarm::swarm_request,
},
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
resource::{self, setup_deployment_execution},
resource,
state::{
action_states, deployment_status_cache, server_status_cache,
},
@@ -40,7 +32,7 @@ impl Resolve<ReadArgs> for GetDeployment {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Deployment> {
) -> serror::Result<Deployment> {
Ok(
get_check_permissions::<Deployment>(
&self.deployment,
@@ -56,7 +48,7 @@ impl Resolve<ReadArgs> for ListDeployments {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<DeploymentListItem>> {
) -> serror::Result<Vec<DeploymentListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -86,7 +78,7 @@ impl Resolve<ReadArgs> for ListFullDeployments {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullDeploymentsResponse> {
) -> serror::Result<ListFullDeploymentsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -108,7 +100,7 @@ impl Resolve<ReadArgs> for GetDeploymentContainer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetDeploymentContainerResponse> {
) -> serror::Result<GetDeploymentContainerResponse> {
let deployment = get_check_permissions::<Deployment>(
&self.deployment,
user,
@@ -133,49 +125,36 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Log> {
) -> serror::Result<Log> {
let GetDeploymentLog {
deployment,
tail,
timestamps,
} = self;
let (deployment, swarm_or_server) = setup_deployment_execution(
let Deployment {
name,
config: DeploymentConfig { server_id, .. },
..
} = get_check_permissions::<Deployment>(
&deployment,
user,
PermissionLevel::Read.logs(),
)
.await?;
swarm_or_server.verify_has_target()?;
let log = match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::GetSwarmServiceLog {
service: deployment.name,
tail,
timestamps,
no_task_ids: false,
no_resolve: false,
details: false,
},
)
if server_id.is_empty() {
return Ok(Log::default());
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerLog {
name,
tail: cmp::min(tail, MAX_LOG_LENGTH),
timestamps,
})
.await
.context("Failed to get service log from swarm")?,
SwarmOrServer::Server(server) => periphery_client(&server)
.await?
.request(api::container::GetContainerLog {
name: deployment.name,
tail: cmp::min(tail, MAX_LOG_LENGTH),
timestamps,
})
.await
.context("failed at call to periphery")?,
};
Ok(log)
.context("failed at call to periphery")?;
Ok(res)
}
}
@@ -183,7 +162,7 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Log> {
) -> serror::Result<Log> {
let SearchDeploymentLog {
deployment,
terms,
@@ -191,47 +170,32 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
invert,
timestamps,
} = self;
let (deployment, swarm_or_server) = setup_deployment_execution(
let Deployment {
name,
config: DeploymentConfig { server_id, .. },
..
} = get_check_permissions::<Deployment>(
&deployment,
user,
PermissionLevel::Read.logs(),
)
.await?;
swarm_or_server.verify_has_target()?;
let log = match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::GetSwarmServiceLogSearch {
service: deployment.name,
terms,
combinator,
invert,
timestamps,
no_task_ids: false,
no_resolve: false,
details: false,
},
)
if server_id.is_empty() {
return Ok(Log::default());
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerLogSearch {
name,
terms,
combinator,
invert,
timestamps,
})
.await
.context("Failed to search service log from swarm")?,
SwarmOrServer::Server(server) => periphery_client(&server)
.await?
.request(api::container::GetContainerLogSearch {
name: deployment.name,
terms,
combinator,
invert,
timestamps,
})
.await
.context("Failed to search container log from server")?,
};
Ok(log)
.context("failed at call to periphery")?;
Ok(res)
}
}
@@ -239,80 +203,44 @@ impl Resolve<ReadArgs> for InspectDeploymentContainer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Container> {
) -> serror::Result<Container> {
let InspectDeploymentContainer { deployment } = self;
let (deployment, swarm_or_server) = setup_deployment_execution(
let Deployment {
name,
config: DeploymentConfig { server_id, .. },
..
} = get_check_permissions::<Deployment>(
&deployment,
user,
PermissionLevel::Read.inspect(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
if server_id.is_empty() {
return Err(
anyhow!(
"InspectDeploymentContainer should not be called for Deployment in Swarm Mode"
"Cannot inspect deployment, not attached to any server"
)
.status_code(StatusCode::BAD_REQUEST),
.into(),
);
};
}
let server = resource::get::<Server>(&server_id).await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(
anyhow!(
"Cannot inspect container: Server is {:?}",
"Cannot inspect container: server is {:?}",
cache.state
)
.into(),
);
}
periphery_client(&server)
let res = periphery_client(&server)
.await?
.request(InspectContainer {
name: deployment.name,
})
.await
.context("Failed to inspect container on server")
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for InspectDeploymentSwarmService {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<SwarmService> {
let InspectDeploymentSwarmService { deployment } = self;
let (deployment, swarm_or_server) = setup_deployment_execution(
&deployment,
user,
PermissionLevel::Read.logs(),
)
.await?;
let SwarmOrServer::Swarm(swarm) = swarm_or_server else {
return Err(
anyhow!(
"InspectDeploymentSwarmService should only be called for Deployment in Swarm Mode"
)
.status_code(StatusCode::BAD_REQUEST),
);
};
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmService {
service: deployment.name,
},
)
.await
.context("Failed to inspect service on swarm")
.map_err(Into::into)
.request(InspectContainer { name })
.await?;
Ok(res)
}
}
@@ -320,7 +248,7 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ContainerStats> {
) -> serror::Result<ContainerStats> {
let Deployment {
name,
config: DeploymentConfig { server_id, .. },
@@ -350,7 +278,7 @@ impl Resolve<ReadArgs> for GetDeploymentActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<DeploymentActionState> {
) -> serror::Result<DeploymentActionState> {
let deployment = get_check_permissions::<Deployment>(
&self.deployment,
user,
@@ -371,7 +299,7 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetDeploymentsSummaryResponse> {
) -> serror::Result<GetDeploymentsSummaryResponse> {
let deployments = resource::list_full_for_user::<Deployment>(
Default::default(),
user,
@@ -414,7 +342,7 @@ impl Resolve<ReadArgs> for ListCommonDeploymentExtraArgs {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListCommonDeploymentExtraArgsResponse> {
) -> serror::Result<ListCommonDeploymentExtraArgsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {

View File

@@ -1,4 +1,4 @@
use std::collections::HashSet;
use std::{collections::HashSet, time::Instant};
use anyhow::{Context, anyhow};
use axum::{
@@ -18,19 +18,16 @@ use komodo_client::{
user::User,
},
};
use mogh_auth_server::middleware::authenticate_request;
use mogh_error::Response;
use mogh_error::{AddStatusCodeError, Json};
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use strum::{Display, EnumDiscriminants};
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::KomodoAuthImpl,
auth::auth_request,
config::{core_config, core_keys},
helpers::periphery_client,
resource,
@@ -52,7 +49,6 @@ mod repo;
mod schedule;
mod server;
mod stack;
mod swarm;
mod sync;
mod tag;
mod terminal;
@@ -67,13 +63,10 @@ pub struct ReadArgs {
}
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumDiscriminants,
)]
#[strum_discriminants(name(ReadRequestMethod), derive(Display))]
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[args(ReadArgs)]
#[response(Response)]
#[error(mogh_error::Error)]
#[error(serror::Error)]
#[serde(tag = "type", content = "params")]
enum ReadRequest {
GetVersion(GetVersion),
@@ -82,28 +75,36 @@ enum ReadRequest {
ListGitProvidersFromConfig(ListGitProvidersFromConfig),
ListDockerRegistriesFromConfig(ListDockerRegistriesFromConfig),
// ==== SWARM ====
GetSwarmsSummary(GetSwarmsSummary),
GetSwarm(GetSwarm),
GetSwarmActionState(GetSwarmActionState),
ListSwarms(ListSwarms),
InspectSwarm(InspectSwarm),
ListFullSwarms(ListFullSwarms),
ListSwarmNodes(ListSwarmNodes),
InspectSwarmNode(InspectSwarmNode),
ListSwarmConfigs(ListSwarmConfigs),
InspectSwarmConfig(InspectSwarmConfig),
ListSwarmSecrets(ListSwarmSecrets),
InspectSwarmSecret(InspectSwarmSecret),
ListSwarmStacks(ListSwarmStacks),
InspectSwarmStack(InspectSwarmStack),
ListSwarmTasks(ListSwarmTasks),
InspectSwarmTask(InspectSwarmTask),
ListSwarmServices(ListSwarmServices),
InspectSwarmService(InspectSwarmService),
GetSwarmServiceLog(GetSwarmServiceLog),
SearchSwarmServiceLog(SearchSwarmServiceLog),
ListSwarmNetworks(ListSwarmNetworks),
// ==== USER ====
GetUsername(GetUsername),
GetPermission(GetPermission),
FindUser(FindUser),
ListUsers(ListUsers),
ListApiKeys(ListApiKeys),
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
ListPermissions(ListPermissions),
ListUserTargetPermissions(ListUserTargetPermissions),
// ==== USER GROUP ====
GetUserGroup(GetUserGroup),
ListUserGroups(ListUserGroups),
// ==== PROCEDURE ====
GetProceduresSummary(GetProceduresSummary),
GetProcedure(GetProcedure),
GetProcedureActionState(GetProcedureActionState),
ListProcedures(ListProcedures),
ListFullProcedures(ListFullProcedures),
// ==== ACTION ====
GetActionsSummary(GetActionsSummary),
GetAction(GetAction),
GetActionActionState(GetActionActionState),
ListActions(ListActions),
ListFullActions(ListFullActions),
// ==== SCHEDULE ====
ListSchedules(ListSchedules),
// ==== SERVER ====
GetServersSummary(GetServersSummary),
@@ -111,6 +112,7 @@ enum ReadRequest {
GetServerState(GetServerState),
GetPeripheryInformation(GetPeripheryInformation),
GetServerActionState(GetServerActionState),
GetHistoricalServerStats(GetHistoricalServerStats),
ListServers(ListServers),
ListFullServers(ListFullServers),
@@ -137,7 +139,6 @@ enum ReadRequest {
// ==== SERVER STATS ====
GetSystemInformation(GetSystemInformation),
GetSystemStats(GetSystemStats),
GetHistoricalServerStats(GetHistoricalServerStats),
ListSystemProcesses(ListSystemProcesses),
// ==== STACK ====
@@ -147,7 +148,6 @@ enum ReadRequest {
GetStackLog(GetStackLog),
SearchStackLog(SearchStackLog),
InspectStackContainer(InspectStackContainer),
InspectStackSwarmService(InspectStackSwarmService),
ListStacks(ListStacks),
ListFullStacks(ListFullStacks),
ListStackServices(ListStackServices),
@@ -163,7 +163,6 @@ enum ReadRequest {
GetDeploymentLog(GetDeploymentLog),
SearchDeploymentLog(SearchDeploymentLog),
InspectDeploymentContainer(InspectDeploymentContainer),
InspectDeploymentSwarmService(InspectDeploymentSwarmService),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
@@ -185,23 +184,6 @@ enum ReadRequest {
ListRepos(ListRepos),
ListFullRepos(ListFullRepos),
// ==== PROCEDURE ====
GetProceduresSummary(GetProceduresSummary),
GetProcedure(GetProcedure),
GetProcedureActionState(GetProcedureActionState),
ListProcedures(ListProcedures),
ListFullProcedures(ListFullProcedures),
// ==== ACTION ====
GetActionsSummary(GetActionsSummary),
GetAction(GetAction),
GetActionActionState(GetActionActionState),
ListActions(ListActions),
ListFullActions(ListFullActions),
// ==== SCHEDULE ====
ListSchedules(ListSchedules),
// ==== SYNC ====
GetResourceSyncsSummary(GetResourceSyncsSummary),
GetResourceSync(GetResourceSync),
@@ -229,20 +211,6 @@ enum ReadRequest {
GetTag(GetTag),
ListTags(ListTags),
// ==== USER ====
GetUsername(GetUsername),
GetPermission(GetPermission),
FindUser(FindUser),
ListUsers(ListUsers),
ListApiKeys(ListApiKeys),
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
ListPermissions(ListPermissions),
ListUserTargetPermissions(ListUserTargetPermissions),
// ==== USER GROUP ====
GetUserGroup(GetUserGroup),
ListUserGroups(ListUserGroups),
// ==== UPDATE ====
GetUpdate(GetUpdate),
ListUpdates(ListUpdates),
@@ -269,16 +237,14 @@ pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(
authenticate_request::<KomodoAuthImpl, true>,
))
.layer(middleware::from_fn(auth_request))
}
async fn variant_handler(
user: Extension<User>,
Path(Variant { variant }): Path<Variant>,
Json(params): Json<serde_json::Value>,
) -> mogh_error::Result<axum::response::Response> {
) -> serror::Result<axum::response::Response> {
let req: ReadRequest = serde_json::from_value(json!({
"type": variant,
"params": params,
@@ -289,34 +255,16 @@ async fn variant_handler(
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ReadRequest>,
) -> mogh_error::Result<axum::response::Response> {
) -> serror::Result<axum::response::Response> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
let method: ReadRequestMethod = (&request).into();
let user_id = user.id.clone();
let username = user.username.clone();
trace!(
req_id = req_id.to_string(),
method = method.to_string(),
user_id,
username,
"READ REQUEST",
);
debug!("/read request | user: {}", user.username);
let res = request.resolve(&ReadArgs { user }).await;
if let Err(e) = &res {
trace!(
req_id = req_id.to_string(),
method = method.to_string(),
user_id,
username,
"READ REQUEST | ERROR: {:#}",
e.error
);
debug!("/read request {req_id} error: {:#}", e.error);
}
let elapsed = timer.elapsed();
debug!("/read request {req_id} | resolve time: {elapsed:?}");
res.map(|res| res.0)
}
@@ -324,20 +272,18 @@ impl Resolve<ReadArgs> for GetVersion {
async fn resolve(
self,
_: &ReadArgs,
) -> mogh_error::Result<GetVersionResponse> {
) -> serror::Result<GetVersionResponse> {
Ok(GetVersionResponse {
version: env!("CARGO_PKG_VERSION").to_string(),
})
}
}
//
impl Resolve<ReadArgs> for GetCoreInfo {
async fn resolve(
self,
_: &ReadArgs,
) -> mogh_error::Result<GetCoreInfoResponse> {
) -> serror::Result<GetCoreInfoResponse> {
let config = core_config();
let info = GetCoreInfoResponse {
title: config.title.clone(),
@@ -360,13 +306,11 @@ impl Resolve<ReadArgs> for GetCoreInfo {
}
}
//
impl Resolve<ReadArgs> for ListSecrets {
async fn resolve(
self,
_: &ReadArgs,
) -> mogh_error::Result<ListSecretsResponse> {
) -> serror::Result<ListSecretsResponse> {
let mut secrets = core_config()
.secrets
.keys()
@@ -388,8 +332,7 @@ impl Resolve<ReadArgs> for ListSecrets {
}
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`")
.status_code(StatusCode::BAD_REQUEST),
anyhow!("target must be `Server` or `Builder`").into(),
);
}
};
@@ -416,13 +359,11 @@ impl Resolve<ReadArgs> for ListSecrets {
}
}
//
impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListGitProvidersFromConfigResponse> {
) -> serror::Result<ListGitProvidersFromConfigResponse> {
let mut providers = core_config().git_providers.clone();
if let Some(target) = self.target {
@@ -450,8 +391,7 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
}
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`")
.status_code(StatusCode::BAD_REQUEST),
anyhow!("target must be `Server` or `Builder`").into(),
);
}
}
@@ -521,13 +461,11 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
}
}
//
impl Resolve<ReadArgs> for ListDockerRegistriesFromConfig {
async fn resolve(
self,
_: &ReadArgs,
) -> mogh_error::Result<ListDockerRegistriesFromConfigResponse> {
) -> serror::Result<ListDockerRegistriesFromConfigResponse> {
let mut registries = core_config().docker_registries.clone();
if let Some(target) = self.target {
@@ -571,7 +509,7 @@ impl Resolve<ReadArgs> for ListDockerRegistriesFromConfig {
async fn merge_git_providers_for_server(
providers: &mut Vec<GitProvider>,
server_id: &str,
) -> mogh_error::Result<()> {
) -> serror::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)
.await?
@@ -610,7 +548,7 @@ fn merge_git_providers(
async fn merge_docker_registries_for_server(
registries: &mut Vec<DockerRegistry>,
server_id: &str,
) -> mogh_error::Result<()> {
) -> serror::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)
.await?

View File

@@ -5,9 +5,9 @@ use database::mungos::find::find_collect;
use komodo_client::api::read::{
ListOnboardingKeys, ListOnboardingKeysResponse,
};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{api::read::ReadArgs, state::db_client};
@@ -17,7 +17,7 @@ impl Resolve<ReadArgs> for ListOnboardingKeys {
async fn resolve(
self,
ReadArgs { user: admin }: &ReadArgs,
) -> mogh_error::Result<ListOnboardingKeysResponse> {
) -> serror::Result<ListOnboardingKeysResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")

View File

@@ -8,7 +8,7 @@ use komodo_client::{
},
entities::permission::PermissionLevel,
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_user_permission_on_target, state::db_client,
@@ -20,7 +20,7 @@ impl Resolve<ReadArgs> for ListPermissions {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListPermissionsResponse> {
) -> serror::Result<ListPermissionsResponse> {
let res = find_collect(
&db_client().permissions,
doc! {
@@ -39,7 +39,7 @@ impl Resolve<ReadArgs> for GetPermission {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetPermissionResponse> {
) -> serror::Result<GetPermissionResponse> {
if user.admin {
return Ok(PermissionLevel::Write.all());
}
@@ -51,7 +51,7 @@ impl Resolve<ReadArgs> for ListUserTargetPermissions {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListUserTargetPermissionsResponse> {
) -> serror::Result<ListUserTargetPermissionsResponse> {
if !user.admin {
return Err(anyhow!("this method is admin only").into());
}

View File

@@ -6,7 +6,7 @@ use komodo_client::{
procedure::{Procedure, ProcedureState},
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
@@ -21,7 +21,7 @@ impl Resolve<ReadArgs> for GetProcedure {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetProcedureResponse> {
) -> serror::Result<GetProcedureResponse> {
Ok(
get_check_permissions::<Procedure>(
&self.procedure,
@@ -37,7 +37,7 @@ impl Resolve<ReadArgs> for ListProcedures {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListProceduresResponse> {
) -> serror::Result<ListProceduresResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -59,7 +59,7 @@ impl Resolve<ReadArgs> for ListFullProcedures {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullProceduresResponse> {
) -> serror::Result<ListFullProceduresResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -81,7 +81,7 @@ impl Resolve<ReadArgs> for GetProceduresSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetProceduresSummaryResponse> {
) -> serror::Result<GetProceduresSummaryResponse> {
let procedures = resource::list_full_for_user::<Procedure>(
Default::default(),
user,
@@ -127,7 +127,7 @@ impl Resolve<ReadArgs> for GetProcedureActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetProcedureActionStateResponse> {
) -> serror::Result<GetProcedureActionStateResponse> {
let procedure = get_check_permissions::<Procedure>(
&self.procedure,
user,

View File

@@ -5,7 +5,7 @@ use database::mungos::{
mongodb::options::FindOptions,
};
use komodo_client::api::read::*;
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::state::db_client;
@@ -15,7 +15,7 @@ impl Resolve<ReadArgs> for GetGitProviderAccount {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetGitProviderAccountResponse> {
) -> serror::Result<GetGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can read git provider accounts").into(),
@@ -35,7 +35,7 @@ impl Resolve<ReadArgs> for ListGitProviderAccounts {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListGitProviderAccountsResponse> {
) -> serror::Result<ListGitProviderAccountsResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can read git provider accounts").into(),
@@ -65,7 +65,7 @@ impl Resolve<ReadArgs> for GetDockerRegistryAccount {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetDockerRegistryAccountResponse> {
) -> serror::Result<GetDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can read docker registry accounts")
@@ -87,7 +87,7 @@ impl Resolve<ReadArgs> for ListDockerRegistryAccounts {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListDockerRegistryAccountsResponse> {
) -> serror::Result<ListDockerRegistryAccountsResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can read docker registry accounts")

View File

@@ -6,7 +6,7 @@ use komodo_client::{
repo::{Repo, RepoActionState, RepoListItem, RepoState},
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
@@ -21,7 +21,7 @@ impl Resolve<ReadArgs> for GetRepo {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Repo> {
) -> serror::Result<Repo> {
Ok(
get_check_permissions::<Repo>(
&self.repo,
@@ -37,7 +37,7 @@ impl Resolve<ReadArgs> for ListRepos {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<RepoListItem>> {
) -> serror::Result<Vec<RepoListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -59,7 +59,7 @@ impl Resolve<ReadArgs> for ListFullRepos {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullReposResponse> {
) -> serror::Result<ListFullReposResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -81,7 +81,7 @@ impl Resolve<ReadArgs> for GetRepoActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<RepoActionState> {
) -> serror::Result<RepoActionState> {
let repo = get_check_permissions::<Repo>(
&self.repo,
user,
@@ -102,7 +102,7 @@ impl Resolve<ReadArgs> for GetReposSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetReposSummaryResponse> {
) -> serror::Result<GetReposSummaryResponse> {
let repos = resource::list_full_for_user::<Repo>(
Default::default(),
user,

View File

@@ -10,7 +10,7 @@ use komodo_client::{
schedule::Schedule,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::{get_all_tags, get_last_run_at},
@@ -24,7 +24,7 @@ impl Resolve<ReadArgs> for ListSchedules {
async fn resolve(
self,
args: &ReadArgs,
) -> mogh_error::Result<Vec<Schedule>> {
) -> serror::Result<Vec<Schedule>> {
let all_tags = get_all_tags(None).await?;
let (actions, procedures) = tokio::try_join!(
list_full_for_user::<Action>(

View File

@@ -35,8 +35,6 @@ use komodo_client::{
update::Log,
},
};
use mogh_error::AddStatusCode;
use mogh_resolver::Resolve;
use periphery_client::api::{
self as periphery,
container::InspectContainer,
@@ -45,6 +43,8 @@ use periphery_client::api::{
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use tokio::sync::Mutex;
use crate::{
@@ -61,7 +61,7 @@ impl Resolve<ReadArgs> for GetServersSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetServersSummaryResponse> {
) -> serror::Result<GetServersSummaryResponse> {
let servers = resource::list_for_user::<Server>(
Default::default(),
user,
@@ -103,7 +103,7 @@ impl Resolve<ReadArgs> for GetServer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Server> {
) -> serror::Result<Server> {
Ok(
get_check_permissions::<Server>(
&self.server,
@@ -119,7 +119,7 @@ impl Resolve<ReadArgs> for ListServers {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<ServerListItem>> {
) -> serror::Result<Vec<ServerListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -141,7 +141,7 @@ impl Resolve<ReadArgs> for ListFullServers {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullServersResponse> {
) -> serror::Result<ListFullServersResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -163,7 +163,7 @@ impl Resolve<ReadArgs> for GetServerState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetServerStateResponse> {
) -> serror::Result<GetServerStateResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -185,7 +185,7 @@ impl Resolve<ReadArgs> for GetServerActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ServerActionState> {
) -> serror::Result<ServerActionState> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -206,7 +206,7 @@ impl Resolve<ReadArgs> for GetPeripheryInformation {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetPeripheryInformationResponse> {
) -> serror::Result<GetPeripheryInformationResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -229,7 +229,7 @@ impl Resolve<ReadArgs> for GetSystemInformation {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<SystemInformation> {
) -> serror::Result<SystemInformation> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -253,7 +253,7 @@ impl Resolve<ReadArgs> for GetSystemStats {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetSystemStatsResponse> {
) -> serror::Result<GetSystemStatsResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -285,7 +285,7 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListSystemProcessesResponse> {
) -> serror::Result<ListSystemProcessesResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -320,7 +320,7 @@ impl Resolve<ReadArgs> for GetHistoricalServerStats {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetHistoricalServerStatsResponse> {
) -> serror::Result<GetHistoricalServerStatsResponse> {
let GetHistoricalServerStats {
server,
granularity,
@@ -373,7 +373,7 @@ impl Resolve<ReadArgs> for ListDockerContainers {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListDockerContainersResponse> {
) -> serror::Result<ListDockerContainersResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -383,8 +383,8 @@ impl Resolve<ReadArgs> for ListDockerContainers {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(docker) = &cache.docker {
Ok(docker.containers.clone())
if let Some(containers) = &cache.containers {
Ok(containers.clone())
} else {
Ok(Vec::new())
}
@@ -395,7 +395,7 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListAllDockerContainersResponse> {
) -> serror::Result<ListAllDockerContainersResponse> {
let servers = resource::list_for_user::<Server>(
ServerQuery::builder().names(self.servers.clone()).build(),
user,
@@ -410,11 +410,10 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
let Some(docker) = &cache.docker else {
let Some(more) = &cache.containers else {
continue;
};
let more = docker
.containers
let more = more
.iter()
.filter(|container| {
self.containers.is_empty()
@@ -432,7 +431,7 @@ impl Resolve<ReadArgs> for GetDockerContainersSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetDockerContainersSummaryResponse> {
) -> serror::Result<GetDockerContainersSummaryResponse> {
let servers = resource::list_full_for_user::<Server>(
Default::default(),
user,
@@ -449,8 +448,8 @@ impl Resolve<ReadArgs> for GetDockerContainersSummary {
.get_or_insert_default(&server.id)
.await;
if let Some(docker) = &cache.docker {
for container in &docker.containers {
if let Some(containers) = &cache.containers {
for container in containers {
res.total += 1;
match container.state {
ContainerStateStatusEnum::Created
@@ -472,7 +471,7 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Container> {
) -> serror::Result<Container> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -507,7 +506,7 @@ impl Resolve<ReadArgs> for GetContainerLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Log> {
) -> serror::Result<Log> {
let GetContainerLog {
server,
container,
@@ -537,7 +536,7 @@ impl Resolve<ReadArgs> for SearchContainerLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Log> {
) -> serror::Result<Log> {
let SearchContainerLog {
server,
container,
@@ -571,7 +570,7 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetResourceMatchingContainerResponse> {
) -> serror::Result<GetResourceMatchingContainerResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -632,7 +631,7 @@ impl Resolve<ReadArgs> for ListDockerNetworks {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListDockerNetworksResponse> {
) -> serror::Result<ListDockerNetworksResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -642,8 +641,8 @@ impl Resolve<ReadArgs> for ListDockerNetworks {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(docker) = &cache.docker {
Ok(docker.networks.clone())
if let Some(networks) = &cache.networks {
Ok(networks.clone())
} else {
Ok(Vec::new())
}
@@ -654,7 +653,7 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Network> {
) -> serror::Result<Network> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -685,7 +684,7 @@ impl Resolve<ReadArgs> for ListDockerImages {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListDockerImagesResponse> {
) -> serror::Result<ListDockerImagesResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -695,8 +694,8 @@ impl Resolve<ReadArgs> for ListDockerImages {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(docker) = &cache.docker {
Ok(docker.images.clone())
if let Some(images) = &cache.images {
Ok(images.clone())
} else {
Ok(Vec::new())
}
@@ -707,7 +706,7 @@ impl Resolve<ReadArgs> for InspectDockerImage {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Image> {
) -> serror::Result<Image> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -735,7 +734,7 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<ImageHistoryResponseItem>> {
) -> serror::Result<Vec<ImageHistoryResponseItem>> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -766,7 +765,7 @@ impl Resolve<ReadArgs> for ListDockerVolumes {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListDockerVolumesResponse> {
) -> serror::Result<ListDockerVolumesResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -776,8 +775,8 @@ impl Resolve<ReadArgs> for ListDockerVolumes {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(docker) = &cache.docker {
Ok(docker.volumes.clone())
if let Some(volumes) = &cache.volumes {
Ok(volumes.clone())
} else {
Ok(Vec::new())
}
@@ -788,7 +787,7 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Volume> {
) -> serror::Result<Volume> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -816,7 +815,7 @@ impl Resolve<ReadArgs> for ListComposeProjects {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListComposeProjectsResponse> {
) -> serror::Result<ListComposeProjectsResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -826,8 +825,8 @@ impl Resolve<ReadArgs> for ListComposeProjects {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(docker) = &cache.docker {
Ok(docker.projects.clone())
if let Some(projects) = &cache.projects {
Ok(projects.clone())
} else {
Ok(Vec::new())
}

View File

@@ -4,30 +4,24 @@ use anyhow::{Context, anyhow};
use komodo_client::{
api::read::*,
entities::{
SwarmOrServer,
docker::{
container::Container, service::SwarmService, stack::SwarmStack,
},
docker::container::Container,
permission::PermissionLevel,
server::{Server, ServerState},
stack::{Stack, StackActionState, StackListItem, StackState},
},
};
use mogh_error::AddStatusCodeError as _;
use mogh_resolver::Resolve;
use periphery_client::api::{
compose::{GetComposeLog, GetComposeLogSearch},
container::InspectContainer,
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client, query::get_all_tags, swarm::swarm_request,
},
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
resource,
stack::setup_stack_execution,
state::{action_states, stack_status_cache},
stack::get_stack_and_server,
state::{action_states, server_status_cache, stack_status_cache},
};
use super::ReadArgs;
@@ -36,7 +30,7 @@ impl Resolve<ReadArgs> for GetStack {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Stack> {
) -> serror::Result<Stack> {
Ok(
get_check_permissions::<Stack>(
&self.stack,
@@ -52,7 +46,7 @@ impl Resolve<ReadArgs> for ListStackServices {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListStackServicesResponse> {
) -> serror::Result<ListStackServicesResponse> {
let stack = get_check_permissions::<Stack>(
&self.stack,
user,
@@ -76,59 +70,31 @@ impl Resolve<ReadArgs> for GetStackLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetStackLogResponse> {
) -> serror::Result<GetStackLogResponse> {
let GetStackLog {
stack,
mut services,
services,
tail,
timestamps,
} = self;
let (stack, swarm_or_server) = setup_stack_execution(
let (stack, server) = get_stack_and_server(
&stack,
user,
PermissionLevel::Read.logs(),
true,
)
.await?;
swarm_or_server.verify_has_target()?;
let log = match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => {
let service = services.pop().context(
"Must pass single service for Swarm mode Stack logs",
)?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::GetSwarmServiceLog {
// The actual service name on swarm will be stackname_servicename
service: format!(
"{}_{service}",
stack.project_name(false)
),
tail,
timestamps,
no_task_ids: false,
no_resolve: false,
details: false,
},
)
.await
.context("Failed to get stack service log from swarm")?
}
SwarmOrServer::Server(server) => periphery_client(&server)
.await?
.request(GetComposeLog {
project: stack.project_name(false),
services,
tail,
timestamps,
})
.await
.context("Failed to get stack log from periphery")?,
};
Ok(log)
let res = periphery_client(&server)
.await?
.request(GetComposeLog {
project: stack.project_name(false),
services,
tail,
timestamps,
})
.await
.context("Failed to get stack log from periphery")?;
Ok(res)
}
}
@@ -136,61 +102,35 @@ impl Resolve<ReadArgs> for SearchStackLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<SearchStackLogResponse> {
) -> serror::Result<SearchStackLogResponse> {
let SearchStackLog {
stack,
mut services,
services,
terms,
combinator,
invert,
timestamps,
} = self;
let (stack, swarm_or_server) = setup_stack_execution(
let (stack, server) = get_stack_and_server(
&stack,
user,
PermissionLevel::Read.logs(),
true,
)
.await?;
swarm_or_server.verify_has_target()?;
let log = match swarm_or_server {
SwarmOrServer::None => unreachable!(),
SwarmOrServer::Swarm(swarm) => {
let service = services.pop().context(
"Must pass single service for Swarm mode Stack logs",
)?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::GetSwarmServiceLogSearch {
service,
terms,
combinator,
invert,
timestamps,
no_task_ids: false,
no_resolve: false,
details: false,
},
)
.await
.context("Failed to get stack service log from swarm")?
}
SwarmOrServer::Server(server) => periphery_client(&server)
.await?
.request(GetComposeLogSearch {
project: stack.project_name(false),
services,
terms,
combinator,
invert,
timestamps,
})
.await
.context("Failed to search stack log from periphery")?,
};
Ok(log)
let res = periphery_client(&server)
.await?
.request(GetComposeLogSearch {
project: stack.project_name(false),
services,
terms,
combinator,
invert,
timestamps,
})
.await
.context("Failed to search stack log from periphery")?;
Ok(res)
}
}
@@ -198,31 +138,40 @@ impl Resolve<ReadArgs> for InspectStackContainer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Container> {
) -> serror::Result<Container> {
let InspectStackContainer { stack, service } = self;
let (stack, swarm_or_server) = setup_stack_execution(
let stack = get_check_permissions::<Stack>(
&stack,
user,
PermissionLevel::Read.inspect(),
)
.await?;
let SwarmOrServer::Server(server) = swarm_or_server else {
if stack.config.server_id.is_empty() {
return Err(
anyhow!("Cannot inspect stack, not attached to any server")
.into(),
);
}
let server =
resource::get::<Server>(&stack.config.server_id).await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(
anyhow!(
"InspectStackContainer should not be called for Stack in Swarm Mode"
"Cannot inspect container: server is {:?}",
cache.state
)
.status_code(StatusCode::BAD_REQUEST),
.into(),
);
};
}
let services = &stack_status_cache()
.get(&stack.id)
.await
.unwrap_or_default()
.curr
.services;
let Some(name) = services
.iter()
.find(|s| s.service == service)
@@ -232,106 +181,19 @@ impl Resolve<ReadArgs> for InspectStackContainer {
"No service found matching '{service}'. Was the stack last deployed manually?"
).into());
};
let res = periphery_client(&server)
.await?
.request(InspectContainer { name })
.await
.context("Failed to inspect container on server")?;
.await?;
Ok(res)
}
}
impl Resolve<ReadArgs> for InspectStackSwarmService {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<SwarmService> {
let InspectStackSwarmService { stack, service } = self;
let (stack, swarm_or_server) = setup_stack_execution(
&stack,
user,
PermissionLevel::Read.inspect(),
)
.await?;
let SwarmOrServer::Swarm(swarm) = swarm_or_server else {
return Err(
anyhow!(
"InspectStackSwarmService should only be called for Stack in Swarm Mode"
)
.status_code(StatusCode::BAD_REQUEST),
);
};
let services = &stack_status_cache()
.get(&stack.id)
.await
.unwrap_or_default()
.curr
.services;
let Some(service) = services
.iter()
.find(|s| s.service == service)
.and_then(|s| {
s.swarm_service.as_ref().and_then(|c| c.name.clone())
})
else {
return Err(anyhow!(
"No service found matching '{service}'. Was the stack last deployed manually?"
).into());
};
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmService { service },
)
.await
.context("Failed to inspect service on swarm")
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for InspectStackSwarmInfo {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<SwarmStack> {
let (stack, swarm_or_server) = setup_stack_execution(
&self.stack,
user,
PermissionLevel::Read.inspect(),
)
.await?;
let SwarmOrServer::Swarm(swarm) = swarm_or_server else {
return Err(
anyhow!(
"InspectStackSwarmInfo should only be called for Stack in Swarm Mode"
)
.status_code(StatusCode::BAD_REQUEST),
);
};
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmStack {
stack: stack.project_name(false),
},
)
.await
.context("Failed to inspect stack info on swarm")
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListCommonStackExtraArgsResponse> {
) -> serror::Result<ListCommonStackExtraArgsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -344,7 +206,7 @@ impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
&all_tags,
)
.await
.context("Failed to get resources matching query")?;
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
@@ -365,7 +227,7 @@ impl Resolve<ReadArgs> for ListCommonStackBuildExtraArgs {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListCommonStackBuildExtraArgsResponse> {
) -> serror::Result<ListCommonStackBuildExtraArgsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -378,7 +240,7 @@ impl Resolve<ReadArgs> for ListCommonStackBuildExtraArgs {
&all_tags,
)
.await
.context("Failed to get resources matching query")?;
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
@@ -399,7 +261,7 @@ impl Resolve<ReadArgs> for ListStacks {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<StackListItem>> {
) -> serror::Result<Vec<StackListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -435,7 +297,7 @@ impl Resolve<ReadArgs> for ListFullStacks {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullStacksResponse> {
) -> serror::Result<ListFullStacksResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -457,7 +319,7 @@ impl Resolve<ReadArgs> for GetStackActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<StackActionState> {
) -> serror::Result<StackActionState> {
let stack = get_check_permissions::<Stack>(
&self.stack,
user,
@@ -478,7 +340,7 @@ impl Resolve<ReadArgs> for GetStacksSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetStacksSummaryResponse> {
) -> serror::Result<GetStacksSummaryResponse> {
let stacks = resource::list_full_for_user::<Stack>(
Default::default(),
user,
@@ -486,7 +348,7 @@ impl Resolve<ReadArgs> for GetStacksSummary {
&[],
)
.await
.context("Failed to get stacks from database")?;
.context("failed to get stacks from db")?;
let mut res = GetStacksSummaryResponse::default();

View File

@@ -1,522 +0,0 @@
use anyhow::{Context, anyhow};
use komodo_client::{
api::read::*,
entities::{
permission::PermissionLevel,
swarm::{Swarm, SwarmActionState, SwarmListItem, SwarmState},
},
};
use mogh_resolver::Resolve;
use crate::{
helpers::{query::get_all_tags, swarm::swarm_request},
permission::get_check_permissions,
resource,
state::{action_states, server_status_cache, swarm_status_cache},
};
use super::ReadArgs;
impl Resolve<ReadArgs> for GetSwarm {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Swarm> {
Ok(
get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?,
)
}
}
impl Resolve<ReadArgs> for ListSwarms {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<SwarmListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Swarm>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
impl Resolve<ReadArgs> for ListFullSwarms {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullSwarmsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
Ok(
resource::list_full_for_user::<Swarm>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
impl Resolve<ReadArgs> for GetSwarmActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<SwarmActionState> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let action_state = action_states()
.swarm
.get(&swarm.id)
.await
.unwrap_or_default()
.get()?;
Ok(action_state)
}
}
impl Resolve<ReadArgs> for GetSwarmsSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetSwarmsSummaryResponse> {
let swarms = resource::list_full_for_user::<Swarm>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
.context("failed to get swarms from db")?;
let mut res = GetSwarmsSummaryResponse::default();
let cache = swarm_status_cache();
for swarm in swarms {
res.total += 1;
match cache
.get(&swarm.id)
.await
.map(|status| status.state)
.unwrap_or_default()
{
SwarmState::Unknown => {
res.unknown += 1;
}
SwarmState::Healthy => {
res.healthy += 1;
}
SwarmState::Unhealthy => {
res.unhealthy += 1;
}
SwarmState::Down => {
res.down += 1;
}
}
}
Ok(res)
}
}
impl Resolve<ReadArgs> for InspectSwarm {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<InspectSwarmResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.inspect(),
)
.await?;
let cache =
swarm_status_cache().get_or_insert_default(&swarm.id).await;
let inspect = cache
.inspect
.as_ref()
.cloned()
.context("SwarmInspectInfo not available")?;
Ok(inspect)
}
}
impl Resolve<ReadArgs> for ListSwarmNodes {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListSwarmNodesResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let cache =
swarm_status_cache().get_or_insert_default(&swarm.id).await;
if let Some(lists) = &cache.lists {
Ok(lists.nodes.clone())
} else {
Ok(Vec::new())
}
}
}
impl Resolve<ReadArgs> for InspectSwarmNode {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<InspectSwarmNodeResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.inspect(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmNode {
node: self.node,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for ListSwarmServices {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListSwarmServicesResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let cache =
swarm_status_cache().get_or_insert_default(&swarm.id).await;
if let Some(lists) = &cache.lists {
Ok(lists.services.clone())
} else {
Ok(Vec::new())
}
}
}
impl Resolve<ReadArgs> for InspectSwarmService {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<InspectSwarmServiceResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.inspect(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmService {
service: self.service,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for GetSwarmServiceLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetSwarmServiceLogResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.logs(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::GetSwarmServiceLog {
service: self.service,
tail: self.tail,
timestamps: self.timestamps,
no_task_ids: self.no_task_ids,
no_resolve: self.no_resolve,
details: self.details,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for SearchSwarmServiceLog {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<SearchSwarmServiceLogResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.logs(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::GetSwarmServiceLogSearch {
service: self.service,
terms: self.terms,
combinator: self.combinator,
invert: self.invert,
timestamps: self.timestamps,
no_task_ids: self.no_task_ids,
no_resolve: self.no_resolve,
details: self.details,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for ListSwarmTasks {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListSwarmTasksResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let cache =
swarm_status_cache().get_or_insert_default(&swarm.id).await;
if let Some(lists) = &cache.lists {
Ok(lists.tasks.clone())
} else {
Ok(Vec::new())
}
}
}
impl Resolve<ReadArgs> for InspectSwarmTask {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<InspectSwarmTaskResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.inspect(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmTask {
task: self.task,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for ListSwarmSecrets {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListSwarmSecretsResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let cache =
swarm_status_cache().get_or_insert_default(&swarm.id).await;
if let Some(lists) = &cache.lists {
Ok(lists.secrets.clone())
} else {
Ok(Vec::new())
}
}
}
impl Resolve<ReadArgs> for InspectSwarmSecret {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<InspectSwarmSecretResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.inspect(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmSecret {
secret: self.secret,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for ListSwarmConfigs {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListSwarmConfigsResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let cache =
swarm_status_cache().get_or_insert_default(&swarm.id).await;
if let Some(lists) = &cache.lists {
Ok(lists.configs.clone())
} else {
Ok(Vec::new())
}
}
}
impl Resolve<ReadArgs> for InspectSwarmConfig {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<InspectSwarmConfigResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.inspect(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmConfig {
config: self.config,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for ListSwarmStacks {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListSwarmStacksResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let cache =
swarm_status_cache().get_or_insert_default(&swarm.id).await;
if let Some(lists) = &cache.lists {
Ok(lists.stacks.clone())
} else {
Ok(Vec::new())
}
}
}
impl Resolve<ReadArgs> for InspectSwarmStack {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<InspectSwarmStackResponse> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.inspect(),
)
.await?;
swarm_request(
&swarm.config.server_ids,
periphery_client::api::swarm::InspectSwarmStack {
stack: self.stack,
},
)
.await
.map_err(Into::into)
}
}
impl Resolve<ReadArgs> for ListSwarmNetworks {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> Result<Self::Response, Self::Error> {
let swarm = get_check_permissions::<Swarm>(
&self.swarm,
user,
PermissionLevel::Read.into(),
)
.await?;
let cache = server_status_cache();
for server_id in swarm.config.server_ids {
let Some(status) = cache.get(&server_id).await else {
continue;
};
let Some(docker) = &status.docker else {
continue;
};
let networks = docker
.networks
.iter()
.filter(|network| {
network.driver.as_deref() == Some("overlay")
})
.cloned()
.collect::<Vec<_>>();
return Ok(networks);
}
Err(
anyhow!(
"Failed to retrieve swarm networks from any manager node."
)
.into(),
)
}
}

View File

@@ -8,7 +8,7 @@ use komodo_client::{
},
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
@@ -21,7 +21,7 @@ impl Resolve<ReadArgs> for GetResourceSync {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ResourceSync> {
) -> serror::Result<ResourceSync> {
Ok(
get_check_permissions::<ResourceSync>(
&self.sync,
@@ -37,7 +37,7 @@ impl Resolve<ReadArgs> for ListResourceSyncs {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Vec<ResourceSyncListItem>> {
) -> serror::Result<Vec<ResourceSyncListItem>> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -59,7 +59,7 @@ impl Resolve<ReadArgs> for ListFullResourceSyncs {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListFullResourceSyncsResponse> {
) -> serror::Result<ListFullResourceSyncsResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
@@ -81,7 +81,7 @@ impl Resolve<ReadArgs> for GetResourceSyncActionState {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ResourceSyncActionState> {
) -> serror::Result<ResourceSyncActionState> {
let sync = get_check_permissions::<ResourceSync>(
&self.sync,
user,
@@ -102,7 +102,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetResourceSyncsSummaryResponse> {
) -> serror::Result<GetResourceSyncsSummaryResponse> {
let resource_syncs =
resource::list_full_for_user::<ResourceSync>(
Default::default(),
@@ -120,7 +120,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
for resource_sync in resource_syncs {
res.total += 1;
if !(resource_sync.info.pending_deploys.is_empty()
if !(resource_sync.info.pending_deploy.to_deploy == 0
&& resource_sync.info.resource_updates.is_empty()
&& resource_sync.info.variable_updates.is_empty()
&& resource_sync.info.user_group_updates.is_empty())

View File

@@ -7,23 +7,20 @@ use komodo_client::{
api::read::{GetTag, ListTags},
entities::tag::Tag,
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{helpers::query::get_tag, state::db_client};
use super::ReadArgs;
impl Resolve<ReadArgs> for GetTag {
async fn resolve(self, _: &ReadArgs) -> mogh_error::Result<Tag> {
async fn resolve(self, _: &ReadArgs) -> serror::Result<Tag> {
Ok(get_tag(&self.tag).await?)
}
}
impl Resolve<ReadArgs> for ListTags {
async fn resolve(
self,
_: &ReadArgs,
) -> mogh_error::Result<Vec<Tag>> {
async fn resolve(self, _: &ReadArgs) -> serror::Result<Vec<Tag>> {
let res = find_collect(
&db_client().tags,
self.query,

View File

@@ -13,9 +13,9 @@ use komodo_client::{
user::User,
},
};
use mogh_error::AddStatusCode;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::periphery_client, permission::get_check_permissions,
@@ -30,7 +30,7 @@ impl Resolve<ReadArgs> for ListTerminals {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListTerminalsResponse> {
) -> serror::Result<ListTerminalsResponse> {
let Some(target) = self.target else {
return list_all_terminals_for_user(user, self.use_names).await;
};
@@ -88,7 +88,7 @@ impl Resolve<ReadArgs> for ListTerminals {
async fn list_all_terminals_for_user(
user: &User,
use_names: bool,
) -> mogh_error::Result<Vec<Terminal>> {
) -> serror::Result<Vec<Terminal>> {
let (mut servers, stacks, deployments) = tokio::try_join!(
resource::list_full_for_user::<Server>(
Default::default(),
@@ -230,7 +230,7 @@ async fn list_all_terminals_for_user(
async fn list_terminals_on_server(
server: &Server,
target: Option<TerminalTarget>,
) -> mogh_error::Result<Vec<Terminal>> {
) -> serror::Result<Vec<Terminal>> {
periphery_client(server)
.await?
.request(periphery_client::api::terminal::ListTerminals {

View File

@@ -11,11 +11,10 @@ use komodo_client::{
builder::Builder, deployment::Deployment,
permission::PermissionLevel, procedure::Procedure, repo::Repo,
resource::ResourceQuery, server::Server, stack::Stack,
swarm::Swarm, sync::ResourceSync, toml::ResourcesToml,
user::User,
sync::ResourceSync, toml::ResourcesToml, user::User,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
helpers::query::{
@@ -38,45 +37,122 @@ async fn get_all_targets(
user: &User,
) -> anyhow::Result<Vec<ResourceTarget>> {
let mut targets = Vec::<ResourceTarget>::new();
let all_tags = if tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
macro_rules! extend_targets {
($($Type:ident),* $(,)?) => {
$(
targets.extend(
resource::list_full_for_user::<$Type>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::$Type(resource.id)),
);
)*
};
}
extend_targets!(
Alerter,
Builder,
Server,
Swarm,
Stack,
Deployment,
Build,
Repo,
Procedure,
Action,
ResourceSync,
targets.extend(
resource::list_full_for_user::<Alerter>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Builder>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Server>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Stack>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Stack(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Deployment>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Build>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Repo>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Procedure>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_full_for_user::<Action>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Action(resource.id)),
);
targets.extend(
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
// These will already be filtered by [ExportResourcesToToml]
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
Ok(targets)
}
@@ -84,7 +160,7 @@ impl Resolve<ReadArgs> for ExportAllResourcesToToml {
async fn resolve(
self,
args: &ReadArgs,
) -> mogh_error::Result<ExportAllResourcesToTomlResponse> {
) -> serror::Result<ExportAllResourcesToTomlResponse> {
let targets = if self.include_resources {
get_all_targets(&self.tags, &args.user).await?
} else {
@@ -110,7 +186,6 @@ impl Resolve<ReadArgs> for ExportAllResourcesToToml {
targets,
user_groups,
include_variables: self.include_variables,
existing: self.existing,
}
.resolve(args)
.await
@@ -121,69 +196,31 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
async fn resolve(
self,
args: &ReadArgs,
) -> mogh_error::Result<ExportResourcesToTomlResponse> {
) -> serror::Result<ExportResourcesToTomlResponse> {
let ExportResourcesToToml {
targets,
user_groups,
include_variables,
existing,
} = self;
let mut res = ResourcesToml::default();
let id_to_tags = get_id_to_tags(None).await?;
let ReadArgs { user } = args;
macro_rules! convert_target {
($id:expr, $Type:ident, $field:ident) => {{
let mut resource = get_check_permissions::<$Type>(
&$id,
user,
PermissionLevel::Read.into(),
)
.await?;
$Type::replace_ids(&mut resource);
let (deploy, after) = existing
.as_ref()
.and_then(|e| {
e.$field.iter().find(|r| r.name == resource.name)
})
.map(|r| (r.deploy, r.after.clone()))
.unwrap_or_default();
res.$field.push(convert_resource::<$Type>(
resource,
deploy,
after,
&id_to_tags,
));
}};
}
for target in targets {
match target {
ResourceTarget::Server(id) => {
convert_target!(id, Server, servers)
}
ResourceTarget::Swarm(id) => {
convert_target!(id, Swarm, swarms)
}
ResourceTarget::Stack(id) => {
convert_target!(id, Stack, stacks)
}
ResourceTarget::Deployment(id) => {
convert_target!(id, Deployment, deployments)
}
ResourceTarget::Build(id) => {
convert_target!(id, Build, builds)
}
ResourceTarget::Repo(id) => convert_target!(id, Repo, repos),
ResourceTarget::Procedure(id) => {
convert_target!(id, Procedure, procedures)
}
ResourceTarget::Action(id) => {
convert_target!(id, Action, actions)
}
ResourceTarget::Builder(id) => {
convert_target!(id, Builder, builders)
}
ResourceTarget::Alerter(id) => {
convert_target!(id, Alerter, alerters)
let mut alerter = get_check_permissions::<Alerter>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Alerter::replace_ids(&mut alerter);
res.alerters.push(convert_resource::<Alerter>(
alerter,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::ResourceSync(id) => {
let mut sync = get_check_permissions::<ResourceSync>(
@@ -206,6 +243,126 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
}
ResourceTarget::Server(id) => {
let mut server = get_check_permissions::<Server>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Server::replace_ids(&mut server);
res.servers.push(convert_resource::<Server>(
server,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Builder(id) => {
let mut builder = get_check_permissions::<Builder>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Builder::replace_ids(&mut builder);
res.builders.push(convert_resource::<Builder>(
builder,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Build(id) => {
let mut build = get_check_permissions::<Build>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Build::replace_ids(&mut build);
res.builds.push(convert_resource::<Build>(
build,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Deployment(id) => {
let mut deployment = get_check_permissions::<Deployment>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Deployment::replace_ids(&mut deployment);
res.deployments.push(convert_resource::<Deployment>(
deployment,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Repo(id) => {
let mut repo = get_check_permissions::<Repo>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Repo::replace_ids(&mut repo);
res.repos.push(convert_resource::<Repo>(
repo,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Stack(id) => {
let mut stack = get_check_permissions::<Stack>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Stack::replace_ids(&mut stack);
res.stacks.push(convert_resource::<Stack>(
stack,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Procedure(id) => {
let mut procedure = get_check_permissions::<Procedure>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Procedure::replace_ids(&mut procedure);
res.procedures.push(convert_resource::<Procedure>(
procedure,
false,
vec![],
&id_to_tags,
));
}
ResourceTarget::Action(id) => {
let mut action = get_check_permissions::<Action>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
Action::replace_ids(&mut action);
res.actions.push(convert_resource::<Action>(
action,
false,
vec![],
&id_to_tags,
));
}
ResourceTarget::System(_) => continue,
};
}
@@ -261,32 +418,85 @@ fn serialize_resources_toml(
) -> anyhow::Result<String> {
let mut toml = String::new();
macro_rules! serialize_resources {
($(($Type:ident, $field:ident, $header:literal)),* $(,)?) => {
$(
for resource in resources.$field {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str(concat!("[[",$header,"]]\n"));
$Type::push_to_toml_string(resource, &mut toml)?;
}
)*
};
for server in resources.servers {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[server]]\n");
Server::push_to_toml_string(server, &mut toml)?;
}
for stack in resources.stacks {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[stack]]\n");
Stack::push_to_toml_string(stack, &mut toml)?;
}
for deployment in resources.deployments {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[deployment]]\n");
Deployment::push_to_toml_string(deployment, &mut toml)?;
}
for build in resources.builds {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[build]]\n");
Build::push_to_toml_string(build, &mut toml)?;
}
for repo in resources.repos {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[repo]]\n");
Repo::push_to_toml_string(repo, &mut toml)?;
}
for procedure in resources.procedures {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[procedure]]\n");
Procedure::push_to_toml_string(procedure, &mut toml)?;
}
for action in resources.actions {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[action]]\n");
Action::push_to_toml_string(action, &mut toml)?;
}
for alerter in resources.alerters {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[alerter]]\n");
Alerter::push_to_toml_string(alerter, &mut toml)?;
}
for builder in resources.builders {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[builder]]\n");
Builder::push_to_toml_string(builder, &mut toml)?;
}
for resource_sync in resources.resource_syncs {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[resource_sync]]\n");
ResourceSync::push_to_toml_string(resource_sync, &mut toml)?;
}
serialize_resources!(
(Server, servers, "server"),
(Swarm, swarms, "swarm"),
(Stack, stacks, "stack"),
(Deployment, deployments, "deployment"),
(Build, builds, "build"),
(Repo, repos, "repo"),
(Procedure, procedures, "procedure"),
(Action, actions, "action"),
(Alerter, alerters, "alerter"),
(Builder, builders, "builder"),
(ResourceSync, resource_syncs, "resource_sync"),
);
for variable in &resources.variables {
if !toml.is_empty() {

View File

@@ -1,6 +1,6 @@
use std::collections::HashMap;
use anyhow::Context;
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::find_one_by_id,
find::find_collect,
@@ -9,18 +9,27 @@ use database::mungos::{
use komodo_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
stack::Stack,
sync::ResourceSync,
update::{Update, UpdateListItem},
user::User,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{
config::core_config,
permission::{
check_user_target_access, user_resource_target_query,
},
permission::{get_check_permissions, list_resource_ids_for_user},
state::db_client,
};
@@ -32,8 +41,159 @@ impl Resolve<ReadArgs> for ListUpdates {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListUpdatesResponse> {
let query = user_resource_target_query(user, self.query).await?;
) -> serror::Result<ListUpdatesResponse> {
let query = if user.admin || core_config().transparent_mode {
self.query
} else {
let server_query = list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = list_resource_ids_for_user::<Build>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = list_resource_ids_for_user::<Repo>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query = list_resource_ids_for_user::<Procedure>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = list_resource_ids_for_user::<Action>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = list_resource_ids_for_user::<Builder>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = list_resource_ids_for_user::<Alerter>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query =
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = self.query.unwrap_or_default();
query.extend(doc! {
"$or": [
server_query,
deployment_query,
stack_query,
build_query,
repo_query,
procedure_query,
action_query,
alerter_query,
builder_query,
resource_sync_query,
]
});
query.into()
};
let usernames = find_collect(&db_client().users, None, None)
.await
@@ -92,7 +252,7 @@ impl Resolve<ReadArgs> for GetUpdate {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let update = find_one_by_id(&db_client().updates, &self.id)
.await
.context("failed to query to db")?
@@ -100,12 +260,93 @@ impl Resolve<ReadArgs> for GetUpdate {
if user.admin || core_config().transparent_mode {
return Ok(update);
}
check_user_target_access(
&update.target,
user,
PermissionLevel::Read.into(),
)
.await?;
match &update.target {
ResourceTarget::System(_) => {
return Err(
anyhow!("user must be admin to view system updates").into(),
);
}
ResourceTarget::Server(id) => {
get_check_permissions::<Server>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Deployment(id) => {
get_check_permissions::<Deployment>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Build(id) => {
get_check_permissions::<Build>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Repo(id) => {
get_check_permissions::<Repo>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Builder(id) => {
get_check_permissions::<Builder>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Alerter(id) => {
get_check_permissions::<Alerter>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Procedure(id) => {
get_check_permissions::<Procedure>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Action(id) => {
get_check_permissions::<Action>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
get_check_permissions::<ResourceSync>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
ResourceTarget::Stack(id) => {
get_check_permissions::<Stack>(
id,
user,
PermissionLevel::Read.into(),
)
.await?;
}
}
Ok(update)
}
}

View File

@@ -13,7 +13,7 @@ use komodo_client::{
},
entities::user::{UserConfig, admin_service_user},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{helpers::query::get_user, state::db_client};
@@ -23,7 +23,7 @@ impl Resolve<ReadArgs> for GetUsername {
async fn resolve(
self,
_: &ReadArgs,
) -> mogh_error::Result<GetUsernameResponse> {
) -> serror::Result<GetUsernameResponse> {
if let Some(user) = admin_service_user(&self.user_id) {
return Ok(GetUsernameResponse {
username: user.username,
@@ -53,7 +53,7 @@ impl Resolve<ReadArgs> for FindUser {
async fn resolve(
self,
ReadArgs { user: admin }: &ReadArgs,
) -> mogh_error::Result<FindUserResponse> {
) -> serror::Result<FindUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only.").into());
}
@@ -65,26 +65,15 @@ impl Resolve<ReadArgs> for ListUsers {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListUsersResponse> {
) -> serror::Result<ListUsersResponse> {
if !user.admin {
return Err(
anyhow!("this route is only accessable by admins").into(),
);
}
let filter = match self.service_users {
komodo_client::api::read::ServiceUserQueryBehavior::Include => {
None
}
komodo_client::api::read::ServiceUserQueryBehavior::Exclude => {
Some(doc! { "config.type": { "$ne": "Service" } })
}
komodo_client::api::read::ServiceUserQueryBehavior::Only => {
Some(doc! { "config.type": "Service" })
}
};
let mut users = find_collect(
&db_client().users,
filter,
None,
FindOptions::builder().sort(doc! { "username": 1 }).build(),
)
.await
@@ -98,7 +87,7 @@ impl Resolve<ReadArgs> for ListApiKeys {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListApiKeysResponse> {
) -> serror::Result<ListApiKeysResponse> {
let api_keys = find_collect(
&db_client().api_keys,
doc! { "user_id": &user.id },
@@ -120,7 +109,7 @@ impl Resolve<ReadArgs> for ListApiKeysForServiceUser {
async fn resolve(
self,
ReadArgs { user: admin }: &ReadArgs,
) -> mogh_error::Result<ListApiKeysForServiceUserResponse> {
) -> serror::Result<ListApiKeysForServiceUserResponse> {
if !admin.admin {
return Err(anyhow!("This method is admin only.").into());
}

View File

@@ -9,7 +9,7 @@ use database::mungos::{
},
};
use komodo_client::api::read::*;
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::state::db_client;
@@ -19,7 +19,7 @@ impl Resolve<ReadArgs> for GetUserGroup {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetUserGroupResponse> {
) -> serror::Result<GetUserGroupResponse> {
let mut filter = match ObjectId::from_str(&self.user_group) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": &self.user_group },
@@ -43,7 +43,7 @@ impl Resolve<ReadArgs> for ListUserGroups {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListUserGroupsResponse> {
) -> serror::Result<ListUserGroupsResponse> {
let mut filter = Document::new();
if !user.admin {
filter.insert("users", &user.id);

View File

@@ -4,7 +4,7 @@ use database::mungos::{
find::find_collect, mongodb::options::FindOptions,
};
use komodo_client::api::read::*;
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{helpers::query::get_variable, state::db_client};
@@ -14,7 +14,7 @@ impl Resolve<ReadArgs> for GetVariable {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<GetVariableResponse> {
) -> serror::Result<GetVariableResponse> {
let mut variable = get_variable(&self.name).await?;
if !variable.is_secret || user.admin {
return Ok(variable);
@@ -28,7 +28,7 @@ impl Resolve<ReadArgs> for ListVariables {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> mogh_error::Result<ListVariablesResponse> {
) -> serror::Result<ListVariablesResponse> {
let variables = find_collect(
&db_client().variables,
None,

View File

@@ -1,19 +1,16 @@
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{api::terminal::*, entities::user::User};
use mogh_auth_server::middleware::authenticate_request;
use mogh_error::Json;
use serror::Json;
use crate::{
auth::KomodoAuthImpl, helpers::terminal::setup_target_for_user,
auth::auth_request, helpers::terminal::setup_target_for_user,
};
pub fn router() -> Router {
Router::new()
.route("/execute", post(execute_terminal))
.layer(middleware::from_fn(
authenticate_request::<KomodoAuthImpl, true>,
))
.layer(middleware::from_fn(auth_request))
}
// =================
@@ -38,11 +35,8 @@ async fn execute_terminal(
command,
init,
}): Json<ExecuteTerminalBody>,
) -> mogh_error::Result<axum::body::Body> {
info!(
"TERMINAL EXECUTE REQUEST | USER: {} ({})",
user.username, user.id
);
) -> serror::Result<axum::body::Body> {
info!("/terminal/execute request | user: {}", user.username);
let (target, terminal, periphery) =
setup_target_for_user(target, terminal, init, &user).await?;

209
bin/core/src/api/user.rs Normal file
View File

@@ -0,0 +1,209 @@
use std::{collections::VecDeque, time::Instant};
use anyhow::{Context, anyhow};
use axum::{
Extension, Json, Router, extract::Path, middleware, routing::post,
};
use database::mongo_indexed::doc;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_bson,
};
use derive_variants::EnumVariants;
use komodo_client::entities::random_string;
use komodo_client::{
api::user::*,
entities::{api_key::ApiKey, komodo_timestamp, user::User},
};
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::auth_request, helpers::query::get_user, state::db_client,
};
use super::Variant;
pub struct UserArgs {
pub user: User,
}
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[args(UserArgs)]
#[response(Response)]
#[error(serror::Error)]
#[serde(tag = "type", content = "params")]
enum UserRequest {
PushRecentlyViewed(PushRecentlyViewed),
SetLastSeenUpdate(SetLastSeenUpdate),
CreateApiKey(CreateApiKey),
DeleteApiKey(DeleteApiKey),
}
pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(auth_request))
}
async fn variant_handler(
user: Extension<User>,
Path(Variant { variant }): Path<Variant>,
Json(params): Json<serde_json::Value>,
) -> serror::Result<axum::response::Response> {
let req: UserRequest = serde_json::from_value(json!({
"type": variant,
"params": params,
}))?;
handler(user, Json(req)).await
}
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<UserRequest>,
) -> serror::Result<axum::response::Response> {
let timer = Instant::now();
let req_id = Uuid::new_v4();
debug!(
"/user request {req_id} | user: {} ({})",
user.username, user.id
);
let res = request.resolve(&UserArgs { user }).await;
if let Err(e) = &res {
warn!("/user request {req_id} error: {:#}", e.error);
}
let elapsed = timer.elapsed();
debug!("/user request {req_id} | resolve time: {elapsed:?}");
res.map(|res| res.0)
}
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<UserArgs> for PushRecentlyViewed {
async fn resolve(
self,
UserArgs { user }: &UserArgs,
) -> serror::Result<PushRecentlyViewedResponse> {
let user = get_user(&user.id).await?;
let (resource_type, id) = self.resource.extract_variant_id();
let update = match user.recents.get(&resource_type) {
Some(recents) => {
let mut recents = recents
.iter()
.filter(|_id| !id.eq(*_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
doc! { format!("recents.{resource_type}"): to_bson(&recents)? }
}
None => {
doc! { format!("recents.{resource_type}"): [id] }
}
};
update_one_by_id(
&db_client().users,
&user.id,
database::mungos::update::Update::Set(update),
None,
)
.await
.with_context(|| {
format!("failed to update recents.{resource_type}")
})?;
Ok(PushRecentlyViewedResponse {})
}
}
impl Resolve<UserArgs> for SetLastSeenUpdate {
async fn resolve(
self,
UserArgs { user }: &UserArgs,
) -> serror::Result<SetLastSeenUpdateResponse> {
update_one_by_id(
&db_client().users,
&user.id,
database::mungos::update::Update::Set(doc! {
"last_update_view": komodo_timestamp()
}),
None,
)
.await
.context("failed to update user last_update_view")?;
Ok(SetLastSeenUpdateResponse {})
}
}
const SECRET_LENGTH: usize = 40;
const BCRYPT_COST: u32 = 10;
impl Resolve<UserArgs> for CreateApiKey {
#[instrument(
"CreateApiKey",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
) -> serror::Result<CreateApiKeyResponse> {
let user = get_user(&user.id).await?;
let key = format!("K-{}", random_string(SECRET_LENGTH));
let secret = format!("S-{}", random_string(SECRET_LENGTH));
let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
.context("failed at hashing secret string")?;
let api_key = ApiKey {
name: self.name,
key: key.clone(),
secret: secret_hash,
user_id: user.id.clone(),
created_at: komodo_timestamp(),
expires: self.expires,
};
db_client()
.api_keys
.insert_one(api_key)
.await
.context("failed to create api key on db")?;
Ok(CreateApiKeyResponse { key, secret })
}
}
impl Resolve<UserArgs> for DeleteApiKey {
#[instrument(
"DeleteApiKey",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
) -> serror::Result<DeleteApiKeyResponse> {
let client = db_client();
let key = client
.api_keys
.find_one(doc! { "key": &self.key })
.await
.context("failed at db query")?
.context("no api key with key found")?;
if user.id != key.user_id {
return Err(anyhow!("api key does not belong to user").into());
}
client
.api_keys
.delete_one(doc! { "key": key.key })
.await
.context("failed to delete api key from db")?;
Ok(DeleteApiKeyResponse {})
}
}

View File

@@ -4,7 +4,7 @@ use komodo_client::{
action::Action, permission::PermissionLevel, update::Update,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
@@ -23,7 +23,7 @@ impl Resolve<WriteArgs> for CreateAction {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Action> {
) -> serror::Result<Action> {
resource::create::<Action>(&self.name, self.config, None, user)
.await
}
@@ -42,7 +42,7 @@ impl Resolve<WriteArgs> for CopyAction {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Action> {
) -> serror::Result<Action> {
let Action { config, .. } = get_check_permissions::<Action>(
&self.id,
user,
@@ -67,7 +67,7 @@ impl Resolve<WriteArgs> for UpdateAction {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Action> {
) -> serror::Result<Action> {
Ok(resource::update::<Action>(&self.id, self.config, user).await?)
}
}
@@ -85,7 +85,7 @@ impl Resolve<WriteArgs> for RenameAction {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(resource::rename::<Action>(&self.id, &self.name, user).await?)
}
}
@@ -102,7 +102,7 @@ impl Resolve<WriteArgs> for DeleteAction {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Action> {
) -> serror::Result<Action> {
Ok(resource::delete::<Action>(&self.id, user).await?)
}
}

View File

@@ -3,9 +3,9 @@ use std::str::FromStr;
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
use komodo_client::{api::write::CloseAlert, entities::NoData};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{api::write::WriteArgs, state::db_client};

View File

@@ -4,7 +4,7 @@ use komodo_client::{
alerter::Alerter, permission::PermissionLevel, update::Update,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
@@ -23,7 +23,7 @@ impl Resolve<WriteArgs> for CreateAlerter {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Alerter> {
) -> serror::Result<Alerter> {
resource::create::<Alerter>(&self.name, self.config, None, user)
.await
}
@@ -42,7 +42,7 @@ impl Resolve<WriteArgs> for CopyAlerter {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Alerter> {
) -> serror::Result<Alerter> {
let Alerter { config, .. } = get_check_permissions::<Alerter>(
&self.id,
user,
@@ -66,7 +66,7 @@ impl Resolve<WriteArgs> for DeleteAlerter {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Alerter> {
) -> serror::Result<Alerter> {
Ok(resource::delete::<Alerter>(&self.id, user).await?)
}
}
@@ -84,7 +84,7 @@ impl Resolve<WriteArgs> for UpdateAlerter {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Alerter> {
) -> serror::Result<Alerter> {
Ok(
resource::update::<Alerter>(&self.id, self.config, user)
.await?,
@@ -105,7 +105,7 @@ impl Resolve<WriteArgs> for RenameAlerter {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(resource::rename::<Alerter>(&self.id, &self.name, user).await?)
}
}

View File

@@ -19,10 +19,10 @@ use komodo_client::{
update::Update,
},
};
use mogh_resolver::Resolve;
use periphery_client::api::build::{
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
@@ -54,7 +54,7 @@ impl Resolve<WriteArgs> for CreateBuild {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Build> {
) -> serror::Result<Build> {
resource::create::<Build>(&self.name, self.config, None, user)
.await
}
@@ -73,7 +73,7 @@ impl Resolve<WriteArgs> for CopyBuild {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Build> {
) -> serror::Result<Build> {
let Build { mut config, .. } = get_check_permissions::<Build>(
&self.id,
user,
@@ -99,7 +99,7 @@ impl Resolve<WriteArgs> for DeleteBuild {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Build> {
) -> serror::Result<Build> {
Ok(resource::delete::<Build>(&self.id, user).await?)
}
}
@@ -117,7 +117,7 @@ impl Resolve<WriteArgs> for UpdateBuild {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Build> {
) -> serror::Result<Build> {
Ok(resource::update::<Build>(&self.id, self.config, user).await?)
}
}
@@ -135,7 +135,7 @@ impl Resolve<WriteArgs> for RenameBuild {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(resource::rename::<Build>(&self.id, &self.name, user).await?)
}
}
@@ -149,10 +149,7 @@ impl Resolve<WriteArgs> for WriteBuildFileContents {
build = self.build,
)
)]
async fn resolve(
self,
args: &WriteArgs,
) -> mogh_error::Result<Update> {
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let build = get_check_permissions::<Build>(
&self.build,
&args.user,
@@ -229,7 +226,7 @@ async fn write_dockerfile_contents_git(
args: &WriteArgs,
build: Build,
mut update: Update,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let WriteBuildFileContents { build: _, contents } = req;
let mut repo_args: RepoExecutionArgs = if !build
@@ -321,7 +318,7 @@ async fn write_dockerfile_contents_git(
return Ok(update);
}
if let Err(e) = mogh_secret_file::write_async(&full_path, &contents)
if let Err(e) = secret_file::write_async(&full_path, &contents)
.await
.with_context(|| {
format!("Failed to write dockerfile contents to {full_path:?}")
@@ -373,7 +370,7 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<NoData> {
) -> serror::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// build should be able to do this.
let build = get_check_permissions::<Build>(

View File

@@ -4,7 +4,7 @@ use komodo_client::{
builder::Builder, permission::PermissionLevel, update::Update,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
@@ -23,7 +23,7 @@ impl Resolve<WriteArgs> for CreateBuilder {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Builder> {
) -> serror::Result<Builder> {
resource::create::<Builder>(&self.name, self.config, None, user)
.await
}
@@ -42,7 +42,7 @@ impl Resolve<WriteArgs> for CopyBuilder {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Builder> {
) -> serror::Result<Builder> {
let Builder { config, .. } = get_check_permissions::<Builder>(
&self.id,
user,
@@ -66,7 +66,7 @@ impl Resolve<WriteArgs> for DeleteBuilder {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Builder> {
) -> serror::Result<Builder> {
Ok(resource::delete::<Builder>(&self.id, user).await?)
}
}
@@ -84,7 +84,7 @@ impl Resolve<WriteArgs> for UpdateBuilder {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Builder> {
) -> serror::Result<Builder> {
Ok(
resource::update::<Builder>(&self.id, self.config, user)
.await?,
@@ -105,7 +105,7 @@ impl Resolve<WriteArgs> for RenameBuilder {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(resource::rename::<Builder>(&self.id, &self.name, user).await?)
}
}

View File

@@ -1,48 +1,33 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use futures_util::{StreamExt as _, stream::FuturesOrdered};
use komodo_client::{
api::{execute::Deploy, write::*},
api::write::*,
entities::{
Operation, ResourceTarget, SwarmOrServer,
alert::{Alert, AlertData, SeverityLevel},
Operation,
deployment::{
Deployment, DeploymentImage, DeploymentInfo, DeploymentState,
PartialDeploymentConfig, RestartMode, extract_registry_domain,
Deployment, DeploymentImage, DeploymentState,
PartialDeploymentConfig, RestartMode,
},
docker::container::RestartPolicyNameEnum,
komodo_timestamp, optional_string,
komodo_timestamp,
permission::PermissionLevel,
server::{Server, ServerState},
to_container_compatible_name,
update::Update,
user::{auto_redeploy_user, system_user},
},
};
use mogh_cache::SetCache;
use mogh_resolver::Resolve;
use periphery_client::api::{self, container::InspectContainer};
use resolver_api::Resolve;
use crate::{
alert::send_alerts,
api::execute::{self, ExecuteRequest, ExecutionResult},
helpers::{
periphery_client,
query::{get_deployment_state, get_swarm_or_server},
registry_token,
update::{add_update, make_update, poll_update_until_complete},
query::get_deployment_state,
update::{add_update, make_update},
},
permission::get_check_permissions,
resource::{
self, list_full_for_user_using_pattern,
setup_deployment_execution,
},
state::{
action_states, db_client, deployment_status_cache,
image_digest_cache, server_status_cache,
},
resource,
state::{action_states, db_client, server_status_cache},
};
use super::WriteArgs;
@@ -60,7 +45,7 @@ impl Resolve<WriteArgs> for CreateDeployment {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Deployment> {
) -> serror::Result<Deployment> {
resource::create::<Deployment>(
&self.name,
self.config,
@@ -84,7 +69,7 @@ impl Resolve<WriteArgs> for CopyDeployment {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Deployment> {
) -> serror::Result<Deployment> {
let Deployment { config, .. } =
get_check_permissions::<Deployment>(
&self.id,
@@ -115,7 +100,7 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Deployment> {
) -> serror::Result<Deployment> {
let server = get_check_permissions::<Server>(
&self.server,
user,
@@ -216,7 +201,7 @@ impl Resolve<WriteArgs> for DeleteDeployment {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Deployment> {
) -> serror::Result<Deployment> {
Ok(resource::delete::<Deployment>(&self.id, user).await?)
}
}
@@ -234,35 +219,11 @@ impl Resolve<WriteArgs> for UpdateDeployment {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Deployment> {
// If the update changes image,
// also update the stored latest image digest.
let image_update = self
.config
.image
.as_ref()
.map(|image| image.as_image().is_some())
.unwrap_or_default();
let deployment =
) -> serror::Result<Deployment> {
Ok(
resource::update::<Deployment>(&self.id, self.config, user)
.await?;
if image_update {
tokio::spawn(async move {
let _ = (CheckDeploymentForUpdate {
deployment: self.id,
skip_auto_update: false,
wait_for_auto_update: false,
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await;
});
}
Ok(deployment)
.await?,
)
}
}
@@ -279,7 +240,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let deployment = get_check_permissions::<Deployment>(
&self.id,
user,
@@ -353,322 +314,3 @@ impl Resolve<WriteArgs> for RenameDeployment {
Ok(update)
}
}
//
impl Resolve<WriteArgs> for CheckDeploymentForUpdate {
#[instrument(
"CheckDeploymentForUpdate",
skip_all,
fields(
operator = user.id,
deployment = self.deployment,
skip_auto_update = self.skip_auto_update,
wait_for_auto_update = self.wait_for_auto_update,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Self::Response> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// deployment should be able to do this.
let (deployment, swarm_or_server) = setup_deployment_execution(
&self.deployment,
user,
PermissionLevel::Execute.into(),
)
.await?;
swarm_or_server.verify_has_target()?;
check_deployment_for_update_inner(
deployment,
&swarm_or_server,
self.skip_auto_update,
self.wait_for_auto_update,
)
.await
.map_err(Into::into)
}
}
/// If it goes down the "update available" path,
/// only send alert if deployment id is not in this cache.
/// If alert is sent, add ID to cache.
/// If later it goes down non "update available" path,
/// remove the id from cache, so next time it does another alert
/// will be sent.
fn deployment_alert_sent_cache() -> &'static SetCache<String> {
static CACHE: OnceLock<SetCache<String>> = OnceLock::new();
CACHE.get_or_init(Default::default)
}
/// Checks remote registry for latest image digest,
/// and saves it to database associated with the deployment.
///
/// Returns true if update is available and auto deploy is false.
/// If auto deploy is true, this will deploy.
#[instrument(
"CheckDeploymentForUpdateInner",
skip_all,
fields(
deployment = deployment.id,
skip_auto_update,
wait_for_auto_update,
)
)]
pub async fn check_deployment_for_update_inner(
deployment: Deployment,
swarm_or_server: &SwarmOrServer,
skip_auto_update: bool,
// Otherwise spawns task to run in background
wait_for_auto_update: bool,
) -> anyhow::Result<CheckDeploymentForUpdateResponse> {
let alert_cache = deployment_alert_sent_cache();
let (image, account, token) = match &deployment.config.image {
DeploymentImage::Image { image } => {
if image.contains('@') {
// Images with a hardcoded digest can't have update.
return Ok(CheckDeploymentForUpdateResponse {
deployment: deployment.id,
update_available: false,
});
}
let domain = extract_registry_domain(image)?;
let account =
optional_string(&deployment.config.image_registry_account);
let token = if let Some(account) = &account {
registry_token(&domain, account).await?
} else {
None
};
(image, account, token)
}
DeploymentImage::Build { .. } => {
alert_cache.remove(&deployment.id).await;
// This method not used for build based deployments
// as deployed version vs built version can be inferred from Updates.
return Ok(CheckDeploymentForUpdateResponse {
deployment: deployment.id,
update_available: false,
});
}
};
let latest_digest = image_digest_cache()
.get(swarm_or_server, image, account, token)
.await?;
resource::update_info::<Deployment>(
&deployment.id,
&DeploymentInfo {
latest_image_digest: latest_digest.clone(),
},
)
.await?;
let Some((state, Some(current_digests))) =
deployment_status_cache()
.get(&deployment.id)
.await
.map(|s| (s.curr.state, s.curr.image_digests.clone()))
else {
alert_cache.remove(&deployment.id).await;
return Ok(CheckDeploymentForUpdateResponse {
deployment: deployment.id,
update_available: false,
});
};
// If not running or latest digest matches current, early return
if !matches!(state, DeploymentState::Running)
|| !latest_digest.update_available(&current_digests)
{
alert_cache.remove(&deployment.id).await;
return Ok(CheckDeploymentForUpdateResponse {
deployment: deployment.id,
update_available: false,
});
}
if !skip_auto_update && deployment.config.auto_update {
// Trigger deploy + alert
// Conservatively remove from alert cache so 'skip_auto_update'
// doesn't cause alerts not to be sent on subsequent calls.
alert_cache.remove(&deployment.id).await;
let swarm_id = swarm_or_server.swarm_id().map(str::to_string);
let swarm_name = swarm_or_server.swarm_name().map(str::to_string);
let server_id = swarm_or_server.server_id().map(str::to_string);
let server_name =
swarm_or_server.server_name().map(str::to_string);
let id = deployment.id.clone();
let name = deployment.name.clone();
let image = image.clone();
let run = async move {
match execute::inner_handler(
ExecuteRequest::Deploy(Deploy {
deployment: name.clone(),
stop_signal: None,
stop_time: None,
}),
auto_redeploy_user().to_owned(),
)
.await
{
Ok(res) => {
let ExecutionResult::Single(update) = res else {
unreachable!()
};
let Ok(update) =
poll_update_until_complete(&update.id).await
else {
return;
};
if update.success {
let ts = komodo_timestamp();
let alert = Alert {
id: Default::default(),
ts,
resolved: true,
resolved_ts: ts.into(),
level: SeverityLevel::Ok,
target: ResourceTarget::Deployment(id.clone()),
data: AlertData::DeploymentAutoUpdated {
id,
name,
swarm_id,
swarm_name,
server_id,
server_name,
image,
},
};
let res = db_client().alerts.insert_one(&alert).await;
if let Err(e) = res {
error!(
"Failed to record DeploymentAutoUpdated to db | {e:#}"
);
}
send_alerts(&[alert]).await;
}
}
Err(e) => {
warn!("Failed to auto update Deployment {name} | {e:#}",)
}
}
};
if wait_for_auto_update {
run.await
} else {
tokio::spawn(run);
}
} else {
// Avoid spamming alerts
if alert_cache.contains(&deployment.id).await {
return Ok(CheckDeploymentForUpdateResponse {
deployment: deployment.id,
update_available: true,
});
}
alert_cache.insert(deployment.id.clone()).await;
let ts = komodo_timestamp();
let alert = Alert {
id: Default::default(),
ts,
resolved: true,
resolved_ts: ts.into(),
level: SeverityLevel::Ok,
target: ResourceTarget::Deployment(deployment.id.clone()),
data: AlertData::DeploymentImageUpdateAvailable {
id: deployment.id.clone(),
name: deployment.name.clone(),
swarm_id: swarm_or_server.swarm_id().map(str::to_string),
swarm_name: swarm_or_server.swarm_name().map(str::to_string),
server_id: swarm_or_server.server_id().map(str::to_string),
server_name: swarm_or_server
.server_name()
.map(str::to_string),
image: image.clone(),
},
};
let res = db_client().alerts.insert_one(&alert).await;
if let Err(e) = res {
error!(
"Failed to record DeploymentImageUpdateAvailable to db | {e:#}"
);
}
send_alerts(&[alert]).await;
}
Ok(CheckDeploymentForUpdateResponse {
deployment: deployment.id,
update_available: !deployment.config.auto_update,
})
}
//
impl Resolve<WriteArgs> for BatchCheckDeploymentForUpdate {
#[instrument(
"BatchCheckDeploymentForUpdate",
skip_all,
fields(
operator = user.id,
pattern = self.pattern,
skip_auto_update = self.skip_auto_update,
wait_for_auto_update = self.wait_for_auto_update,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> Result<Self::Response, Self::Error> {
let deployments = list_full_for_user_using_pattern::<Deployment>(
&self.pattern,
Default::default(),
user,
PermissionLevel::Execute.into(),
&[],
)
.await?;
let res = deployments
.into_iter()
.map(|deployment| async move {
let swarm_or_server = get_swarm_or_server(
&deployment.config.swarm_id,
&deployment.config.server_id,
)
.await?;
swarm_or_server.verify_has_target().map_err(|e| e.error)?;
check_deployment_for_update_inner(
deployment,
&swarm_or_server,
self.skip_auto_update,
self.wait_for_auto_update,
)
.await
})
.collect::<FuturesOrdered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.filter_map(|res| {
res
.inspect_err(|e| {
warn!(
"Failed to check deployment for update in batch run | {e:#}"
)
})
.ok()
})
.collect();
Ok(res)
}
}

View File

@@ -2,19 +2,18 @@ use anyhow::Context;
use axum::{
Extension, Router, extract::Path, middleware, routing::post,
};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::write::*, entities::user::User};
use mogh_auth_server::middleware::authenticate_request;
use mogh_error::Json;
use mogh_error::Response;
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use strum::Display;
use strum::EnumDiscriminants;
use typeshare::typeshare;
use uuid::Uuid;
use crate::auth::KomodoAuthImpl;
use crate::auth::auth_request;
use super::Variant;
@@ -24,7 +23,7 @@ mod alerter;
mod build;
mod builder;
mod deployment;
mod onboarding;
mod onboarding_key;
mod permissions;
mod procedure;
mod provider;
@@ -33,7 +32,6 @@ mod resource;
mod server;
mod service_user;
mod stack;
mod swarm;
mod sync;
mod tag;
mod terminal;
@@ -41,35 +39,50 @@ mod user;
mod user_group;
mod variable;
pub use {
deployment::check_deployment_for_update_inner,
stack::check_stack_for_update_inner,
};
pub struct WriteArgs {
pub user: User,
}
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumDiscriminants,
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[strum_discriminants(name(WriteRequestMethod), derive(Display))]
#[variant_derive(Debug, Display)]
#[args(WriteArgs)]
#[response(Response)]
#[error(mogh_error::Error)]
#[error(serror::Error)]
#[serde(tag = "type", content = "params")]
pub enum WriteRequest {
// ==== USER ====
CreateLocalUser(CreateLocalUser),
UpdateUserUsername(UpdateUserUsername),
UpdateUserPassword(UpdateUserPassword),
DeleteUser(DeleteUser),
// ==== SERVICE USER ====
CreateServiceUser(CreateServiceUser),
UpdateServiceUserDescription(UpdateServiceUserDescription),
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
// ==== USER GROUP ====
CreateUserGroup(CreateUserGroup),
RenameUserGroup(RenameUserGroup),
DeleteUserGroup(DeleteUserGroup),
AddUserToUserGroup(AddUserToUserGroup),
RemoveUserFromUserGroup(RemoveUserFromUserGroup),
SetUsersInUserGroup(SetUsersInUserGroup),
SetEveryoneUserGroup(SetEveryoneUserGroup),
// ==== PERMISSIONS ====
UpdateUserAdmin(UpdateUserAdmin),
UpdateUserBasePermissions(UpdateUserBasePermissions),
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
// ==== RESOURCE ====
UpdateResourceMeta(UpdateResourceMeta),
// ==== SWARM ====
CreateSwarm(CreateSwarm),
CopySwarm(CopySwarm),
DeleteSwarm(DeleteSwarm),
UpdateSwarm(UpdateSwarm),
RenameSwarm(RenameSwarm),
// ==== SERVER ====
CreateServer(CreateServer),
CopyServer(CopyServer),
@@ -80,12 +93,6 @@ pub enum WriteRequest {
UpdateServerPublicKey(UpdateServerPublicKey),
RotateServerKeys(RotateServerKeys),
// ==== TERMINAL ====
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
// ==== STACK ====
CreateStack(CreateStack),
CopyStack(CopyStack),
@@ -94,8 +101,6 @@ pub enum WriteRequest {
RenameStack(RenameStack),
WriteStackFileContents(WriteStackFileContents),
RefreshStackCache(RefreshStackCache),
CheckStackForUpdate(CheckStackForUpdate),
BatchCheckStackForUpdate(BatchCheckStackForUpdate),
// ==== DEPLOYMENT ====
CreateDeployment(CreateDeployment),
@@ -104,8 +109,6 @@ pub enum WriteRequest {
DeleteDeployment(DeleteDeployment),
UpdateDeployment(UpdateDeployment),
RenameDeployment(RenameDeployment),
CheckDeploymentForUpdate(CheckDeploymentForUpdate),
BatchCheckDeploymentForUpdate(BatchCheckDeploymentForUpdate),
// ==== BUILD ====
CreateBuild(CreateBuild),
@@ -116,6 +119,13 @@ pub enum WriteRequest {
WriteBuildFileContents(WriteBuildFileContents),
RefreshBuildCache(RefreshBuildCache),
// ==== BUILDER ====
CreateBuilder(CreateBuilder),
CopyBuilder(CopyBuilder),
DeleteBuilder(DeleteBuilder),
UpdateBuilder(UpdateBuilder),
RenameBuilder(RenameBuilder),
// ==== REPO ====
CreateRepo(CreateRepo),
CopyRepo(CopyRepo),
@@ -124,6 +134,13 @@ pub enum WriteRequest {
RenameRepo(RenameRepo),
RefreshRepoCache(RefreshRepoCache),
// ==== ALERTER ====
CreateAlerter(CreateAlerter),
CopyAlerter(CopyAlerter),
DeleteAlerter(DeleteAlerter),
UpdateAlerter(UpdateAlerter),
RenameAlerter(RenameAlerter),
// ==== PROCEDURE ====
CreateProcedure(CreateProcedure),
CopyProcedure(CopyProcedure),
@@ -148,51 +165,11 @@ pub enum WriteRequest {
CommitSync(CommitSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
// ==== BUILDER ====
CreateBuilder(CreateBuilder),
CopyBuilder(CopyBuilder),
DeleteBuilder(DeleteBuilder),
UpdateBuilder(UpdateBuilder),
RenameBuilder(RenameBuilder),
// ==== ALERTER ====
CreateAlerter(CreateAlerter),
CopyAlerter(CopyAlerter),
DeleteAlerter(DeleteAlerter),
UpdateAlerter(UpdateAlerter),
RenameAlerter(RenameAlerter),
// ==== ONBOARDING KEY ====
CreateOnboardingKey(CreateOnboardingKey),
UpdateOnboardingKey(UpdateOnboardingKey),
DeleteOnboardingKey(DeleteOnboardingKey),
// ==== USER ====
PushRecentlyViewed(PushRecentlyViewed),
SetLastSeenUpdate(SetLastSeenUpdate),
CreateLocalUser(CreateLocalUser),
DeleteUser(DeleteUser),
// ==== SERVICE USER ====
CreateServiceUser(CreateServiceUser),
UpdateServiceUserDescription(UpdateServiceUserDescription),
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
// ==== USER GROUP ====
CreateUserGroup(CreateUserGroup),
RenameUserGroup(RenameUserGroup),
DeleteUserGroup(DeleteUserGroup),
AddUserToUserGroup(AddUserToUserGroup),
RemoveUserFromUserGroup(RemoveUserFromUserGroup),
SetUsersInUserGroup(SetUsersInUserGroup),
SetEveryoneUserGroup(SetEveryoneUserGroup),
// ==== PERMISSIONS ====
UpdateUserAdmin(UpdateUserAdmin),
UpdateUserBasePermissions(UpdateUserBasePermissions),
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
// ==== TERMINAL ====
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
// ==== TAG ====
CreateTag(CreateTag),
@@ -215,6 +192,11 @@ pub enum WriteRequest {
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
// ==== ONBOARDING KEY ====
CreateOnboardingKey(CreateOnboardingKey),
UpdateOnboardingKey(UpdateOnboardingKey),
DeleteOnboardingKey(DeleteOnboardingKey),
// ==== ALERT ====
CloseAlert(CloseAlert),
}
@@ -223,16 +205,14 @@ pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(
authenticate_request::<KomodoAuthImpl, true>,
))
.layer(middleware::from_fn(auth_request))
}
async fn variant_handler(
user: Extension<User>,
Path(Variant { variant }): Path<Variant>,
Json(params): Json<serde_json::Value>,
) -> mogh_error::Result<axum::response::Response> {
) -> serror::Result<axum::response::Response> {
let req: WriteRequest = serde_json::from_value(json!({
"type": variant,
"params": params,
@@ -243,8 +223,10 @@ async fn variant_handler(
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<WriteRequest>,
) -> mogh_error::Result<axum::response::Response> {
let res = tokio::spawn(task(request, user))
) -> serror::Result<axum::response::Response> {
let req_id = Uuid::new_v4();
let res = tokio::spawn(task(req_id, request, user))
.await
.context("failure in spawned task");
@@ -252,38 +234,18 @@ async fn handler(
}
async fn task(
req_id: Uuid,
request: WriteRequest,
user: User,
) -> mogh_error::Result<axum::response::Response> {
let task_id = Uuid::new_v4();
let method: WriteRequestMethod = (&request).into();
let user_id = user.id.clone();
let username = user.username.clone();
if !matches!(
request,
WriteRequest::SetLastSeenUpdate(_)
| WriteRequest::PushRecentlyViewed(_)
) {
info!(
task_id = task_id.to_string(),
method = method.to_string(),
user_id,
username,
"WRITE REQUEST",
);
}
) -> serror::Result<axum::response::Response> {
let variant = request.extract_variant();
info!("/write request | {variant} | user: {}", user.username);
let res = request.resolve(&WriteArgs { user }).await;
if let Err(e) = &res {
warn!(
task_id = task_id.to_string(),
method = method.to_string(),
user_id,
username,
"WRITE REQUEST | ERROR: {:#}",
"/write request {req_id} | {variant} | error: {:#}",
e.error
);
}

View File

@@ -10,15 +10,12 @@ use komodo_client::{
komodo_timestamp, onboarding_key::OnboardingKey, random_string,
},
};
use mogh_error::{AddStatusCode, AddStatusCodeError};
use mogh_pki::EncodedKeyPair;
use mogh_resolver::Resolve;
use noise::key::EncodedKeyPair;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::{AddStatusCode, AddStatusCodeError};
use crate::{
api::write::WriteArgs, helpers::query::get_all_tags,
state::db_client,
};
use crate::{api::write::WriteArgs, state::db_client};
//
@@ -38,7 +35,7 @@ impl Resolve<WriteArgs> for CreateOnboardingKey {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<CreateOnboardingKeyResponse> {
) -> serror::Result<CreateOnboardingKeyResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -48,29 +45,11 @@ impl Resolve<WriteArgs> for CreateOnboardingKey {
let private_key = if let Some(private_key) = self.private_key {
private_key
} else {
format!("O_{}_O", random_string(28))
};
let public_key = EncodedKeyPair::from_private_key(
mogh_pki::PkiKind::Mutual,
&private_key,
)?
.public
.into_inner();
let tags = if self.tags.is_empty() {
self.tags
} else {
// fix_tags by ensuring existence, and force replace with tag name.
let all_tags = get_all_tags(None).await?;
self
.tags
.into_iter()
.filter_map(|tag| {
let tag =
all_tags.iter().find(|t| t.id == tag || t.name == tag)?;
Some(tag.name.clone())
})
.collect()
format!("O-{}", random_string(30))
};
let public_key = EncodedKeyPair::from_private_key(&private_key)?
.public
.into_inner();
let onboarding_key = OnboardingKey {
public_key,
name: self.name,
@@ -78,8 +57,7 @@ impl Resolve<WriteArgs> for CreateOnboardingKey {
onboarded: Default::default(),
created_at: komodo_timestamp(),
expires: self.expires,
tags,
privileged: self.privileged,
tags: self.tags,
copy_server: self.copy_server,
create_builder: self.create_builder,
};
@@ -121,7 +99,7 @@ impl Resolve<WriteArgs> for UpdateOnboardingKey {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UpdateOnboardingKeyResponse> {
) -> serror::Result<UpdateOnboardingKeyResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -157,28 +135,9 @@ impl Resolve<WriteArgs> for UpdateOnboardingKey {
}
if let Some(tags) = self.tags {
let tags = if tags.is_empty() {
tags
} else {
// fix_tags by ensuring existence, and force replace with tag name.
let all_tags = get_all_tags(None).await?;
tags
.into_iter()
.filter_map(|tag| {
let tag = all_tags
.iter()
.find(|t| t.id == tag || t.name == tag)?;
Some(tag.name.clone())
})
.collect()
};
update.insert("tags", tags);
}
if let Some(privileged) = self.privileged {
update.insert("privileged", privileged);
}
if let Some(copy_server) = self.copy_server {
update.insert("copy_server", copy_server);
}
@@ -217,7 +176,7 @@ impl Resolve<WriteArgs> for DeleteOnboardingKey {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<DeleteOnboardingKeyResponse> {
) -> serror::Result<DeleteOnboardingKeyResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")

View File

@@ -8,6 +8,7 @@ use database::mungos::{
options::UpdateOptions,
},
};
use derive_variants::ExtractVariant as _;
use komodo_client::{
api::write::*,
entities::{
@@ -15,7 +16,7 @@ use komodo_client::{
permission::{UserTarget, UserTargetVariant},
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{helpers::query::get_user, state::db_client};
@@ -34,7 +35,7 @@ impl Resolve<WriteArgs> for UpdateUserAdmin {
async fn resolve(
self,
WriteArgs { user: super_admin }: &WriteArgs,
) -> mogh_error::Result<UpdateUserAdminResponse> {
) -> serror::Result<UpdateUserAdminResponse> {
if !super_admin.super_admin {
return Err(
anyhow!("Only super admins can call this method.").into(),
@@ -82,7 +83,7 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UpdateUserBasePermissionsResponse> {
) -> serror::Result<UpdateUserBasePermissionsResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
@@ -148,7 +149,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UpdatePermissionOnResourceTypeResponse> {
) -> serror::Result<UpdatePermissionOnResourceTypeResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
@@ -226,7 +227,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UpdatePermissionOnTargetResponse> {
) -> serror::Result<UpdatePermissionOnTargetResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
@@ -295,7 +296,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
/// checks if inner id is actually a `name`, and replaces it with id if so.
async fn extract_user_target_with_validation(
user_target: &UserTarget,
) -> mogh_error::Result<(UserTargetVariant, String)> {
) -> serror::Result<(UserTargetVariant, String)> {
match user_target {
UserTarget::User(ident) => {
let filter = match ObjectId::from_str(ident) {
@@ -306,8 +307,8 @@ async fn extract_user_target_with_validation(
.users
.find_one(filter)
.await
.context("Failed to query db for users")?
.context("No matching user found")?
.context("failed to query db for users")?
.context("no matching user found")?
.id;
Ok((UserTargetVariant::User, id))
}
@@ -320,8 +321,8 @@ async fn extract_user_target_with_validation(
.user_groups
.find_one(filter)
.await
.context("Failed to query db for user_groups")?
.context("No matching user_group found")?
.context("failed to query db for user_groups")?
.context("no matching user_group found")?
.id;
Ok((UserTargetVariant::UserGroup, id))
}
@@ -331,68 +332,12 @@ async fn extract_user_target_with_validation(
/// checks if inner id is actually a `name`, and replaces it with id if so.
async fn extract_resource_target_with_validation(
resource_target: &ResourceTarget,
) -> mogh_error::Result<(ResourceTargetVariant, String)> {
) -> serror::Result<(ResourceTargetVariant, String)> {
match resource_target {
ResourceTarget::System(_) => {
let res = resource_target.extract_variant_id();
Ok((res.0, res.1.clone()))
}
ResourceTarget::Swarm(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.swarms
.find_one(filter)
.await
.context("Failed to query db for swarms")?
.context("No matching server found")?
.id;
Ok((ResourceTargetVariant::Server, id))
}
ResourceTarget::Server(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.servers
.find_one(filter)
.await
.context("Failed to query db for servers")?
.context("No matching server found")?
.id;
Ok((ResourceTargetVariant::Server, id))
}
ResourceTarget::Stack(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.stacks
.find_one(filter)
.await
.context("Failed to query db for stacks")?
.context("No matching stack found")?
.id;
Ok((ResourceTargetVariant::Stack, id))
}
ResourceTarget::Deployment(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.deployments
.find_one(filter)
.await
.context("Failed to query db for deployments")?
.context("No matching deployment found")?
.id;
Ok((ResourceTargetVariant::Deployment, id))
}
ResourceTarget::Build(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
@@ -402,11 +347,53 @@ async fn extract_resource_target_with_validation(
.builds
.find_one(filter)
.await
.context("Failed to query db for builds")?
.context("No matching build found")?
.context("failed to query db for builds")?
.context("no matching build found")?
.id;
Ok((ResourceTargetVariant::Build, id))
}
ResourceTarget::Builder(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.builders
.find_one(filter)
.await
.context("failed to query db for builders")?
.context("no matching builder found")?
.id;
Ok((ResourceTargetVariant::Builder, id))
}
ResourceTarget::Deployment(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.deployments
.find_one(filter)
.await
.context("failed to query db for deployments")?
.context("no matching deployment found")?
.id;
Ok((ResourceTargetVariant::Deployment, id))
}
ResourceTarget::Server(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.servers
.find_one(filter)
.await
.context("failed to query db for servers")?
.context("no matching server found")?
.id;
Ok((ResourceTargetVariant::Server, id))
}
ResourceTarget::Repo(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
@@ -416,11 +403,25 @@ async fn extract_resource_target_with_validation(
.repos
.find_one(filter)
.await
.context("Failed to query db for repos")?
.context("No matching repo found")?
.context("failed to query db for repos")?
.context("no matching repo found")?
.id;
Ok((ResourceTargetVariant::Repo, id))
}
ResourceTarget::Alerter(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.alerters
.find_one(filter)
.await
.context("failed to query db for alerters")?
.context("no matching alerter found")?
.id;
Ok((ResourceTargetVariant::Alerter, id))
}
ResourceTarget::Procedure(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
@@ -430,8 +431,8 @@ async fn extract_resource_target_with_validation(
.procedures
.find_one(filter)
.await
.context("Failed to query db for procedures")?
.context("No matching procedure found")?
.context("failed to query db for procedures")?
.context("no matching procedure found")?
.id;
Ok((ResourceTargetVariant::Procedure, id))
}
@@ -444,8 +445,8 @@ async fn extract_resource_target_with_validation(
.actions
.find_one(filter)
.await
.context("Failed to query db for actions")?
.context("No matching action found")?
.context("failed to query db for actions")?
.context("no matching action found")?
.id;
Ok((ResourceTargetVariant::Action, id))
}
@@ -458,38 +459,24 @@ async fn extract_resource_target_with_validation(
.resource_syncs
.find_one(filter)
.await
.context("Failed to query db for resource syncs")?
.context("No matching resource sync found")?
.context("failed to query db for resource syncs")?
.context("no matching resource sync found")?
.id;
Ok((ResourceTargetVariant::ResourceSync, id))
}
ResourceTarget::Builder(ident) => {
ResourceTarget::Stack(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.builders
.stacks
.find_one(filter)
.await
.context("Failed to query db for builders")?
.context("No matching builder found")?
.context("failed to query db for stacks")?
.context("no matching stack found")?
.id;
Ok((ResourceTargetVariant::Builder, id))
}
ResourceTarget::Alerter(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.alerters
.find_one(filter)
.await
.context("Failed to query db for alerters")?
.context("No matching alerter found")?
.id;
Ok((ResourceTargetVariant::Alerter, id))
Ok((ResourceTargetVariant::Stack, id))
}
}
}

View File

@@ -4,7 +4,7 @@ use komodo_client::{
permission::PermissionLevel, procedure::Procedure, update::Update,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
@@ -23,7 +23,7 @@ impl Resolve<WriteArgs> for CreateProcedure {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<CreateProcedureResponse> {
) -> serror::Result<CreateProcedureResponse> {
resource::create::<Procedure>(&self.name, self.config, None, user)
.await
}
@@ -42,7 +42,7 @@ impl Resolve<WriteArgs> for CopyProcedure {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<CopyProcedureResponse> {
) -> serror::Result<CopyProcedureResponse> {
let Procedure { config, .. } =
get_check_permissions::<Procedure>(
&self.id,
@@ -73,7 +73,7 @@ impl Resolve<WriteArgs> for UpdateProcedure {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<UpdateProcedureResponse> {
) -> serror::Result<UpdateProcedureResponse> {
Ok(
resource::update::<Procedure>(&self.id, self.config, user)
.await?,
@@ -94,7 +94,7 @@ impl Resolve<WriteArgs> for RenameProcedure {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(
resource::rename::<Procedure>(&self.id, &self.name, user)
.await?,
@@ -114,7 +114,7 @@ impl Resolve<WriteArgs> for DeleteProcedure {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<DeleteProcedureResponse> {
) -> serror::Result<DeleteProcedureResponse> {
Ok(resource::delete::<Procedure>(&self.id, user).await?)
}
}

View File

@@ -10,9 +10,9 @@ use komodo_client::{
provider::{DockerRegistryAccount, GitProviderAccount},
},
};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
helpers::update::{add_update, make_update},
@@ -35,7 +35,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<CreateGitProviderAccountResponse> {
) -> serror::Result<CreateGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can create git provider accounts")
@@ -111,7 +111,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
async fn resolve(
mut self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<UpdateGitProviderAccountResponse> {
) -> serror::Result<UpdateGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can update git provider accounts")
@@ -199,7 +199,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<DeleteGitProviderAccountResponse> {
) -> serror::Result<DeleteGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can delete git provider accounts")
@@ -261,7 +261,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<CreateDockerRegistryAccountResponse> {
) -> serror::Result<CreateDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!(
@@ -340,7 +340,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
async fn resolve(
mut self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<UpdateDockerRegistryAccountResponse> {
) -> serror::Result<UpdateDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can update docker registry accounts")
@@ -435,7 +435,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<DeleteDockerRegistryAccountResponse> {
) -> serror::Result<DeleteDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!("Only admins can delete docker registry accounts")

View File

@@ -15,8 +15,8 @@ use komodo_client::{
update::{Log, Update},
},
};
use mogh_resolver::Resolve;
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
config::core_config,
@@ -44,7 +44,7 @@ impl Resolve<WriteArgs> for CreateRepo {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Repo> {
) -> serror::Result<Repo> {
resource::create::<Repo>(&self.name, self.config, None, user)
.await
}
@@ -63,7 +63,7 @@ impl Resolve<WriteArgs> for CopyRepo {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Repo> {
) -> serror::Result<Repo> {
let Repo { config, .. } = get_check_permissions::<Repo>(
&self.id,
user,
@@ -87,7 +87,7 @@ impl Resolve<WriteArgs> for DeleteRepo {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Repo> {
) -> serror::Result<Repo> {
Ok(resource::delete::<Repo>(&self.id, user).await?)
}
}
@@ -105,7 +105,7 @@ impl Resolve<WriteArgs> for UpdateRepo {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Repo> {
) -> serror::Result<Repo> {
Ok(resource::update::<Repo>(&self.id, self.config, user).await?)
}
}
@@ -123,7 +123,7 @@ impl Resolve<WriteArgs> for RenameRepo {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let repo = get_check_permissions::<Repo>(
&self.id,
user,
@@ -199,7 +199,7 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<NoData> {
) -> serror::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// repo should be able to do this.
let repo = get_check_permissions::<Repo>(

View File

@@ -1,16 +1,16 @@
use anyhow::anyhow;
use derive_variants::ExtractVariant as _;
use komodo_client::{
api::write::{UpdateResourceMeta, UpdateResourceMetaResponse},
entities::{
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, stack::Stack, swarm::Swarm,
sync::ResourceSync,
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
},
};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::resource::{self, ResourceMetaUpdate};
@@ -32,7 +32,7 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
async fn resolve(
self,
args: &WriteArgs,
) -> mogh_error::Result<UpdateResourceMetaResponse> {
) -> serror::Result<UpdateResourceMetaResponse> {
let meta = ResourceMetaUpdate {
description: self.description,
template: self.template,
@@ -45,15 +45,9 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
.status_code(StatusCode::BAD_REQUEST),
);
}
ResourceTarget::Swarm(id) => {
resource::update_meta::<Swarm>(&id, meta, args).await?;
}
ResourceTarget::Server(id) => {
resource::update_meta::<Server>(&id, meta, args).await?;
}
ResourceTarget::Stack(id) => {
resource::update_meta::<Stack>(&id, meta, args).await?;
}
ResourceTarget::Deployment(id) => {
resource::update_meta::<Deployment>(&id, meta, args).await?;
}
@@ -63,6 +57,12 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
ResourceTarget::Repo(id) => {
resource::update_meta::<Repo>(&id, meta, args).await?;
}
ResourceTarget::Builder(id) => {
resource::update_meta::<Builder>(&id, meta, args).await?;
}
ResourceTarget::Alerter(id) => {
resource::update_meta::<Alerter>(&id, meta, args).await?;
}
ResourceTarget::Procedure(id) => {
resource::update_meta::<Procedure>(&id, meta, args).await?;
}
@@ -73,11 +73,8 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
resource::update_meta::<ResourceSync>(&id, meta, args)
.await?;
}
ResourceTarget::Builder(id) => {
resource::update_meta::<Builder>(&id, meta, args).await?;
}
ResourceTarget::Alerter(id) => {
resource::update_meta::<Alerter>(&id, meta, args).await?;
ResourceTarget::Stack(id) => {
resource::update_meta::<Stack>(&id, meta, args).await?;
}
}
Ok(UpdateResourceMetaResponse {})

View File

@@ -10,8 +10,8 @@ use komodo_client::{
update::{Update, UpdateStatus},
},
};
use mogh_resolver::Resolve;
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{
@@ -37,7 +37,7 @@ impl Resolve<WriteArgs> for CreateServer {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Server> {
) -> serror::Result<Server> {
resource::create::<Server>(
&self.name,
self.config,
@@ -64,7 +64,7 @@ impl Resolve<WriteArgs> for CopyServer {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Server> {
) -> serror::Result<Server> {
let Server { config, .. } = get_check_permissions::<Server>(
&self.id,
user,
@@ -97,7 +97,7 @@ impl Resolve<WriteArgs> for DeleteServer {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Server> {
) -> serror::Result<Server> {
Ok(resource::delete::<Server>(&self.id, user).await?)
}
}
@@ -115,7 +115,7 @@ impl Resolve<WriteArgs> for UpdateServer {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Server> {
) -> serror::Result<Server> {
Ok(resource::update::<Server>(&self.id, self.config, user).await?)
}
}
@@ -133,7 +133,7 @@ impl Resolve<WriteArgs> for RenameServer {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(resource::rename::<Server>(&self.id, &self.name, user).await?)
}
}
@@ -151,7 +151,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
user,

View File

@@ -1,22 +1,20 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use database::mungos::{by_id::find_one_by_id, mongodb::bson::doc};
use database::mungos::{
by_id::find_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
};
use komodo_client::{
api::write::*,
api::{user::CreateApiKey, write::*},
entities::{
komodo_timestamp,
user::{NewUserParams, User, UserConfig},
user::{User, UserConfig},
},
};
use mogh_auth_client::api::manage::CreateApiKey;
use mogh_auth_server::api::manage::api_key::create_api_key;
use mogh_error::{AddStatusCode as _, AddStatusCodeError as _};
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use crate::{
auth::KomodoAuthImpl, helpers::validations::validate_username,
state::db_client,
};
use crate::{api::user::UserArgs, state::db_client};
use super::WriteArgs;
@@ -33,30 +31,32 @@ impl Resolve<WriteArgs> for CreateServiceUser {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<CreateServiceUserResponse> {
) -> serror::Result<CreateServiceUserResponse> {
if !user.admin {
return Err(anyhow!("user not admin").into());
}
if ObjectId::from_str(&self.username).is_ok() {
return Err(
anyhow!("Only Admins can manage Service Users")
.status_code(StatusCode::FORBIDDEN),
anyhow!("username cannot be valid ObjectId").into(),
);
}
validate_username(&self.username)
.status_code(StatusCode::BAD_REQUEST)?;
let config = UserConfig::Service {
description: self.description,
};
let mut user = User::new(NewUserParams {
let mut user = User {
id: Default::default(),
username: self.username,
config,
enabled: true,
admin: false,
super_admin: false,
config,
create_server_permissions: false,
create_build_permissions: false,
last_update_view: 0,
recents: Default::default(),
all: Default::default(),
updated_at: komodo_timestamp(),
});
};
user.id = db_client()
.users
.insert_one(&user)
@@ -66,7 +66,6 @@ impl Resolve<WriteArgs> for CreateServiceUser {
.as_object_id()
.context("inserted id is not object id")?
.to_string();
Ok(user)
}
}
@@ -84,30 +83,20 @@ impl Resolve<WriteArgs> for UpdateServiceUserDescription {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<UpdateServiceUserDescriptionResponse> {
) -> serror::Result<UpdateServiceUserDescriptionResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can manage Service Users")
.status_code(StatusCode::FORBIDDEN),
);
return Err(anyhow!("user not admin").into());
}
let db = db_client();
let service_user = db
.users
.find_one(doc! { "username": &self.username })
.await
.context("Failed to query db for user")?
.context("No user with given username")?;
.context("failed to query db for user")?
.context("no user with given username")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(
anyhow!("Target user is not Service User")
.status_code(StatusCode::FORBIDDEN),
);
return Err(anyhow!("user is not service user").into());
};
db.users
.update_one(
doc! { "username": &self.username },
@@ -115,15 +104,13 @@ impl Resolve<WriteArgs> for UpdateServiceUserDescription {
)
.await
.context("failed to update user on db")?;
let service_user = db
let res = db
.users
.find_one(doc! { "username": &self.username })
.await
.context("failed to query db for user")?
.context("user with username not found")?;
Ok(service_user)
Ok(res)
}
}
@@ -141,35 +128,23 @@ impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<CreateApiKeyForServiceUserResponse> {
) -> serror::Result<CreateApiKeyForServiceUserResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can manage Service Users")
.status_code(StatusCode::FORBIDDEN),
);
return Err(anyhow!("user not admin").into());
}
let service_user =
find_one_by_id(&db_client().users, &self.user_id)
.await
.context("Failed to query db for user")?
.context("No user found with id")?;
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(
anyhow!("Target user is not Service User")
.status_code(StatusCode::FORBIDDEN),
);
return Err(anyhow!("user is not service user").into());
};
create_api_key(
&KomodoAuthImpl,
service_user.id,
CreateApiKey {
name: self.name,
expires: self.expires as u64,
},
)
CreateApiKey {
name: self.name,
expires: self.expires,
}
.resolve(&UserArgs { user: service_user })
.await
}
}
@@ -186,36 +161,25 @@ impl Resolve<WriteArgs> for DeleteApiKeyForServiceUser {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<DeleteApiKeyForServiceUserResponse> {
) -> serror::Result<DeleteApiKeyForServiceUserResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can manage Service Users")
.status_code(StatusCode::FORBIDDEN),
);
return Err(anyhow!("user not admin").into());
}
let db = db_client();
let api_key = db
.api_keys
.find_one(doc! { "key": &self.key })
.await
.context("failed to query db for api key")?
.context("did not find matching api key")?;
let service_user =
find_one_by_id(&db_client().users, &api_key.user_id)
.await
.context("failed to query db for user")?
.context("no user found with id")?;
let UserConfig::Service { .. } = &service_user.config else {
return Err(
anyhow!("Target user is not Service User")
.status_code(StatusCode::FORBIDDEN),
);
return Err(anyhow!("user is not service user").into());
};
db.api_keys
.delete_one(doc! { "key": self.key })
.await

View File

@@ -1,55 +1,42 @@
use std::{collections::HashMap, path::PathBuf, sync::OnceLock};
use std::path::PathBuf;
use anyhow::{Context, anyhow};
use database::{
bson::to_bson,
mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
},
};
use database::mungos::mongodb::bson::{doc, to_document};
use formatting::format_serror;
use futures_util::{StreamExt as _, stream::FuturesOrdered};
use komodo_client::{
api::{execute::DeployStack, write::*},
api::write::*,
entities::{
FileContents, NoData, Operation, RepoExecutionArgs,
ResourceTarget, SwarmOrServer,
alert::{Alert, AlertData, SeverityLevel},
all_logs_success, komodo_timestamp,
all_logs_success,
permission::PermissionLevel,
repo::Repo,
stack::{Stack, StackInfo, StackServiceWithUpdate, StackState},
server::ServerState,
stack::{Stack, StackInfo},
update::Update,
user::{auto_redeploy_user, stack_user, system_user},
user::stack_user,
},
};
use mogh_cache::SetCache;
use mogh_resolver::Resolve;
use periphery_client::api::compose::{
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
WriteComposeContentsToHost,
};
use resolver_api::Resolve;
use crate::{
alert::send_alerts,
api::execute::{self, ExecuteRequest, ExecutionResult},
config::core_config,
helpers::{
query::get_swarm_or_server,
stack_git_token, swarm_or_server_request,
update::{add_update, make_update, poll_update_until_complete},
periphery_client,
query::get_server_with_state,
stack_git_token,
update::{add_update, make_update},
},
permission::get_check_permissions,
resource::{self, list_full_for_user_using_pattern},
resource,
stack::{
remote::{RemoteComposeContents, get_repo_compose_contents},
services::{
extract_services_from_stack, extract_services_into_res,
},
setup_stack_execution,
services::extract_services_into_res,
},
state::{db_client, image_digest_cache, stack_status_cache},
state::db_client,
};
use super::WriteArgs;
@@ -67,7 +54,7 @@ impl Resolve<WriteArgs> for CreateStack {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Stack> {
) -> serror::Result<Stack> {
resource::create::<Stack>(&self.name, self.config, None, user)
.await
}
@@ -86,7 +73,7 @@ impl Resolve<WriteArgs> for CopyStack {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Stack> {
) -> serror::Result<Stack> {
let Stack { config, .. } = get_check_permissions::<Stack>(
&self.id,
user,
@@ -111,7 +98,7 @@ impl Resolve<WriteArgs> for DeleteStack {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Stack> {
) -> serror::Result<Stack> {
Ok(resource::delete::<Stack>(&self.id, user).await?)
}
}
@@ -129,31 +116,8 @@ impl Resolve<WriteArgs> for UpdateStack {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Stack> {
let compose_update = self.config.linked_repo.is_some()
|| self.config.repo.is_some()
|| self.config.files_on_host.is_some()
|| self.config.file_contents.is_some();
let stack =
resource::update::<Stack>(&self.id, self.config, user).await?;
if compose_update {
tokio::spawn(async move {
let _ = (CheckStackForUpdate {
stack: self.id,
skip_auto_update: false,
wait_for_auto_update: false,
skip_cache_refresh: true,
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await;
});
}
Ok(stack)
) -> serror::Result<Stack> {
Ok(resource::update::<Stack>(&self.id, self.config, user).await?)
}
}
@@ -170,7 +134,7 @@ impl Resolve<WriteArgs> for RenameStack {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(resource::rename::<Stack>(&self.id, &self.name, user).await?)
}
}
@@ -188,7 +152,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let WriteStackFileContents {
stack,
file_path,
@@ -215,13 +179,11 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
update.push_simple_log("File contents to write", &contents);
let id = stack.id.clone();
let update = if stack.config.files_on_host {
if stack.config.files_on_host {
write_stack_file_contents_on_host(
stack, file_path, contents, update,
)
.await?
.await
} else {
write_stack_file_contents_git(
stack,
@@ -230,23 +192,8 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
&user.username,
update,
)
.await?
};
tokio::spawn(async move {
let _ = (CheckStackForUpdate {
stack: id,
skip_auto_update: false,
wait_for_auto_update: false,
skip_cache_refresh: true,
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await;
});
Ok(update)
.await
}
}
}
@@ -256,25 +203,33 @@ async fn write_stack_file_contents_on_host(
file_path: String,
contents: String,
mut update: Update,
) -> mogh_error::Result<Update> {
let swarm_or_server = get_swarm_or_server(
&stack.config.swarm_id,
&stack.config.server_id,
)
.await?;
let res = swarm_or_server_request(
&swarm_or_server,
WriteComposeContentsToHost {
) -> serror::Result<Update> {
if stack.config.server_id.is_empty() {
return Err(anyhow!(
"Cannot write file, Files on host Stack has not configured a Server"
).into());
}
let (server, state) =
get_server_with_state(&stack.config.server_id).await?;
if state != ServerState::Ok {
return Err(
anyhow!(
"Cannot write file when server is unreachable or disabled"
)
.into(),
);
}
match periphery_client(&server)
.await?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
file_path,
contents,
},
)
.await;
match res {
})
.await
.context("Failed to write contents to host")
{
Ok(log) => {
update.logs.push(log);
}
@@ -284,7 +239,7 @@ async fn write_stack_file_contents_on_host(
format_serror(&e.into()),
);
}
}
};
if !all_logs_success(&update.logs) {
update.finalize();
@@ -322,7 +277,7 @@ async fn write_stack_file_contents_git(
contents: &str,
username: &str,
mut update: Update,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let mut repo = if !stack.config.linked_repo.is_empty() {
crate::resource::get::<Repo>(&stack.config.linked_repo)
.await?
@@ -460,7 +415,7 @@ impl Resolve<WriteArgs> for RefreshStackCache {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<NoData> {
) -> serror::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// stack should be able to do this.
let stack = get_check_permissions::<Stack>(
@@ -493,15 +448,6 @@ impl Resolve<WriteArgs> for RefreshStackCache {
}
let mut missing_files = Vec::new();
let service_image_digests = stack
.info
.latest_services
.iter()
.filter_map(|s| {
let digest = s.image_digest.clone()?;
Some((s.service_name.clone(), digest))
})
.collect::<HashMap<_, _>>();
let (
latest_services,
@@ -513,22 +459,26 @@ impl Resolve<WriteArgs> for RefreshStackCache {
// =============
// FILES ON HOST
// =============
if let Ok(swarm_or_server) = get_swarm_or_server(
&stack.config.swarm_id,
&stack.config.server_id,
)
.await
{
let (server, state) = if stack.config.server_id.is_empty() {
(None, ServerState::Disabled)
} else {
let (server, state) =
get_server_with_state(&stack.config.server_id).await?;
(Some(server), state)
};
if state != ServerState::Ok {
(vec![], None, None, None, None)
} else if let Some(server) = server {
let GetComposeContentsOnHostResponse { contents, errors } =
match swarm_or_server_request(
&swarm_or_server,
GetComposeContentsOnHost {
match periphery_client(&server)
.await?
.request(GetComposeContentsOnHost {
file_paths: stack.all_file_dependencies(),
name: stack.name.clone(),
run_directory: stack.config.run_directory.clone(),
},
)
.await
})
.await
.context("failed to get compose file contents from host")
{
Ok(res) => res,
Err(e) => GetComposeContentsOnHostResponse {
@@ -539,6 +489,7 @@ impl Resolve<WriteArgs> for RefreshStackCache {
}],
},
};
let project_name = stack.project_name(true);
let mut services = Vec::new();
@@ -551,7 +502,6 @@ impl Resolve<WriteArgs> for RefreshStackCache {
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
&service_image_digests,
&mut services,
) {
warn!(
@@ -563,16 +513,7 @@ impl Resolve<WriteArgs> for RefreshStackCache {
(services, Some(contents), Some(errors), None, None)
} else {
// This path is reached if the swarm / server is not available.
// It carries over the last successful poll.
(
stack.info.latest_services,
stack.info.remote_contents,
stack.info.remote_errors,
// Files on host can set hash / message back to None.
None,
None,
)
(vec![], None, None, None, None)
}
} else if !repo_empty {
// ================
@@ -603,7 +544,6 @@ impl Resolve<WriteArgs> for RefreshStackCache {
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
&service_image_digests,
&mut services,
) {
warn!(
@@ -629,7 +569,6 @@ impl Resolve<WriteArgs> for RefreshStackCache {
// this should latest (not deployed), so make the project name fresh.
&stack.project_name(true),
&stack.config.file_contents,
&service_image_digests,
&mut services,
) {
warn!(
@@ -671,414 +610,3 @@ impl Resolve<WriteArgs> for RefreshStackCache {
Ok(NoData {})
}
}
//
impl Resolve<WriteArgs> for CheckStackForUpdate {
#[instrument(
"CheckStackForUpdate",
skip_all,
fields(
operator = user.id,
stack = self.stack,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Self::Response> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// stack should be able to do this.
let (stack, swarm_or_server) = setup_stack_execution(
&self.stack,
user,
PermissionLevel::Execute.into(),
)
.await?;
swarm_or_server.verify_has_target()?;
check_stack_for_update_inner(
stack.id,
&swarm_or_server,
self.skip_auto_update,
self.wait_for_auto_update,
self.skip_cache_refresh,
)
.await
.map_err(Into::into)
}
}
/// If it goes down the "update available" path,
/// only send alert if stack id + service is not in this cache.
/// If alert is sent, add ID to cache.
/// If later it goes down non "update available" path,
/// remove the id from cache, so next time it does another alert
/// will be sent.
fn stack_alert_sent_cache() -> &'static SetCache<(String, String)> {
static CACHE: OnceLock<SetCache<(String, String)>> =
OnceLock::new();
CACHE.get_or_init(Default::default)
}
/// First refresh stack cache, then save
/// latest available 'image_digest' for stack services
/// to database.
#[instrument(
"CheckStackForUpdateInner",
skip_all,
fields(
stack = stack,
)
)]
pub async fn check_stack_for_update_inner(
// ID or name.
stack: String,
swarm_or_server: &SwarmOrServer,
skip_auto_update: bool,
// Otherwise spawns task to run in background
wait_for_auto_update: bool,
skip_cache_refresh: bool,
) -> anyhow::Result<CheckStackForUpdateResponse> {
if !skip_cache_refresh {
(RefreshStackCache {
stack: stack.clone(),
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await
.map_err(|e| e.error)
.context("Failed to refresh stack cache before update check")?;
}
// Query again after refresh
let mut stack = resource::get::<Stack>(&stack).await?;
let cache = image_digest_cache();
for service in &mut stack.info.latest_services {
// Prefer the image coming from deployed services
// so it will be after any interpolation.
let image = stack
.info
.deployed_services
.as_ref()
.and_then(|services| {
services.iter().find_map(|deployed| {
(deployed.service_name == service.service_name)
.then_some(&deployed.image)
})
})
.unwrap_or(&service.image);
if image.is_empty() ||
// Images with a hardcoded digest can't have update.
image.contains('@')
{
service.image_digest = None;
continue;
}
match cache.get(swarm_or_server, image, None, None).await {
Ok(digest) => service.image_digest = Some(digest),
Err(e) => {
warn!(
"Failed to check for update | Stack: {} | Service: {} | Error: {e:#}",
stack.name, service.service_name
);
service.image_digest = None;
continue;
}
};
}
let latest_services = to_bson(&stack.info.latest_services)
.context("Failed to serialize stack latest services to BSON")?;
update_one_by_id(
&db_client().stacks,
&stack.id,
doc! { "$set": { "info.latest_services": latest_services } },
None,
)
.await?;
let alert_cache = stack_alert_sent_cache();
let Some(status) = stack_status_cache().get(&stack.id).await else {
alert_cache
.retain(|(stack_id, _)| stack_id != &stack.id)
.await;
return Ok(CheckStackForUpdateResponse {
services: extract_services_from_stack(&stack)
.into_iter()
.map(|service| StackServiceWithUpdate {
service: service.service_name,
image: service.image,
update_available: false,
})
.collect(),
stack: stack.id,
});
};
let StackState::Running = status.curr.state else {
alert_cache
.retain(|(stack_id, _)| stack_id != &stack.id)
.await;
return Ok(CheckStackForUpdateResponse {
stack: stack.id,
services: status
.curr
.services
.iter()
.map(|service| StackServiceWithUpdate {
service: service.service.clone(),
image: service.image.clone(),
update_available: false,
})
.collect(),
});
};
let mut services = Vec::new();
for service in status.curr.services.iter() {
let mut service_with_update = StackServiceWithUpdate {
service: service.service.clone(),
image: service.image.clone(),
update_available: false,
};
let Some(current_digests) = &service.image_digests else {
services.push(service_with_update);
continue;
};
let Some(latest_digest) =
stack.info.latest_services.iter().find_map(|s| {
if s.service_name == service.service {
s.image_digest.clone()
} else {
None
}
})
else {
services.push(service_with_update);
continue;
};
service_with_update.update_available =
latest_digest.update_available(current_digests);
if service_with_update.update_available
&& (skip_auto_update || !stack.config.auto_update)
&& !alert_cache
.contains(&(stack.id.clone(), service.service.clone()))
.await
{
// Send service update available alert
alert_cache
.insert((stack.id.clone(), service.service.clone()))
.await;
let ts = komodo_timestamp();
let alert = Alert {
id: Default::default(),
ts,
resolved: true,
resolved_ts: ts.into(),
level: SeverityLevel::Ok,
target: ResourceTarget::Stack(stack.id.clone()),
data: AlertData::StackImageUpdateAvailable {
id: stack.id.clone(),
name: stack.name.clone(),
swarm_name: swarm_or_server
.swarm_name()
.map(str::to_string),
swarm_id: swarm_or_server.swarm_id().map(str::to_string),
server_name: swarm_or_server
.server_name()
.map(str::to_string),
server_id: swarm_or_server.server_id().map(str::to_string),
service: service.service.clone(),
image: service.image.clone(),
},
};
let res = db_client().alerts.insert_one(&alert).await;
if let Err(e) = res {
error!(
"Failed to record StackImageUpdateAvailable to db | {e:#}"
);
}
send_alerts(&[alert]).await;
}
services.push(service_with_update);
}
let services_with_update = services
.iter()
.filter(|service| service.update_available)
.cloned()
.collect::<Vec<_>>();
if skip_auto_update
|| !stack.config.auto_update
|| services_with_update.is_empty()
{
return Ok(CheckStackForUpdateResponse {
stack: stack.id,
services,
});
}
// Conservatively remove from alert cache so 'skip_auto_update'
// doesn't cause alerts not to be sent on subsequent calls.
alert_cache
.retain(|(stack_id, _)| stack_id != &stack.id)
.await;
let deploy_services = if stack.config.auto_update_all_services {
Vec::new()
} else {
services_with_update
.iter()
.map(|service| service.service.clone())
.collect()
};
let swarm_id = swarm_or_server.swarm_id().map(str::to_string);
let swarm_name = swarm_or_server.swarm_name().map(str::to_string);
let server_id = swarm_or_server.server_id().map(str::to_string);
let server_name = swarm_or_server.server_name().map(str::to_string);
let stack_id = stack.id.clone();
let run = async move {
match execute::inner_handler(
ExecuteRequest::DeployStack(DeployStack {
stack: stack.id.clone(),
services: deploy_services,
stop_time: None,
}),
auto_redeploy_user().to_owned(),
)
.await
{
Ok(res) => {
let ExecutionResult::Single(update) = res else {
unreachable!()
};
let Ok(update) = poll_update_until_complete(&update.id).await
else {
return;
};
if update.success {
let ts = komodo_timestamp();
let alert = Alert {
id: Default::default(),
ts,
resolved: true,
resolved_ts: ts.into(),
level: SeverityLevel::Ok,
target: ResourceTarget::Stack(stack.id.clone()),
data: AlertData::StackAutoUpdated {
id: stack.id.clone(),
name: stack.name.clone(),
swarm_id,
swarm_name,
server_id,
server_name,
images: services_with_update
.iter()
.map(|service| service.image.clone())
.collect(),
},
};
let res = db_client().alerts.insert_one(&alert).await;
if let Err(e) = res {
error!("Failed to record StackAutoUpdated to db | {e:#}");
}
send_alerts(&[alert]).await;
}
}
Err(e) => {
warn!("Failed to auto update Stack {} | {e:#}", stack.name)
}
}
};
if wait_for_auto_update {
run.await
} else {
tokio::spawn(run);
}
Ok(CheckStackForUpdateResponse {
stack: stack_id,
services,
})
}
//
impl Resolve<WriteArgs> for BatchCheckStackForUpdate {
  #[instrument(
    "BatchCheckStackForUpdate",
    skip_all,
    fields(
      operator = user.id,
      pattern = self.pattern,
      skip_auto_update = self.skip_auto_update,
      wait_for_auto_update = self.wait_for_auto_update,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Resolve every Stack matching the pattern which the user
    // has Execute permission on.
    let stacks = list_full_for_user_using_pattern::<Stack>(
      &self.pattern,
      Default::default(),
      user,
      PermissionLevel::Execute.into(),
      &[],
    )
    .await?;
    // Run the per-stack checks concurrently. FuturesOrdered keeps
    // the results in the same order as the matched stacks.
    let checks = stacks
      .into_iter()
      .map(|stack| async move {
        let swarm_or_server = get_swarm_or_server(
          &stack.config.swarm_id,
          &stack.config.server_id,
        )
        .await?;
        swarm_or_server.verify_has_target().map_err(|e| e.error)?;
        check_stack_for_update_inner(
          stack.id,
          &swarm_or_server,
          self.skip_auto_update,
          self.wait_for_auto_update,
          self.skip_cache_refresh,
        )
        .await
      })
      .collect::<FuturesOrdered<_>>()
      .collect::<Vec<_>>()
      .await;
    // Individual failures are logged and dropped rather than
    // failing the whole batch.
    let responses = checks
      .into_iter()
      .filter_map(|check| match check {
        Ok(response) => Some(response),
        Err(e) => {
          warn!(
            "Failed to check stack for update in batch run | {e:#}"
          );
          None
        }
      })
      .collect();
    Ok(responses)
  }
}

View File

@@ -1,108 +0,0 @@
use komodo_client::{
api::write::*,
entities::{
permission::PermissionLevel, swarm::Swarm, update::Update,
},
};
use mogh_resolver::Resolve;
use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateSwarm {
#[instrument(
"CreateSwarm",
skip_all,
fields(
operator = user.id,
swarm = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Swarm> {
resource::create::<Swarm>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopySwarm {
  #[instrument(
    "CopySwarm",
    skip_all,
    fields(
      operator = user.id,
      swarm = self.name,
      copy_swarm = self.id,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> mogh_error::Result<Swarm> {
    // Read access on the source Swarm is enough to copy its config.
    let source = get_check_permissions::<Swarm>(
      &self.id,
      user,
      PermissionLevel::Read.into(),
    )
    .await?;
    // Create a new Swarm under the requested name, seeded with
    // the source Swarm's config.
    resource::create::<Swarm>(
      &self.name,
      source.config.into(),
      None,
      user,
    )
    .await
  }
}
impl Resolve<WriteArgs> for DeleteSwarm {
  #[instrument(
    "DeleteSwarm",
    skip_all,
    fields(
      operator = user.id,
      swarm = self.id,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> mogh_error::Result<Swarm> {
    // Deletion (permission checks included) is handled by the
    // generic resource helper.
    resource::delete::<Swarm>(&self.id, user)
      .await
      .map_err(Into::into)
  }
}
impl Resolve<WriteArgs> for UpdateSwarm {
  #[instrument(
    "UpdateSwarm",
    skip_all,
    fields(
      operator = user.id,
      swarm = self.id,
      update = serde_json::to_string(&self.config).unwrap()
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> mogh_error::Result<Swarm> {
    // Apply the partial config via the generic resource helper.
    resource::update::<Swarm>(&self.id, self.config, user)
      .await
      .map_err(Into::into)
  }
}
impl Resolve<WriteArgs> for RenameSwarm {
  #[instrument(
    "RenameSwarm",
    skip_all,
    fields(
      operator = user.id,
      swarm = self.id,
      new_name = self.name
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> mogh_error::Result<Update> {
    // Renaming produces an Update record via the generic
    // resource helper.
    resource::rename::<Swarm>(&self.id, &self.name, user)
      .await
      .map_err(Into::into)
  }
}

View File

@@ -26,14 +26,13 @@ use komodo_client::{
repo::Repo,
server::Server,
stack::Stack,
swarm::Swarm,
sync::{ResourceSync, ResourceSyncInfo},
sync::{ResourceSync, ResourceSyncInfo, SyncDeployUpdate},
to_path_compatible_name,
update::{Log, Update},
user::sync_user,
},
};
use mogh_resolver::Resolve;
use resolver_api::Resolve;
use tracing::Instrument;
use crate::{
@@ -70,7 +69,7 @@ impl Resolve<WriteArgs> for CreateResourceSync {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<ResourceSync> {
) -> serror::Result<ResourceSync> {
resource::create::<ResourceSync>(
&self.name,
self.config,
@@ -94,7 +93,7 @@ impl Resolve<WriteArgs> for CopyResourceSync {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<ResourceSync> {
) -> serror::Result<ResourceSync> {
let ResourceSync { config, .. } =
get_check_permissions::<ResourceSync>(
&self.id,
@@ -124,7 +123,7 @@ impl Resolve<WriteArgs> for DeleteResourceSync {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<ResourceSync> {
) -> serror::Result<ResourceSync> {
Ok(resource::delete::<ResourceSync>(&self.id, user).await?)
}
}
@@ -142,7 +141,7 @@ impl Resolve<WriteArgs> for UpdateResourceSync {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<ResourceSync> {
) -> serror::Result<ResourceSync> {
Ok(
resource::update::<ResourceSync>(&self.id, self.config, user)
.await?,
@@ -163,7 +162,7 @@ impl Resolve<WriteArgs> for RenameResourceSync {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
Ok(
resource::rename::<ResourceSync>(&self.id, &self.name, user)
.await?,
@@ -182,10 +181,7 @@ impl Resolve<WriteArgs> for WriteSyncFileContents {
file_path = self.file_path,
)
)]
async fn resolve(
self,
args: &WriteArgs,
) -> mogh_error::Result<Update> {
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let sync = get_check_permissions::<ResourceSync>(
&self.sync,
&args.user,
@@ -235,7 +231,7 @@ async fn write_sync_file_contents_on_host(
args: &WriteArgs,
sync: ResourceSync,
mut update: Update,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let WriteSyncFileContents {
sync: _,
resource_path,
@@ -253,7 +249,7 @@ async fn write_sync_file_contents_on_host(
.context("Invalid resource path")?;
let full_path = root.join(&resource_path).join(&file_path);
if let Err(e) = mogh_secret_file::write_async(&full_path, &contents)
if let Err(e) = secret_file::write_async(&full_path, &contents)
.await
.with_context(|| {
format!(
@@ -299,7 +295,7 @@ async fn write_sync_file_contents_git(
sync: ResourceSync,
repo: Option<Repo>,
mut update: Update,
) -> mogh_error::Result<Update> {
) -> serror::Result<Update> {
let WriteSyncFileContents {
sync: _,
resource_path,
@@ -452,10 +448,7 @@ impl Resolve<WriteArgs> for CommitSync {
sync = self.sync,
)
)]
async fn resolve(
self,
args: &WriteArgs,
) -> mogh_error::Result<Update> {
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let WriteArgs { user } = args;
let sync = get_check_permissions::<entities::sync::ResourceSync>(
@@ -516,20 +509,11 @@ impl Resolve<WriteArgs> for CommitSync {
None
};
// Get the latest existing resources to preserve any meta values
let RemoteResources { resources, .. } =
crate::sync::remote::get_remote_resources(&sync, repo.as_ref())
.await
.context("failed to get remote resources")?;
let res = ExportAllResourcesToToml {
include_resources: sync.config.include_resources,
tags: sync.config.match_tags.clone(),
include_variables: sync.config.include_variables,
include_user_groups: sync.config.include_user_groups,
existing: resources
.inspect_err(|e| warn!("Existing resource TOML is unavailable, resource meta will not be preserved | ERROR: {e:#}"))
.ok(),
}
.resolve(&ReadArgs {
user: sync_user().to_owned(),
@@ -551,13 +535,12 @@ impl Resolve<WriteArgs> for CommitSync {
.join(to_path_compatible_name(&sync.name))
.join(&resource_path);
let span = info_span!("CommitSyncOnHost");
if let Err(e) =
mogh_secret_file::write_async(&file_path, &res.toml)
.instrument(span)
.await
.with_context(|| {
format!("Failed to write resource file to {file_path:?}",)
})
if let Err(e) = secret_file::write_async(&file_path, &res.toml)
.instrument(span)
.await
.with_context(|| {
format!("Failed to write resource file to {file_path:?}",)
})
{
update.push_error_log(
"Write resource file",
@@ -694,7 +677,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<ResourceSync> {
) -> serror::Result<ResourceSync> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// sync should be able to do this.
let mut sync =
@@ -785,40 +768,110 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
let mut diffs = Vec::new();
macro_rules! push_updates {
($(($Type:ident, $field:ident)),* $(,)?) => {
$(
push_updates_for_view::<$Type>(
resources.$field,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
)*
};
}
// New resource types need to be added here manually.
push_updates!(
(Server, servers),
(Swarm, swarms),
(Stack, stacks),
(Deployment, deployments),
(Build, builds),
(Repo, repos),
(Procedure, procedures),
(Action, actions),
(Builder, builders),
(Alerter, alerters),
(ResourceSync, resource_syncs),
);
push_updates_for_view::<Server>(
resources.servers,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Stack>(
resources.stacks,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Deployment>(
resources.deployments,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Build>(
resources.builds,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Repo>(
resources.repos,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Procedure>(
resources.procedures,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Action>(
resources.actions,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Builder>(
resources.builders,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Alerter>(
resources.alerters,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ResourceSync>(
resources.resource_syncs,
delete,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
(diffs, deploy_updates)
} else {
Default::default()
(Vec::new(), SyncDeployUpdate::default())
};
let variable_updates = if sync.config.include_variables {
@@ -852,7 +905,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
let (
resource_updates,
(pending_deploys, pending_deploy_error),
deploy_updates,
variable_updates,
user_group_updates,
pending_error,
@@ -868,7 +921,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
};
let has_updates = !resource_updates.is_empty()
|| !pending_deploys.is_empty()
|| !deploy_updates.to_deploy == 0
|| !variable_updates.is_empty()
|| !user_group_updates.is_empty();
@@ -880,8 +933,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
remote_errors: sync.info.remote_errors,
pending_hash: sync.info.pending_hash,
pending_message: sync.info.pending_message,
pending_deploys,
pending_deploy_error,
pending_deploy: deploy_updates,
resource_updates,
variable_updates,
user_group_updates,

View File

@@ -13,9 +13,9 @@ use komodo_client::{
server::Server, stack::Stack, sync::ResourceSync, tag::Tag,
},
};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
config::core_config,
@@ -39,7 +39,7 @@ impl Resolve<WriteArgs> for CreateTag {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Tag> {
) -> serror::Result<Tag> {
if core_config().disable_non_admin_create && !user.admin {
return Err(
anyhow!("Non admins cannot create tags")
@@ -88,7 +88,7 @@ impl Resolve<WriteArgs> for RenameTag {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Tag> {
) -> serror::Result<Tag> {
if ObjectId::from_str(&self.name).is_ok() {
return Err(anyhow!("tag name cannot be ObjectId").into());
}
@@ -121,7 +121,7 @@ impl Resolve<WriteArgs> for UpdateTagColor {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Tag> {
) -> serror::Result<Tag> {
let tag = get_tag_check_owner(&self.tag, user).await?;
update_one_by_id(
@@ -149,7 +149,7 @@ impl Resolve<WriteArgs> for DeleteTag {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Tag> {
) -> serror::Result<Tag> {
let tag = get_tag_check_owner(&self.id, user).await?;
tokio::try_join!(

View File

@@ -3,19 +3,15 @@ use futures_util::{StreamExt as _, stream::FuturesUnordered};
use komodo_client::{
api::write::*,
entities::{
NoData,
deployment::Deployment,
permission::PermissionLevel,
server::Server,
stack::Stack,
terminal::{Terminal, TerminalTarget},
NoData, deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, terminal::TerminalTarget,
user::User,
},
};
use mogh_error::AddStatusCode;
use mogh_resolver::Resolve;
use periphery_client::api;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::{
@@ -51,35 +47,30 @@ impl Resolve<WriteArgs> for CreateTerminal {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<Terminal> {
) -> serror::Result<NoData> {
match self.target.clone() {
TerminalTarget::Server { server } => {
let server = server
.context("Must provide 'target.params.server'")
.status_code(StatusCode::BAD_REQUEST)?;
create_server_terminal(self, server, user)
.await
.map_err(Into::into)
create_server_terminal(self, server, user).await?;
}
TerminalTarget::Container { server, container } => {
create_container_terminal(self, server, container, user)
.await
.map_err(Into::into)
.await?;
}
TerminalTarget::Stack { stack, service } => {
let service = service
.context("Must provide 'target.params.service'")
.status_code(StatusCode::BAD_REQUEST)?;
create_stack_service_terminal(self, stack, service, user)
.await
.map_err(Into::into)
.await?;
}
TerminalTarget::Deployment { deployment } => {
create_deployment_terminal(self, deployment, user)
.await
.map_err(Into::into)
create_deployment_terminal(self, deployment, user).await?;
}
}
};
Ok(NoData {})
}
}
@@ -93,7 +84,7 @@ async fn create_server_terminal(
}: CreateTerminal,
server: String,
user: &User,
) -> anyhow::Result<Terminal> {
) -> anyhow::Result<()> {
let server = get_check_permissions::<Server>(
&server,
user,
@@ -103,7 +94,7 @@ async fn create_server_terminal(
let periphery = periphery_client(&server).await?;
let mut terminal = periphery
periphery
.request(api::terminal::CreateServerTerminal {
name,
command,
@@ -112,12 +103,7 @@ async fn create_server_terminal(
.await
.context("Failed to create Server Terminal on Periphery")?;
// Fix server terminal target with server id
terminal.target = TerminalTarget::Server {
server: Some(server.id),
};
Ok(terminal)
Ok(())
}
async fn create_container_terminal(
@@ -125,7 +111,7 @@ async fn create_container_terminal(
server: String,
container: String,
user: &User,
) -> anyhow::Result<Terminal> {
) -> anyhow::Result<()> {
let server = get_check_permissions::<Server>(
&server,
user,
@@ -141,7 +127,7 @@ async fn create_stack_service_terminal(
stack: String,
service: String,
user: &User,
) -> anyhow::Result<Terminal> {
) -> anyhow::Result<()> {
let (_, periphery, container) =
get_stack_service_periphery_container(&stack, &service, user)
.await?;
@@ -152,7 +138,7 @@ async fn create_deployment_terminal(
req: CreateTerminal,
deployment: String,
user: &User,
) -> anyhow::Result<Terminal> {
) -> anyhow::Result<()> {
let (_, periphery, container) =
get_deployment_periphery_container(&deployment, user).await?;
create_container_terminal_inner(req, &periphery, container).await
@@ -173,7 +159,7 @@ impl Resolve<WriteArgs> for DeleteTerminal {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<NoData> {
) -> serror::Result<NoData> {
let server = match &self.target {
TerminalTarget::Server { server } => {
let server = server
@@ -247,7 +233,7 @@ impl Resolve<WriteArgs> for DeleteAllTerminals {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<NoData> {
) -> serror::Result<NoData> {
let server = get_check_permissions::<Server>(
&self.server,
user,

View File

@@ -1,120 +1,28 @@
use std::{collections::VecDeque, str::FromStr};
use std::str::FromStr;
use anyhow::{Context, anyhow};
use async_timing_util::unix_timestamp_ms;
use database::{
bson::to_bson,
hash_password,
mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
},
mungos::mongodb::bson::{doc, oid::ObjectId},
};
use komodo_client::{
api::write::*,
entities::{
komodo_timestamp,
user::{NewUserParams, User, UserConfig},
NoData,
user::{User, UserConfig},
},
};
use mogh_error::{AddStatusCode as _, AddStatusCodeError};
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
helpers::{
query::get_user,
validations::{validate_password, validate_username},
},
state::db_client,
};
use crate::{config::core_config, state::db_client};
use super::WriteArgs;
//
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<WriteArgs> for PushRecentlyViewed {
#[instrument(
"PushRecentlyViewed",
level = "debug",
skip_all,
fields(
user_id = user.id,
resource = format!("{:?}", self.resource)
)
)]
async fn resolve(
self,
WriteArgs { user, .. }: &WriteArgs,
) -> mogh_error::Result<PushRecentlyViewedResponse> {
let user = get_user(&user.id).await?;
let (resource_type, id) = self.resource.extract_variant_id();
let field = format!("recents.{resource_type}");
let update = match user.recents.get(&resource_type) {
Some(recents) => {
let mut recents = recents
.iter()
.filter(|_id| !id.eq(*_id))
.take(RECENTLY_VIEWED_MAX - 1)
.collect::<VecDeque<_>>();
recents.push_front(id);
doc! { &field: to_bson(&recents)? }
}
None => {
doc! { &field: [id] }
}
};
update_one_by_id(
&db_client().users,
&user.id,
database::mungos::update::Update::Set(update),
None,
)
.await
.with_context(|| format!("Failed to update user '{field}'"))?;
Ok(PushRecentlyViewedResponse {})
}
}
//
impl Resolve<WriteArgs> for SetLastSeenUpdate {
#[instrument(
"SetLastSeenUpdate",
level = "debug",
skip_all,
fields(user_id = user.id)
)]
async fn resolve(
self,
WriteArgs { user, .. }: &WriteArgs,
) -> mogh_error::Result<SetLastSeenUpdateResponse> {
update_one_by_id(
&db_client().users,
&user.id,
database::mungos::update::Update::Set(doc! {
"last_update_view": komodo_timestamp()
}),
None,
)
.await
.context("Failed to update user 'last_update_view'")?;
Ok(SetLastSeenUpdateResponse {})
}
}
//
impl Resolve<WriteArgs> for CreateLocalUser {
#[instrument(
"CreateLocalUser",
@@ -127,18 +35,27 @@ impl Resolve<WriteArgs> for CreateLocalUser {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<CreateLocalUserResponse> {
) -> serror::Result<CreateLocalUserResponse> {
if !admin.admin {
return Err(
anyhow!("This method is Admin Only.")
anyhow!("This method is admin-only.")
.status_code(StatusCode::FORBIDDEN),
);
}
validate_username(&self.username)
.status_code(StatusCode::BAD_REQUEST)?;
validate_password(&self.password)
.status_code(StatusCode::BAD_REQUEST)?;
if self.username.is_empty() {
return Err(anyhow!("Username cannot be empty.").into());
}
if ObjectId::from_str(&self.username).is_ok() {
return Err(
anyhow!("Username cannot be valid ObjectId").into(),
);
}
if self.password.is_empty() {
return Err(anyhow!("Password cannot be empty.").into());
}
let db = db_client();
@@ -155,16 +72,22 @@ impl Resolve<WriteArgs> for CreateLocalUser {
let ts = unix_timestamp_ms() as i64;
let hashed_password = hash_password(self.password)?;
let mut user = User::new(NewUserParams {
let mut user = User {
id: Default::default(),
username: self.username,
enabled: true,
admin: false,
super_admin: false,
create_server_permissions: false,
create_build_permissions: false,
updated_at: ts,
last_update_view: 0,
recents: Default::default(),
all: Default::default(),
config: UserConfig::Local {
password: hashed_password,
},
updated_at: ts,
});
};
user.id = db_client()
.users
@@ -184,6 +107,91 @@ impl Resolve<WriteArgs> for CreateLocalUser {
//
impl Resolve<WriteArgs> for UpdateUserUsername {
  #[instrument(
    "UpdateUserUsername",
    skip_all,
    fields(
      operator = user.id,
      new_username = self.username,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<UpdateUserUsernameResponse> {
    // Credential updates can be locked for specific users, or for
    // everyone via the "__ALL__" sentinel, in the core config.
    let locked = core_config()
      .lock_login_credentials_for
      .iter()
      .any(|locked| {
        locked == "__ALL__" || *locked == user.username
      });
    if locked {
      return Err(
        anyhow!("User not allowed to update their username.")
          .into(),
      );
    }
    if self.username.is_empty() {
      return Err(anyhow!("Username cannot be empty.").into());
    }
    // Usernames which parse as ObjectIds are rejected, matching
    // the same restriction applied at user creation.
    if ObjectId::from_str(&self.username).is_ok() {
      return Err(
        anyhow!("Username cannot be valid ObjectId").into(),
      );
    }
    let db = db_client();
    // Usernames must be unique across all users.
    let existing = db
      .users
      .find_one(doc! { "username": &self.username })
      .await
      .context("Failed to query for existing users")?;
    if existing.is_some() {
      return Err(anyhow!("Username already taken.").into());
    }
    let id = ObjectId::from_str(&user.id)
      .context("User id not valid ObjectId.")?;
    db.users
      .update_one(
        doc! { "_id": id },
        doc! { "$set": { "username": self.username } },
      )
      .await
      .context("Failed to update user username on database.")?;
    Ok(NoData {})
  }
}
//
impl Resolve<WriteArgs> for UpdateUserPassword {
  #[instrument(
    "UpdateUserPassword",
    skip_all,
    fields(operator = user.id)
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<UpdateUserPasswordResponse> {
    // Credential updates can be locked for specific users, or for
    // everyone via the "__ALL__" sentinel, in the core config.
    let locked = core_config()
      .lock_login_credentials_for
      .iter()
      .any(|locked| {
        locked == "__ALL__" || *locked == user.username
      });
    if locked {
      return Err(
        anyhow!("User not allowed to update their password.")
          .into(),
      );
    }
    // The db client helper performs the actual password update.
    db_client().set_user_password(user, &self.password).await?;
    Ok(NoData {})
  }
}
//
impl Resolve<WriteArgs> for DeleteUser {
#[instrument(
"DeleteUser",
@@ -196,26 +204,22 @@ impl Resolve<WriteArgs> for DeleteUser {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<DeleteUserResponse> {
) -> serror::Result<DeleteUserResponse> {
if !admin.admin {
return Err(
anyhow!("This method is admin-only.")
.status_code(StatusCode::FORBIDDEN),
);
}
if admin.username == self.user || admin.id == self.user {
return Err(anyhow!("User cannot delete themselves.").into());
}
let query = if let Ok(id) = ObjectId::from_str(&self.user) {
doc! { "_id": id }
} else {
doc! { "username": self.user }
};
let db = db_client();
let Some(user) = db
.users
.find_one(query.clone())
@@ -226,25 +230,21 @@ impl Resolve<WriteArgs> for DeleteUser {
anyhow!("No user found with given id / username").into(),
);
};
if user.super_admin {
return Err(
anyhow!("Cannot delete a super admin user.").into(),
);
}
if user.admin && !admin.super_admin {
return Err(
anyhow!("Only a Super Admin can delete an admin user.")
.into(),
);
}
db.users
.delete_one(query)
.await
.context("Failed to delete user from database")?;
// Also remove user id from all user groups
if let Err(e) = db
.user_groups
@@ -253,7 +253,6 @@ impl Resolve<WriteArgs> for DeleteUser {
{
warn!("Failed to remove deleted user from user groups | {e:?}");
};
Ok(user)
}
}

View File

@@ -10,9 +10,9 @@ use komodo_client::{
api::write::*,
entities::{komodo_timestamp, user_group::UserGroup},
};
use mogh_error::AddStatusCodeError;
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::state::db_client;
@@ -30,7 +30,7 @@ impl Resolve<WriteArgs> for CreateUserGroup {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UserGroup> {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -76,7 +76,7 @@ impl Resolve<WriteArgs> for RenameUserGroup {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UserGroup> {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -112,7 +112,7 @@ impl Resolve<WriteArgs> for DeleteUserGroup {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UserGroup> {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -156,7 +156,7 @@ impl Resolve<WriteArgs> for AddUserToUserGroup {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UserGroup> {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -211,7 +211,7 @@ impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UserGroup> {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -266,7 +266,7 @@ impl Resolve<WriteArgs> for SetUsersInUserGroup {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UserGroup> {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
@@ -324,7 +324,7 @@ impl Resolve<WriteArgs> for SetEveryoneUserGroup {
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> mogh_error::Result<UserGroup> {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")

View File

@@ -4,15 +4,14 @@ use komodo_client::{
api::write::*,
entities::{Operation, ResourceTarget, variable::Variable},
};
use mogh_error::{AddStatusCode as _, AddStatusCodeError};
use mogh_resolver::Resolve;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
helpers::{
query::get_variable,
update::{add_update, make_update},
validations::{validate_variable_name, validate_variable_value},
},
state::db_client,
};
@@ -33,10 +32,10 @@ impl Resolve<WriteArgs> for CreateVariable {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<CreateVariableResponse> {
) -> serror::Result<CreateVariableResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can create Variables")
anyhow!("Only admins can create variables")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -48,11 +47,6 @@ impl Resolve<WriteArgs> for CreateVariable {
is_secret,
} = self;
validate_variable_name(&name)
.status_code(StatusCode::BAD_REQUEST)?;
validate_variable_value(&value)
.status_code(StatusCode::BAD_REQUEST)?;
let variable = Variable {
name,
value,
@@ -64,7 +58,7 @@ impl Resolve<WriteArgs> for CreateVariable {
.variables
.insert_one(&variable)
.await
.context("Failed to create Variable on db")?;
.context("Failed to create variable on db")?;
let mut update = make_update(
ResourceTarget::system(),
@@ -73,8 +67,7 @@ impl Resolve<WriteArgs> for CreateVariable {
);
update
.push_simple_log("Create Variable", format!("{variable:#?}"));
.push_simple_log("create variable", format!("{variable:#?}"));
update.finalize();
add_update(update).await?;
@@ -95,21 +88,16 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<UpdateVariableValueResponse> {
) -> serror::Result<UpdateVariableValueResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can update Variables")
anyhow!("Only admins can update variables")
.status_code(StatusCode::FORBIDDEN),
);
}
let UpdateVariableValue { name, value } = self;
validate_variable_name(&name)
.status_code(StatusCode::BAD_REQUEST)?;
validate_variable_value(&value)
.status_code(StatusCode::BAD_REQUEST)?;
let variable = get_variable(&name).await?;
if value == variable.value {
@@ -165,14 +153,13 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<UpdateVariableDescriptionResponse> {
) -> serror::Result<UpdateVariableDescriptionResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can update Variables")
anyhow!("Only admins can update variables")
.status_code(StatusCode::FORBIDDEN),
);
}
db_client()
.variables
.update_one(
@@ -181,7 +168,6 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
)
.await
.context("Failed to update variable description on db")?;
Ok(get_variable(&self.name).await?)
}
}
@@ -199,14 +185,13 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<UpdateVariableIsSecretResponse> {
) -> serror::Result<UpdateVariableIsSecretResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can update Variables")
anyhow!("Only admins can update variables")
.status_code(StatusCode::FORBIDDEN),
);
}
db_client()
.variables
.update_one(
@@ -214,8 +199,7 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
doc! { "$set": { "is_secret": self.is_secret } },
)
.await
.context("Failed to update Variable 'is_secret' on db")?;
.context("Failed to update variable is secret on db")?;
Ok(get_variable(&self.name).await?)
}
}
@@ -232,21 +216,19 @@ impl Resolve<WriteArgs> for DeleteVariable {
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> mogh_error::Result<DeleteVariableResponse> {
) -> serror::Result<DeleteVariableResponse> {
if !user.admin {
return Err(
anyhow!("Only Admins can delete Variables")
anyhow!("Only admins can delete variables")
.status_code(StatusCode::FORBIDDEN),
);
}
let variable = get_variable(&self.name).await?;
db_client()
.variables
.delete_one(doc! { "name": &self.name })
.await
.context("Failed to delete Variable on db")?;
.context("Failed to delete variable on db")?;
let mut update = make_update(
ResourceTarget::system(),

Some files were not shown because too many files have changed in this diff Show More