Compare commits

..

373 Commits

Author SHA1 Message Date
mbecker20
c5401de1c5 tweak user level tab view 2025-10-28 11:42:29 -07:00
mbecker20
7a3d9e0ef6 tweak description 2025-10-28 00:32:39 -07:00
mbecker20
595e3ece42 deploy 2.0.0-dev-86 2025-10-27 21:05:13 -07:00
mbecker20
a3bc895755 fix terminal disconnect 2025-10-27 21:04:46 -07:00
mbecker20
3e3def03ec terminal init properly lexes init command 2025-10-27 21:01:15 -07:00
mbecker20
bc672d9649 deploy 2.0.0-dev-85 2025-10-27 20:01:18 -07:00
mbecker20
ea6dee4d51 clippy lint 2025-10-27 19:13:43 -07:00
mbecker20
b985f18c74 deploy 2.0.0-dev-84 2025-10-27 19:12:54 -07:00
mbecker20
45909b2f04 pid1 reaper doesn't work, init: true should be required in compose 2025-10-27 19:06:50 -07:00
mbecker20
2b5a54ce89 deploy 2.0.0-dev-83 2025-10-27 18:31:56 -07:00
mbecker20
a18f33b95e formalize the terminal message variants 2025-10-27 18:31:30 -07:00
mbecker20
f35b00ea95 bump clap dependency 2025-10-27 16:18:30 -07:00
mbecker20
70fab08520 clean up terminal modules 2025-10-27 16:17:20 -07:00
mbecker20
0331780a5f rename variables shell -> command 2025-10-27 11:08:57 -07:00
mbecker20
06cdfd2bbc Terminal -> Terminals tabs 2025-10-27 02:53:06 -07:00
mbecker20
1555202569 Create Terminal don't auto set request after changed 2025-10-27 02:42:06 -07:00
mbecker20
5139622aad deploy 2.0.0-dev-82 2025-10-27 02:28:48 -07:00
mbecker20
61ce2ee3db improve new terminal 2025-10-27 02:04:15 -07:00
mbecker20
3171c14f2b comment on spawn process reaper 2025-10-27 01:41:06 -07:00
mbecker20
521db748d8 deploy 2.0.0-dev-81 2025-10-27 01:27:42 -07:00
mbecker20
35bf224080 deploy 2.0.0-dev-80 2025-10-27 01:21:44 -07:00
mbecker20
e0b31cfe51 CreateTerminal only shows resources which are actually available to connect to 2025-10-27 00:44:56 -07:00
mbecker20
0a890078b0 deploy 2.0.0-dev-79 2025-10-27 00:38:08 -07:00
mbecker20
df97ced7a4 deploy 2.0.0-dev-78 2025-10-27 00:03:26 -07:00
mbecker20
d4e5e2e6d8 add execute_<>_terminal convenience methods 2025-10-26 23:35:17 -07:00
mbecker20
19aa60dcb5 deploy 2.0.0-dev-77 2025-10-26 23:21:15 -07:00
mbecker20
fc19c53e6f deploy 2.0.0-dev-76 2025-10-26 23:00:59 -07:00
mbecker20
4f0af960db Big Terminal refactor + most commands run directly / bypass 'sh -c "..."' 2025-10-26 23:00:35 -07:00
mbecker20
e2ec5258fb add "New" kb shortcut 2025-10-23 23:55:24 -07:00
mbecker20
49b6545a02 reorder cli command list 2025-10-23 23:53:10 -07:00
mbecker20
0aabaa9e62 deploy 2.0.0-dev-75 2025-10-23 12:23:10 -07:00
mbecker20
dc65986eab binaries still built with bullseye for compat, but final images use trixie 2025-10-23 12:22:50 -07:00
mbecker20
1d8f28437d km attach <CONTAINER> 2025-10-23 12:22:02 -07:00
mbecker20
c1502e89c2 deploy 2.0.0-dev-74 2025-10-23 11:51:40 -07:00
mbecker20
0bd15fc442 ResourceQuery.names supports names or ids 2025-10-23 11:23:37 -07:00
mbecker20
5a3621b02e km exec 2025-10-23 01:55:50 -07:00
mbecker20
38192e2dac deploy 2.0.0-dev-73 2025-10-23 00:56:15 -07:00
mbecker20
5d271d5547 use Ping timeout to handle reconnect if for some reason network cuts but ws doesn't receive Close 2025-10-23 00:55:51 -07:00
mbecker20
11fb67a35b ssh use cancel token so stdout.write_all isn't cancelled mid-write, which leads to undefined behavior 2025-10-23 00:14:17 -07:00
mbecker20
a80499dcc4 improve stack config files responsive 2025-10-22 19:02:30 -07:00
mbecker20
8c76b8487f alert responsive, better Server terminal disabled 2025-10-22 13:48:08 -07:00
mbecker20
2b32d9042a deploy 2.0.0-dev-72 2025-10-22 01:00:19 -07:00
mbecker20
dc48f1f2ca deploy 2.0.0-dev-71 2025-10-22 00:50:02 -07:00
mbecker20
8e7b7bdcf1 deploy 2.0.0-dev-70 2025-10-22 00:44:54 -07:00
mbecker20
f11d64f72e add 'init' param to make 'execute_terminal' in single call possible 2025-10-22 00:44:33 -07:00
mbecker20
2ffae85180 dashboard table section headers link to resources page 2025-10-22 00:03:12 -07:00
mbecker20
bd79d0f1e0 km ssh <SERVER> [COMMAND] -n [NAME] 2025-10-21 23:55:36 -07:00
mbecker20
e890b1f675 deploy 2.0.0-dev-69 2025-10-21 23:32:18 -07:00
mbecker20
3b7de25c30 Shift + X - Terminals, Shift + N - New (Resource, Terminal) 2025-10-21 16:11:27 -07:00
mbecker20
793bb99f31 nav to terminal on create 2025-10-21 16:00:50 -07:00
mbecker20
d465c9f273 deploy 2.0.0-dev-68 2025-10-21 15:51:38 -07:00
mbecker20
ce641a8974 terminal page 2025-10-21 15:51:18 -07:00
mbecker20
1b89ceb122 deploy 2.0.0-dev-67 2025-10-21 02:50:21 -07:00
mbecker20
2dbc011d26 remove unneeded log on client terminal disconnect 2025-10-21 02:33:19 -07:00
mbecker20
246da88ae1 deploy 2.0.0-dev-66 2025-10-21 02:29:12 -07:00
mbecker20
a8c16f64b1 km ssh 2025-10-21 02:28:42 -07:00
mbecker20
a5b711a348 stack tabs localstorage increment 2025-10-20 20:35:08 -07:00
mbecker20
9666e9ad83 Fix monitoring table with proper server version component 2025-10-20 03:01:07 -07:00
mbecker20
7479640c73 add hover information for mysterious server header icons 2025-10-20 02:53:18 -07:00
mbecker20
4823825035 give websocket indicator info on hover 2025-10-20 02:35:12 -07:00
mbecker20
23897a7acf clippy 2025-10-20 02:16:52 -07:00
mbecker20
20d5588b5c deploy 2.0.0-dev-65 2025-10-20 02:15:15 -07:00
mbecker20
f7e15ccde5 progress on terminals page 2025-10-20 02:14:51 -07:00
mbecker20
cf7623b1fc combine all resources / table view into dashboard 2025-10-20 01:40:27 -07:00
mbecker20
d3c464c05d start Terminals management page 2025-10-20 00:42:45 -07:00
mbecker20
5c9d416aa4 prog on docs update 2025-10-19 23:33:41 -07:00
mbecker20
aabcd88312 update connect-servers docs 2025-10-19 23:07:50 -07:00
mbecker20
9d2624c6bc clarify root directory in periphery config file 2025-10-19 23:07:19 -07:00
mbecker20
ee11fb0b6c clean up setup script 2025-10-19 23:07:02 -07:00
mbecker20
45adfbddd0 mounting custom CA 2025-10-19 23:06:48 -07:00
mbecker20
d26d035dc6 clean up docs intro 2025-10-19 22:03:17 -07:00
mbecker20
e673ba0adf deploy 2.0.0-dev-64 2025-10-19 21:48:15 -07:00
mbecker20
f876facfa7 improve git status message / failure propagation 2025-10-19 21:47:29 -07:00
mbecker20
3a47d57478 container class px-[1.2rem] 2025-10-19 20:31:40 -07:00
mbecker20
a707028277 responsive tweaks 2025-10-19 20:07:30 -07:00
mbecker20
0c6276c677 fix Resources / Containers mobile 2025-10-19 19:51:28 -07:00
mbecker20
fc9c6706f1 keep more descriptive settings header mobile 2025-10-19 13:24:44 -07:00
mbecker20
7674269ce9 fix user dropdown not showing username mobile 2025-10-19 13:11:34 -07:00
mbecker20
3b511c5adc improve server terminal mobile responsiveness 2025-10-19 13:00:30 -07:00
mbecker20
87221a10e9 fix mobile ContainerTerminal responsiveness 2025-10-19 12:56:11 -07:00
mbecker20
450cb6a148 fix stack config files mobile responsiveness 2025-10-19 12:46:51 -07:00
mbecker20
f252cefb21 responsive server docker tab 2025-10-19 12:37:26 -07:00
mbecker20
7855e9d688 run dkf 2025-10-19 12:30:59 -07:00
mbecker20
feb263c15f more type safe tabs 2025-10-19 12:27:55 -07:00
mbecker20
4f8d1c22cc rest of tabs also use mobile friendly 2025-10-19 12:11:11 -07:00
mbecker20
60bd47834e deploy 2.0.0-dev-63 2025-10-19 11:48:09 -07:00
mbecker20
4d632a6b61 improve resources mobile tabs responsiveness 2025-10-19 11:47:47 -07:00
mbecker20
381dd76723 deploy 2.0.0-dev-62 2025-10-19 01:37:10 -07:00
mbecker20
077e28a5fe fix ConfigList too wide on mobile 2025-10-19 01:36:50 -07:00
mbecker20
6b02aaed7d hide core pubkey copy if origin not https 2025-10-19 01:28:45 -07:00
mbecker20
e466944c05 improve mobile settings view 2025-10-19 01:24:41 -07:00
mbecker20
8ff94b7465 deploy 2.0.0-dev-61 2025-10-19 00:35:26 -07:00
mbecker20
b17df5ed7b show host public ip 2025-10-19 00:34:52 -07:00
mbecker20
207dc30206 cli is distroless, no shell / update-ca-certificates 2025-10-18 22:12:44 -07:00
mbecker20
c3eb386bdb fix copy entrypoint 2025-10-18 22:07:16 -07:00
mbecker20
4279e46892 deploy 2.0.0-dev-60 2025-10-18 12:59:19 -07:00
mbecker20
8d3d2fee12 use entrypoint scripts to make update-ca-certificates consistent when using custom CMD 2025-10-18 12:58:55 -07:00
mbecker20
1df36c4266 deploy 2.0.0-dev-59 2025-10-18 11:36:07 -07:00
mbecker20
36f7ad33c7 core and periphery images auto run update-ca-certificates on start, only need to mount in. 2025-10-18 11:35:45 -07:00
mbecker20
ec34b2c139 deploy 2.0.0-dev-58 2025-10-18 11:02:11 -07:00
mbecker20
d14c28d1f2 new otel instrumentation 2025-10-18 11:01:47 -07:00
mbecker20
68f7a0e9ce all info menu to top of settings 2025-10-18 00:45:59 -07:00
mbecker20
50f0376f0a Add Core title and public key to top of Settings 2025-10-18 00:01:41 -07:00
mbecker20
bbd53747ad fix km ps -h description 2025-10-17 17:17:18 -07:00
mbecker20
6a2adf1f83 tweak logs 2025-10-16 01:06:37 -07:00
mbecker20
128b15b94f deploy 2.0.0-dev-57 2025-10-16 00:59:46 -07:00
mbecker20
8d74b377b7 more otel refinements 2025-10-16 00:59:20 -07:00
mbecker20
d7e972e5c6 stack ui doesn't show project missing when deploying 2025-10-15 23:49:26 -07:00
mbecker20
e5cb4aac5a Fix: Webhook triggered checks linked repo branch for build, stack, sync 2025-10-15 18:06:43 -07:00
mbecker20
d0f62f8326 rework tracing events / improve opentelemetry output 2025-10-15 01:41:18 -07:00
mbecker20
47c4091a4b onboarding key uses recognizable key 2025-10-14 16:57:35 -07:00
mbecker20
973480e2b3 remove all the unnecessary instrument debug 2025-10-14 00:33:53 -07:00
mbecker20
b9e1cc87d2 remove instrument from validate_cancel_repo_build 2025-10-13 23:52:55 -07:00
mbecker20
05d20c8603 deploy 2.0.0-dev-56 2025-10-13 22:05:07 -07:00
mbecker20
fe2d68a001 fix config loading 2025-10-13 22:04:42 -07:00
mbecker20
26fd5b2a6d deploy 2.0.0-dev-55 2025-10-13 20:30:40 -07:00
mbecker20
76457bcb61 apply env / shell interpolation as *final* config loading stage, to include env vars. 2025-10-13 20:26:13 -07:00
mbecker20
ebd2c2238d bump deps 2025-10-13 19:51:05 -07:00
mbecker20
b7fc1bef7b refine default env 2025-10-13 13:53:12 -07:00
mbecker20
50b9f2e1bf deploy 2.0.0-dev-54 2025-10-13 13:06:23 -07:00
mbecker20
41ce86f6ab deploy 2.0.0-dev-53 2025-10-12 20:00:47 -07:00
mbecker20
7a21c01e52 tweak 2025-10-12 19:59:09 -07:00
mbecker20
e63e282510 small clean up 2025-10-12 19:56:15 -07:00
mbecker20
5456b36c18 deploy 2.0.0-dev-52 2025-10-12 13:55:39 -07:00
mbecker20
fcfb58a7e9 periphery with server disabled can initialize core public key file 2025-10-12 13:55:15 -07:00
mbecker20
2203004a74 move periphery in memory state to state.rs 2025-10-12 13:15:52 -07:00
mbecker20
996fb49823 periphery server_enabled /version route 2025-10-12 12:56:29 -07:00
mbecker20
35d22c77a2 Core add non authed /version route 2025-10-12 12:55:14 -07:00
mbecker20
44ab89600f Simplify Option + Result into one encoding layer 2025-10-12 03:24:00 -07:00
mbecker20
0900e48cb8 ntfy / pushover url interpolation 2025-10-12 01:34:07 -07:00
mbecker20
c530a46a27 deploy 2.0.0-dev-51 2025-10-12 01:09:35 -07:00
mbecker20
f69c8db3ea pass through whether Periphery docker daemon connection is ok 2025-10-12 01:08:45 -07:00
mbecker20
48f2f651e1 periphery runs with logs if couldn't connect to docker daemon 2025-10-12 01:05:20 -07:00
mbecker20
bdb5b4185e rename some websocket fields 2025-10-12 00:28:55 -07:00
mbecker20
42a7b8c19b move connection queries to periphery_client 2025-10-12 00:08:59 -07:00
mbecker20
ded17e4840 more encoding refine 2025-10-12 00:05:16 -07:00
mbecker20
80fb1e6889 more on encoding 2025-10-11 14:11:07 -07:00
mbecker20
1dc861f538 fix periphery keys init when config.private_key is not explicitly defined. 2025-10-11 12:00:29 -07:00
mbecker20
3da63395fd fix EncodedOption docs 2025-10-10 00:32:05 -07:00
mbecker20
c40cbc4d77 deploy 2.0.0-dev-50 2025-10-09 23:40:42 -07:00
mbecker20
05e352e88c attach working 2025-10-09 23:28:27 -07:00
mbecker20
5884c09fb8 fix fe 2025-10-09 22:05:36 -07:00
mbecker20
f8add38043 backend for container attach 2025-10-09 21:53:26 -07:00
mbecker20
501f734e8b deploy 2.0.0-dev-49 2025-10-09 20:13:17 -07:00
mbecker20
de62732ac8 bump jwt lib 2025-10-09 20:12:51 -07:00
mbecker20
bfa61058cd remove github only managed repo webhooks feature. Not well implemented or documented, also provider specific. 2025-10-09 20:06:06 -07:00
mbecker20
72ca6d9910 deploy 2.0.0-dev-48 2025-10-09 19:36:47 -07:00
mbecker20
4d1ac32ad3 clippy 2025-10-09 18:24:50 -07:00
mbecker20
927e5959fa move encoded message schemas between core / periphery into periphery_client 2025-10-09 18:23:44 -07:00
mbecker20
37ccc6e1ef isolate encoding out of transport 2025-10-09 18:11:49 -07:00
mbecker20
deaa8754f3 slowly better ergonomics 2025-10-09 17:29:05 -07:00
mbecker20
dd8ac67c72 clippy and fmt 2025-10-09 16:26:06 -07:00
mbecker20
be4457c9cf deploy 2.0.0-dev-47 2025-10-09 15:41:06 -07:00
mbecker20
1868421815 strictly typed transport bytes encoding 2025-10-09 15:40:42 -07:00
mbecker20
366f7a12b4 Enumerated transport message types 2025-10-08 20:36:16 -07:00
mbecker20
75119370df standardize key rotation with wrapper 2025-10-08 17:45:45 -07:00
mbecker20
9e85b9d4c8 deploy 2.0.0-dev-46 2025-10-08 03:59:22 -07:00
mbecker20
8afbbf23dc deploy 2.0.0-dev-45 2025-10-08 02:48:24 -07:00
mbecker20
770a1116a1 fix: RotateCoreKeys also needs to store the new keys in mem 2025-10-08 02:48:01 -07:00
mbecker20
0b4aebbc24 periphery refresh panics if server_enabled, and core public key fails to parse. 2025-10-08 02:32:52 -07:00
mbecker20
f1696e26e4 deploy 2.0.0-dev-44 2025-10-08 02:11:38 -07:00
mbecker20
1a7b682301 RotateCoreKeys api 2025-10-08 02:11:17 -07:00
mbecker20
b0110b05aa deploy 2.0.0-dev-43 2025-10-08 00:02:34 -07:00
mbecker20
561b490f26 write files potentially containing secrets as 0600 2025-10-07 23:59:53 -07:00
mbecker20
cac1f0b42e align server standard and monitoring tables 2025-10-07 19:41:57 -07:00
mbecker20
28886fb304 fix typos 2025-10-07 14:32:00 -07:00
mbecker20
fb84d4cf7d deploy 2.0.0-dev-42 2025-10-07 01:59:04 -07:00
mbecker20
31e9624556 auto_rotate_keys config 2025-10-07 01:58:13 -07:00
mbecker20
3864bb7115 onboarding key expiry view 2025-10-07 01:16:28 -07:00
mbecker20
cea8601246 remove deleted server from onboarding key copy server 2025-10-07 00:44:36 -07:00
mbecker20
a546364bf3 deploy 2.0.0-dev-41 2025-10-07 00:29:33 -07:00
mbecker20
c8c62ea562 core public keys improve refresh 2025-10-07 00:27:47 -07:00
mbecker20
845e8780c7 improve server stats UI 2025-10-06 23:45:26 -07:00
mbecker20
db60347566 deploy 2.0.0-dev-40 2025-10-06 22:12:40 -07:00
mbecker20
c3ea0239d6 fix passkey support 2025-10-06 22:12:15 -07:00
mbecker20
e9d13449bf improve ws trait ergonomics 2025-10-06 20:02:06 -07:00
mbecker20
2daa92a639 working with safer transport message api 2025-10-06 19:16:03 -07:00
mbecker20
6473080078 deploy 2.0.0-dev-39 2025-10-06 03:14:15 -07:00
mbecker20
d3957f65dc schedule alert send before not after 2025-10-06 03:13:48 -07:00
mbecker20
cb34969f1e move skip label to be built into images 2025-10-06 03:07:11 -07:00
mbecker20
55a0a8cd05 deploy 2.0.0-dev-38 2025-10-06 02:38:42 -07:00
mbecker20
89f08372c6 CloseAlert 2025-10-06 02:38:16 -07:00
mbecker20
6a3ce2d426 config log some errors 2025-10-06 02:13:36 -07:00
mbecker20
4928378d46 fmt 2025-10-06 02:03:03 -07:00
mbecker20
eea222cfba simplify periphery config by removing option 2025-10-06 02:02:31 -07:00
mbecker20
6e9cc2dc77 deploy 2.0.0-dev-37 2025-10-06 01:40:24 -07:00
mbecker20
55d45084d0 comment 2025-10-06 01:40:00 -07:00
mbecker20
9657a44049 Improve config toml / yaml / json interpolation support 2025-10-06 01:38:48 -07:00
mbecker20
51fa9ae3c2 update setup script readme 2025-10-06 00:29:35 -07:00
mbecker20
5fd256444e improve the setup script 2025-10-05 23:53:21 -07:00
mbecker20
059716f178 deploy 2.0.0-dev-36 2025-10-05 18:23:43 -07:00
mbecker20
0bee1fe2c5 fix: connect and connect insecure are swapped 2025-10-05 18:20:29 -07:00
mbecker20
1e58c1a958 deploy 2.0.0-dev-35 2025-10-05 17:45:16 -07:00
mbecker20
ed1431db0a improve v1 downgrade 2025-10-05 17:43:46 -07:00
mbecker20
dc769ff159 document periphery_public_key 2025-10-05 17:39:11 -07:00
mbecker20
098f23ac4c configure Core -> Periphery insecure_tls 2025-10-05 17:36:07 -07:00
mbecker20
03f577d22f forgiving periphery public key parsing 2025-10-05 17:02:08 -07:00
mbecker20
95ca217362 deploy 2.0.0-dev-34 2025-10-05 16:52:33 -07:00
mbecker20
6d61045764 support KOMODO_PERIPHERY_PUBLIC_KEY 2025-10-05 16:52:00 -07:00
mbecker20
34e075eaf3 periphery support core_tls_insecure_skip_verify 2025-10-05 16:12:04 -07:00
mbecker20
232dc0bb4e deploy 2.0.0-dev-33 2025-10-05 14:59:23 -07:00
mbecker20
0cc0ee2aab load periphery_public_key 2025-10-05 14:58:52 -07:00
mbecker20
edebe925ff add km maintenance tasks aliases 2025-10-05 14:50:20 -07:00
mbecker20
5fd45bbc7b deploy 2.0.0-dev-32 2025-10-05 14:39:09 -07:00
mbecker20
0a490dadb2 rotation maintenance execution doesn't make individual updates 2025-10-05 14:38:45 -07:00
mbecker20
23847c15bc deploy 2.0.0-dev-31 2025-10-05 14:21:18 -07:00
mbecker20
0d238aee4f onboarding create_builder 2025-10-05 14:20:58 -07:00
mbecker20
98ad6cf5fa create update use uppercase 2025-10-05 13:41:42 -07:00
mbecker20
e35b81630b deploy 2.0.0-dev-30 2025-10-05 13:30:47 -07:00
mbecker20
1215852fe4 onboarding set Server tags 2025-10-05 13:24:38 -07:00
mbecker20
4164b76ff5 onboarded server needs to be enabled 2025-10-05 12:36:04 -07:00
mbecker20
26a9daffeb deploy 2.0.0-dev-29 2025-10-05 05:49:29 -07:00
mbecker20
8bb9f16e9b onboarding save copy server selection 2025-10-05 05:47:51 -07:00
mbecker20
b6eaf76497 Include templates in onboarding selector 2025-10-05 05:44:58 -07:00
mbecker20
073893da0e deploy 2.0.0-dev-28 2025-10-05 05:18:48 -07:00
mbecker20
e71547f1c2 configure server onboarding key 2025-10-05 05:17:56 -07:00
mbecker20
1991627990 move periphery public key to Server info (keep it out of resource sync) 2025-10-05 04:18:59 -07:00
mbecker20
3434d827a3 deploy 2.0.0-dev-27 2025-10-05 02:57:58 -07:00
mbecker20
1ef8b9878a rotate all server keys task 2025-10-05 02:57:27 -07:00
mbecker20
07ddaa8377 tweak 2025-10-05 01:41:44 -07:00
mbecker20
142c08cde4 deploy 2.0.0-dev-26 2025-10-05 01:19:21 -07:00
mbecker20
1aa1422faa periphery private key rotation 2025-10-05 01:18:56 -07:00
mbecker20
1394e8a6b1 Rotate Server private keys 2025-10-05 00:54:56 -07:00
mbecker20
420ee10211 tweaks 2025-10-04 23:59:14 -07:00
mbecker20
e918461dc5 refine onboarding key 2025-10-04 23:36:37 -07:00
mbecker20
4dc9ca27be refactor Periphery onboarding 2025-10-04 16:43:02 -07:00
mbecker20
f49b186f2f consolidate periphery docker apis into single mod 2025-10-04 16:17:32 -07:00
mbecker20
6e039b41f1 deploy 2.0.0-dev-25 2025-10-03 17:51:46 -07:00
mbecker20
e7cd77b022 tweaks 2025-10-03 17:06:14 -07:00
mbecker20
556cbd04c7 server onboarding flow using onboarding key 2025-10-03 17:01:58 -07:00
mbecker20
4e3d181466 default documented setup now uses Periphery -> Core setup 2025-10-03 12:55:06 -07:00
mbecker20
5d4326f46f NOT_FOUND if server not found 2025-10-03 03:17:37 -07:00
mbecker20
4bb486ad0a deploy 2.0.0-dev-24 2025-10-03 02:30:20 -07:00
mbecker20
d29c5112d8 Confirm server public key flow 2025-10-03 02:29:53 -07:00
mbecker20
d41315b8a4 don't navigate to /login for network errors 2025-10-03 01:58:23 -07:00
mbecker20
847404388c deploy 2.0.0-dev-23 2025-10-03 00:48:11 -07:00
mbecker20
eef8ec59b8 deploy 2.0.0-dev-22 2025-10-03 00:19:43 -07:00
mbecker20
9eb32f9ff5 store attempted public keys 2025-10-03 00:13:55 -07:00
mbecker20
859bfe67ef Improve Core side connection handling and fix Periphery -> Core error report 2025-10-02 23:03:58 -07:00
mbecker20
21ea469cd4 add login message 2 sec timeout 2025-10-02 16:00:45 -07:00
mbecker20
7fb902b892 deploy 2.0.0-dev-21 2025-10-02 03:12:59 -07:00
mbecker20
c9c4ac47ee fix clippy 2025-10-02 02:34:23 -07:00
mbecker20
f228cd31f3 deploy 2.0.0-dev-20 2025-10-02 02:33:33 -07:00
mbecker20
4feecb4b97 write key pem files by default when not otherwise provided. 2025-10-02 02:32:13 -07:00
mbecker20
e2680d0942 fix deploy 2025-10-01 21:35:27 -07:00
mbecker20
7422c0730d deploy 2.0.0-dev-19 2025-10-01 21:27:59 -07:00
mbecker20
37ac0dc7e3 update deploy 2025-10-01 21:17:43 -07:00
mbecker20
dccaca1df4 make sure not a config file before include as compose file 2025-10-01 20:32:52 -07:00
mbecker20
886aea4c36 deploy 2.0.0-dev-18 2025-10-01 19:48:40 -07:00
mbecker20
cbca070bae load keys from files 2025-10-01 19:41:32 -07:00
mbecker20
b4bdd401f6 fix unneeded base64 prefix 2025-10-01 02:36:26 -07:00
mbecker20
e546166240 use pkcs8 and spki for private / public key encoding, matching openssl 2025-10-01 02:25:41 -07:00
mbecker20
21689ce0ad periphery support same key gen functions 2025-09-29 23:32:47 -07:00
mbecker20
941787db64 slack client 0.5.0 2025-09-29 12:38:39 -07:00
mbecker20
d4b1aacac3 comment out 2025-09-29 02:21:07 -07:00
mbecker20
30f89461bf deploy 2.0.0-dev-17 2025-09-29 00:57:19 -07:00
mbecker20
a42d1397e9 back to bullseye (for max GLIBC compatibility) 2025-09-29 00:56:19 -07:00
mbecker20
b29313c28f deploy 2.0.0-dev-16 2025-09-29 00:47:17 -07:00
mbecker20
08a246a90c bullseye -> trixie 2025-09-29 00:46:51 -07:00
mbecker20
1a08df28d0 docs and config clean up 2025-09-29 00:06:35 -07:00
mbecker20
a226ffc256 fix json config load from interpolated 2025-09-28 23:20:04 -07:00
mbecker20
b385ee5ec3 start on docs update 2025-09-28 22:59:58 -07:00
mbecker20
c78c34357d remove unnecessary connected to core websocket log 2025-09-28 18:14:57 -07:00
mbecker20
4b7c692f00 deploy 2.0.0-dev-15 2025-09-28 18:02:18 -07:00
mbecker20
1ac98a096e bump async timing util to 1.1.0 to support for timelengths 2025-09-28 17:57:12 -07:00
mbecker20
281a2dc1ce first server configuration works with Periphery -> Core 2025-09-28 14:39:11 -07:00
mbecker20
0fe91378a6 tweak key gen output 2025-09-28 14:12:41 -07:00
mbecker20
11e76d1cf2 deploy 2.0.0-dev-14 2025-09-28 13:10:00 -07:00
mbecker20
a3bcd71105 simplify cache refresh with single periphery call 2025-09-28 13:05:45 -07:00
mbecker20
3ecc56dd76 clean up crypto provider install 2025-09-27 21:40:20 -07:00
mbecker20
7239cbb19b remove extra install crypto provider 2025-09-27 19:37:50 -07:00
mbecker20
a0540f7011 deploy 2.0.0-dev-13 2025-09-27 16:54:00 -07:00
mbecker20
37aea7605e gen types 2025-09-27 14:33:14 -07:00
mbecker20
78be913541 fix stuff after main rebase 2025-09-27 14:26:58 -07:00
mbecker20
c34f5ebf49 update config and compose envs 2025-09-27 14:23:49 -07:00
mbecker20
e5822cefb8 clean up socket handling 2025-09-27 14:23:49 -07:00
mbecker20
4baab194cf centralize the terminal stuff 2025-09-27 14:23:49 -07:00
mbecker20
a896583da6 deploy 2.0.0-dev-12 2025-09-27 14:23:49 -07:00
mbecker20
7b2674c38b deploy 2.0.0-dev-11 2025-09-27 14:23:42 -07:00
mbecker20
d1e32989e3 allow any number of simultaneous inbound / outbound connections (to different Cores) 2025-09-27 14:23:36 -07:00
mbecker20
e802bb3882 periphery support multiple core_public_keys 2025-09-27 14:23:36 -07:00
mbecker20
27a38b1bf5 periphery support multiple simultaneous core connections 2025-09-27 14:23:36 -07:00
mbecker20
2bc8a754be clean up passkey login 2025-09-27 14:23:36 -07:00
mbecker20
7a2a54bec1 dev-10 2025-09-27 14:23:36 -07:00
mbecker20
6a15150d59 don't cleanup server type builders 2025-09-27 14:23:31 -07:00
mbecker20
1b1dca76da deploy 2.0.0-dev-9 2025-09-27 14:23:31 -07:00
mbecker20
a032f0f4ff move system info to server cache 2025-09-27 14:23:25 -07:00
mbecker20
2749d49435 Core -> Periphery connection prefers noise handshake if 'core_public_key' is set 2025-09-27 14:23:25 -07:00
mbecker20
d88e42ef2d add specific server passkey support back 2025-09-27 14:23:25 -07:00
mbecker20
a370e7d121 support passkey auth for Core -> Periphery connection to remove the breaking change 2025-09-27 14:23:25 -07:00
mbecker20
d139ad2b3d always fallback to core config 'periphery_public_key' 2025-09-27 14:23:25 -07:00
mbecker20
8d2d180398 deploy 2.0.0-dev-8 2025-09-27 14:22:48 -07:00
mbecker20
37ca4ca986 fix server update hang 2025-09-27 14:22:42 -07:00
mbecker20
33e73b8543 use warn log 2025-09-27 14:22:42 -07:00
mbecker20
cf6e36e90c periphery server avoid auth fail log spam 2025-09-27 14:22:42 -07:00
mbecker20
9eb8b32f4a create and delete connections on demand 2025-09-27 14:22:42 -07:00
mbecker20
b400add6f1 deploy 2.0.0-dev-7 2025-09-27 14:22:41 -07:00
mbecker20
24adb89d25 execute container exec waits a bit for terminal to init before sending command 2025-09-27 14:22:36 -07:00
mbecker20
4674b2badb deploy 2.0.0-dev-6 2025-09-27 14:22:36 -07:00
mbecker20
65d1a69cb9 Mount ExecuteContainerExec periphery api 2025-09-27 14:22:27 -07:00
mbecker20
0da5718991 store connection channels under the connection 2025-09-27 14:22:27 -07:00
mbecker20
6b26cd120c simplify most of periphery client into bin/core 2025-09-27 14:22:27 -07:00
mbecker20
28e1bb19a4 deploy 2.0.0-dev-5 2025-09-27 14:22:27 -07:00
mbecker20
166107ac07 bail_if_not_connected 2025-09-27 14:22:21 -07:00
mbecker20
d77201880f dashboard Active include GlobalAutoUpdate 2025-09-27 14:22:21 -07:00
mbecker20
1d7629e9b2 Update server address description and placeholders 2025-09-27 14:22:21 -07:00
mbecker20
198f690ca5 Got invalid public key: {public_key} 2025-09-27 14:22:21 -07:00
mbecker20
531c79a144 deploy 2.0.0-dev-4 2025-09-27 14:22:21 -07:00
mbecker20
d685862713 improve Core - Periphery auth error messages 2025-09-27 14:22:09 -07:00
mbecker20
af0f245b5b deploy 2.0.0-dev-3 2025-09-27 14:22:09 -07:00
mbecker20
cba36861b7 deploy 2.0.0-dev-2 2025-09-27 14:22:02 -07:00
mbecker20
2c2c1d47b4 dev-2 2025-09-27 14:22:02 -07:00
mbecker20
3a6b997241 Json and JsonPretty formatting 2025-09-27 14:21:54 -07:00
mbecker20
7122f79b9d add -f json option to key utils (for use with jquery etc.) 2025-09-27 14:21:54 -07:00
mbecker20
9bcee8122b tweak 2025-09-27 14:21:54 -07:00
mbecker20
a49c98946e add copy pubkeys 2025-09-27 14:21:54 -07:00
mbecker20
7d222a7241 dev-1 2025-09-27 14:21:54 -07:00
mbecker20
33501dac3e fix Core -> Periphery reconnection 2025-09-27 14:21:44 -07:00
mbecker20
4675dfa736 improve the logging 2025-09-27 14:21:44 -07:00
mbecker20
0be51dc784 move core connection handlers into core binary 2025-09-27 14:21:44 -07:00
mbecker20
52453d1320 set default allowed periphery public key 2025-09-27 14:21:44 -07:00
mbecker20
25da97ac1a basic configure auth 2025-09-27 14:21:44 -07:00
mbecker20
02db5a11d3 pipe through core side private / public key handling 2025-09-27 14:21:44 -07:00
mbecker20
89a5272246 rename passkey -> private_key 2025-09-27 14:21:44 -07:00
mbecker20
ae51ea1ad6 Copy core public key 2025-09-27 14:21:44 -07:00
mbecker20
3bdb4bea16 Core includes public key in CoreInfo 2025-09-27 14:21:44 -07:00
mbecker20
677bb14b5d auth forward error 2025-09-27 14:21:44 -07:00
mbecker20
6700700a80 clean up websocket handlers with many params 2025-09-27 14:21:44 -07:00
mbecker20
996d4aa129 standardize server size header identifier extraction 2025-09-27 14:21:44 -07:00
mbecker20
75894a7282 wire through private keys 2025-09-27 14:21:44 -07:00
mbecker20
2a065edcf1 avoid looping periphery client error 2025-09-27 14:21:44 -07:00
mbecker20
6f3703acfb periphery client makes more sense 2025-09-27 14:21:44 -07:00
mbecker20
59e989ecdf noise library and cli key utilities 2025-09-27 14:21:44 -07:00
mbecker20
951ff34a9e abstract websocket handling implementations on both sides 2025-09-27 14:21:12 -07:00
mbecker20
2d83105500 clean up 2025-09-27 14:21:12 -07:00
mbecker20
3d455f5142 implement noise auth basic 2025-09-27 14:21:12 -07:00
mbecker20
01de8c4a9b use standardized websocket wrappers / traits 2025-09-27 14:21:12 -07:00
mbecker20
d5de338561 outbound connection mode working 2025-09-27 14:21:12 -07:00
mbecker20
58c1afb8ef add login draft for transport 2025-09-27 14:21:12 -07:00
mbecker20
230f357b5a everything over ws working 2025-09-27 14:21:12 -07:00
mbecker20
991c95fff0 execute basically working, still need to clear the response channel upon completion 2025-09-27 14:21:12 -07:00
mbecker20
f6243fe6b1 more cleanup 2025-09-27 14:21:12 -07:00
mbecker20
9feeccba6e container terminal over connection 2025-09-27 14:21:12 -07:00
mbecker20
673c7f3a6b multiplex requests + terminal over single WS 2025-09-27 14:21:12 -07:00
mbecker20
39f900d651 standardize and consolidate logic in transport lib 2025-09-27 14:21:12 -07:00
mbecker20
8a06a0d6ce is work 2025-09-27 14:21:12 -07:00
mbecker20
7789ee4f4a prog 2025-09-27 14:21:12 -07:00
mbecker20
0472b6a7f7 fix after 1.19.4 2025-09-27 14:21:12 -07:00
mbecker20
d1d2227d36 prog 2025-09-27 14:21:11 -07:00
mbecker20
cea7c5fc5e prog on ws connect 2025-09-27 14:21:11 -07:00
Maxwell Becker
34a9f8eb9e 1.19.5 (#846)
* start 1.19.5

* deploy 1.19.5-dev-1

* avoid execute_and_poll error when update is already complete or has no id

* improve image tagging customization

* 1.19.5 release
2025-09-27 13:29:16 -07:00
mbecker20
494d01aeed RequireAuth redirect when no jwt 2025-09-14 14:05:43 -07:00
mbecker20
084e2fec23 1.19.4 2025-09-14 12:32:58 -07:00
Maxwell Becker
98d72fc908 1.19.4 (#812)
* start 1.19.4

* deploy 1.19.4-dev-1

* try smaller binaries with cargo strip

* deploy 1.19.4-dev-2

* smaller binaries with cargo strip

* Fix Submit Dialog Button Behavior with 500 Errors on Duplicate Names (#819)

* Implement enhanced error handling and messaging for resource creation

* Implement improved error handling for resource creation across alerter, build, and sync

* Implement error handling improvements for resource copying and validation feedback

* Adjust error handling for resource creation to distinguish validation errors from unexpected system errors

* Refactor resource creation error handling by removing redundant match statements and simplifying the error propagation in multiple API modules.

* fmt

* bump indexmap

* fix account selector showing empty when account no longer found

* clean up theme logic, ensure monaco and others get up to date current theme

* enforce disable_non_admin_create for tags. Clean up status code responses

* update server cache concurrency controller

* deploy 1.19.4-dev-3

* Allow signing in by pressing enter (#830)

* Improve dialog overflow handling to prevent clipping of content (#828)

* Add Email notification entry to community.md (#824)

* Add clickable file path to show/hide file contents in StackInfo (#827)

* add clickable file path to show/hide file contents in StackInfo

Also added CopyButton due to the new functionality making the file path not selectable.

* Move clicking interaction to CardHeader

* Avoid sync edge cases of having toggle show function capturing showContents from outside

Co-authored-by: Maxwell Becker <49575486+mbecker20@users.noreply.github.com>

* Format previous change

* Add `default_show_contents` to `handleToggleShow`

---------

Co-authored-by: Maxwell Becker <49575486+mbecker20@users.noreply.github.com>

* deploy 1.19.4-dev-4

* avoid stack info ShowHideButton double toggle

* Allow multiple simultaneous Action runs for use with Args

* deploy 1.19.4-dev-5

* feat: persist all table sorting states including unsorted (#832)

- Always save sorting state to localStorage, even when empty/unsorted
- Fixes issue where 'unsorted' state was not persisted across page reloads
- Ensures consistent and predictable sorting behavior for all DataTable components

* autofocus on login username field (#837)

* Fix unnecessary auth queries flooding console on login page (#842)

* Refactor authentication error handling to use serror::Result and status codes

* Enable user query only when JWT is present

* Enable query execution in useRead only if JWT is present

* Revert backend auth changes - keep PR focused on frontend only

* Fix unnecessary API queries to unreachable servers flooding console (#843)

* Implement server availability checks in various components

* Refactor server availability check to ensure only healthy servers are identified

* cargo fmt

* fmt

* Auth error handling with status codes (#841)

* Refactor authentication error handling to use serror::Result and status codes

* Refactor error messages

* Refactor authentication error handling to include status codes and improve error messages

* clean up

* clean

* fmt

* invalid user id also UNAUTHORIZED

* deploy 1.19.4-dev-6

* deploy 1.19.4-dev-7

---------

Co-authored-by: Marcel Pfennig <82059270+MP-Tool@users.noreply.github.com>
Co-authored-by: jack <45038833+jackra1n@users.noreply.github.com>
Co-authored-by: Guten <ywzhaifei@gmail.com>
Co-authored-by: Paulo Roberto Albuquerque <paulora2405@gmail.com>
Co-authored-by: Lorenzo Farnararo <2814802+baldarn@users.noreply.github.com>
2025-09-14 12:32:06 -07:00
azrikahar
20ac04fae5 fix navigation link to users page via omnibar (#838) 2025-09-11 10:04:45 -07:00
Maxwell Becker
a65fd4dca7 1.19.3 (#792)
* start. 1.19.3

* deploy 1.19.3-dev-1

* repo state from db includes BuildRepo success

* clean up version mismatch text

* feat(containers): debounced search input and added filter by server name (#796)

* Fix cleaning Alerter resource whitelist / blacklist on resource delete re #581

* fmt

* Fix signup button not working correctly (#801)

* Improve route protection and authentication flow (#798)

* Improve route protection and authentication flow

* Cleanup

* fix: inconsistent behaviour of new resource create button (#800)

* fix monaco crashing with absolute path config files

* deploy 1.19.3-dev-2

* proofread config

* Fix #427

* deploy 1.19.3-dev-3

* poll logs use println

* Sync: Only show commit / execute when viewing pending tab

* Improve sync UX

* deploy 1.19.3-dev-4

* bold link

* remove claims about database resource usage.

* 1.19.3

---------

Co-authored-by: mbecker20 <max@mogh.tech>
Co-authored-by: Antonio Sarro <tech@antoniosarro.dev>
Co-authored-by: jack <45038833+jackra1n@users.noreply.github.com>
2025-09-05 13:41:58 -07:00
mbecker20
0873104b5a fmt 2025-08-31 19:13:14 -07:00
Maxwell Becker
9a7b6ebd51 1.19.2 (#764)
* 1.19.2-dev-0

* deploy 1.19.2-dev-1

* Add option to make run command detachable (#766)

* improve missing files log to include the missing paths

* bump mungos for urlencoding mongo creds

* Update permissioning.md - typo: "priviledges" -> "privileges" (#770)

* Add support for monaco-yaml and docker compose spec validation (#772)

* deploy 1.19.2-dev-2

* on delete user, remove from all user groups

* fix Google login issues around `picture`

* unsafe_unsanitized_startup_config

* improve git provider support re #355

* should fix #468

* should fix exit code re #597

* deploy 1.19.2-dev-3

* fix container ports sorting (#776)

* missing serde default

* deploy 1.19.2-dev-4

* ensure git tokens trimmed in remote url

* Add link to Authentik support docs

* Fix incorrect commit branch when using linked repo re #634

* Better display container port ranges (#786)

* ensure build and sync also commit to correct branch. re #634

* deploy 1.19.2-dev-5

* Improve login form (#788)

* Use proper form for login, add autocomplete and names to input fields

* Do not return null if loading

* Remove unused function

* Cleanup and streamline

* improve login screen flash on reload

* first builder given same name as first server

* 1.19.2

---------

Co-authored-by: mbecker20 <max@mogh.tech>
Co-authored-by: Brian Bradley <brian.bradley.p@gmail.com>
Co-authored-by: Ravi Wolter-Krishan <rkn@gedikas.net>
Co-authored-by: Christopher Hoage <iam@chrishoage.com>
Co-authored-by: jack <45038833+jackra1n@users.noreply.github.com>
2025-08-31 19:08:45 -07:00
mbecker20
a4153fa28b fix UI showing Redeploy when its actually None 2025-08-24 14:30:43 -07:00
Maxwell Becker
e732da3b05 1.19.1 (#740)
* start 1.19.1

* deploy 1.19.1-dev-1

* Global Auto Update rustdoc

* support stack additional files

* deploy 1.19.1-dev-2

* Fe support additional file language detection

* fix tsc

* Fix: Example code blocks got interpreted as rust code, leading to compilation errors (#743)

* Enhanced Server Stats Dashboard with Performance Optimizations (#746)

* Improve the layout of server mini stats in the dashboard.

- Server stats and tags made siblings for clearer responsibilities
- Changed margin to padding
- Unreachable indicator made into an overlay of the stats

* feat: optimize dashboard server stats with lazy loading and smart server availability checks

- Add enabled prop to ServerStatsMini for conditional data fetching
- Implement server availability check (only fetch stats for Ok servers, not NotOk/Disabled)
- Prevent 500 errors by avoiding API calls to offline servers
- Increase polling interval from 10s to 15s and add 5s stale time
- Add useMemo for expensive calculations to reduce re-renders
- Add conditional overlay rendering for unreachable servers
- Only render stats when showServerStats preference is enabled

* fix: show disabled servers with overlay instead of hiding component

- Maintain consistent layout by showing disabled state overlay
- Prevent UX inconsistency where disabled servers disappeared entirely

* fix: show button height

* feat: add enhance card animations

* cleanup

* gen types

* deploy 1.19.1-dev-3

* add .ini

* deploy 1.19.1-dev-4

* simple configure action args as JSON

* server enabled actually defaults false

* SendAlert via Action / CLI

* fix clippy if let string

* deploy 1.19.1-dev-5

* improve cli ergonomics

* gen types and fix responses formatting

* Add RunStackService API implementing `docker compose run` (#732)

* Add RunStackService API implementing `docker compose run`

* Add working Procedure configuration

* Remove `km execute run` alias. Remove redundant ``#[serde(default)]` on `Option`.

* Refactor command from `String` to `Vec<String>`

* Implement proper shell escaping

* bump deps

* Update configuration.md - fix typo: "affect" -> "effect" (#747)

* clean up SendAlert doc

* deploy 1.19.1-dev-6

* env file args won't double pass env file

* deploy 1.19.1-dev-7

* Add Enter Key Support for Dialog Confirmations (#750)

* start 1.19.1

* deploy 1.19.1-dev-1

* Implement usePromptHotkeys for enhanced dialog interactions and UX

* Refactor usePromptHotkeys to enhance confirm button detection and improve UX

* Remove forceConfirmDialog prop from ActionWithDialog and related logic for cleaner implementation

* Add dialog descriptions to ConfirmUpdate and ActionWithDialog for better clarity and resolve warnings

* fix

* Restore forceConfirmDialog prop to ActionWithDialog for enhanced confirmation handling

* cleanup

* Remove conditional className logic from ConfirmButton

---------

Co-authored-by: mbecker20 <max@mogh.tech>

* Support complex file dependency action resolution

* get FE compile

* deploy 1.19.1-dev-8

* implement additional file dependency configuration

* deploy 1.19.1-dev-9

* UI default file dependency None

* default additional file requires is None

* deploy 1.19.1-dev-10

* rename additional_files => config_files for clarity

* deploy 1.19.1-dev-11

* fix skip serializing if None

* deploy 1.19.1-dev-12

* stack file dependency toml parsing aliases

* fmt

* Add: Server Version Mismatch Warnings & Alert System (#748)

* start 1.19.1

* deploy 1.19.1-dev-1

* feat: implement version mismatch warnings in server UI
- Replace orange warning colors with yellow for better visibility
- Add version mismatch detection that shows warnings instead of OK status
Implement responsive "VERSION MISMATCH" badge layout
- Update server dashboard to include warning counts
- Add backend version comparison logic for GetServersSummary

* feat: add warning count to server summary and update backup documentation link

* feat: add server version mismatch alert handling and update server summary invalidation logic

* fix: correct version mismatch alert config and disabled server display

- Use send_version_mismatch_alerts instead of send_unreachable_alerts
- Show 'Unknown' instead of 'Disabled' for disabled server versions
- Remove commented VersionAlert and Alerts UI components
- Update version to 1.19.0

* cleanup

* Update TypeScript types after merge

* cleanup

* cleanup

* cleanup

* Add "ServerVersionMismatch" to alert types

* Adjust color classes for warning states and revert server update invalidation logic

---------

Co-authored-by: mbecker20 <max@mogh.tech>

* backend for build multi registry push support

* deploy 1.19.1-dev-13

* build multi registry configuration

* deploy 1.19.1-dev-14

* fix invalid tokens JSON

* DeployStackIfChanged restarts also update stack.info.deployed_contents

* update deployed services comments

* deploy 1.19.1-dev-15

* Enhance server monitoring with load average data and new server monitoring table (#761)

* add monitoring page

* initial table

* moving monitoring table to servers

* add cpu load average

* typeshare doesnt allow tuples

* fix GetHistoricalServerStats

* add loadAvg to the server monitoring table

* improve styling

* add load average chart

* multiple colors for average loads chart

* make load average chart line and non-stacked

* cleanup

* use server thresholds

* cleanup

* Change "Dependents:" to "Services:" in config file service dependency
selector

* deploy 1.19.1-dev-16

* 1.19.1

---------

Co-authored-by: mbecker20 <max@mogh.tech>
Co-authored-by: Marcel Pfennig <82059270+MP-Tool@users.noreply.github.com>
Co-authored-by: Brian Bradley <brian.bradley.p@gmail.com>
Co-authored-by: Ravi Wolter-Krishan <rkn@gedikas.net>
Co-authored-by: jack <45038833+jackra1n@users.noreply.github.com>
2025-08-24 12:51:04 -07:00
Marcel Pfennig
75ffbd559b Fix: Correct environment variable name for container stats polling rate (#752)
* docs(config): Update environment variable name and default value for container stats polling rate

* fix(config): Update default value for container stats polling rate to 30 seconds
2025-08-21 15:01:10 -07:00
mbecker20
cae80b43e5 fix ferret v2 migration link 2025-08-18 16:38:03 -07:00
mbecker20
d924a8ace4 fix ferret v2 upgrade link 2025-08-18 11:36:25 -07:00
Karl Woditsch
dcfad5dc4e docs(docker-compose): Fix obsolete repo-cache volume declaration (#741) 2025-08-18 11:29:38 -07:00
mbecker20
134d1697e9 include backups path in env / yaml 2025-08-18 10:46:20 -07:00
mbecker20
3094d0036a edit cli docs 2025-08-17 21:00:04 -07:00
mbecker20
ee5fd55cdb first server commented out in default config 2025-08-17 18:39:27 -07:00
mbecker20
0ca126ff23 fix broken docs links before publish 2025-08-17 18:21:01 -07:00
Maxwell Becker
2fa9d9ecce 1.19.0 (#722)
* start 1.18.5

* prevent empty additional permission check (ie for new resources)

* dev-2

* bump rust to 1.88

* tweaks

* repo based stack commit happens from core repo cache rather than on server to simplify

* clippy auto fix

* clippy lints periphery

* clippy fix komodo_client

* dev-3

* emphasize ferret version pinning

* bump svi with PR fix

* dev-4

* webhook disabled early return

* Fix missing alert types for whitelist

* add "ScheduleRun"

* fix status cache not cleaning on resource delete

* dev-5

* forgot to pipe through poll in previous refactor

* refetch given in ms

* fix configure build extra args

* reorder resource sync config

* Implement ability to run actions at startup (#664)

* Implement ability to run actions at startup

* run post-startup actions after server is listening

* startup use action query

* fmt

* Fix Google Login enabled message (#668)

- it was showing "Github Login" instead of "Google Login"

* Allow CIDR ranges in Allowed IPs (#666)

* Allow CIDR ranges in Allowed IPs

* Catch mixed IPv4/IPv6 mappings that are probably intended to match

* forgiving vec

* dev-6

* forgiving vec log. allowed ips docs

* server stats UI: move current disk breakdown above charts

* searchable container stats, toggle collapse container / disk sections

* Add Clear repo cache method

* fix execute usage docs

* Komodo managed env-file should take precedence in all cases (ie come last in env file list)

* tag include unused flag for future use

* combine users page search

* util backup / restore

* refactor backup/restore duplication

* cleanup restore

* core image include util binary

* dev-7

* back to LinesCodec

* dev-8

* clean up

* clean up logs

* rename to komodo-util

* dev-9

* enable_fance_toml

* dev-10 enable fancy toml

* add user agent to oidc requests (#701)

Co-authored-by: eleith <online-github@eleith.com>

* fmt

* use database library

* clippy lint

* consolidate and standardize cli

* dev-11

* dev-12 implement backup using cli

* dev-13 logs

* command variant fields need to be #[arg]

* tweak cli

* gen client

* fix terminal reconnect issue

* rename cli to `km`

* tweaks for the cli logs

* wait for enter on --yes empty println

* fix --yes

* dev-15

* bump deps

* update croner to latest, use static parser

* dev-16

* cli execute polls updates until complete before logging

* remove repo cache mount

* cli nice

* /backup -> /backups

* dev-17 config loading preserves CONFIG_PATHS precedence

* update dockerfile default docker cli config keywords

* dev-18

* support .kmignore

* add ignores log

* Implement automatic backup pruning, default 14 backups before prune

* db copy / restore uses idempotent upsert

* cli update variable - "km set var VAR value"

* improve cli initial logs

* time the executions

* implement update for most resources

* dev 20

* add update page

* dev 21 support cli update link

* dev-22 test the deploy

* dev-23 use indexmap

* install-cli.py

* Frontend mobile fixes (#714)

* Allow ResourcePageHeader items to wrap

* Allow CardHeader items to wrap

* Increase z-index of sticky TableHeader, fixes #690

* Remove fixed widths from ActionButton, let them flex more to fit more layouts

* Make Section scroll overflow

* Remove grid class from Tabs, seems to prevent them from overflowing at small sizes

* deploy 1.18.5-dev-24

* auto version increment and deploy

* cli: profiles support aliases and merge on top of Default (root) config

* fix page set titles

* rust 1.89 and improve config logs

* skip serializing for proper merge

* fix clippy lints re 1.89

* remove layouts overflow-x-scroll

* deploy 1.18.5-dev-25

* 1.89 docker images not ready yet

* km cfg -a (print all profiles)

* include commit variables

* skip serializing profiles when empty

* skip serialize default db / log configs

* km cfg --debug print mode

* correct defaults for CLI and only can pass restore folder from cli arg

* some more skip serialization

* db restore / copy index optional

* add runfile command aliases

* remove second schedule updating loop, can cause some schedules to be missed

* deploy 1.18.5-dev-26

* add log when target db indexing disabled

* cli: user password reset, update user super admin

* Add manual network interface configuration for multi-NIC Docker environments (#719)

* Add iproute2 to debian-debs

* feat: Add manual network interface configuration for multi-NIC support

Complete implementation of manual interface configuration:
- Add internet_interface config option
- Implement manual gateway routing
- Add NET_ADMIN capability requirement
- Clean up codebase changes

* fix: Update internet interface handling for multi-NIC support

* refactor: Enhance error messages and logging in networking module

* refactor: Simplify interface argument handling and improve logging in network configuration and cleanup

* refactor(network): simplify startup integration and improve error handling

- Move config access and error handling into network::configure_internet_gateway()
- Simplify startup.rs to single function call without parameters
- Remove redundant check_network_privileges() function
- Improve error handling by checking actual command output instead of pre-validation
- Better separation of concerns between startup and network modules

Addresses feedback from PR discussion:
https://github.com/moghtech/komodo/pull/719#discussion_r2261542921

* fix(config): update default internet interface setting
Addresses feedback from PR discussion:
https://github.com/moghtech/komodo/pull/719#discussion_r2261552279

* fix(config): remove custom default for internet interface in CoreConfig

* move mod.rs -> network.rs
Addresses feedback from PR discussion:
https://github.com/moghtech/komodo/pull/719#discussion_r2261558332

* add internet interface example

* docs(build-images): document multi-platform builds with Docker Buildx (#721)

* docs(build-images): add multi-platform buildx guide to builders.md

* docs(build-images): add multi-platform buildx guide and clarify platform selection in Komodo UI Extra Args field

* move to 1.19.0

* core support reading from multiple config files

* config support yaml

* deploy 1.19.0-dev-1

* deploy 1.19.0-dev-2

* add default komodo cli config

* better config merge with base

* no need to panic if empty config paths

* improve km --help

* prog on cli docs

* tweak cli docs

* tweak doc

* split the runfile commands

* update docsite deps

* km ps initial

* km ls

* list resource apis

* km con inspect

* deploy 1.19.0-dev-3

* fix: need serde default

* dev-4 fix container parsing issue

* tweak

* use include-based file finding for much faster discovery

* just move to standard config dir .config/komodo/komodo.cli.*

* update fe w/ new container info minimal serialization

* add links to table names

* deploy 1.19.0-dev-5

* links in tables

* backend for Action arguments

* deploy 1.19.0-dev-6

* deploy 1.19.0-dev-7

* deploy 1.19.0-dev-8

* no space at front of KeyValue default args

* webhook branch / body optional

* The incoming arguments

* deploy 1.19.0-dev-9

* con -> cn

* add config -> cf alias

* .kmignore

* .peripheryinclude

* outdated

* optional links, configurable table format

* table_format -> table_borders

* get types

* include docsite in yarn install

* update runnables command in docs

* tweak

* improve km ls only show important stuff

* Add BackupCoreDatabase

* deploy 1.19.0-dev-10

* backup command needs "--yes"

* deploy 1.19.0-dev-11

* update rustc 1.89.0

* cli tweak

* try chef

* Fix chef (after dependencies)

* try other compile command

* fix

* fix comment

* cleanup stats page

* ensure database backup procedure

* UI allow configure Backup Core Database in Procedures

* procedure description

* deploy 1.19.0-dev-12

* deploy 1.19.0-dev-13

* GlobalAutoUpdate

* deploy 1.19.0-dev-14

* default tags and global auto update procedure

* deploy 1.19.0-dev-15

* trim the default procedure descriptions

* deploy 1.19.0-dev-16

* in "system" theme, also poll for updates to the theme based on time.

* Add next run to Action / Procedure column

* km ls support filter by templates

* fix procedure toml serialization when params = {}

* deploy 1.19.0-dev-17

* KOMODO_INIT_ADMIN_USERNAME

* KOMODO_FIRST_SERVER_NAME

* add server.config.external_address for use with links

* deploy 1.19.0-dev-18

* improve auto prune

* fix system theme auto update

* deploy 1.19.0-dev-19

* rename auth/CreateLocalUser -> SignUpLocalUser. Add write/CreateLocalUser for in-ui initialization.

* deploy 1.19.0-dev-20

* UI can handle multiple active logins

* deploy 1.19.0-dev-21

* fix

* add logout function

* fix oauth redirect

* fix multi user exchange token function

* default external address

* just Add

* style account switcher

* backup and restore docs

* rework docsite file / sidebar structure, start auto update docs

* auto update docs

* tweak

* fix doc links

* only pull / update running stacks / deployments images

* deploy 1.19.0-dev-22

* deploy 1.19.0-dev-23

* fix #737

* community docs

* add BackupCoreDatabase link to docs

* update ferret v2 update guide using komodo-cli

* fix data table headers overlapping topbar

* don't alert when deploying

* CommitSync returns Update

* deploy 1.19.0-dev-24

* trim the decoded branch

* action uses file contents deserializer

* deploy 1.19.0-dev-25

* remove Toml from action args format

* clarify External Address purpose

* Fix podman compatibility in `get_container_stats` (#739)

* Add podman compatibility for querying stats

Podman and docker stats differ in results in significant ways, but with this filter change they will output the same stats

* syntax fix

* feat(dashboard): display CPU, memory, and disk usage on server cards (#729)

* feat: mini-stats-card: Expose Server CPU , Memory, Disk Usage to Dashboard View

* comment: resolved

* Feat: fix overflow card , DRY stats-mini, add unreachable mini stats

* lint: fix

* deploy 1.19.0-dev-26

* 1.19.0

* linux, macos container install

* cli main config

---------

Co-authored-by: Brian Bradley <brian.bradley.p@gmail.com>
Co-authored-by: Daniel <daniel.barabasa@gmail.com>
Co-authored-by: eleith <eleith@users.noreply.github.com>
Co-authored-by: eleith <online-github@eleith.com>
Co-authored-by: Sam Edwards <sam@samedwards.ca>
Co-authored-by: Marcel Pfennig <82059270+MP-Tool@users.noreply.github.com>
Co-authored-by: itsmesid <693151+arevindh@users.noreply.github.com>
Co-authored-by: mbecker20 <max@mogh.tech>
Co-authored-by: Rhyn <Rhyn@users.noreply.github.com>
Co-authored-by: Anh Nguyen <tuananh131001@gmail.com>
2025-08-17 17:25:45 -07:00
Maxwell Becker
118ae9b92c 1.18.4 (#604)
* update easy deps

* update otel deps

* implement template in types + update resource meta

* ts types

* dev-2

* dev-3 default template query is include

* Toggle resource is template in resource header

* dev-4 support CopyServer

* gen ts

* style template selector in New Resource menu

* fix new menu show 0

* add template market in omni search bar

* fix some dynamic import behavior

* template badge on dashboard

* dev-5

* standardize interpolation methods with nice api

* core use new interpolation methods

* refactor git usage

* dev-6 refactor interpolation / git methods

* fix pull stack passed replacers

*  new types

* remove redundant interpolation for build secret args

* clean up periphery docker client

* dev-7 include ports in container summary, see if they actually come through

* show container ports in container table

* refresh processes without tasks (more efficient)

* dev-8 keep container stats cache, include with ContainerListItem

* gen types

* display more container ports

* dev-9 fix repo clone when repo doesn't exist initially

* Add ports display to more spots

* fix function name

* add Periphery full container stats api, may be used later

* server container stats list

* dev-10

* 1.18.4 release

* Use reset instead of invalidate to fix GetUser spam on token expiry (#618)

---------

Co-authored-by: Jacky Fong <hello@huzky.dev>
2025-06-24 16:32:39 -07:00
Luke
2205a81e79 Update webhooks.md (#611) 2025-06-20 11:56:05 -07:00
mbecker20
e2280f38df fix: allow Build / Repo add Attach permission 2025-06-16 00:40:24 -07:00
Maxwell Becker
545196d7eb 1.18.3 (#603)
* start 1.18.3 branch

* git::pull will fetch before checkout

* dev-2

* 1.18.3 quick release
2025-06-15 23:45:50 -07:00
577 changed files with 53102 additions and 40878 deletions

2
.gitignore vendored
View File

@@ -1,6 +1,7 @@
target
node_modules
dist
deno.lock
.env
.env.development
.DS_Store
@@ -9,5 +10,4 @@ dist
/frontend/build
/lib/ts_client/build
creds.toml
.dev

1
.kminclude Normal file
View File

@@ -0,0 +1 @@
.dev

View File

@@ -3,8 +3,8 @@
"scope": "rust",
"prefix": "resolve",
"body": [
"impl Resolve<${1}, User> for State {",
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
"impl Resolve<${0}> for ${1} {",
"\tasync fn resolve(self, _: &${0}) -> Result<Self::Response, Self::Error> {",
"\t\ttodo!()",
"\t}",
"}"
@@ -15,9 +15,9 @@
"prefix": "static",
"body": [
"fn ${1}() -> &'static ${2} {",
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
"\t${3}.get_or_init(|| {",
"\t\t${0}",
"\tstatic ${0}: OnceLock<${2}> = OnceLock::new();",
"\t${0}.get_or_init(|| {",
"\t\ttodo!()",
"\t})",
"}"
]

2981
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -8,122 +8,142 @@ members = [
]
[workspace.package]
version = "1.18.2"
version = "2.0.0-dev-86"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/moghtech/komodo"
homepage = "https://komo.do"
[profile.release]
strip = "debuginfo"
[workspace.dependencies]
# LOCAL
komodo_client = { path = "client/core/rs" }
periphery_client = { path = "client/periphery/rs" }
environment_file = { path = "lib/environment_file" }
environment = { path = "lib/environment" }
interpolate = { path = "lib/interpolate" }
secret_file = { path = "lib/secret_file" }
formatting = { path = "lib/formatting" }
transport = { path = "lib/transport" }
database = { path = "lib/database" }
encoding = { path = "lib/encoding" }
response = { path = "lib/response" }
command = { path = "lib/command" }
config = { path = "lib/config" }
logger = { path = "lib/logger" }
cache = { path = "lib/cache" }
noise = { path = "lib/noise" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.5.0", default-features = false }
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
serror = { version = "0.5.3", default-features = false }
slack = { version = "2.0.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
async_timing_util = "1.0.0"
async_timing_util = "1.1.0"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "2.0.1"
mongo_indexed = "2.0.2"
resolver_api = "3.0.0"
toml_pretty = "1.1.2"
mungos = "3.2.0"
svi = "1.0.1"
toml_pretty = "1.2.0"
mungos = "3.2.2"
svi = "1.2.0"
# ASYNC
reqwest = { version = "0.12.20", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
tokio = { version = "1.45.1", features = ["full"] }
tokio-util = { version = "0.7.15", features = ["io", "codec"] }
reqwest = { version = "0.12.24", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
tokio = { version = "1.48.0", features = ["full"] }
tokio-util = { version = "0.7.16", features = ["io", "codec"] }
tokio-stream = { version = "0.1.17", features = ["sync"] }
pin-project-lite = "0.2.16"
futures = "0.3.31"
futures-util = "0.3.31"
arc-swap = "1.7.1"
# SERVER
tokio-tungstenite = { version = "0.27.0", features = ["rustls-tls-native-roots"] }
axum-extra = { version = "0.10.1", features = ["typed-header"] }
tower-http = { version = "0.6.4", features = ["fs", "cors"] }
tokio-tungstenite = { version = "0.28.0", features = ["rustls-tls-native-roots"] }
axum-extra = { version = "0.10.3", features = ["typed-header"] }
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
axum = { version = "0.8.6", features = ["ws", "json", "macros"] }
# SER/DE
indexmap = { version = "2.9.0", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
strum = { version = "0.27.1", features = ["derive"] }
serde_json = "1.0.140"
serde_yaml = "0.9.34"
ipnetwork = { version = "0.21.1", features = ["serde"] }
indexmap = { version = "2.12.0", features = ["serde"] }
serde = { version = "1.0.227", features = ["derive"] }
strum = { version = "0.27.2", features = ["derive"] }
bson = { version = "2.15.0" } # must keep in sync with mongodb version
serde_yaml_ng = "0.10.0"
serde_json = "1.0.145"
serde_qs = "0.15.0"
toml = "0.8.22"
toml = "0.9.8"
url = "2.5.7"
# ERROR
anyhow = "1.0.98"
thiserror = "2.0.12"
anyhow = "1.0.100"
thiserror = "2.0.17"
# LOGGING
opentelemetry-otlp = { version = "0.29.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.29.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.19", features = ["json"] }
opentelemetry-semantic-conventions = "0.29.0"
tracing-opentelemetry = "0.30.0"
opentelemetry = "0.29.1"
opentelemetry-otlp = { version = "0.31.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.20", features = ["json"] }
opentelemetry-semantic-conventions = "0.31.0"
tracing-opentelemetry = "0.32.0"
opentelemetry = "0.31.0"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.38", features = ["derive"] }
clap = { version = "4.5.50", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.17.0", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "9.3.1", default-features = false }
openidconnect = "4.0.0"
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "10.1.0", features = ["aws_lc_rs"] } # locked back with octorust
rustls = { version = "0.23.34", features = ["aws-lc-rs"] }
pem-rfc7468 = { version = "0.7.0", features = ["alloc"] }
openidconnect = "4.0.1"
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.17.0"
bcrypt = "0.17.1"
base64 = "0.22.1"
rustls = "0.23.27"
pkcs8 = "0.10.2"
snow = "0.10.0"
hmac = "0.12.1"
sha1 = "0.10.6"
sha2 = "0.10.9"
rand = "0.9.1"
rand = "0.9.2"
hex = "0.4.3"
spki = "0.7.3"
der = "0.7.10"
# SYSTEM
hickory-resolver = "0.25.2"
portable-pty = "0.9.0"
bollard = "0.19.0"
sysinfo = "0.35.1"
shell-escape = "0.1.5"
crossterm = "0.29.0"
bollard = "0.19.3"
sysinfo = "0.37.1"
shlex = "1.3.0"
# CLOUD
aws-config = "1.6.3"
aws-sdk-ec2 = "1.134.0"
aws-credential-types = "1.2.3"
aws-config = "1.8.8"
aws-sdk-ec2 = "1.176.0"
aws-credential-types = "1.2.8"
## CRON
english-to-cron = "0.1.6"
chrono-tz = "0.10.3"
chrono = "0.4.41"
croner = "2.1.0"
chrono-tz = "0.10.4"
chrono = "0.4.42"
croner = "3.0.0"
# MISC
async-compression = { version = "0.4.32", features = ["tokio", "gzip"] }
derive_builder = "0.20.2"
comfy-table = "7.2.1"
typeshare = "1.0.4"
octorust = "0.10.0"
dashmap = "6.1.0"
wildcard = "0.3.0"
colored = "3.0.0"
regex = "1.11.1"
bytes = "1.10.1"
bson = "2.15.0"
regex = "1.12.2"

2
action/build.ts Normal file
View File

@@ -0,0 +1,2 @@
import { run } from "./run.ts";
await run("build-komodo");

5
action/deno.json Normal file
View File

@@ -0,0 +1,5 @@
{
"imports": {
"@std/toml": "jsr:@std/toml"
}
}

4
action/deploy-fe.ts Normal file
View File

@@ -0,0 +1,4 @@
const cmd = "km run -y action deploy-komodo-fe-change";
new Deno.Command("bash", {
args: ["-c", cmd],
}).spawn();

2
action/deploy.ts Executable file
View File

@@ -0,0 +1,2 @@
import { run } from "./run.ts";
await run("deploy-komodo");

52
action/run.ts Normal file
View File

@@ -0,0 +1,52 @@
import * as TOML from "@std/toml";
export const run = async (action: string) => {
const branch = await new Deno.Command("bash", {
args: ["-c", "git rev-parse --abbrev-ref HEAD"],
})
.output()
.then((r) => new TextDecoder("utf-8").decode(r.stdout).trim());
const cargo_toml_str = await Deno.readTextFile("Cargo.toml");
const prev_version = (
TOML.parse(cargo_toml_str) as {
workspace: { package: { version: string } };
}
).workspace.package.version;
const [version, tag, count] = prev_version.split("-");
const next_count = Number(count) + 1;
const next_version = `${version}-${tag}-${next_count}`;
await Deno.writeTextFile(
"Cargo.toml",
cargo_toml_str.replace(
`version = "${prev_version}"`,
`version = "${next_version}"`
)
);
// Cargo check first here to make sure lock file is updated before commit.
const cmd = `
cargo check
echo ""
git add --all
git commit --all --message "deploy ${version}-${tag}-${next_count}"
echo ""
git push
echo ""
km run -y action ${action} "KOMODO_BRANCH=${branch}&KOMODO_VERSION=${version}&KOMODO_TAG=${tag}-${next_count}"
`
.split("\n")
.map((line) => line.trim())
.filter((line) => line.length > 0 && !line.startsWith("//"))
.join(" && ");
new Deno.Command("bash", {
args: ["-c", cmd],
}).spawn();
};

View File

@@ -1,7 +1,8 @@
## Builds the Komodo Core, Periphery, and Util binaries
## for a specific architecture.
FROM rust:1.87.0-bullseye AS builder
FROM rust:1.90.0-bullseye AS builder
RUN cargo install cargo-strip
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./
@@ -10,21 +11,22 @@ COPY ./client/core/rs ./client/core/rs
COPY ./client/periphery ./client/periphery
COPY ./bin/core ./bin/core
COPY ./bin/periphery ./bin/periphery
COPY ./bin/util ./bin/util
COPY ./bin/cli ./bin/cli
# Compile bin
RUN \
cargo build -p komodo_core --release && \
cargo build -p komodo_periphery --release && \
cargo build -p komodo_util --release
cargo build -p komodo_cli --release && \
cargo strip
# Copy just the binaries to scratch image
FROM scratch
COPY --from=builder /builder/target/release/core /core
COPY --from=builder /builder/target/release/periphery /periphery
COPY --from=builder /builder/target/release/util /util
COPY --from=builder /builder/target/release/km /km
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Binaries"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -0,0 +1,36 @@
## Builds the Komodo Core, Periphery, and CLI (km) binaries
## for a specific architecture.
## Uses chef for dependency caching to help speed up back-to-back builds.
FROM lukemathwalker/cargo-chef:latest-rust-1.90.0-bullseye AS chef
WORKDIR /builder

# Plan just the RECIPE to see if things have changed
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder
# cargo-strip removes debug symbols to shrink the release binaries.
RUN cargo install cargo-strip
COPY --from=planner /builder/recipe.json recipe.json
# Build JUST dependencies - cached layer
RUN cargo chef cook --release --recipe-path recipe.json
# NOW copy again (this time into builder) and build app
COPY . .
RUN \
  cargo build --release --bin core && \
  cargo build --release --bin periphery && \
  cargo build --release --bin km && \
  cargo strip

# Copy just the binaries to scratch image
FROM scratch
COPY --from=builder /builder/target/release/core /core
COPY --from=builder /builder/target/release/periphery /periphery
COPY --from=builder /builder/target/release/km /km

LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Binaries"
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -1,30 +1,39 @@
[package]
name = "komodo_cli"
description = "Command line tool to execute Komodo actions"
description = "Command line tool for Komodo"
version.workspace = true
edition.workspace = true
authors.workspace = true
license.workspace = true
homepage.workspace = true
repository.workspace = true
homepage.workspace = true
[[bin]]
name = "komodo"
name = "km"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# local
# komodo_client = "1.16.12"
environment_file.workspace = true
komodo_client.workspace = true
database.workspace = true
config.workspace = true
logger.workspace = true
noise.workspace = true
# external
tracing-subscriber.workspace = true
merge_config_files.workspace = true
futures.workspace = true
futures-util.workspace = true
comfy-table.workspace = true
tokio-util.workspace = true
serde_json.workspace = true
crossterm.workspace = true
serde_qs.workspace = true
wildcard.workspace = true
tracing.workspace = true
colored.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
chrono.workspace = true
tokio.workspace = true
serde.workspace = true
clap.workspace = true
envy.workspace = true

25
bin/cli/aio.Dockerfile Normal file
View File

@@ -0,0 +1,25 @@
## All-in-one build of the Komodo CLI (km) from source,
## producing a minimal distroless runtime image.
FROM rust:1.90.0-bullseye AS builder
# cargo-strip removes debug symbols to shrink the release binary.
RUN cargo install cargo-strip
WORKDIR /builder
# Copy only what the CLI build needs, to keep the build context lean.
COPY Cargo.toml Cargo.lock ./
COPY ./lib ./lib
COPY ./client/core/rs ./client/core/rs
COPY ./client/periphery ./client/periphery
COPY ./bin/cli ./bin/cli
# Compile bin
RUN cargo build -p komodo_cli --release && cargo strip

# Copy binaries to distroless base
FROM gcr.io/distroless/cc
COPY --from=builder /builder/target/release/km /usr/local/bin/km
# Default config search path inside the container.
ENV KOMODO_CLI_CONFIG_PATHS="/config"
CMD [ "km" ]
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo CLI"
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -7,13 +7,13 @@ Can be used to move between MongoDB / FerretDB, or upgrade from FerretDB v1 to v
services:
copy_database:
image: ghcr.io/moghtech/komodo-util
image: ghcr.io/moghtech/komodo-cli
command: km database copy -y
environment:
MODE: CopyDatabase
SOURCE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@source:27017
SOURCE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@target:27017
TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@source:27017
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@target:27017
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
```
@@ -90,13 +90,13 @@ services:
...(new database)
copy_database:
image: ghcr.io/moghtech/komodo-util
image: ghcr.io/moghtech/komodo-cli
command: km database copy -y
environment:
MODE: CopyDatabase
SOURCE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb:27017/${KOMODO_DATABASE_DB_NAME:-komodo}?authMechanism=PLAIN
SOURCE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb2:27017
TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb:27017/${KOMODO_DATABASE_DB_NAME:-komodo}?authMechanism=PLAIN
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb2:27017
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
...(unchanged)
```

View File

@@ -14,14 +14,16 @@ FROM debian:bullseye-slim
WORKDIR /app
## Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
COPY --from=x86_64 /util /app/arch/linux/amd64
COPY --from=aarch64 /util /app/arch/linux/arm64
COPY --from=x86_64 /km /app/arch/linux/amd64
COPY --from=aarch64 /km /app/arch/linux/arm64
ARG TARGETPLATFORM
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/util && rm -r /app/arch
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/arch
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.description="Komodo Util"
LABEL org.opencontainers.image.licenses=GPL-3.0
ENV KOMODO_CLI_CONFIG_PATHS="/config"
CMD [ "util" ]
CMD [ "km" ]
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo CLI"
LABEL org.opencontainers.image.licenses="GPL-3.0"

4
bin/cli/runfile.toml Normal file
View File

@@ -0,0 +1,4 @@
[install-cli]
alias = "ic"
description = "Installs the Komodo CLI, available on the command line as 'km'"
cmd = "cargo install --path ."

View File

@@ -0,0 +1,18 @@
## Assumes the latest binaries for the required arch are already built (by binaries.Dockerfile).
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
# This is required to work with COPY --from
FROM ${BINARIES_IMAGE} AS binaries

FROM gcr.io/distroless/cc
COPY --from=binaries /km /usr/local/bin/km
# Default config search path inside the container.
ENV KOMODO_CLI_CONFIG_PATHS="/config"
CMD [ "km" ]
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo CLI"
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -1,55 +0,0 @@
use clap::{Parser, Subcommand};
use komodo_client::api::execute::Execution;
use serde::Deserialize;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
pub struct CliArgs {
/// Sync or Exec
#[command(subcommand)]
pub command: Command,
/// The path to a creds file.
///
/// Note: If each of `url`, `key` and `secret` are passed,
/// no file is required at this path.
#[arg(long, default_value_t = default_creds())]
pub creds: String,
/// Pass url in args instead of creds file
#[arg(long)]
pub url: Option<String>,
/// Pass api key in args instead of creds file
#[arg(long)]
pub key: Option<String>,
/// Pass api secret in args instead of creds file
#[arg(long)]
pub secret: Option<String>,
/// Always continue on user confirmation prompts.
#[arg(long, short, default_value_t = false)]
pub yes: bool,
}
fn default_creds() -> String {
let home =
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
format!("{home}/.config/komodo/creds.toml")
}
#[derive(Debug, Clone, Subcommand)]
pub enum Command {
/// Runs an execution
Execute {
#[command(subcommand)]
execution: Execution,
},
// Room for more
}
#[derive(Debug, Deserialize)]
pub struct CredsFile {
pub url: String,
pub key: String,
pub secret: String,
}

View File

@@ -0,0 +1,314 @@
use std::collections::{HashMap, HashSet};
use anyhow::Context;
use colored::Colorize;
use comfy_table::{Attribute, Cell, Color};
use futures_util::{
FutureExt, TryStreamExt, stream::FuturesUnordered,
};
use komodo_client::{
api::read::{
InspectDockerContainer, ListAllDockerContainers, ListServers,
},
entities::{
config::cli::args::container::{
Container, ContainerCommand, InspectContainer,
},
docker::{
self,
container::{ContainerListItem, ContainerStateStatusEnum},
},
},
};
use crate::{
command::{
PrintTable, clamp_sha, matches_wildcards, parse_wildcards,
print_items,
},
config::cli_config,
};
/// Entry point for the `container` subcommand: with no nested
/// command, lists containers; otherwise dispatches the nested
/// inspect command.
pub async fn handle(container: &Container) -> anyhow::Result<()> {
  if let Some(ContainerCommand::Inspect(inspect)) = &container.command
  {
    inspect_container(inspect).await
  } else {
    list_containers(container).await
  }
}
/// Lists Docker containers across all servers, applying the state
/// and wildcard filters given on the command line, and prints them
/// as a table via `print_items`.
async fn list_containers(
  Container {
    all,
    down,
    links,
    reverse,
    containers: names,
    images,
    networks,
    servers,
    format,
    command: _,
  }: &Container,
) -> anyhow::Result<()> {
  let client = super::komodo_client().await?;
  // Fetch the server list (keyed by id) and all containers concurrently.
  let (server_map, containers) = tokio::try_join!(
    client
      .read(ListServers::default())
      .map(|res| res.map(|res| res
        .into_iter()
        .map(|s| (s.id.clone(), s))
        .collect::<HashMap<_, _>>())),
    client.read(ListAllDockerContainers {
      servers: Default::default(),
      containers: Default::default(),
    }),
  )?;
  // (Option<Server Name>, Container)
  let containers = containers.into_iter().map(|c| {
    let server = if let Some(server_id) = c.server_id.as_ref()
      && let Some(server) = server_map.get(server_id)
    {
      server
    } else {
      // Containers with no resolvable server are kept; name stays None.
      return (None, c);
    };
    (Some(server.name.as_str()), c)
  });
  let names = parse_wildcards(names);
  let servers = parse_wildcards(servers);
  let images = parse_wildcards(images);
  let networks = parse_wildcards(networks);
  let mut containers = containers
    .into_iter()
    .filter(|(server_name, c)| {
      // --all shows every state, --down shows only non-running,
      // the default shows only running containers.
      let state_check = if *all {
        true
      } else if *down {
        !matches!(c.state, ContainerStateStatusEnum::Running)
      } else {
        matches!(c.state, ContainerStateStatusEnum::Running)
      };
      // Network wildcards match either the network mode or any
      // attached network name.
      let network_check = matches_wildcards(
        &networks,
        &c.network_mode
          .as_deref()
          .map(|n| vec![n])
          .unwrap_or_default(),
      ) || matches_wildcards(
        &networks,
        &c.networks.iter().map(String::as_str).collect::<Vec<_>>(),
      );
      state_check
        && network_check
        && matches_wildcards(&names, &[c.name.as_str()])
        && matches_wildcards(
          &servers,
          &server_name
            .as_deref()
            .map(|i| vec![i])
            .unwrap_or_default(),
        )
        && matches_wildcards(
          &images,
          &c.image.as_deref().map(|i| vec![i]).unwrap_or_default(),
        )
    })
    .collect::<Vec<_>>();
  // Stable display order: state, then name, server, network, image.
  containers.sort_by(|(a_s, a), (b_s, b)| {
    a.state
      .cmp(&b.state)
      .then(a.name.cmp(&b.name))
      .then(a_s.cmp(b_s))
      .then(a.network_mode.cmp(&b.network_mode))
      .then(a.image.cmp(&b.image))
  });
  if *reverse {
    containers.reverse();
  }
  print_items(containers, *format, *links)?;
  Ok(())
}
/// Inspects every container matching the name wildcard in
/// `inspect.container` (optionally narrowed by server wildcards)
/// and prints the result(s) as pretty-printed JSON.
pub async fn inspect_container(
  inspect: &InspectContainer,
) -> anyhow::Result<()> {
  let client = super::komodo_client().await?;
  // Fetch the server list (keyed by id) and all containers concurrently.
  let (server_map, mut containers) = tokio::try_join!(
    client
      .read(ListServers::default())
      .map(|res| res.map(|res| res
        .into_iter()
        .map(|s| (s.id.clone(), s))
        .collect::<HashMap<_, _>>())),
    client.read(ListAllDockerContainers {
      servers: Default::default(),
      containers: Default::default()
    }),
  )?;
  // Replace each container's server_id with the server NAME
  // ("Unknown" when the id has no matching server); the name is used
  // both for the server wildcard match and the inspect request below.
  containers.iter_mut().for_each(|c| {
    let Some(server_id) = c.server_id.as_ref() else {
      return;
    };
    let Some(server) = server_map.get(server_id) else {
      c.server_id = Some(String::from("Unknown"));
      return;
    };
    c.server_id = Some(server.name.clone());
  });
  let names = [inspect.container.to_string()];
  let names = parse_wildcards(&names);
  let servers = parse_wildcards(&inspect.servers);
  // Filter down to matching containers, then run the inspect calls
  // concurrently and collect the results.
  let mut containers = containers
    .into_iter()
    .filter(|c| {
      matches_wildcards(&names, &[c.name.as_str()])
        && matches_wildcards(
          &servers,
          &c.server_id
            .as_deref()
            .map(|i| vec![i])
            .unwrap_or_default(),
        )
    })
    .map(|c| async move {
      client
        .read(InspectDockerContainer {
          container: c.name,
          server: c.server_id.context("No server...")?,
        })
        .await
    })
    .collect::<FuturesUnordered<_>>()
    .try_collect::<Vec<_>>()
    .await?;
  containers.sort_by(|a, b| a.name.cmp(&b.name));
  // 0 matches -> info message; 1 -> print it; many -> print all,
  // newline separated.
  match containers.len() {
    0 => {
      println!(
        "{}: Did not find any containers matching '{}'",
        "INFO".green(),
        inspect.container.bold()
      );
    }
    1 => {
      println!("{}", serialize_container(inspect, &containers[0])?);
    }
    _ => {
      let containers = containers
        .iter()
        .map(|c| serialize_container(inspect, c))
        .collect::<anyhow::Result<Vec<_>>>()?
        .join("\n");
      println!("{containers}");
    }
  }
  Ok(())
}
/// Serializes the requested section of an inspected container to
/// pretty-printed JSON. The first section flag set on `inspect`
/// wins; with no section flags the whole container is emitted.
fn serialize_container(
  inspect: &InspectContainer,
  container: &docker::container::Container,
) -> anyhow::Result<String> {
  let res = if inspect.state {
    serde_json::to_string_pretty(&container.state)
  } else if inspect.mounts {
    serde_json::to_string_pretty(&container.mounts)
  } else if inspect.host_config {
    serde_json::to_string_pretty(&container.host_config)
  } else if inspect.config {
    serde_json::to_string_pretty(&container.config)
  } else if inspect.network_settings {
    serde_json::to_string_pretty(&container.network_settings)
  } else {
    serde_json::to_string_pretty(container)
  }
  .context("Failed to serialize items to JSON")?;
  Ok(res)
}
// (Option<Server Name>, Container)
impl PrintTable for (Option<&'_ str>, ContainerListItem) {
  // The extra "Link" column only exists when links are requested.
  fn header(links: bool) -> &'static [&'static str] {
    if links {
      &[
        "Container",
        "State",
        "Server",
        "Ports",
        "Networks",
        "Image",
        "Link",
      ]
    } else {
      &["Container", "State", "Server", "Ports", "Networks", "Image"]
    }
  }
  // Renders one table row for a container, coloring the State cell
  // by container status.
  fn row(self, links: bool) -> Vec<Cell> {
    let color = match self.1.state {
      ContainerStateStatusEnum::Running => Color::Green,
      ContainerStateStatusEnum::Paused => Color::DarkYellow,
      ContainerStateStatusEnum::Empty => Color::Grey,
      _ => Color::Red,
    };
    // Merge network_mode + attached networks, deduped and sorted for
    // stable output.
    let mut networks = HashSet::new();
    if let Some(network) = self.1.network_mode {
      networks.insert(network);
    }
    for network in self.1.networks {
      networks.insert(network);
    }
    let mut networks = networks.into_iter().collect::<Vec<_>>();
    networks.sort();
    // Unique public (host) ports rendered as ":80, :443".
    // NOTE(review): ports sort lexically ("8080" < "90") — confirm
    // whether numeric ordering was intended.
    let mut ports = self
      .1
      .ports
      .into_iter()
      .flat_map(|p| p.public_port.map(|p| p.to_string()))
      .collect::<HashSet<_>>()
      .into_iter()
      .collect::<Vec<_>>();
    ports.sort();
    let ports = if ports.is_empty() {
      Cell::new("")
    } else {
      Cell::new(format!(":{}", ports.join(", :")))
    };
    let image = self.1.image.as_deref().unwrap_or("Unknown");
    let mut res = vec![
      Cell::new(self.1.name.clone()).add_attribute(Attribute::Bold),
      Cell::new(self.1.state.to_string())
        .fg(color)
        .add_attribute(Attribute::Bold),
      Cell::new(self.0.unwrap_or("Unknown")),
      ports,
      Cell::new(networks.join(", ")),
      Cell::new(clamp_sha(image)),
    ];
    if !links {
      return res;
    }
    // Deep link into the Komodo UI for this container.
    let link = if let Some(server_id) = self.1.server_id {
      format!(
        "{}/servers/{server_id}/container/{}",
        cli_config().host,
        self.1.name
      )
    } else {
      String::new()
    };
    res.push(Cell::new(link));
    res
  }
}

View File

@@ -0,0 +1,366 @@
use std::path::Path;
use anyhow::Context;
use colored::Colorize;
use database::mungos::mongodb::bson::{Document, doc};
use komodo_client::entities::{
config::cli::args::database::DatabaseCommand, optional_string,
};
use crate::{command::sanitize_uri, config::cli_config};
/// Dispatches a `database` subcommand to its handler, forwarding the
/// `--yes` confirmation flag and any subcommand-specific options.
pub async fn handle(command: &DatabaseCommand) -> anyhow::Result<()> {
  match command {
    DatabaseCommand::Backup { yes, .. } => backup(*yes).await,
    DatabaseCommand::Restore {
      restore_folder,
      index,
      yes,
      ..
    } => restore(restore_folder.as_deref(), *index, *yes).await,
    DatabaseCommand::Prune { yes, .. } => prune(*yes).await,
    DatabaseCommand::Copy { yes, index, .. } => {
      copy(*index, *yes).await
    }
    DatabaseCommand::V1Downgrade { yes } => v1_downgrade(*yes).await,
  }
}
/// Backs up all database contents to gzip compressed files under the
/// configured backups folder, then prunes old backup folders when
/// `max_backups` is non-zero. Prompts for confirmation unless `yes`.
async fn backup(yes: bool) -> anyhow::Result<()> {
  let config = cli_config();
  println!(
    "\n🦎 {} Database {} Utility 🦎",
    "Komodo".bold(),
    "Backup".green().bold()
  );
  println!(
    "\n{}\n",
    " - Backup all database contents to gzip compressed files."
      .dimmed()
  );
  // Only print the connection fields which are actually configured.
  if let Some(uri) = optional_string(&config.database.uri) {
    println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) = optional_string(&config.database.address) {
    println!("{}: {address}", " - Source Address".dimmed());
  }
  if let Some(username) = optional_string(&config.database.username) {
    println!("{}: {username}", " - Source Username".dimmed());
  }
  println!(
    "{}: {}\n",
    " - Source Db Name".dimmed(),
    config.database.db_name,
  );
  println!(
    "{}: {:?}",
    " - Backups Folder".dimmed(),
    config.backups_folder
  );
  if config.max_backups == 0 {
    // Fix: include the ": " separator so this line matches the
    // formatting of the other config lines (was "{}{}", which printed
    // " - Backup pruningdisabled").
    println!(
      "{}: {}",
      " - Backup pruning".dimmed(),
      "disabled".red().dimmed()
    );
  } else {
    println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
  }
  // Confirmation prompt (skipped with -y / --yes).
  crate::command::wait_for_enter("start backup", yes)?;
  let db = database::init(&config.database).await?;
  database::utils::backup(&db, &config.backups_folder).await?;
  // Early return if backup pruning disabled
  if config.max_backups == 0 {
    return Ok(());
  }
  // Know that new backup was taken successfully at this point,
  // safe to prune old backup folders
  prune_inner().await
}
/// Restores database contents from gzip backup files (the backups
/// folder, or a specific `restore_folder`) into the TARGET database,
/// after user confirmation.
///
/// `index: false` selects the non-indexing target client and prints
/// an indexing-disabled warning.
async fn restore(
  restore_folder: Option<&Path>,
  index: bool,
  yes: bool,
) -> anyhow::Result<()> {
  let config = cli_config();
  println!(
    "\n🦎 {} Database {} Utility 🦎",
    "Komodo".bold(),
    "Restore".purple().bold()
  );
  println!(
    "\n{}\n",
    " - Restores database contents from gzip compressed files."
      .dimmed()
  );
  // Only print the connection fields which are actually configured.
  if let Some(uri) = optional_string(&config.database_target.uri) {
    println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) =
    optional_string(&config.database_target.address)
  {
    println!("{}: {address}", " - Target Address".dimmed());
  }
  if let Some(username) =
    optional_string(&config.database_target.username)
  {
    println!("{}: {username}", " - Target Username".dimmed());
  }
  println!(
    "{}: {}",
    " - Target Db Name".dimmed(),
    config.database_target.db_name,
  );
  if !index {
    println!(
      "{}: {}",
      " - Target Db Indexing".dimmed(),
      "DISABLED".red(),
    );
  }
  println!(
    "\n{}: {:?}",
    " - Backups Folder".dimmed(),
    config.backups_folder
  );
  if let Some(restore_folder) = restore_folder {
    println!("{}: {restore_folder:?}", " - Restore Folder".dimmed());
  }
  crate::command::wait_for_enter("start restore", yes)?;
  // `index == true` goes through Client::new — presumably that path
  // builds database indexes while `database::init` skips them;
  // confirm in the database crate.
  let db = if index {
    database::Client::new(&config.database_target).await?.db
  } else {
    database::init(&config.database_target).await?
  };
  database::utils::restore(
    &db,
    &config.backups_folder,
    restore_folder,
  )
  .await
}
/// Prunes old database backup folders down to the configured
/// `max_backups` count, after user confirmation. No-op (with a hint)
/// when pruning is disabled via `max_backups == 0`.
async fn prune(yes: bool) -> anyhow::Result<()> {
  let config = cli_config();
  println!(
    "\n🦎 {} Database {} Utility 🦎",
    "Komodo".bold(),
    "Backup Prune".cyan().bold()
  );
  println!(
    "\n{}\n",
    " - Prunes database backup folders when greater than the configured amount."
      .dimmed()
  );
  println!(
    "{}: {:?}",
    " - Backups Folder".dimmed(),
    config.backups_folder
  );
  if config.max_backups == 0 {
    // Fix: include the ": " separator so this line matches the
    // formatting of the other config lines (was "{}{}", which printed
    // " - Backup pruningdisabled").
    println!(
      "{}: {}",
      " - Backup pruning".dimmed(),
      "disabled".red().dimmed()
    );
  } else {
    println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
  }
  // Early return if backup pruning disabled
  if config.max_backups == 0 {
    // Fix: grammar of the hint ("enabled using" -> "enable it using").
    info!(
      "Backup pruning is disabled, enable it using 'max_backups' (KOMODO_CLI_MAX_BACKUPS)"
    );
    return Ok(());
  }
  crate::command::wait_for_enter("start backup prune", yes)?;
  prune_inner().await
}
/// Deletes the oldest backup folders so at most `max_backups` remain.
/// Relies on folder names sorting chronologically (oldest first) —
/// presumably they are timestamped; confirm at the backup call site.
async fn prune_inner() -> anyhow::Result<()> {
  let config = cli_config();
  let mut backups_dir =
    match tokio::fs::read_dir(&config.backups_folder)
      .await
      .context("Failed to read backups folder for prune")
    {
      Ok(backups_dir) => backups_dir,
      Err(e) => {
        // A missing / unreadable backups folder is not fatal —
        // warn and skip pruning.
        warn!("{e:#}");
        return Ok(());
      }
    };
  let mut backup_folders = Vec::new();
  // Collect only directory entries; unreadable entries are skipped.
  loop {
    match backups_dir.next_entry().await {
      Ok(Some(entry)) => {
        let Ok(metadata) = entry.metadata().await else {
          continue;
        };
        if metadata.is_dir() {
          backup_folders.push(entry.path());
        }
      }
      Ok(None) => break,
      Err(_) => {
        continue;
      }
    }
  }
  // Ordered from oldest -> newest
  backup_folders.sort();
  let max_backups = config.max_backups as usize;
  let backup_folders_len = backup_folders.len();
  // Early return if under the backup count threshold
  if backup_folders_len <= max_backups {
    info!("No backups to prune");
    return Ok(());
  }
  // Everything before the newest `max_backups` entries gets removed.
  let to_delete =
    &backup_folders[..(backup_folders_len - max_backups)];
  info!("Pruning old backups: {to_delete:?}");
  for path in to_delete {
    if let Err(e) =
      tokio::fs::remove_dir_all(path).await.with_context(|| {
        format!("Failed to delete backup folder at {path:?}")
      })
    {
      // Keep going — one failed deletion shouldn't abort the rest.
      warn!("{e:#}");
    }
  }
  Ok(())
}
/// Copies all database contents from the source database to the
/// TARGET database, after user confirmation.
///
/// `index: false` selects the non-indexing target client and prints
/// an indexing-disabled warning.
async fn copy(index: bool, yes: bool) -> anyhow::Result<()> {
  let config = cli_config();
  println!(
    "\n🦎 {} Database {} Utility 🦎",
    "Komodo".bold(),
    "Copy".blue().bold()
  );
  println!(
    "\n{}\n",
    " - Copies database contents to another database.".dimmed()
  );
  // Only print the connection fields which are actually configured.
  if let Some(uri) = optional_string(&config.database.uri) {
    println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) = optional_string(&config.database.address) {
    println!("{}: {address}", " - Source Address".dimmed());
  }
  if let Some(username) = optional_string(&config.database.username) {
    println!("{}: {username}", " - Source Username".dimmed());
  }
  println!(
    "{}: {}\n",
    " - Source Db Name".dimmed(),
    config.database.db_name,
  );
  if let Some(uri) = optional_string(&config.database_target.uri) {
    println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) =
    optional_string(&config.database_target.address)
  {
    println!("{}: {address}", " - Target Address".dimmed());
  }
  if let Some(username) =
    optional_string(&config.database_target.username)
  {
    println!("{}: {username}", " - Target Username".dimmed());
  }
  println!(
    "{}: {}",
    " - Target Db Name".dimmed(),
    config.database_target.db_name,
  );
  if !index {
    println!(
      "{}: {}",
      " - Target Db Indexing".dimmed(),
      "DISABLED".red(),
    );
  }
  crate::command::wait_for_enter("start copy", yes)?;
  let source_db = database::init(&config.database).await?;
  // `index == true` goes through Client::new — presumably that path
  // builds indexes on the target while `database::init` skips them;
  // confirm in the database crate.
  let target_db = if index {
    database::Client::new(&config.database_target).await?.db
  } else {
    database::init(&config.database_target).await?
  };
  database::utils::copy(&source_db, &target_db).await
}
/// Downgrades the database to V1 compatible data structures so the
/// deployment can roll back to komodo-core:1, after confirmation.
async fn v1_downgrade(yes: bool) -> anyhow::Result<()> {
  let config = cli_config();
  println!(
    "\n🦎 {} Database {} 🦎",
    "Komodo".bold(),
    "V1 Downgrade".purple().bold()
  );
  println!(
    "\n{}\n",
    " - Downgrade the database to V1 compatible data structures."
      .dimmed()
  );
  // Only print the connection fields which are actually configured.
  if let Some(uri) = optional_string(&config.database.uri) {
    println!("{}: {}", " - URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) = optional_string(&config.database.address) {
    println!("{}: {address}", " - Address".dimmed());
  }
  if let Some(username) = optional_string(&config.database.username) {
    println!("{}: {username}", " - Username".dimmed());
  }
  println!(
    "{}: {}\n",
    " - Db Name".dimmed(),
    config.database.db_name,
  );
  crate::command::wait_for_enter("run downgrade", yes)?;
  let db = database::init(&config.database).await?;
  // Null out `info` on every Server document — presumably the only
  // V1-incompatible field here; confirm against the V1 Server schema.
  db.collection::<Document>("Server")
    .update_many(doc! {}, doc! { "$set": { "info": null } })
    .await
    .context("Failed to downgrade Server schema")?;
  info!(
    "V1 Downgrade complete. Ready to downgrade to komodo-core:1 ✅"
  );
  Ok(())
}

View File

@@ -1,22 +1,25 @@
use std::time::Duration;
use colored::Colorize;
use futures_util::{StreamExt, stream::FuturesUnordered};
use komodo_client::{
api::execute::{BatchExecutionResponse, Execution},
entities::update::Update,
api::execute::{
BatchExecutionResponse, BatchExecutionResponseItem, Execution,
},
entities::{resource_link, update::Update},
};
use crate::{
helpers::wait_for_enter,
state::{cli_args, komodo_client},
};
use crate::config::cli_config;
pub enum ExecutionResult {
enum ExecutionResult {
Single(Box<Update>),
Batch(BatchExecutionResponse),
}
pub async fn run(execution: Execution) -> anyhow::Result<()> {
pub async fn handle(
execution: &Execution,
yes: bool,
) -> anyhow::Result<()> {
if matches!(execution, Execution::None(_)) {
println!("Got 'none' execution. Doing nothing...");
tokio::time::sleep(Duration::from_secs(3)).await;
@@ -25,7 +28,7 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
}
println!("\n{}: Execution", "Mode".dimmed());
match &execution {
match execution {
Execution::None(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -209,262 +212,299 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::BatchDestroyStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunStackService(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::TestAlerter(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::SendAlert(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::ClearRepoCache(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BackupCoreDatabase(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::GlobalAutoUpdate(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RotateAllServerKeys(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RotateCoreKeys(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Sleep(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
}
if !cli_args().yes {
wait_for_enter("run execution")?;
}
super::wait_for_enter("run execution", yes)?;
info!("Running Execution...");
let res = match execution {
Execution::RunAction(request) => komodo_client()
let client = super::komodo_client().await?;
let res = match execution.clone() {
Execution::RunAction(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchRunAction(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::RunProcedure(request) => komodo_client()
Execution::BatchRunAction(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::RunProcedure(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchRunProcedure(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::RunBuild(request) => komodo_client()
Execution::BatchRunProcedure(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::RunBuild(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchRunBuild(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::CancelBuild(request) => komodo_client()
Execution::BatchRunBuild(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::CancelBuild(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::Deploy(request) => komodo_client()
Execution::Deploy(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchDeploy(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::PullDeployment(request) => komodo_client()
Execution::BatchDeploy(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::PullDeployment(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::StartDeployment(request) => komodo_client()
Execution::StartDeployment(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RestartDeployment(request) => komodo_client()
Execution::RestartDeployment(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PauseDeployment(request) => komodo_client()
Execution::PauseDeployment(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::UnpauseDeployment(request) => komodo_client()
Execution::UnpauseDeployment(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::StopDeployment(request) => komodo_client()
Execution::StopDeployment(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::DestroyDeployment(request) => komodo_client()
Execution::DestroyDeployment(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchDestroyDeployment(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::CloneRepo(request) => komodo_client()
Execution::BatchDestroyDeployment(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::CloneRepo(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchCloneRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::PullRepo(request) => komodo_client()
Execution::BatchCloneRepo(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::PullRepo(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchPullRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::BuildRepo(request) => komodo_client()
Execution::BatchPullRepo(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::BuildRepo(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchBuildRepo(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::CancelRepoBuild(request) => komodo_client()
Execution::BatchBuildRepo(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::CancelRepoBuild(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::StartContainer(request) => komodo_client()
Execution::StartContainer(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RestartContainer(request) => komodo_client()
Execution::RestartContainer(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PauseContainer(request) => komodo_client()
Execution::PauseContainer(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::UnpauseContainer(request) => komodo_client()
Execution::UnpauseContainer(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::StopContainer(request) => komodo_client()
Execution::StopContainer(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::DestroyContainer(request) => komodo_client()
Execution::DestroyContainer(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::StartAllContainers(request) => komodo_client()
Execution::StartAllContainers(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RestartAllContainers(request) => komodo_client()
Execution::RestartAllContainers(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PauseAllContainers(request) => komodo_client()
Execution::PauseAllContainers(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::UnpauseAllContainers(request) => komodo_client()
Execution::UnpauseAllContainers(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::StopAllContainers(request) => komodo_client()
Execution::StopAllContainers(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PruneContainers(request) => komodo_client()
Execution::PruneContainers(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::DeleteNetwork(request) => komodo_client()
Execution::DeleteNetwork(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PruneNetworks(request) => komodo_client()
Execution::PruneNetworks(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::DeleteImage(request) => komodo_client()
Execution::DeleteImage(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PruneImages(request) => komodo_client()
Execution::PruneImages(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::DeleteVolume(request) => komodo_client()
Execution::DeleteVolume(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PruneVolumes(request) => komodo_client()
Execution::PruneVolumes(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PruneDockerBuilders(request) => komodo_client()
Execution::PruneDockerBuilders(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PruneBuildx(request) => komodo_client()
Execution::PruneBuildx(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PruneSystem(request) => komodo_client()
Execution::PruneSystem(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RunSync(request) => komodo_client()
Execution::RunSync(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::CommitSync(request) => komodo_client()
Execution::CommitSync(request) => client
.write(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::DeployStack(request) => komodo_client()
Execution::DeployStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchDeployStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::DeployStackIfChanged(request) => komodo_client()
Execution::BatchDeployStack(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::DeployStackIfChanged(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchDeployStackIfChanged(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::PullStack(request) => komodo_client()
Execution::BatchDeployStackIfChanged(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::PullStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchPullStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::StartStack(request) => komodo_client()
Execution::BatchPullStack(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::StartStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RestartStack(request) => komodo_client()
Execution::RestartStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::PauseStack(request) => komodo_client()
Execution::PauseStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::UnpauseStack(request) => komodo_client()
Execution::UnpauseStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::StopStack(request) => komodo_client()
Execution::StopStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::DestroyStack(request) => komodo_client()
Execution::DestroyStack(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BatchDestroyStack(request) => komodo_client()
Execution::BatchDestroyStack(request) => {
client.execute(request).await.map(ExecutionResult::Batch)
}
Execution::RunStackService(request) => client
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::TestAlerter(request) => komodo_client()
.map(|u| ExecutionResult::Single(u.into())),
Execution::TestAlerter(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::SendAlert(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::ClearRepoCache(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::BackupCoreDatabase(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::GlobalAutoUpdate(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RotateAllServerKeys(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RotateCoreKeys(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
@@ -480,13 +520,67 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
match res {
Ok(ExecutionResult::Single(update)) => {
println!("\n{}: {update:#?}", "SUCCESS".green())
poll_update_until_complete(&update).await
}
Ok(ExecutionResult::Batch(update)) => {
println!("\n{}: {update:#?}", "SUCCESS".green())
Ok(ExecutionResult::Batch(updates)) => {
let mut handles = updates
.iter()
.map(|update| async move {
match update {
BatchExecutionResponseItem::Ok(update) => {
poll_update_until_complete(update).await
}
BatchExecutionResponseItem::Err(e) => {
error!("{e:#?}");
Ok(())
}
}
})
.collect::<FuturesUnordered<_>>();
while let Some(res) = handles.next().await {
match res {
Ok(()) => {}
Err(e) => {
error!("{e:#?}");
}
}
}
Ok(())
}
Err(e) => {
error!("{e:#?}");
Ok(())
}
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
}
}
/// Prints a link to the Update (or its target resource), then polls
/// the API until the Update finishes, reporting elapsed time and
/// whether the execution succeeded.
async fn poll_update_until_complete(
  update: &Update,
) -> anyhow::Result<()> {
  // With no update id (eg. update not persisted), link to the
  // target resource instead of the update page.
  let link = if update.id.is_empty() {
    let (resource_type, id) = update.target.extract_variant_id();
    resource_link(&cli_config().host, resource_type, id)
  } else {
    format!("{}/updates/{}", cli_config().host, update.id)
  };
  println!("Link: '{}'", link.bold());
  let client = super::komodo_client().await?;
  let timer = tokio::time::Instant::now();
  // NOTE(review): this still polls with an empty id when
  // `update.id` is empty — confirm the API handles that case.
  let update = client.poll_update_until_complete(&update.id).await?;
  if update.success {
    println!(
      "FINISHED in {}: {}",
      format!("{:.1?}", timer.elapsed()).bold(),
      "EXECUTION SUCCESSFUL".green(),
    );
  } else {
    eprintln!(
      "FINISHED in {}: {}",
      format!("{:.1?}", timer.elapsed()).bold(),
      "EXECUTION FAILED".red(),
    );
  }
  Ok(())
}

1217
bin/cli/src/command/list.rs Normal file

File diff suppressed because it is too large Load Diff

182
bin/cli/src/command/mod.rs Normal file
View File

@@ -0,0 +1,182 @@
use std::io::Read;
use anyhow::{Context, anyhow};
use chrono::TimeZone;
use colored::Colorize;
use comfy_table::{Attribute, Cell, Table};
use komodo_client::{
KomodoClient,
entities::config::cli::{CliTableBorders, args::CliFormat},
};
use serde::Serialize;
use tokio::sync::OnceCell;
use wildcard::Wildcard;
use crate::config::cli_config;
pub mod container;
pub mod database;
pub mod execute;
pub mod list;
pub mod terminal;
pub mod update;
/// Lazily initialized, process-wide Komodo API client.
///
/// On first call, reads `cli_key` / `cli_secret` from the resolved
/// CLI config and runs a healthcheck against the configured host.
/// Errors if either credential is missing or the healthcheck fails;
/// subsequent calls return the cached client.
async fn komodo_client() -> anyhow::Result<&'static KomodoClient> {
  static KOMODO_CLIENT: OnceCell<KomodoClient> =
    OnceCell::const_new();
  KOMODO_CLIENT
    .get_or_try_init(|| async {
      let config = cli_config();
      // Both credentials are required to authenticate.
      let (Some(key), Some(secret)) =
        (&config.cli_key, &config.cli_secret)
      else {
        return Err(anyhow!(
          "Must provide both cli_key and cli_secret"
        ));
      };
      KomodoClient::new(&config.host, key, secret)
        .with_healthcheck()
        .await
    })
    .await
}
/// Prompts the user to press ENTER before performing the action
/// described by `press_enter_to`, blocking until a byte is read
/// from stdin. When `skip` is set (eg. a `--yes` flag), only a
/// blank line is printed and the function returns immediately.
fn wait_for_enter(
  press_enter_to: &str,
  skip: bool,
) -> anyhow::Result<()> {
  if skip {
    println!();
  } else {
    println!(
      "\nPress {} to {}\n",
      "ENTER".green(),
      press_enter_to.bold()
    );
    let mut byte = [0u8; 1];
    std::io::stdin()
      .read_exact(&mut byte)
      .context("failed to read ENTER")?;
  }
  Ok(())
}
/// Sanitizes uris of the form
/// `protocol://username:password@address`, masking the password
/// (or the whole credential when there is no `:`) with `*****`.
/// Strings without a `://` scheme or without an `@` credentials
/// section are returned unchanged.
fn sanitize_uri(uri: &str) -> String {
  // Decompose into (protocol, credentials, address), or None if
  // the uri doesn't carry a credentials section at all.
  let parts = uri.split_once("://").and_then(|(protocol, rest)| {
    rest
      .split_once('@')
      .map(|(credentials, address)| (protocol, credentials, address))
  });
  match parts {
    Some((protocol, credentials, address)) => {
      // Keep the username when present, always mask the secret.
      let masked = match credentials.split_once(':') {
        Some((username, _password)) => format!("{username}:*****"),
        None => String::from("*****"),
      };
      format!("{protocol}://{masked}@{address}")
    }
    // Nothing to redact.
    None => uri.to_string(),
  }
}
/// Prints `items` either as a formatted table or as pretty JSON,
/// depending on the requested [`CliFormat`].
///
/// `links` controls whether link columns are included in the
/// table header / rows (forwarded to the [`PrintTable`] impl).
fn print_items<T: PrintTable + Serialize>(
  items: Vec<T>,
  format: CliFormat,
  links: bool,
) -> anyhow::Result<()> {
  match format {
    CliFormat::Table => {
      let mut table = Table::new();
      // Border preset is driven by the `table_borders` config;
      // horizontal-only is the default.
      let preset = {
        use comfy_table::presets::*;
        match cli_config().table_borders {
          None | Some(CliTableBorders::Horizontal) => {
            UTF8_HORIZONTAL_ONLY
          }
          Some(CliTableBorders::Vertical) => UTF8_FULL_CONDENSED,
          Some(CliTableBorders::Inside) => UTF8_NO_BORDERS,
          Some(CliTableBorders::Outside) => UTF8_BORDERS_ONLY,
          Some(CliTableBorders::All) => UTF8_FULL,
        }
      };
      table.load_preset(preset).set_header(
        T::header(links)
          .iter()
          .map(|h| Cell::new(h).add_attribute(Attribute::Bold)),
      );
      for item in items {
        table.add_row(item.row(links));
      }
      println!("{table}");
    }
    CliFormat::Json => {
      println!(
        "{}",
        serde_json::to_string_pretty(&items)
          .context("Failed to serialize items to JSON")?
      );
    }
  }
  Ok(())
}
/// Table rendering hooks used by [`print_items`] for the CLI's
/// table output format.
trait PrintTable {
  /// Column headers. `links` adds link column(s) when enabled.
  fn header(links: bool) -> &'static [&'static str];
  /// Renders one row, consuming the item. Must align with `header`.
  fn row(self, links: bool) -> Vec<Cell>;
}
/// Parses raw wildcard pattern strings into [`Wildcard`] matchers.
/// Patterns which fail to parse are logged and dropped rather than
/// failing the whole command (best-effort filtering).
fn parse_wildcards(items: &[String]) -> Vec<Wildcard<'_>> {
  items
    .iter()
    .flat_map(|i| {
      // `flat_map` over the Result discards Errs after logging.
      Wildcard::new(i.as_bytes()).inspect_err(|e| {
        warn!("Failed to parse wildcard: {i} | {e:?}")
      })
    })
    .collect::<Vec<_>>()
}
/// Returns true when any of `items` matches any of `wildcards`.
/// An empty wildcard list is treated as "match everything".
fn matches_wildcards(
  wildcards: &[Wildcard<'_>],
  items: &[&str],
) -> bool {
  // Empty filter short-circuits to a universal match; otherwise
  // scan every (pattern, item) pair for a hit.
  wildcards.is_empty()
    || wildcards.iter().any(|pattern| {
      items.iter().any(|item| pattern.is_match(item.as_bytes()))
    })
}
/// Formats a unix timestamp in milliseconds as a local-timezone
/// `MM/DD HH:MM:SS` string. Errors on out-of-range/ambiguous input.
///
/// NOTE(review): function name has a typo (`timetamp`) — kept as-is
/// to avoid breaking callers; consider renaming across the crate.
fn format_timetamp(ts: i64) -> anyhow::Result<String> {
  let ts = chrono::Local
    .timestamp_millis_opt(ts)
    .single()
    .context("Invalid ts")?
    .format("%m/%d %H:%M:%S")
    .to_string();
  Ok(ts)
}
/// Shortens full `sha256:<digest>` ids for display.
///
/// Strings with the `sha256:` prefix are clamped to their first 20
/// bytes with a `...` suffix; anything else (tags, names, short ids)
/// is returned unchanged.
fn clamp_sha(maybe_sha: &str) -> String {
  if maybe_sha.starts_with("sha256:") {
    // Checked slice: the direct `maybe_sha[0..20]` would panic on
    // a (malformed/truncated) digest shorter than 20 bytes, or on
    // a non-char-boundary. Fall back to the full string instead.
    match maybe_sha.get(0..20) {
      Some(clamped) => format!("{clamped}..."),
      None => maybe_sha.to_string(),
    }
  } else {
    maybe_sha.to_string()
  }
}
// fn text_link(link: &str, text: &str) -> String {
// format!("\x1b]8;;{link}\x07{text}\x1b]8;;\x07")
// }

View File

@@ -0,0 +1,334 @@
use anyhow::{Context, anyhow};
use colored::Colorize;
use komodo_client::{
api::{
read::{ListAllDockerContainers, ListServers},
terminal::InitTerminal,
},
entities::{
config::cli::args::terminal::{Attach, Connect, Exec},
server::ServerQuery,
terminal::{
ContainerTerminalMode, TerminalRecreateMode,
TerminalResizeMessage, TerminalStdinMessage,
},
},
ws::terminal::TerminalWebsocket,
};
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
use tokio_util::sync::CancellationToken;
/// `connect` subcommand: attaches the local terminal to a named
/// terminal session on the given Server, creating it on demand.
pub async fn handle_connect(
  Connect {
    server,
    name,
    command,
    recreate,
  }: &Connect,
) -> anyhow::Result<()> {
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_server_terminal(
        server.to_string(),
        Some(name.to_string()),
        Some(InitTerminal {
          command: command.clone(),
          // `--recreate` forces a fresh terminal; otherwise only
          // recreate when the requested command differs from the
          // existing session's command.
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          mode: None,
        }),
      )
      .await
  })
  .await
}
/// `exec` subcommand: opens an exec-mode terminal (`shell` as the
/// command) inside a container, resolving the Server automatically
/// when `--server` is omitted.
pub async fn handle_exec(
  Exec {
    server,
    container,
    shell,
    recreate,
  }: &Exec,
) -> anyhow::Result<()> {
  let server = get_server(server.clone(), container).await?;
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_container_terminal(
        server,
        container.to_string(),
        None,
        Some(InitTerminal {
          command: Some(shell.to_string()),
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          // Exec mode: run `shell` inside the container.
          mode: Some(ContainerTerminalMode::Exec),
        }),
      )
      .await
  })
  .await
}
/// `attach` subcommand: attaches to a container's main process
/// (no command is passed — attach mode), resolving the Server
/// automatically when `--server` is omitted.
pub async fn handle_attach(
  Attach {
    server,
    container,
    recreate,
  }: &Attach,
) -> anyhow::Result<()> {
  let server = get_server(server.clone(), container).await?;
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_container_terminal(
        server,
        container.to_string(),
        None,
        Some(InitTerminal {
          // Attach mode takes no command.
          command: None,
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          mode: Some(ContainerTerminalMode::Attach),
        }),
      )
      .await
  })
  .await
}
/// Resolves which Server hosts `container` when the user did not
/// pass `--server`, by searching all servers for a matching
/// container name.
///
/// Errors when no container matches, or when the name is ambiguous
/// across multiple servers (listing the candidate servers).
async fn get_server(
  server: Option<String>,
  container: &str,
) -> anyhow::Result<String> {
  // An explicit server always wins.
  if let Some(server) = server {
    return Ok(server);
  }
  let client = super::komodo_client().await?;
  let mut containers = client
    .read(ListAllDockerContainers {
      servers: Default::default(),
      containers: vec![container.to_string()],
    })
    .await?;
  if containers.is_empty() {
    return Err(anyhow!(
      "Did not find any container matching {container}"
    ));
  }
  // Unambiguous: single match, use its server.
  if containers.len() == 1 {
    return containers
      .pop()
      .context("Shouldn't happen")?
      .server_id
      .context("Container doesn't have server_id");
  }
  // Ambiguous: look up the candidate servers to show their names.
  let servers = containers
    .into_iter()
    .flat_map(|container| container.server_id)
    .collect::<Vec<_>>();
  // NOTE(review): `server_id`s are fed into the `names` query
  // filter here — confirm ServerQuery matches ids as names.
  let servers = client
    .read(ListServers {
      query: ServerQuery::builder().names(servers).build(),
    })
    .await?
    .into_iter()
    .map(|server| format!("\t- {}", server.name.bold()))
    .collect::<Vec<_>>()
    .join("\n");
  Err(anyhow!(
    "Multiple containers matching '{}' on Servers:\n{servers}",
    container.bold(),
  ))
}
/// Bridges the local terminal (stdin/stdout/SIGWINCH) to a Komodo
/// terminal websocket produced by `connect`.
///
/// Four cooperative loops run until any one finishes, at which point
/// a shared CancellationToken tears the rest down:
///   1. SIGWINCH      -> Resize messages into the write channel
///   2. local stdin   -> Forward messages into the write channel
///   3. write channel -> websocket write half
///   4. websocket read half -> local stdout
///
/// The local terminal is held in raw mode for the duration, and the
/// process exits once the session closes.
async fn handle_terminal_forwarding<
  C: Future<Output = anyhow::Result<TerminalWebsocket>>,
>(
  connect: C,
) -> anyhow::Result<()> {
  // Need to forward multiple sources into ws write
  let (write_tx, mut write_rx) =
    tokio::sync::mpsc::channel::<TerminalStdinMessage>(1024);
  // ================
  //  SETUP RESIZING
  // ================
  // Subscribe to SIGWINCH for resize messages
  let mut sigwinch = tokio::signal::unix::signal(
    tokio::signal::unix::SignalKind::window_change(),
  )
  .context("failed to register SIGWINCH handler")?;
  // Send first resize message, bailing if it fails to get the size.
  write_tx.send(resize_message()?).await?;
  let cancel = CancellationToken::new();
  let forward_resize = async {
    while future_or_cancel(sigwinch.recv(), &cancel)
      .await
      .flatten()
      .is_some()
    {
      // Size lookup failures are skipped; a closed channel ends
      // the loop.
      if let Ok(resize_message) = resize_message()
        && write_tx.send(resize_message).await.is_err()
      {
        break;
      }
    }
    cancel.cancel();
  };
  let forward_stdin = async {
    let mut stdin = tokio::io::stdin();
    let mut buf = [0u8; 8192];
    while let Some(Ok(n)) =
      future_or_cancel(stdin.read(&mut buf), &cancel).await
    {
      // EOF
      if n == 0 {
        break;
      }
      let bytes = &buf[..n];
      // Check for disconnect sequence (alt + q)
      // NOTE(review): [197, 147] is UTF-8 'œ' (Opt+Q on macOS) —
      // confirm behavior on other keymaps.
      if bytes == [197, 147] {
        break;
      }
      // Forward bytes
      if write_tx
        .send(TerminalStdinMessage::Forward(bytes.to_vec()))
        .await
        .is_err()
      {
        break;
      };
    }
    cancel.cancel();
  };
  // =====================
  //  CONNECT AND FORWARD
  // =====================
  let (mut ws_write, mut ws_read) = connect.await?.split();
  let forward_write = async {
    // Drain the channel into the websocket; returns the first
    // send error (if any) so it can be reported after join.
    while let Some(message) =
      future_or_cancel(write_rx.recv(), &cancel).await.flatten()
    {
      if let Err(e) = ws_write.send_stdin_message(message).await {
        cancel.cancel();
        return Some(e);
      };
    }
    cancel.cancel();
    None
  };
  let forward_read = async {
    let mut stdout = tokio::io::stdout();
    while let Some(msg) =
      future_or_cancel(ws_read.receive_stdout(), &cancel).await
    {
      let bytes = match msg {
        Ok(Some(bytes)) => bytes,
        // Clean close of the stream.
        Ok(None) => break,
        Err(e) => {
          cancel.cancel();
          return Some(e.context("Websocket read error"));
        }
      };
      if let Err(e) = stdout
        .write_all(&bytes)
        .await
        .context("Failed to write text to stdout")
      {
        cancel.cancel();
        return Some(e);
      }
      let _ = stdout.flush().await;
    }
    cancel.cancel();
    None
  };
  // Raw mode only while actively forwarding; guard restores the
  // terminal on drop.
  let guard = RawModeGuard::enable_raw_mode()?;
  let (_, _, write_error, read_error) = tokio::join!(
    forward_resize,
    forward_stdin,
    forward_write,
    forward_read
  );
  drop(guard);
  if let Some(e) = write_error {
    eprintln!("\nFailed to forward stdin | {e:#}");
  }
  if let Some(e) = read_error {
    eprintln!("\nFailed to forward stdout | {e:#}");
  }
  println!("\n\n{} {}", "connection".bold(), "closed".red().bold());
  // It doesn't seem to exit by itself after the raw mode stuff.
  std::process::exit(0)
}
/// Builds a Resize message from the current local terminal size,
/// erroring if the size cannot be determined.
fn resize_message() -> anyhow::Result<TerminalStdinMessage> {
  let (cols, rows) = crossterm::terminal::size()
    .context("Failed to get terminal size")?;
  Ok(TerminalStdinMessage::Resize(TerminalResizeMessage {
    rows,
    cols,
  }))
}
/// RAII guard for terminal raw mode: enabled on construction,
/// disabled when the guard is dropped (even on early return).
struct RawModeGuard;

impl RawModeGuard {
  /// Puts the terminal into raw mode so stdin can be forwarded
  /// byte-for-byte without local line buffering/echo.
  fn enable_raw_mode() -> anyhow::Result<Self> {
    crossterm::terminal::enable_raw_mode()
      .context("Failed to enable terminal raw mode")?;
    Ok(Self)
  }
}

impl Drop for RawModeGuard {
  fn drop(&mut self) {
    // Best effort — errors can't propagate out of Drop.
    if let Err(e) = crossterm::terminal::disable_raw_mode() {
      eprintln!("Failed to disable terminal raw mode | {e:?}");
    }
  }
}
/// Awaits `fut`, returning `None` early if `cancel` fires first.
/// Lets the forwarding loops shut down together once any one ends.
async fn future_or_cancel<T, F: Future<Output = T>>(
  fut: F,
  cancel: &CancellationToken,
) -> Option<T> {
  tokio::select! {
    res = fut => Some(res),
    _ = cancel.cancelled() => None
  }
}

View File

@@ -0,0 +1,43 @@
use komodo_client::entities::{
build::PartialBuildConfig,
config::cli::args::update::UpdateCommand,
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
server::PartialServerConfig, stack::PartialStackConfig,
sync::PartialResourceSyncConfig,
};
mod resource;
mod user;
mod variable;
/// Dispatches `update` subcommands to the matching resource,
/// variable, or user updater.
pub async fn handle(command: &UpdateCommand) -> anyhow::Result<()> {
  match command {
    // Resource updates share one generic driver, parameterized by
    // the partial config type.
    UpdateCommand::Build(update) => {
      resource::update::<PartialBuildConfig>(update).await
    }
    UpdateCommand::Deployment(update) => {
      resource::update::<PartialDeploymentConfig>(update).await
    }
    UpdateCommand::Repo(update) => {
      resource::update::<PartialRepoConfig>(update).await
    }
    UpdateCommand::Server(update) => {
      resource::update::<PartialServerConfig>(update).await
    }
    UpdateCommand::Stack(update) => {
      resource::update::<PartialStackConfig>(update).await
    }
    UpdateCommand::Sync(update) => {
      resource::update::<PartialResourceSyncConfig>(update).await
    }
    UpdateCommand::Variable {
      name,
      value,
      secret,
      yes,
    } => variable::update(name, value, *secret, *yes).await,
    UpdateCommand::User { username, command } => {
      user::update(username, command).await
    }
  }
}

View File

@@ -0,0 +1,152 @@
use anyhow::Context;
use colored::Colorize;
use komodo_client::{
api::write::{
UpdateBuild, UpdateDeployment, UpdateRepo, UpdateResourceSync,
UpdateServer, UpdateStack,
},
entities::{
build::PartialBuildConfig,
config::cli::args::update::UpdateResource,
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
server::PartialServerConfig, stack::PartialStackConfig,
sync::PartialResourceSyncConfig,
},
};
use serde::{Serialize, de::DeserializeOwned};
/// Shared driver for `update <resource>` commands: parses the
/// querystring-style `update` argument into the partial config
/// type `T`, prints a preview, waits for user confirmation
/// (skipped with `--yes`), then applies via [`ResourceUpdate`].
pub async fn update<
  T: std::fmt::Debug + Serialize + DeserializeOwned + ResourceUpdate,
>(
  UpdateResource {
    resource,
    update,
    yes,
  }: &UpdateResource,
) -> anyhow::Result<()> {
  println!("\n{}: Update {}\n", "Mode".dimmed(), T::resource_type());
  println!(" - {}: {resource}", "Name".dimmed());
  // `update` is querystring formatted, eg `field=value&other=2`.
  let config = serde_qs::from_str::<T>(update)
    .context("Failed to deserialize config")?;
  // Prefer pretty JSON for the preview; fall back to Debug output
  // if serialization fails.
  match serde_json::to_string_pretty(&config) {
    Ok(config) => {
      println!(" - {}: {config}", "Update".dimmed());
    }
    Err(_) => {
      println!(" - {}: {config:#?}", "Update".dimmed());
    }
  }
  crate::command::wait_for_enter("update resource", *yes)?;
  config.apply(resource).await
}
/// Per-resource-type hooks used by the generic [`update`] driver.
pub trait ResourceUpdate {
  /// Display name of the resource type, eg "Build".
  fn resource_type() -> &'static str;
  /// Sends this partial config as an update to the named resource.
  async fn apply(self, resource: &str) -> anyhow::Result<()>;
}
// Boilerplate ResourceUpdate impls: each forwards the partial
// config to the matching write API call.
// NOTE(review): the `id` field receives the user-supplied
// name-or-id string — confirm the write endpoints resolve names.

impl ResourceUpdate for PartialBuildConfig {
  fn resource_type() -> &'static str {
    "Build"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateBuild {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update build config")?;
    Ok(())
  }
}

impl ResourceUpdate for PartialDeploymentConfig {
  fn resource_type() -> &'static str {
    "Deployment"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateDeployment {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update deployment config")?;
    Ok(())
  }
}

impl ResourceUpdate for PartialRepoConfig {
  fn resource_type() -> &'static str {
    "Repo"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateRepo {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update repo config")?;
    Ok(())
  }
}

impl ResourceUpdate for PartialServerConfig {
  fn resource_type() -> &'static str {
    "Server"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateServer {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update server config")?;
    Ok(())
  }
}

impl ResourceUpdate for PartialStackConfig {
  fn resource_type() -> &'static str {
    "Stack"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateStack {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update stack config")?;
    Ok(())
  }
}

impl ResourceUpdate for PartialResourceSyncConfig {
  fn resource_type() -> &'static str {
    "Sync"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateResourceSync {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update sync config")?;
    Ok(())
  }
}

View File

@@ -0,0 +1,122 @@
use anyhow::Context;
use colored::Colorize;
use database::mungos::mongodb::bson::doc;
use komodo_client::entities::{
config::{
cli::args::{CliEnabled, update::UpdateUserCommand},
empty_or_redacted,
},
optional_string,
};
use crate::{command::sanitize_uri, config::cli_config};
/// Dispatches `update user` subcommands (password / super admin)
/// for the given username.
pub async fn update(
  username: &str,
  command: &UpdateUserCommand,
) -> anyhow::Result<()> {
  match command {
    UpdateUserCommand::Password {
      password,
      unsanitized,
      yes,
    } => {
      update_password(username, password, *unsanitized, *yes).await
    }
    UpdateUserCommand::SuperAdmin { enabled, yes } => {
      update_super_admin(username, *enabled, *yes).await
    }
  }
}
/// Sets a user's password by writing directly to the database
/// (not through the API), after printing a preview and waiting for
/// confirmation (skipped with `yes`).
///
/// The password is redacted in the preview unless `unsanitized`
/// is set. Errors if the user does not exist.
async fn update_password(
  username: &str,
  password: &str,
  unsanitized: bool,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Password\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  if unsanitized {
    // Explicitly requested plaintext echo of the new password.
    println!(" - {}: {password}", "Password".dimmed());
  } else {
    println!(
      " - {}: {}",
      "Password".dimmed(),
      empty_or_redacted(password)
    );
  }
  crate::command::wait_for_enter("update password", yes)?;
  info!("Updating password...");
  let db = database::Client::new(&cli_config().database).await?;
  // Confirm the user exists before writing.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;
  db.set_user_password(&user, password).await?;
  info!("Password updated ✅");
  Ok(())
}
/// Toggles a user's `super_admin` flag by writing directly to the
/// database, after printing the target database (credentials
/// sanitized) and waiting for confirmation (skipped with `yes`).
///
/// Errors if the user does not exist.
async fn update_super_admin(
  username: &str,
  super_admin: CliEnabled,
  yes: bool,
) -> anyhow::Result<()> {
  let config = cli_config();
  println!("\n{}: Update Super Admin\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  println!(" - {}: {super_admin}\n", "Super Admin".dimmed());
  // Show where the write will go, with the uri password masked.
  if let Some(uri) = optional_string(&config.database.uri) {
    println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) = optional_string(&config.database.address) {
    println!("{}: {address}", " - Source Address".dimmed());
  }
  if let Some(username) = optional_string(&config.database.username) {
    println!("{}: {username}", " - Source Username".dimmed());
  }
  println!(
    "{}: {}",
    " - Source Db Name".dimmed(),
    config.database.db_name,
  );
  crate::command::wait_for_enter("update super admin", yes)?;
  info!("Updating super admin...");
  let db = database::Client::new(&config.database).await?;
  // Make sure the user exists first before saying it is successful.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;
  let super_admin: bool = super_admin.into();
  db.users
    .update_one(
      doc! { "username": user.username },
      doc! { "$set": { "super_admin": super_admin } },
    )
    .await
    .context("Failed to update user super admin on db")?;
  info!("Super admin updated ✅");
  Ok(())
}

View File

@@ -0,0 +1,70 @@
use anyhow::Context;
use colored::Colorize;
use komodo_client::api::{
read::GetVariable,
write::{
CreateVariable, UpdateVariableIsSecret, UpdateVariableValue,
},
};
/// Upserts a Variable via the API: creates it if it does not exist,
/// otherwise updates its value (and `is_secret` when it changed).
/// Prints a preview and waits for confirmation (skipped with `yes`).
pub async fn update(
  name: &str,
  value: &str,
  secret: Option<bool>,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Variable\n", "Mode".dimmed());
  println!(" - {}: {name}", "Name".dimmed());
  println!(" - {}: {value}", "Value".dimmed());
  if let Some(secret) = secret {
    println!(" - {}: {secret}", "Is Secret".dimmed());
  }
  crate::command::wait_for_enter("update variable", yes)?;
  let client = crate::command::komodo_client().await?;
  // NOTE(review): ANY GetVariable error (not just "not found")
  // takes the create path — confirm that is intended.
  let Ok(existing) = client
    .read(GetVariable {
      name: name.to_string(),
    })
    .await
  else {
    // Create the variable
    client
      .write(CreateVariable {
        name: name.to_string(),
        value: value.to_string(),
        is_secret: secret.unwrap_or_default(),
        description: Default::default(),
      })
      .await
      .context("Failed to create variable")?;
    info!("Variable created ✅");
    return Ok(());
  };
  client
    .write(UpdateVariableValue {
      name: name.to_string(),
      value: value.to_string(),
    })
    .await
    .context("Failed to update variable 'value'")?;
  info!("Variable 'value' updated ✅");
  // Only touch `is_secret` when explicitly passed and changed.
  let Some(secret) = secret else { return Ok(()) };
  if secret != existing.is_secret {
    client
      .write(UpdateVariableIsSecret {
        name: name.to_string(),
        is_secret: secret,
      })
      .await
      .context("Failed to update variable 'is_secret'")?;
    info!("Variable 'is_secret' updated to {secret} ✅");
  }
  Ok(())
}

280
bin/cli/src/config.rs Normal file
View File

@@ -0,0 +1,280 @@
use std::{path::PathBuf, sync::OnceLock};
use anyhow::Context;
use clap::Parser;
use colored::Colorize;
use environment_file::maybe_read_item_from_file;
use komodo_client::entities::{
config::{
DatabaseConfig,
cli::{
CliConfig, Env,
args::{CliArgs, Command, Execute, database::DatabaseCommand},
},
},
logger::LogConfig,
};
/// Command line arguments, parsed once on first access and cached
/// for the life of the process.
pub fn cli_args() -> &'static CliArgs {
  static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
  CLI_ARGS.get_or_init(CliArgs::parse)
}
/// Komodo CLI environment variables, parsed once on first access
/// and cached for the life of the process.
///
/// Panics (fail fast at startup) when the environment cannot be
/// parsed into [`Env`].
pub fn cli_env() -> &'static Env {
  // Renamed from `CLI_ARGS` (copy-paste slip from `cli_args`):
  // this static holds the parsed Env.
  static CLI_ENV: OnceLock<Env> = OnceLock::new();
  CLI_ENV.get_or_init(|| {
    match envy::from_env()
      .context("Failed to parse Komodo CLI environment")
    {
      Ok(env) => env,
      Err(e) => {
        panic!("{e:?}")
      }
    }
  })
}
/// Fully-resolved CLI config, built once on first access.
///
/// Resolution order (highest precedence first):
///   1. command line arguments (including per-subcommand overrides)
///   2. environment variables
///   3. selected profile from the config file(s)
///   4. base config file values / defaults
///
/// Config files are loaded from `config_paths`, optionally merged
/// with a named profile block. Panics (fail fast) on unparsable
/// config or a missing requested profile.
pub fn cli_config() -> &'static CliConfig {
  static CLI_CONFIG: OnceLock<CliConfig> = OnceLock::new();
  CLI_CONFIG.get_or_init(|| {
    let args = cli_args();
    let env = cli_env().clone();
    // Args override env for where to look for config files.
    let config_paths = args
      .config_path
      .clone()
      .unwrap_or(env.komodo_cli_config_paths);
    let debug_startup =
      args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
    if debug_startup {
      println!(
        "{}: Komodo CLI version: {}",
        "DEBUG".cyan(),
        env!("CARGO_PKG_VERSION").blue().bold()
      );
      println!(
        "{}: {}: {config_paths:?}",
        "DEBUG".cyan(),
        "Config Paths".dimmed(),
      );
    }
    let config_keywords = args
      .config_keyword
      .clone()
      .unwrap_or(env.komodo_cli_config_keywords);
    let config_keywords = config_keywords
      .iter()
      .map(String::as_str)
      .collect::<Vec<_>>();
    if debug_startup {
      println!(
        "{}: {}: {config_keywords:?}",
        "DEBUG".cyan(),
        "Config File Keywords".dimmed(),
      );
    }
    // Load and merge all matching config files into one raw
    // JSON object (kept raw so profiles can be merged later).
    let mut unparsed_config = (config::ConfigLoader {
      paths: &config_paths
        .iter()
        .map(PathBuf::as_path)
        .collect::<Vec<_>>(),
      match_wildcards: &config_keywords,
      include_file_name: ".kminclude",
      merge_nested: env.komodo_cli_merge_nested_config,
      extend_array: env.komodo_cli_extend_config_arrays,
      debug_print: debug_startup,
    })
    .load::<serde_json::Map<String, serde_json::Value>>()
    .expect("failed at parsing config from paths");
    // First typed parse: only needed to read `default_profile`.
    let init_parsed_config = serde_json::from_value::<CliConfig>(
      serde_json::Value::Object(unparsed_config.clone()),
    )
    .context("Failed to parse config")
    .unwrap();
    // Per-subcommand credential overrides (only `execute` takes
    // host/key/secret on the command line).
    let (host, key, secret) = match &args.command {
      Command::Execute(Execute {
        host, key, secret, ..
      }) => (host.clone(), key.clone(), secret.clone()),
      _ => (None, None, None),
    };
    let backups_folder = match &args.command {
      Command::Database {
        command: DatabaseCommand::Backup { backups_folder, .. },
      } => backups_folder.clone(),
      Command::Database {
        command: DatabaseCommand::Restore { backups_folder, .. },
      } => backups_folder.clone(),
      _ => None,
    };
    // `database copy` can override the copy-target connection.
    let (uri, address, username, password, db_name) =
      match &args.command {
        Command::Database {
          command:
            DatabaseCommand::Copy {
              uri,
              address,
              username,
              password,
              db_name,
              ..
            },
        } => (
          uri.clone(),
          address.clone(),
          username.clone(),
          password.clone(),
          db_name.clone(),
        ),
        _ => (None, None, None, None, None),
      };
    // Profile: --profile beats the config's default_profile.
    let profile = args
      .profile
      .as_ref()
      .or(init_parsed_config.default_profile.as_ref());
    let unparsed_config = if let Some(profile) = profile
      && !profile.is_empty()
    {
      // Find the profile config,
      // then merge it with the Default config.
      let serde_json::Value::Array(profiles) = unparsed_config
        .remove("profile")
        .context("Config has no profiles, but a profile is required")
        .unwrap()
      else {
        panic!("`config.profile` is not array");
      };
      // Profiles match by name or by any declared alias.
      let Some(profile_config) = profiles.into_iter().find(|p| {
        let Ok(parsed) =
          serde_json::from_value::<CliConfig>(p.clone())
        else {
          return false;
        };
        &parsed.config_profile == profile
          || parsed
            .config_aliases
            .iter()
            .any(|alias| alias == profile)
      }) else {
        panic!("No profile matching '{profile}' was found.");
      };
      let serde_json::Value::Object(profile_config) = profile_config
      else {
        panic!("Profile config is not Object type.");
      };
      // Profile values layered over the base config; if the merge
      // fails, the profile block alone is used.
      config::merge_config(
        unparsed_config,
        profile_config.clone(),
        env.komodo_cli_merge_nested_config,
        env.komodo_cli_extend_config_arrays,
      )
      .unwrap_or(profile_config)
    } else {
      unparsed_config
    };
    // Final typed parse of the merged config.
    let config = serde_json::from_value::<CliConfig>(
      serde_json::Value::Object(unparsed_config),
    )
    .context("Failed to parse final config")
    .unwrap();
    let config_profile = if config.config_profile.is_empty() {
      String::from("None")
    } else {
      config.config_profile
    };
    // Assemble the final config: args > env > file, field by field.
    CliConfig {
      config_profile,
      config_aliases: config.config_aliases,
      default_profile: config.default_profile,
      table_borders: env
        .komodo_cli_table_borders
        .or(config.table_borders),
      host: host
        .or(env.komodo_cli_host)
        .or(env.komodo_host)
        .unwrap_or(config.host),
      cli_key: key.or(env.komodo_cli_key).or(config.cli_key),
      cli_secret: secret
        .or(env.komodo_cli_secret)
        .or(config.cli_secret),
      backups_folder: backups_folder
        .or(env.komodo_cli_backups_folder)
        .unwrap_or(config.backups_folder),
      max_backups: env
        .komodo_cli_max_backups
        .unwrap_or(config.max_backups),
      database_target: DatabaseConfig {
        uri: uri
          .or(env.komodo_cli_database_target_uri)
          .unwrap_or(config.database_target.uri),
        address: address
          .or(env.komodo_cli_database_target_address)
          .unwrap_or(config.database_target.address),
        username: username
          .or(env.komodo_cli_database_target_username)
          .unwrap_or(config.database_target.username),
        password: password
          .or(env.komodo_cli_database_target_password)
          .unwrap_or(config.database_target.password),
        db_name: db_name
          .or(env.komodo_cli_database_target_db_name)
          .unwrap_or(config.database_target.db_name),
        app_name: config.database_target.app_name,
      },
      database: DatabaseConfig {
        // Secrets may come from *_FILE env vars.
        uri: maybe_read_item_from_file(
          env.komodo_database_uri_file,
          env.komodo_database_uri,
        )
        .unwrap_or(config.database.uri),
        address: env
          .komodo_database_address
          .unwrap_or(config.database.address),
        username: maybe_read_item_from_file(
          env.komodo_database_username_file,
          env.komodo_database_username,
        )
        .unwrap_or(config.database.username),
        password: maybe_read_item_from_file(
          env.komodo_database_password_file,
          env.komodo_database_password,
        )
        .unwrap_or(config.database.password),
        db_name: env
          .komodo_database_db_name
          .unwrap_or(config.database.db_name),
        app_name: config.database.app_name,
      },
      cli_logging: LogConfig {
        level: env
          .komodo_cli_logging_level
          .unwrap_or(config.cli_logging.level),
        stdio: env
          .komodo_cli_logging_stdio
          .unwrap_or(config.cli_logging.stdio),
        pretty: env
          .komodo_cli_logging_pretty
          .unwrap_or(config.cli_logging.pretty),
        // Source locations are never logged by the CLI.
        location: false,
        ansi: env
          .komodo_cli_logging_ansi
          .unwrap_or(config.cli_logging.ansi),
        otlp_endpoint: env
          .komodo_cli_logging_otlp_endpoint
          .unwrap_or(config.cli_logging.otlp_endpoint),
        opentelemetry_service_name: env
          .komodo_cli_logging_opentelemetry_service_name
          .unwrap_or(config.cli_logging.opentelemetry_service_name),
        opentelemetry_scope_name: env
          .komodo_cli_logging_opentelemetry_scope_name
          .unwrap_or(config.cli_logging.opentelemetry_scope_name),
      },
      profile: config.profile,
    }
  })
}

View File

@@ -1,17 +0,0 @@
use std::io::Read;
use anyhow::Context;
use colored::Colorize;
/// Block until the user presses ENTER.
///
/// Prints a colored prompt describing what pressing ENTER will do
/// (`press_enter_to` is interpolated into the message), then reads a
/// single byte from stdin to consume the keypress.
///
/// # Errors
/// Returns an error if stdin cannot be read.
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
  let action = press_enter_to.bold();
  println!("\nPress {} to {}\n", "ENTER".green(), action);
  // A single byte is enough to detect the ENTER keypress.
  let mut byte = [0u8; 1];
  std::io::stdin()
    .read_exact(&mut byte)
    .context("failed to read ENTER")?;
  Ok(())
}

View File

@@ -1,32 +1,96 @@
#[macro_use]
extern crate tracing;
use anyhow::Context;
use colored::Colorize;
use komodo_client::api::read::GetVersion;
use komodo_client::entities::config::cli::args;
mod args;
mod exec;
mod helpers;
mod state;
use crate::config::cli_config;
mod command;
mod config;
/// Main CLI logic: loads environment, initializes logging, and
/// dispatches `args.command` to the matching `command::*` handler.
///
/// Returns the handler's result; `main` is responsible for reporting
/// errors and setting the process exit code.
async fn app() -> anyhow::Result<()> {
// Best-effort load of a local `.env` file; absence is not an error.
dotenvy::dotenv().ok();
logger::init(&config::cli_config().cli_logging)?;
let args = config::cli_args();
let env = config::cli_env();
// CLI flag takes precedence over the env var for debug startup output.
let debug_load =
args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
match &args.command {
args::Command::Config {
all_profiles,
unsanitized,
} => {
// `unsanitized` prints the config as-is; otherwise secrets are
// redacted via `sanitized()`.
let mut config = if *unsanitized {
cli_config().clone()
} else {
cli_config().sanitized()
};
// Hide profile data unless all profiles were explicitly requested.
if !*all_profiles {
config.profile = Default::default();
}
// Debug mode uses Rust's pretty Debug format; normal mode emits JSON.
if debug_load {
println!("\n{config:#?}");
} else {
println!(
"\nCLI Config {}",
serde_json::to_string_pretty(&config)
.context("Failed to serialize config for pretty print")?
);
}
Ok(())
}
// All remaining variants delegate to their dedicated command modules.
args::Command::Container(container) => {
command::container::handle(container).await
}
args::Command::Inspect(inspect) => {
command::container::inspect_container(inspect).await
}
args::Command::List(list) => command::list::handle(list).await,
args::Command::Execute(args) => {
command::execute::handle(&args.execution, args.yes).await
}
args::Command::Update { command } => {
command::update::handle(command).await
}
args::Command::Connect(connect) => {
command::terminal::handle_connect(connect).await
}
args::Command::Exec(exec) => {
command::terminal::handle_exec(exec).await
}
args::Command::Attach(attach) => {
command::terminal::handle_attach(attach).await
}
args::Command::Key { command } => {
noise::key::command::handle(command).await
}
args::Command::Database { command } => {
command::database::handle(command).await
}
}
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt().with_target(false).init();
info!(
"Komodo CLI version: {}",
env!("CARGO_PKG_VERSION").blue().bold()
);
let version =
state::komodo_client().read(GetVersion {}).await?.version;
info!("Komodo Core version: {}", version.blue().bold());
match &state::cli_args().command {
args::Command::Execute { execution } => {
exec::run(execution.to_owned()).await?
}
let mut term_signal = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::terminate(),
)?;
tokio::select! {
res = tokio::spawn(app()) => match res {
Ok(Err(e)) => {
eprintln!("{}: {e}", "ERROR".red());
std::process::exit(1)
}
Err(e) => {
eprintln!("{}: {e}", "ERROR".red());
std::process::exit(1)
},
Ok(_) => {}
},
_ = term_signal.recv() => {},
}
Ok(())
}

View File

@@ -1,48 +0,0 @@
use std::sync::OnceLock;
use clap::Parser;
use komodo_client::KomodoClient;
use merge_config_files::parse_config_file;
/// Global CLI arguments, parsed from the process arguments exactly once
/// on first access and cached for the lifetime of the process.
pub fn cli_args() -> &'static crate::args::CliArgs {
  static PARSED: OnceLock<crate::args::CliArgs> = OnceLock::new();
  PARSED.get_or_init(|| crate::args::CliArgs::parse())
}
/// Global Komodo API client, initialized once on first access.
///
/// Credential resolution: if url, key, AND secret are all provided on
/// the command line, they are used directly. Otherwise the creds file
/// (`--creds`) is parsed, and any of the three values given on the
/// command line individually override the file's values.
///
/// # Panics
/// Panics if the creds file cannot be parsed or the initial client
/// healthcheck fails — this is a CLI, so failing fast is intended.
pub fn komodo_client() -> &'static KomodoClient {
  static CLIENT: OnceLock<KomodoClient> = OnceLock::new();
  CLIENT.get_or_init(|| {
    let args = cli_args();
    let creds = if let (Some(url), Some(key), Some(secret)) =
      (&args.url, &args.key, &args.secret)
    {
      // All three provided on the CLI: no need to touch the creds file.
      crate::args::CredsFile {
        url: url.clone(),
        key: key.clone(),
        secret: secret.clone(),
      }
    } else {
      // Load from file, then apply any individual CLI overrides.
      let mut creds: crate::args::CredsFile =
        parse_config_file(args.creds.as_str())
          .expect("failed to parse Komodo credentials");
      if let Some(url) = &args.url {
        creds.url.clone_from(url);
      }
      if let Some(key) = &args.key {
        creds.key.clone_from(key);
      }
      if let Some(secret) = &args.secret {
        creds.secret.clone_from(secret);
      }
      creds
    };
    let crate::args::CredsFile { url, key, secret } = creds;
    // Client construction is async; block here since this runs inside
    // a sync OnceLock initializer.
    futures::executor::block_on(
      KomodoClient::new(url, key, secret).with_healthcheck(),
    )
    .expect("failed to initialize Komodo client")
  })
}

View File

@@ -18,30 +18,34 @@ path = "src/main.rs"
komodo_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
environment_file.workspace = true
interpolate.workspace = true
secret_file.workspace = true
formatting.workspace = true
transport.workspace = true
database.workspace = true
encoding.workspace = true
response.workspace = true
command.workspace = true
config.workspace = true
logger.workspace = true
cache.workspace = true
noise.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
merge_config_files.workspace = true
async_timing_util.workspace = true
partial_derive2.workspace = true
derive_variants.workspace = true
mongo_indexed.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
mungos.workspace = true
slack.workspace = true
svi.workspace = true
# external
aws-credential-types.workspace = true
tokio-tungstenite.workspace = true
english-to-cron.workspace = true
openidconnect.workspace = true
jsonwebtoken.workspace = true
futures-util.workspace = true
axum-server.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
@@ -50,18 +54,17 @@ tokio-util.workspace = true
axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
serde_yaml_ng.workspace = true
serde_qs.workspace = true
typeshare.workspace = true
chrono-tz.workspace = true
indexmap.workspace = true
octorust.workspace = true
wildcard.workspace = true
arc-swap.workspace = true
colored.workspace = true
dashmap.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
nom_pem.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
croner.workspace = true
@@ -69,14 +72,16 @@ chrono.workspace = true
bcrypt.workspace = true
base64.workspace = true
rustls.workspace = true
bytes.workspace = true
tokio.workspace = true
serde.workspace = true
strum.workspace = true
regex.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true
envy.workspace = true
rand.workspace = true
hmac.workspace = true
sha2.workspace = true
hex.workspace = true
url.workspace = true

View File

@@ -1,7 +1,8 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.87.0-bullseye AS core-builder
FROM rust:1.90.0-trixie AS core-builder
RUN cargo install cargo-strip
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./
@@ -9,9 +10,12 @@ COPY ./lib ./lib
COPY ./client/core/rs ./client/core/rs
COPY ./client/periphery ./client/periphery
COPY ./bin/core ./bin/core
COPY ./bin/cli ./bin/cli
# Compile app
RUN cargo build -p komodo_core --release
RUN cargo build -p komodo_core --release && \
cargo build -p komodo_cli --release && \
cargo strip
# Build Frontend
FROM node:20.12-alpine AS frontend-builder
@@ -22,9 +26,9 @@ RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /config/starship.toml
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
@@ -32,9 +36,10 @@ RUN sh ./debian-deps.sh && rm ./debian-deps.sh
WORKDIR /app
# Copy
COPY ./config/core.config.toml /config/config.toml
COPY ./config/core.config.toml /config/.default.config.toml
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=core-builder /builder/target/release/core /usr/local/bin/core
COPY --from=core-builder /builder/target/release/km /usr/local/bin/km
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
# Set $DENO_DIR and preload external Deno deps
@@ -43,12 +48,21 @@ RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Hint at the port
EXPOSE 9120
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
ENV KOMODO_CLI_CONFIG_PATHS="/config"
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
ENTRYPOINT [ "core" ]
CMD [ "/bin/bash", "-c", "update-ca-certificates && core" ]
# Label to prevent Komodo from stopping with StopAllContainers
LABEL komodo.skip="true"
# Label for Ghcr
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -3,12 +3,12 @@
## Core deps installer
apt-get update
apt-get install -y git curl ca-certificates
apt-get install -y git curl ca-certificates iproute2
rm -rf /var/lib/apt/lists/*
# Starship prompt
curl -sS https://starship.rs/install.sh | sh -s -- --yes --bin-dir /usr/local/bin
echo 'export STARSHIP_CONFIG=/config/starship.toml' >> /root/.bashrc
echo 'export STARSHIP_CONFIG=/starship.toml' >> /root/.bashrc
echo 'eval "$(starship init bash)"' >> /root/.bashrc

View File

@@ -13,22 +13,28 @@ FROM ${AARCH64_BINARIES} AS aarch64
FROM ${FRONTEND_IMAGE} AS frontend
# Final Image
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /config/starship.toml
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
WORKDIR /app
# Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
COPY --from=x86_64 /core /app/arch/linux/amd64
COPY --from=aarch64 /core /app/arch/linux/arm64
ARG TARGETPLATFORM
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/arch
# Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
COPY --from=x86_64 /core /app/core/linux/amd64
COPY --from=aarch64 /core /app/core/linux/arm64
RUN mv /app/core/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/core
# Same for km
COPY --from=x86_64 /km /app/km/linux/amd64
COPY --from=aarch64 /km /app/km/linux/arm64
RUN mv /app/km/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/km
# Copy default config / static frontend / deno binary
COPY ./config/core.config.toml /config/config.toml
COPY ./config/core.config.toml /config/.default.config.toml
COPY --from=frontend /frontend /app/frontend
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
@@ -38,12 +44,22 @@ RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Hint at the port
EXPOSE 9120
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
ENV KOMODO_CLI_CONFIG_PATHS="/config"
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
CMD [ "core" ]
ENTRYPOINT [ "entrypoint.sh" ]
CMD [ "core" ]
# Label to prevent Komodo from stopping with StopAllContainers
LABEL komodo.skip="true"
# Label for Ghcr
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -14,16 +14,17 @@ COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link komodo_client && yarn && yarn build
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /config/starship.toml
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
# Copy
COPY ./config/core.config.toml /config/config.toml
COPY ./config/core.config.toml /config/.default.config.toml
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=binaries /core /usr/local/bin/core
COPY --from=binaries /km /usr/local/bin/km
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
# Set $DENO_DIR and preload external Deno deps
@@ -32,12 +33,22 @@ RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Hint at the port
EXPOSE 9120
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
ENV KOMODO_CLI_CONFIG_PATHS="/config"
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
CMD [ "core" ]
ENTRYPOINT [ "entrypoint.sh" ]
CMD [ "core" ]
# Label to prevent Komodo from stopping with StopAllContainers
LABEL komodo.skip="true"
# Label for Ghcr
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -4,7 +4,6 @@ use serde::Serialize;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
alert: &Alert,
@@ -17,6 +16,28 @@ pub async fn send_alert(
"{level} | If you see this message, then Alerter **{name}** is **working**\n{link}"
)
}
AlertData::ServerVersionMismatch {
id,
name,
region,
server_version,
core_version,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | **{name}**{region} | Periphery version now matches Core version ✅\n{link}"
)
}
_ => {
format!(
"{level} | **{name}**{region} | Version mismatch detected ⚠️\nPeriphery: **{server_version}** | Core: **{core_version}**\n{link}"
)
}
}
}
AlertData::ServerUnreachable {
id,
name,
@@ -28,7 +49,7 @@ pub async fn send_alert(
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | **{name}**{region} is now **reachable**\n{link}"
"{level} | **{name}**{region} is now **connected**\n{link}"
)
}
SeverityLevel::Critical => {
@@ -207,36 +228,45 @@ pub async fn send_alert(
"{level} | **{name}** ({resource_type}) | Scheduled run started 🕝\n{link}"
)
}
AlertData::Custom { message, details } => {
format!(
"{level} | {message}{}",
if details.is_empty() {
format_args!("")
} else {
format_args!("\n{details}")
}
)
}
AlertData::None {} => Default::default(),
};
if !content.is_empty() {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut url_interpolated = url.to_string();
// interpolate variables and secrets into the url
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut url_interpolated,
&mut global_replacers,
&mut secret_replacers,
)?;
send_message(&url_interpolated, &content)
.await
.map_err(|e| {
let replacers =
secret_replacers.into_iter().collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {}",
sanitized_error
))
})?;
if content.is_empty() {
return Ok(());
}
Ok(())
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
send_message(&url_interpolated, &content)
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})
}
async fn send_message(

View File

@@ -1,7 +1,8 @@
use ::slack::types::Block;
use anyhow::{Context, anyhow};
use database::mungos::{find::find_collect, mongodb::bson::doc};
use derive_variants::ExtractVariant;
use futures::future::join_all;
use futures_util::future::join_all;
use interpolate::Interpolator;
use komodo_client::entities::{
ResourceTargetVariant,
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
@@ -10,14 +11,10 @@ use komodo_client::entities::{
komodo_timestamp,
stack::StackState,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use std::collections::HashSet;
use tracing::Instrument;
use crate::helpers::query::get_variables_and_secrets;
use crate::helpers::{
interpolate::interpolate_variables_secrets_into_string,
maintenance::is_in_maintenance,
maintenance::is_in_maintenance, query::VariablesAndSecrets,
};
use crate::{config::core_config, state::db_client};
@@ -26,40 +23,33 @@ mod ntfy;
mod pushover;
mod slack;
#[instrument(level = "debug")]
pub async fn send_alerts(alerts: &[Alert]) {
if alerts.is_empty() {
return;
}
let span =
info_span!("send_alerts", alerts = format!("{alerts:?}"));
async {
let Ok(alerters) = find_collect(
&db_client().alerters,
doc! { "config.enabled": true },
None,
)
.await
.inspect_err(|e| {
error!(
let Ok(alerters) = find_collect(
&db_client().alerters,
doc! { "config.enabled": true },
None,
)
.await
.inspect_err(|e| {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
)
}) else {
return;
};
}) else {
return;
};
let handles =
alerts.iter().map(|alert| send_alert(&alerters, alert));
let handles = alerts
.iter()
.map(|alert| send_alert_to_alerters(&alerters, alert));
join_all(handles).await;
}
.instrument(span)
.await
join_all(handles).await;
}
#[instrument(level = "debug")]
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
async fn send_alert_to_alerters(alerters: &[Alerter], alert: &Alert) {
if alerters.is_empty() {
return;
}
@@ -162,23 +152,18 @@ pub async fn send_alert_to_alerter(
}
}
#[instrument(level = "debug")]
async fn send_custom_alert(
url: &str,
alert: &Alert,
) -> anyhow::Result<()> {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
// interpolate variables and secrets into the url
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut url_interpolated,
&mut global_replacers,
&mut secret_replacers,
)?;
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
let res = reqwest::Client::new()
.post(url_interpolated)
@@ -186,13 +171,14 @@ async fn send_custom_alert(
.send()
.await
.map_err(|e| {
let replacers =
secret_replacers.into_iter().collect::<Vec<_>>();
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with request: {}",
sanitized_error
"Error with request: {sanitized_error}"
))
})
.context("failed at post request to alerter")?;
@@ -248,35 +234,244 @@ fn resource_link(
resource_type: ResourceTargetVariant,
id: &str,
) -> String {
let path = match resource_type {
ResourceTargetVariant::System => unreachable!(),
ResourceTargetVariant::Build => format!("/builds/{id}"),
ResourceTargetVariant::Builder => {
format!("/builders/{id}")
}
ResourceTargetVariant::Deployment => {
format!("/deployments/{id}")
}
ResourceTargetVariant::Stack => {
format!("/stacks/{id}")
}
ResourceTargetVariant::Server => {
format!("/servers/{id}")
}
ResourceTargetVariant::Repo => format!("/repos/{id}"),
ResourceTargetVariant::Alerter => {
format!("/alerters/{id}")
}
ResourceTargetVariant::Procedure => {
format!("/procedures/{id}")
}
ResourceTargetVariant::Action => {
format!("/actions/{id}")
}
ResourceTargetVariant::ResourceSync => {
format!("/resource-syncs/{id}")
}
};
format!("{}{path}", core_config().host)
komodo_client::entities::resource_link(
&core_config().host,
resource_type,
id,
)
}
/// Standard plain-text message content for an alert,
/// used by the Ntfy and Pushover alerter backends.
///
/// Formats one message per `AlertData` variant, embedding the severity
/// level (via `fmt_level`) and a deep link to the affected resource
/// (via `resource_link`). Returns an empty string for `AlertData::None`,
/// which callers treat as "nothing to send".
fn standard_alert_content(alert: &Alert) -> String {
let level = fmt_level(alert.level);
match &alert.data {
// Test alert fired from the UI to verify the alerter works end-to-end.
AlertData::Test { id, name } => {
let link = resource_link(ResourceTargetVariant::Alerter, id);
format!(
"{level} | If you see this message, then Alerter {name} is working\n{link}",
)
}
AlertData::ServerVersionMismatch {
id,
name,
region,
server_version,
core_version,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
// Ok level means the mismatch has resolved; anything else reports it.
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | {name}{region} | Periphery version now matches Core version ✅\n{link}"
)
}
_ => {
format!(
"{level} | {name}{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}\n{link}"
)
}
}
}
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
match alert.level {
SeverityLevel::Ok => {
format!("{level} | {name}{region} is now connected\n{link}")
}
SeverityLevel::Critical => {
// Include the connection error detail when one was recorded.
let err = err
.as_ref()
.map(|e| format!("\nerror: {e:#?}"))
.unwrap_or_default();
format!(
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
)
}
// ServerUnreachable alerts are only ever Ok or Critical.
_ => unreachable!(),
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
format!(
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
)
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
// Derive percentage from the raw GiB figures carried by the alert.
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
)
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
)
}
AlertData::ContainerStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to_state = fmt_docker_container_state(to);
format!(
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
)
}
AlertData::DeploymentAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
)
}
AlertData::StackStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to_state = fmt_stack_state(to);
format!(
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
)
}
AlertData::StackImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!(
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
)
}
AlertData::StackAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
images,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
// Pluralize the label when more than one image was updated.
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images_str = images.join(", ");
format!(
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
)
}
// No resource link here — the EC2 instance is not a Komodo resource.
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!(
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
)
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
resource_link(ResourceTargetVariant::ResourceSync, id);
format!(
"{level} | Pending resource sync updates on {name}\n{link}",
)
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!(
"{level} | Build {name} failed\nversion: v{version}\n{link}",
)
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);
format!("{level} | Repo build for {name} failed\n{link}",)
}
AlertData::ProcedureFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Procedure, id);
format!("{level} | Procedure {name} failed\n{link}")
}
AlertData::ActionFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Action, id);
format!("{level} | Action {name} failed\n{link}")
}
// Scheduled runs can target multiple resource types, so the variant
// carries the type and the link is built from it dynamically.
AlertData::ScheduleRun {
resource_type,
id,
name,
} => {
let link = resource_link(*resource_type, id);
format!(
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
)
}
AlertData::Custom { message, details } => {
// Only append the details line when details were provided.
format!(
"{level} | {message}{}",
if details.is_empty() {
format_args!("")
} else {
format_args!("\n{details}")
}
)
}
// Empty content signals the caller to skip sending entirely.
AlertData::None {} => Default::default(),
}
}

View File

@@ -2,232 +2,38 @@ use std::sync::OnceLock;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
email: Option<&str>,
alert: &Alert,
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let content = match &alert.data {
AlertData::Test { id, name } => {
let link = resource_link(ResourceTargetVariant::Alerter, id);
format!(
"{level} | If you see this message, then Alerter {} is working\n{link}",
name,
)
}
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | {}{} is now reachable\n{link}",
name, region
)
}
SeverityLevel::Critical => {
let err = err
.as_ref()
.map(|e| format!("\nerror: {:#?}", e))
.unwrap_or_default();
format!(
"{level} | {}{} is unreachable ❌\n{link}{err}",
name, region
)
}
_ => unreachable!(),
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
format!(
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
name, region,
)
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region,
)
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region, path,
)
}
AlertData::ContainerStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to_state = fmt_docker_container_state(to);
format!(
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::DeploymentAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::StackStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to_state = fmt_stack_state(to);
format!(
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::StackImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!(
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
name, server_name, service, image,
)
}
AlertData::StackAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
images,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images_str = images.join(", ");
format!(
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
name, server_name, images_label, images_str,
)
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!(
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
instance_id, message,
)
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
resource_link(ResourceTargetVariant::ResourceSync, id);
format!(
"{level} | Pending resource sync updates on {}\n{link}",
name,
)
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!(
"{level} | Build {} failed\nversion: v{}\n{link}",
name, version,
)
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);
format!("{level} | Repo build for {} failed\n{link}", name,)
}
AlertData::ProcedureFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Procedure, id);
format!("{level} | Procedure {name} failed\n{link}")
}
AlertData::ActionFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Action, id);
format!("{level} | Action {name} failed\n{link}")
}
AlertData::ScheduleRun {
resource_type,
id,
name,
} => {
let link = resource_link(*resource_type, id);
format!(
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
)
}
AlertData::None {} => Default::default(),
};
if !content.is_empty() {
send_message(url, email, content).await?;
let content = standard_alert_content(alert);
if content.is_empty() {
return Ok(());
}
Ok(())
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
send_message(&url_interpolated, email, content)
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})
}
async fn send_message(
@@ -237,7 +43,7 @@ async fn send_message(
) -> anyhow::Result<()> {
let mut request = http_client()
.post(url)
.header("Title", "ntfy Alert")
.header("Title", "Komodo Alert")
.body(content);
if let Some(email) = email {
@@ -254,14 +60,11 @@ async fn send_message(
} else {
let text = response.text().await.with_context(|| {
format!(
"Failed to send message to ntfy | {} | failed to get response text",
status
"Failed to send message to ntfy | {status} | failed to get response text"
)
})?;
Err(anyhow!(
"Failed to send message to ntfy | {} | {}",
status,
text
"Failed to send message to ntfy | {status} | {text}",
))
}
}

View File

@@ -2,230 +2,35 @@ use std::sync::OnceLock;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
alert: &Alert,
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let content = match &alert.data {
AlertData::Test { id, name } => {
let link = resource_link(ResourceTargetVariant::Alerter, id);
format!(
"{level} | If you see this message, then Alerter {} is working\n{link}",
name,
)
}
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | {}{} is now reachable\n{link}",
name, region
)
}
SeverityLevel::Critical => {
let err = err
.as_ref()
.map(|e| format!("\nerror: {:#?}", e))
.unwrap_or_default();
format!(
"{level} | {}{} is unreachable ❌\n{link}{err}",
name, region
)
}
_ => unreachable!(),
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
format!(
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
name, region,
)
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region,
)
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region, path,
)
}
AlertData::ContainerStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to_state = fmt_docker_container_state(to);
format!(
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::DeploymentAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::StackStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to_state = fmt_stack_state(to);
format!(
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::StackImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!(
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
name, server_name, service, image,
)
}
AlertData::StackAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
images,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images_str = images.join(", ");
format!(
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
name, server_name, images_label, images_str,
)
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!(
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
instance_id, message,
)
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
resource_link(ResourceTargetVariant::ResourceSync, id);
format!(
"{level} | Pending resource sync updates on {}\n{link}",
name,
)
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!(
"{level} | Build {name} failed\nversion: v{version}\n{link}",
)
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);
format!("{level} | Repo build for {} failed\n{link}", name,)
}
AlertData::ProcedureFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Procedure, id);
format!("{level} | Procedure {name} failed\n{link}")
}
AlertData::ActionFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Action, id);
format!("{level} | Action {name} failed\n{link}")
}
AlertData::ScheduleRun {
resource_type,
id,
name,
} => {
let link = resource_link(*resource_type, id);
format!(
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
)
}
AlertData::None {} => Default::default(),
};
if !content.is_empty() {
send_message(url, content).await?;
let content = standard_alert_content(alert);
if content.is_empty() {
return Ok(());
}
Ok(())
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
send_message(&url_interpolated, content).await.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})
}
async fn send_message(
@@ -252,8 +57,7 @@ async fn send_message(
} else {
let text = response.text().await.with_context(|| {
format!(
"Failed to send message to pushover | {} | failed to get response text",
status
"Failed to send message to pushover | {status} | failed to get response text"
)
})?;
Err(anyhow!(

View File

@@ -1,6 +1,7 @@
use ::slack::types::OwnedBlock as Block;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
alert: &Alert,
@@ -23,6 +24,35 @@ pub async fn send_alert(
];
(text, blocks.into())
}
AlertData::ServerVersionMismatch {
id,
name,
region,
server_version,
core_version,
} => {
let region = fmt_region(region);
let text = match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | *{name}*{region} | Periphery version now matches Core version ✅"
)
}
_ => {
format!(
"{level} | *{name}*{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}"
)
}
};
let blocks = vec![
Block::header(text.clone()),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
AlertData::ServerUnreachable {
id,
name,
@@ -33,11 +63,11 @@ pub async fn send_alert(
match alert.level {
SeverityLevel::Ok => {
let text =
format!("{level} | *{name}*{region} is now *reachable*");
format!("{level} | *{name}*{region} is now *connected*");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} is now *reachable*"
"*{name}*{region} is now *connnected*"
)),
];
(text, blocks.into())
@@ -429,33 +459,40 @@ pub async fn send_alert(
];
(text, blocks.into())
}
AlertData::Custom { message, details } => {
let text = format!("{level} | {message}");
let blocks =
vec![Block::header(text.clone()), Block::section(details)];
(text, blocks.into())
}
AlertData::None {} => Default::default(),
};
if !text.is_empty() {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut url_interpolated = url.to_string();
if text.is_empty() {
return Ok(());
}
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
// interpolate variables and secrets into the url
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut url_interpolated,
&mut global_replacers,
&mut secret_replacers,
)?;
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
let slack = ::slack::Client::new(url_interpolated);
slack.send_message(text, blocks).await.map_err(|e| {
let replacers =
secret_replacers.into_iter().collect::<Vec<_>>();
interpolator.interpolate_string(&mut url_interpolated)?;
let slack = ::slack::Client::new(url_interpolated);
slack
.send_owned_message_single(&text, None, blocks.as_deref())
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {}",
sanitized_error
"Error with slack request: {sanitized_error}"
))
})?;
}
Ok(())
}

View File

@@ -3,11 +3,12 @@ use std::{sync::OnceLock, time::Instant};
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::auth::*, entities::user::User};
use reqwest::StatusCode;
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use serror::{AddStatusCode, Json};
use typeshare::typeshare;
use uuid::Uuid;
@@ -25,6 +26,7 @@ use crate::{
use super::Variant;
#[derive(Default)]
pub struct AuthArgs {
pub headers: HeaderMap,
}
@@ -41,7 +43,7 @@ pub struct AuthArgs {
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
pub enum AuthRequest {
GetLoginOptions(GetLoginOptions),
CreateLocalUser(CreateLocalUser),
SignUpLocalUser(SignUpLocalUser),
LoginLocalUser(LoginLocalUser),
ExchangeForJwt(ExchangeForJwt),
GetUser(GetUser),
@@ -62,7 +64,7 @@ pub fn router() -> Router {
}
if google_oauth_client().is_some() {
info!("🔑 Github Login Enabled");
info!("🔑 Google Login Enabled");
router = router.nest("/google", google::router())
}
@@ -86,7 +88,6 @@ async fn variant_handler(
handler(headers, Json(req)).await
}
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
async fn handler(
headers: HeaderMap,
Json(request): Json<AuthRequest>,
@@ -123,7 +124,6 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
}
impl Resolve<AuthArgs> for GetLoginOptions {
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
async fn resolve(
self,
_: &AuthArgs,
@@ -133,23 +133,27 @@ impl Resolve<AuthArgs> for GetLoginOptions {
}
impl Resolve<AuthArgs> for ExchangeForJwt {
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
async fn resolve(
self,
_: &AuthArgs,
) -> serror::Result<ExchangeForJwtResponse> {
let jwt = jwt_client().redeem_exchange_token(&self.token).await?;
Ok(ExchangeForJwtResponse { jwt })
jwt_client()
.redeem_exchange_token(&self.token)
.await
.map_err(Into::into)
}
}
impl Resolve<AuthArgs> for GetUser {
#[instrument(name = "GetUser", level = "debug", skip(self))]
async fn resolve(
self,
AuthArgs { headers }: &AuthArgs,
) -> serror::Result<User> {
let user_id = get_user_id_from_headers(headers).await?;
Ok(get_user(&user_id).await?)
let user_id = get_user_id_from_headers(headers)
.await
.status_code(StatusCode::UNAUTHORIZED)?;
get_user(&user_id)
.await
.status_code(StatusCode::UNAUTHORIZED)
}
}

View File

@@ -1,28 +1,34 @@
use std::{
collections::HashSet,
path::{Path, PathBuf},
str::FromStr,
sync::OnceLock,
};
use anyhow::Context;
use command::run_komodo_command;
use command::run_komodo_standard_command;
use config::merge_objects;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_document,
};
use interpolate::Interpolator;
use komodo_client::{
api::{
execute::{BatchExecutionResponse, BatchRunAction, RunAction},
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
},
entities::{
FileFormat, JsonObject,
action::Action,
alert::{Alert, AlertData, SeverityLevel},
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
random_string,
update::Update,
user::action_user,
},
parsers::parse_key_value_list,
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use tokio::fs;
@@ -31,12 +37,7 @@ use crate::{
api::{execute::ExecuteRequest, user::UserArgs},
config::core_config,
helpers::{
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_string,
},
query::get_variables_and_secrets,
random_string,
query::{VariablesAndSecrets, get_variables_and_secrets},
update::update_update,
},
permission::get_check_permissions,
@@ -49,15 +50,26 @@ use super::ExecuteArgs;
impl super::BatchExecute for BatchRunAction {
type Resource = Action;
fn single_request(action: String) -> ExecuteRequest {
ExecuteRequest::RunAction(RunAction { action })
ExecuteRequest::RunAction(RunAction {
action,
args: Default::default(),
})
}
}
impl Resolve<ExecuteArgs> for BatchRunAction {
#[instrument(name = "BatchRunAction", skip(self, user), fields(user_id = user.id))]
#[instrument(
"BatchRunAction",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchRunAction>(&self.pattern, user)
@@ -67,10 +79,19 @@ impl Resolve<ExecuteArgs> for BatchRunAction {
}
impl Resolve<ExecuteArgs> for RunAction {
#[instrument(name = "RunAction", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunAction",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
action = self.action,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut action = get_check_permissions::<Action>(
&self.action,
@@ -87,13 +108,33 @@ impl Resolve<ExecuteArgs> for RunAction {
// This will set action state back to default when dropped.
// Will also check to ensure action not already busy before updating.
let _action_guard =
action_state.update(|state| state.running = true)?;
let _action_guard = action_state.update_custom(
|state| state.running += 1,
|state| state.running -= 1,
false,
)?;
let mut update = update.clone();
update_update(update.clone()).await?;
let default_args = parse_action_arguments(
&action.config.arguments,
action.config.arguments_format,
)
.context("Failed to parse default Action arguments")?;
let args = merge_objects(
default_args,
self.args.unwrap_or_default(),
true,
true,
)
.context("Failed to merge request args with default args")?;
let args = serde_json::to_string(&args)
.context("Failed to serialize action run arguments")?;
let CreateApiKeyResponse { key, secret } = CreateApiKey {
name: update.id.clone(),
expires: 0,
@@ -106,7 +147,7 @@ impl Resolve<ExecuteArgs> for RunAction {
let contents = &mut action.config.file_contents;
// Wrap the file contents in the execution context.
*contents = full_contents(contents, &key, &secret);
*contents = full_contents(contents, &args, &key, &secret);
let replacers =
interpolate(contents, &mut update, key.clone(), secret.clone())
@@ -117,15 +158,11 @@ impl Resolve<ExecuteArgs> for RunAction {
let file = format!("{}.ts", random_string(10));
let path = core_config().action_directory.join(&file);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
.await
.with_context(|| format!("Failed to initialize Action file parent directory {parent:?}"))?;
}
fs::write(&path, contents).await.with_context(|| {
format!("Failed to write action file to {path:?}")
})?;
secret_file::write_async(&path, contents)
.await
.with_context(|| {
format!("Failed to write action file to {path:?}")
})?;
let CoreConfig { ssl_enabled, .. } = core_config();
@@ -141,7 +178,7 @@ impl Resolve<ExecuteArgs> for RunAction {
""
};
let mut res = run_komodo_command(
let mut res = run_komodo_standard_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
@@ -182,7 +219,7 @@ impl Resolve<ExecuteArgs> for RunAction {
let _ = update_one_by_id(
&db_client().updates,
&update.id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await;
@@ -192,7 +229,6 @@ impl Resolve<ExecuteArgs> for RunAction {
update_update(update.clone()).await?;
if !update.success && action.config.failure_alert {
warn!("action unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
@@ -215,37 +251,38 @@ impl Resolve<ExecuteArgs> for RunAction {
}
}
#[instrument("Interpolate", skip(contents, update, secret))]
async fn interpolate(
contents: &mut String,
update: &mut Update,
key: String,
secret: String,
) -> serror::Result<HashSet<(String, String)>> {
let mut vars_and_secrets = get_variables_and_secrets().await?;
let VariablesAndSecrets {
variables,
mut secrets,
} = get_variables_and_secrets().await?;
vars_and_secrets
.secrets
.insert(String::from("ACTION_API_KEY"), key);
vars_and_secrets
.secrets
.insert(String::from("ACTION_API_SECRET"), secret);
secrets.insert(String::from("ACTION_API_KEY"), key);
secrets.insert(String::from("ACTION_API_SECRET"), secret);
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolate_variables_secrets_into_string(
&vars_and_secrets,
contents,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolator
.interpolate_string(contents)?
.push_logs(&mut update.logs);
add_interp_update_log(update, &global_replacers, &secret_replacers);
Ok(secret_replacers)
Ok(interpolator.secret_replacers)
}
fn full_contents(contents: &str, key: &str, secret: &str) -> String {
fn full_contents(
contents: &str,
// Pre-serialized to JSON string.
args: &str,
key: &str,
secret: &str,
) -> String {
let CoreConfig {
port, ssl_enabled, ..
} = core_config();
@@ -270,6 +307,8 @@ const TOML = {{
parseCargoToml: __TOML__.parse,
}}
const ARGS = {args};
const komodo = KomodoClient('{base_url}', {{
type: 'api-key',
params: {{ key: '{key}', secret: '{secret}' }}
@@ -298,6 +337,7 @@ main()
/// Cleans up file at given path.
/// ALSO if $DENO_DIR is set,
/// will clean up the generated file matching "file"
#[instrument("CleanupRun")]
async fn cleanup_run(file: String, path: &Path) {
if let Err(e) = fs::remove_file(path).await {
warn!(
@@ -317,7 +357,7 @@ fn deno_dir() -> Option<&'static Path> {
DENO_DIR
.get_or_init(|| {
let deno_dir = std::env::var("DENO_DIR").ok()?;
PathBuf::from_str(&deno_dir).ok()
Some(PathBuf::from(&deno_dir))
})
.as_deref()
}
@@ -375,3 +415,25 @@ fn delete_file(
}
})
}
fn parse_action_arguments(
args: &str,
format: FileFormat,
) -> anyhow::Result<JsonObject> {
match format {
FileFormat::KeyValue => {
let args = parse_key_value_list(args)
.context("Failed to parse args as key value list")?
.into_iter()
.map(|(k, v)| (k, serde_json::Value::String(v)))
.collect();
Ok(args)
}
FileFormat::Toml => toml::from_str(args)
.context("Failed to parse Toml to Action args"),
FileFormat::Yaml => serde_yaml_ng::from_str(args)
.context("Failed to parse Yaml to action args"),
FileFormat::Json => serde_json::from_str(args)
.context("Failed to parse Json to action args"),
}
}

View File

@@ -1,27 +1,42 @@
use anyhow::{Context, anyhow};
use formatting::format_serror;
use futures_util::{
StreamExt, TryStreamExt, stream::FuturesUnordered,
};
use komodo_client::{
api::execute::TestAlerter,
api::execute::{SendAlert, TestAlerter},
entities::{
alert::{Alert, AlertData, SeverityLevel},
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
alerter::Alerter,
komodo_timestamp,
permission::PermissionLevel,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
alert::send_alert_to_alerter, helpers::update::update_update,
permission::get_check_permissions,
permission::get_check_permissions, resource::list_full_for_user,
};
use super::ExecuteArgs;
impl Resolve<ExecuteArgs> for TestAlerter {
#[instrument(name = "TestAlerter", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"TestAlerter",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
alerter = self.alerter,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let alerter = get_check_permissions::<Alerter>(
&self.alerter,
@@ -71,3 +86,106 @@ impl Resolve<ExecuteArgs> for TestAlerter {
Ok(update)
}
}
//
impl Resolve<ExecuteArgs> for SendAlert {
#[instrument(
"SendAlert",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
request = format!("{self:?}"),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let alerters = list_full_for_user::<Alerter>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?
.into_iter()
.filter(|a| {
a.config.enabled
&& (self.alerters.is_empty()
|| self.alerters.contains(&a.name)
|| self.alerters.contains(&a.id))
&& (a.config.alert_types.is_empty()
|| a.config.alert_types.contains(&AlertDataVariant::Custom))
})
.collect::<Vec<_>>();
let alerters = if user.admin {
alerters
} else {
// Only keep alerters with execute permissions
alerters
.into_iter()
.map(|alerter| async move {
get_check_permissions::<Alerter>(
&alerter.id,
user,
PermissionLevel::Execute.into(),
)
.await
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.flatten()
.collect()
};
if alerters.is_empty() {
return Err(anyhow!(
"Could not find any valid alerters to send to, this required Execute permissions on the Alerter"
).status_code(StatusCode::BAD_REQUEST));
}
let mut update = update.clone();
let ts = komodo_timestamp();
let alert = Alert {
id: Default::default(),
ts,
resolved: true,
level: self.level,
target: update.target.clone(),
data: AlertData::Custom {
message: self.message,
details: self.details,
},
resolved_ts: Some(ts),
};
update.push_simple_log(
"Send alert",
serde_json::to_string_pretty(&alert)
.context("Failed to serialize alert to JSON")?,
);
if let Err(e) = alerters
.iter()
.map(|alerter| send_alert_to_alerter(alerter, &alert))
.collect::<FuturesUnordered<_>>()
.try_collect::<Vec<_>>()
.await
{
update.push_error_log("Send Error", format_serror(&e.into()));
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -1,8 +1,21 @@
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use std::{
collections::{HashMap, HashSet},
future::IntoFuture,
time::Duration,
};
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::update_one_by_id,
find::find_collect,
mongodb::{
bson::{doc, to_bson, to_document},
options::FindOneOptions,
},
};
use formatting::format_serror;
use futures::future::join_all;
use futures_util::future::join_all;
use interpolate::Interpolator;
use komodo_client::{
api::execute::{
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
@@ -11,41 +24,31 @@ use komodo_client::{
entities::{
alert::{Alert, AlertData, SeverityLevel},
all_logs_success,
build::{Build, BuildConfig, ImageRegistryConfig},
build::{Build, BuildConfig},
builder::{Builder, BuilderConfig},
deployment::DeploymentState,
komodo_timestamp,
komodo_timestamp, optional_string,
permission::PermissionLevel,
repo::Repo,
update::{Log, Update},
user::auto_redeploy_user,
},
};
use mungos::{
by_id::update_one_by_id,
find::find_collect,
mongodb::{
bson::{doc, to_bson, to_document},
options::FindOneOptions,
},
};
use periphery_client::api;
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
use uuid::Uuid;
use crate::{
alert::send_alerts,
helpers::{
build_git_token,
builder::{cleanup_builder_instance, get_builder_periphery},
builder::{cleanup_builder_instance, connect_builder_periphery},
channel::build_cancel_channel,
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_extra_args,
interpolate_variables_secrets_into_string,
interpolate_variables_secrets_into_system_command,
query::{
VariablesAndSecrets, get_deployment_state,
get_variables_and_secrets,
},
query::{get_deployment_state, get_variables_and_secrets},
registry_token,
update::{init_execution_update, update_update},
},
@@ -64,10 +67,18 @@ impl super::BatchExecute for BatchRunBuild {
}
impl Resolve<ExecuteArgs> for BatchRunBuild {
#[instrument(name = "BatchRunBuild", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchRunBuild",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchRunBuild>(&self.pattern, user)
@@ -77,10 +88,19 @@ impl Resolve<ExecuteArgs> for BatchRunBuild {
}
impl Resolve<ExecuteArgs> for RunBuild {
#[instrument(name = "RunBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunBuild",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
build = self.build,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut build = get_check_permissions::<Build>(
&self.build,
@@ -99,9 +119,13 @@ impl Resolve<ExecuteArgs> for RunBuild {
None
};
let mut vars_and_secrets = get_variables_and_secrets().await?;
let VariablesAndSecrets {
mut variables,
secrets,
} = get_variables_and_secrets().await?;
// Add the $VERSION to variables. Use with [[$VERSION]]
vars_and_secrets.variables.insert(
variables.insert(
String::from("$VERSION"),
build.config.version.to_string(),
);
@@ -131,8 +155,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
let git_token =
build_git_token(&mut build, repo.as_mut()).await?;
let registry_token =
validate_account_extract_registry_token(&build).await?;
let registry_tokens =
validate_account_extract_registry_tokens(&build).await?;
let cancel = CancellationToken::new();
let cancel_clone = cancel.clone();
@@ -180,7 +204,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
});
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) = match get_builder_periphery(
let (periphery, cleanup_data) = match connect_builder_periphery(
build.name.clone(),
Some(build.config.version),
builder,
@@ -207,51 +231,18 @@ impl Resolve<ExecuteArgs> for RunBuild {
// INTERPOLATE VARIABLES
let secret_replacers = if !build.config.skip_secret_interp {
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut build.config.pre_build,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolator.interpolate_build(&mut build)?;
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.build_args,
&mut global_replacers,
&mut secret_replacers,
)?;
if let Some(repo) = repo.as_mut() {
interpolator.interpolate_repo(repo)?;
}
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.secret_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolator.push_logs(&mut update.logs);
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.dockerfile,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut build.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
interpolator.secret_replacers
} else {
Default::default()
};
@@ -268,15 +259,17 @@ impl Resolve<ExecuteArgs> for RunBuild {
git_token,
environment: Default::default(),
env_file_path: Default::default(),
on_clone: None,
on_pull: None,
skip_secret_interp: Default::default(),
replacers: Default::default(),
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(cleanup_data, &mut update)
debug!("Build cancelled during clone, cleaning up builder");
update.push_error_log("Build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
info!("Builder cleaned up");
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -284,10 +277,10 @@ impl Resolve<ExecuteArgs> for RunBuild {
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
update.logs.extend(res.logs);
update.logs.extend(res.res.logs);
update.commit_hash =
res.commit_hash.unwrap_or_default().to_string();
res.commit_message.unwrap_or_default()
res.res.commit_hash.unwrap_or_default().to_string();
res.res.commit_message.unwrap_or_default()
}
Err(e) => {
warn!("Failed build at clone repo | {e:#}");
@@ -313,19 +306,17 @@ impl Resolve<ExecuteArgs> for RunBuild {
.request(api::build::Build {
build: build.clone(),
repo,
registry_token,
registry_tokens,
replacers: secret_replacers.into_iter().collect(),
// Push a commit hash tagged image
additional_tags: if update.commit_hash.is_empty() {
Default::default()
} else {
vec![update.commit_hash.clone()]
},
// To push a commit hash tagged image
commit_hash: optional_string(&update.commit_hash),
// Unused for now
additional_tags: Default::default(),
}) => res.context("failed at call to periphery to build"),
_ = cancel.cancelled() => {
info!("build cancelled during build, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(cleanup_data, &mut update)
info!("Build cancelled during build, cleaning up builder");
update.push_error_log("Build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update, build.id, build.name, true).await
},
@@ -339,7 +330,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
Err(e) => {
warn!("error in build | {e:#}");
update.push_error_log(
"build",
"Build Error",
format_serror(&e.context("failed to build").into()),
)
}
@@ -371,7 +362,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
// If building on temporary cloud server (AWS),
// this will terminate the server.
cleanup_builder_instance(cleanup_data, &mut update).await;
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
@@ -381,7 +373,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
let _ = update_one_by_id(
&db.updates,
&update.id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await;
@@ -396,7 +388,6 @@ impl Resolve<ExecuteArgs> for RunBuild {
handle_post_build_redeploy(&build.id).await;
});
} else {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
@@ -421,7 +412,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
}
}
#[instrument(skip(update))]
#[instrument("HandleEarlyReturn", skip(update))]
async fn handle_early_return(
mut update: Update,
build_id: String,
@@ -437,7 +428,7 @@ async fn handle_early_return(
let _ = update_one_by_id(
&db_client().updates,
&update.id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await;
@@ -445,7 +436,6 @@ async fn handle_early_return(
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
@@ -515,10 +505,19 @@ pub async fn validate_cancel_build(
}
impl Resolve<ExecuteArgs> for CancelBuild {
#[instrument(name = "CancelBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"CancelBuild",
skip(user, update),
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
build = self.build,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let build = get_check_permissions::<Build>(
&self.build,
@@ -575,7 +574,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
}
}
#[instrument]
#[instrument("PostBuildRedeploy")]
async fn handle_post_build_redeploy(build_id: &str) {
let Ok(redeploy_deployments) = find_collect(
&db_client().deployments,
@@ -611,7 +610,11 @@ async fn handle_post_build_redeploy(build_id: &str) {
stop_signal: None,
stop_time: None,
}
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
}
.await;
@@ -637,34 +640,49 @@ async fn handle_post_build_redeploy(build_id: &str) {
/// This will make sure that a build with non-none image registry has an account attached,
/// and will check the core config for a token matching requirements.
/// Otherwise it is left to periphery.
async fn validate_account_extract_registry_token(
#[instrument("ValidateRegistryTokens")]
async fn validate_account_extract_registry_tokens(
Build {
config:
BuildConfig {
image_registry:
ImageRegistryConfig {
domain, account, ..
},
..
},
config: BuildConfig { image_registry, .. },
..
}: &Build,
) -> serror::Result<Option<String>> {
if domain.is_empty() {
return Ok(None);
}
if account.is_empty() {
return Err(
anyhow!(
"Must attach account to use registry provider {domain}"
)
.into(),
// Maps (domain, account) -> token
) -> serror::Result<Vec<(String, String, String)>> {
let mut res = HashMap::with_capacity(image_registry.capacity());
for (domain, account) in image_registry
.iter()
.map(|r| (r.domain.as_str(), r.account.as_str()))
// This ensures uniqueness / prevents redundant logins
.collect::<HashSet<_>>()
{
if domain.is_empty() {
continue;
}
if account.is_empty() {
return Err(
anyhow!(
"Must attach account to use registry provider {domain}"
)
.into(),
);
}
let Some(registry_token) = registry_token(domain, account).await.with_context(
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
)? else {
continue;
};
res.insert(
(domain.to_string(), account.to_string()),
registry_token,
);
}
let registry_token = registry_token(domain, account).await.with_context(
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
)?;
Ok(registry_token)
Ok(
res
.into_iter()
.map(|((domain, account), token)| (domain, account, token))
.collect(),
)
}

View File

@@ -1,8 +1,9 @@
use std::{collections::HashSet, sync::OnceLock};
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use cache::TimeoutCache;
use formatting::format_serror;
use interpolate::Interpolator;
use komodo_client::{
api::execute::*,
entities::{
@@ -11,7 +12,7 @@ use komodo_client::{
deployment::{
Deployment, DeploymentImage, extract_registry_domain,
},
get_image_name, komodo_timestamp, optional_string,
komodo_timestamp, optional_string,
permission::PermissionLevel,
server::Server,
update::{Log, Update},
@@ -23,13 +24,8 @@ use resolver_api::Resolve;
use crate::{
helpers::{
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_extra_args,
interpolate_variables_secrets_into_string,
},
periphery_client,
query::get_variables_and_secrets,
query::{VariablesAndSecrets, get_variables_and_secrets},
registry_token,
update::update_update,
},
@@ -53,10 +49,18 @@ impl super::BatchExecute for BatchDeploy {
}
impl Resolve<ExecuteArgs> for BatchDeploy {
#[instrument(name = "BatchDeploy", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchDeploy",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDeploy>(&self.pattern, user)
@@ -65,6 +69,7 @@ impl Resolve<ExecuteArgs> for BatchDeploy {
}
}
#[instrument("SetupDeploy", skip_all)]
async fn setup_deployment_execution(
deployment: &str,
user: &User,
@@ -91,10 +96,21 @@ async fn setup_deployment_execution(
}
impl Resolve<ExecuteArgs> for Deploy {
#[instrument(name = "Deploy", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"Deploy",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
stop_signal = format!("{:?}", self.stop_signal),
stop_time = self.stop_time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (mut deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -119,8 +135,11 @@ impl Resolve<ExecuteArgs> for Deploy {
let (version, registry_token) = match &deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(build_id).await?;
let image_name = get_image_name(&build)
.context("failed to create image name")?;
let image_names = build.get_image_names();
let image_name = image_names
.first()
.context("No image name could be created")
.context("Failed to create image name")?;
let version = if version.is_none() {
build.config.version
} else {
@@ -137,21 +156,27 @@ impl Resolve<ExecuteArgs> for Deploy {
deployment.config.image = DeploymentImage::Image {
image: format!("{image_name}:{version_str}"),
};
if build.config.image_registry.domain.is_empty() {
let first_registry = build
.config
.image_registry
.first()
.unwrap_or(ImageRegistryConfig::static_default());
if first_registry.domain.is_empty() {
(version, None)
} else {
let ImageRegistryConfig {
domain, account, ..
} = build.config.image_registry;
} = first_registry;
if deployment.config.image_registry_account.is_empty() {
deployment.config.image_registry_account = account
deployment.config.image_registry_account =
account.to_string();
}
let token = if !deployment
.config
.image_registry_account
.is_empty()
{
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
registry_token(domain, &deployment.config.image_registry_account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
)?
} else {
@@ -180,53 +205,17 @@ impl Resolve<ExecuteArgs> for Deploy {
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers = if !deployment.config.skip_secret_interp {
let vars_and_secrets = get_variables_and_secrets().await?;
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut deployment.config.environment,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolator
.interpolate_deployment(&mut deployment)?
.push_logs(&mut update.logs);
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut deployment.config.ports,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut deployment.config.volumes,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut deployment.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut deployment.config.command,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
interpolator.secret_replacers
} else {
Default::default()
};
@@ -234,7 +223,8 @@ impl Resolve<ExecuteArgs> for Deploy {
update.version = version;
update_update(update.clone()).await?;
match periphery_client(&server)?
match periphery_client(&server)
.await?
.request(api::container::Deploy {
deployment,
stop_signal: self.stop_signal,
@@ -253,7 +243,7 @@ impl Resolve<ExecuteArgs> for Deploy {
}
};
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -273,6 +263,14 @@ fn pull_cache() -> &'static PullCache {
PULL_CACHE.get_or_init(Default::default)
}
#[instrument(
"PullDeploymentInner",
skip_all,
fields(
deployment = deployment.id,
server = server.id
)
)]
pub async fn pull_deployment_inner(
deployment: Deployment,
server: &Server,
@@ -280,8 +278,11 @@ pub async fn pull_deployment_inner(
let (image, account, token) = match deployment.config.image {
DeploymentImage::Build { build_id, version } => {
let build = resource::get::<Build>(&build_id).await?;
let image_name = get_image_name(&build)
.context("failed to create image name")?;
let image_names = build.get_image_names();
let image_name = image_names
.first()
.context("No image name could be created")
.context("Failed to create image name")?;
let version = if version.is_none() {
build.config.version.to_string()
} else {
@@ -295,26 +296,31 @@ pub async fn pull_deployment_inner(
};
// replace image with corresponding build image.
let image = format!("{image_name}:{version}");
if build.config.image_registry.domain.is_empty() {
let first_registry = build
.config
.image_registry
.first()
.unwrap_or(ImageRegistryConfig::static_default());
if first_registry.domain.is_empty() {
(image, None, None)
} else {
let ImageRegistryConfig {
domain, account, ..
} = build.config.image_registry;
} = first_registry;
let account =
if deployment.config.image_registry_account.is_empty() {
account
} else {
deployment.config.image_registry_account
&deployment.config.image_registry_account
};
let token = if !account.is_empty() {
registry_token(&domain, &account).await.with_context(
registry_token(domain, account).await.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {account}"),
)?
} else {
None
};
(image, optional_string(&account), token)
(image, optional_string(account), token)
}
}
DeploymentImage::Image { image } => {
@@ -354,8 +360,9 @@ pub async fn pull_deployment_inner(
}
let res = async {
let log = match periphery_client(server)?
.request(api::image::PullImage {
let log = match periphery_client(server)
.await?
.request(api::docker::PullImage {
name: image,
account,
token,
@@ -366,7 +373,7 @@ pub async fn pull_deployment_inner(
Err(e) => Log::error("Pull image", format_serror(&e.into())),
};
update_cache_for_server(server).await;
update_cache_for_server(server, true).await;
anyhow::Ok(log)
}
.await;
@@ -379,10 +386,19 @@ pub async fn pull_deployment_inner(
}
impl Resolve<ExecuteArgs> for PullDeployment {
#[instrument(name = "PullDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PullDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -413,10 +429,19 @@ impl Resolve<ExecuteArgs> for PullDeployment {
}
impl Resolve<ExecuteArgs> for StartDeployment {
#[instrument(name = "StartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StartDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -437,7 +462,8 @@ impl Resolve<ExecuteArgs> for StartDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::StartContainer {
name: deployment.name,
})
@@ -451,7 +477,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -460,10 +486,19 @@ impl Resolve<ExecuteArgs> for StartDeployment {
}
impl Resolve<ExecuteArgs> for RestartDeployment {
#[instrument(name = "RestartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RestartDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -484,7 +519,8 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::RestartContainer {
name: deployment.name,
})
@@ -500,7 +536,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -509,10 +545,19 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
}
impl Resolve<ExecuteArgs> for PauseDeployment {
#[instrument(name = "PauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PauseDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -533,7 +578,8 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::PauseContainer {
name: deployment.name,
})
@@ -547,7 +593,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -556,10 +602,19 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
}
impl Resolve<ExecuteArgs> for UnpauseDeployment {
#[instrument(name = "UnpauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"UnpauseDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -580,7 +635,8 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::UnpauseContainer {
name: deployment.name,
})
@@ -596,7 +652,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -605,10 +661,21 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
}
impl Resolve<ExecuteArgs> for StopDeployment {
#[instrument(name = "StopDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StopDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -629,7 +696,8 @@ impl Resolve<ExecuteArgs> for StopDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::StopContainer {
name: deployment.name,
signal: self
@@ -651,7 +719,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -671,10 +739,18 @@ impl super::BatchExecute for BatchDestroyDeployment {
}
impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
#[instrument(name = "BatchDestroyDeployment", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchDestroyDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDestroyDeployment>(
@@ -687,10 +763,21 @@ impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
}
impl Resolve<ExecuteArgs> for DestroyDeployment {
#[instrument(name = "DestroyDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DestroyDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -711,7 +798,8 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::RemoveContainer {
name: deployment.name,
signal: self
@@ -734,7 +822,7 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
update.logs.push(log);
update.finalize();
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update_update(update.clone()).await?;
Ok(update)

View File

@@ -0,0 +1,588 @@
use std::{fmt::Write as _, sync::OnceLock};
use anyhow::{Context, anyhow};
use command::run_komodo_standard_command;
use database::{
bson::{Document, doc},
mungos::find::find_collect,
};
use formatting::{bold, format_serror};
use futures_util::{StreamExt, stream::FuturesOrdered};
use komodo_client::{
api::execute::{
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
RotateAllServerKeys, RotateCoreKeys,
},
entities::{
deployment::DeploymentState, server::ServerState,
stack::StackState,
},
};
use periphery_client::api;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use tokio::sync::Mutex;
use crate::{
api::execute::{
ExecuteArgs, pull_deployment_inner, pull_stack_inner,
},
config::{core_config, core_keys},
helpers::{periphery_client, update::update_update},
resource::rotate_server_keys,
state::{
db_client, deployment_status_cache, server_status_cache,
stack_status_cache,
},
};
/// Global lock ensuring only one repo cache clear runs at a time.
fn clear_repo_cache_lock() -> &'static Mutex<()> {
  static CLEAR_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
  CLEAR_LOCK.get_or_init(|| Mutex::new(()))
}
impl Resolve<ExecuteArgs> for ClearRepoCache {
  /// Deletes every directory inside the configured repo cache
  /// directory (`core_config().repo_directory`). Admin-only, and
  /// guarded by `clear_repo_cache_lock` so only one clear runs at a
  /// time. Per-entry failures are pushed onto the update logs and the
  /// clear continues rather than aborting.
  #[instrument(
    "ClearRepoCache",
    skip_all,
    fields(
      id = id.to_string(),
      operator = user.id,
      update_id = update.id
    )
  )]
  async fn resolve(
    self,
    ExecuteArgs { user, update, id }: &ExecuteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Admin gate: non-admin callers get a 403.
    if !user.admin {
      return Err(
        anyhow!("This method is admin only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // Non-blocking acquire: if a clear is already running,
    // fail fast instead of queueing behind it.
    let _lock = clear_repo_cache_lock()
      .try_lock()
      .context("Clear already in progress...")?;
    let mut update = update.clone();
    let mut contents =
      tokio::fs::read_dir(&core_config().repo_directory)
        .await
        .context("Failed to read repo cache directory")?;
    // Walk the cache directory entry by entry. A failed read of a
    // single entry is logged and skipped, not fatal.
    loop {
      let path = match contents
        .next_entry()
        .await
        .context("Failed to read contents at path")
      {
        Ok(Some(contents)) => contents.path(),
        // None => directory exhausted.
        Ok(None) => break,
        Err(e) => {
          update.push_error_log(
            "Read Directory",
            format_serror(&e.into()),
          );
          continue;
        }
      };
      // Only directories are removed; loose files in the cache
      // root are left in place.
      if path.is_dir() {
        match tokio::fs::remove_dir_all(&path)
          .await
          .context("Failed to clear contents at path")
        {
          Ok(_) => {}
          Err(e) => {
            update.push_error_log(
              "Clear Directory",
              format_serror(&e.into()),
            );
          }
        };
      }
    }
    // Finalize and persist the update (including any error logs).
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
//
/// Global lock ensuring only one database backup runs at a time.
fn backup_database_lock() -> &'static Mutex<()> {
  static BACKUP_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
  BACKUP_LOCK.get_or_init(|| Mutex::new(()))
}
impl Resolve<ExecuteArgs> for BackupCoreDatabase {
  /// Runs `km database backup --yes` via the standard command helper
  /// and records its output on the update. Admin-only, and guarded by
  /// `backup_database_lock` so only one backup runs at a time.
  #[instrument(
    "BackupCoreDatabase",
    skip_all,
    fields(
      id = id.to_string(),
      operator = user.id,
      update_id = update.id,
    )
  )]
  async fn resolve(
    self,
    ExecuteArgs { user, update, id }: &ExecuteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Admin gate: non-admin callers get a 403.
    if !user.admin {
      return Err(
        anyhow!("This method is admin only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // Fail fast if another backup currently holds the lock.
    let _guard = backup_database_lock()
      .try_lock()
      .context("Backup already in progress...")?;
    let mut update = update.clone();
    // Persist the in-progress update before kicking off the backup,
    // so the frontend sees it running.
    update_update(update.clone()).await?;
    // Shell out to the km CLI to perform the actual backup, and
    // append the resulting log to the update.
    let backup_log = run_komodo_standard_command(
      "Backup Core Database",
      None,
      "km database backup --yes",
    )
    .await;
    update.logs.push(backup_log);
    // Finalize and persist the completed update.
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
//
/// Global lock ensuring only one global auto update runs at a time.
fn global_update_lock() -> &'static Mutex<()> {
  static UPDATE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
  UPDATE_LOCK.get_or_init(|| Mutex::new(()))
}
impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
  /// Pulls every *running* Stack and Deployment whose config has
  /// `poll_for_updates` or `auto_update` set, on servers currently in
  /// the Ok state. Admin-only, and guarded by `global_update_lock` so
  /// only one global update runs at a time. Successful pulls are
  /// accumulated into the first update log ("Auto Pull"); failures are
  /// pushed as separate error logs.
  #[instrument(
    "GlobalAutoUpdate",
    skip_all,
    fields(
      id = id.to_string(),
      operator = user.id,
      update_id = update.id,
    )
  )]
  async fn resolve(
    self,
    ExecuteArgs { user, update, id }: &ExecuteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Admin gate: non-admin callers get a 403.
    if !user.admin {
      return Err(
        anyhow!("This method is admin only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // Non-blocking acquire: fail fast if an update is already running.
    let _lock = global_update_lock()
      .try_lock()
      .context("Global update already in progress...")?;
    let mut update = update.clone();
    // Persist the in-progress update before the (long) pull sequence.
    update_update(update.clone()).await?;
    // This is all done in sequence because there is no rush,
    // the pulls / deploys happen spaced out to ease the load on system.
    let servers = find_collect(&db_client().servers, None, None)
      .await
      .context("Failed to query for servers from database")?;
    // Filter to resources opted in via either polling or auto update.
    let query = doc! {
      "$or": [
        { "config.poll_for_updates": true },
        { "config.auto_update": true }
      ]
    };
    // Note: repos are fetched unfiltered — they are only used to
    // resolve a stack's `linked_repo` by id below.
    let (stacks, repos) = tokio::try_join!(
      find_collect(&db_client().stacks, query.clone(), None),
      find_collect(&db_client().repos, None, None)
    )
    .context("Failed to query for resources from database")?;
    let server_status_cache = server_status_cache();
    let stack_status_cache = stack_status_cache();
    // Will be edited later at update.logs[0]
    update.push_simple_log("Auto Pull", String::new());
    for stack in stacks {
      let Some(status) = stack_status_cache.get(&stack.id).await
      else {
        continue;
      };
      // Only pull running stacks.
      if !matches!(status.curr.state, StackState::Running) {
        continue;
      }
      if let Some(server) =
        servers.iter().find(|s| s.id == stack.config.server_id)
        // This check is probably redundant along with running check
        // but shouldn't hurt
        && server_status_cache
          .get(&server.id)
          .await
          .map(|s| matches!(s.state, ServerState::Ok))
          .unwrap_or_default()
      {
        let name = stack.name.clone();
        // Resolve the linked repo (if any); a dangling linked_repo id
        // is logged as an error and the stack is skipped.
        let repo = if stack.config.linked_repo.is_empty() {
          None
        } else {
          let Some(repo) =
            repos.iter().find(|r| r.id == stack.config.linked_repo)
          else {
            update.push_error_log(
              &format!("Pull Stack {name}"),
              format!(
                "Did not find any Repo matching {}",
                stack.config.linked_repo
              ),
            );
            continue;
          };
          Some(repo.clone())
        };
        if let Err(e) =
          pull_stack_inner(stack, Vec::new(), server, repo, None)
            .await
        {
          update.push_error_log(
            &format!("Pull Stack {name}"),
            format_serror(&e.into()),
          );
        } else {
          // Append the success line to the shared "Auto Pull" log,
          // newline-separated.
          if !update.logs[0].stdout.is_empty() {
            update.logs[0].stdout.push('\n');
          }
          update.logs[0]
            .stdout
            .push_str(&format!("Pulled Stack {}", bold(name)));
        }
      }
    }
    // Same pass for deployments, using the same opt-in query.
    let deployment_status_cache = deployment_status_cache();
    let deployments =
      find_collect(&db_client().deployments, query, None)
        .await
        .context("Failed to query for deployments from database")?;
    for deployment in deployments {
      let Some(status) =
        deployment_status_cache.get(&deployment.id).await
      else {
        continue;
      };
      // Only pull running deployments.
      if !matches!(status.curr.state, DeploymentState::Running) {
        continue;
      }
      if let Some(server) =
        servers.iter().find(|s| s.id == deployment.config.server_id)
        // This check is probably redundant along with running check
        // but shouldn't hurt
        && server_status_cache
          .get(&server.id)
          .await
          .map(|s| matches!(s.state, ServerState::Ok))
          .unwrap_or_default()
      {
        let name = deployment.name.clone();
        if let Err(e) =
          pull_deployment_inner(deployment, server).await
        {
          update.push_error_log(
            &format!("Pull Deployment {name}"),
            format_serror(&e.into()),
          );
        } else {
          // Append to the shared "Auto Pull" log, newline-separated.
          if !update.logs[0].stdout.is_empty() {
            update.logs[0].stdout.push('\n');
          }
          update.logs[0].stdout.push_str(&format!(
            "Pulled Deployment {}",
            bold(name)
          ));
        }
      }
    }
    // Finalize and persist the completed update.
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
//
/// Global lock ensuring only one key rotation runs at a time.
/// Shared by both server-key and core-key rotation.
fn global_rotate_lock() -> &'static Mutex<()> {
  static ROTATE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
  ROTATE_LOCK.get_or_init(|| Mutex::new(()))
}
impl Resolve<ExecuteArgs> for RotateAllServerKeys {
  /// Rotates the keys of every server with `auto_rotate_keys` enabled
  /// that currently has a status and is neither Disabled nor NotOk.
  /// Admin-only, and guarded by `global_rotate_lock` so it cannot run
  /// concurrently with core key rotation. Skips and per-server
  /// failures are reported in the update logs; they do not abort the
  /// overall rotation.
  #[instrument(
    "RotateAllServerKeys",
    skip_all,
    fields(
      id = id.to_string(),
      operator = user.id,
      update_id = update.id,
    )
  )]
  async fn resolve(
    self,
    ExecuteArgs { user, update, id }: &ExecuteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Admin gate: non-admin callers get a 403.
    if !user.admin {
      return Err(
        anyhow!("This method is admin only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // Non-blocking acquire: fail fast if a rotation is in progress.
    let _lock = global_rotate_lock()
      .try_lock()
      .context("Key rotation already in progress...")?;
    let mut update = update.clone();
    // Persist the in-progress update before iterating servers.
    update_update(update.clone()).await?;
    // Stream servers with a cursor rather than collecting them all.
    let mut servers = db_client()
      .servers
      .find(Document::new())
      .await
      .context("Failed to query servers from database")?;
    let server_status_cache = server_status_cache();
    // Accumulates one human-readable line per server outcome.
    let mut log = String::new();
    while let Some(server) = servers.next().await {
      let server = match server {
        Ok(server) => server,
        // Deserialization failure of one document is logged to
        // tracing and skipped, not fatal.
        Err(e) => {
          warn!("Failed to parse Server | {e:#}");
          continue;
        }
      };
      // Respect per-server opt-out.
      if !server.config.auto_rotate_keys {
        let _ = write!(
          &mut log,
          "\nSkipping {}: Key Rotation Disabled ⚙️",
          bold(&server.name)
        );
        continue;
      }
      // No cached status => can't judge reachability; skip.
      let Some(status) = server_status_cache.get(&server.id).await
      else {
        let _ = write!(
          &mut log,
          "\nSkipping {}: No Status ⚠️",
          bold(&server.name)
        );
        continue;
      };
      // Only rotate servers that are neither disabled nor unhealthy.
      match status.state {
        ServerState::Disabled => {
          let _ = write!(
            &mut log,
            "\nSkipping {}: Server Disabled ⚙️",
            bold(&server.name)
          );
          continue;
        }
        ServerState::NotOk => {
          let _ = write!(
            &mut log,
            "\nSkipping {}: Server Not Ok ⚠️",
            bold(&server.name)
          );
          continue;
        }
        _ => {}
      }
      // Perform the rotation; success goes to the summary log,
      // failure gets its own error log entry.
      match rotate_server_keys(&server).await {
        Ok(_) => {
          let _ = write!(
            &mut log,
            "\nRotated keys for {} ✅",
            bold(&server.name)
          );
        }
        Err(e) => {
          update.push_error_log(
            "Key Rotation Failure",
            format_serror(
              &e.context(format!(
                "Failed to rotate {} keys",
                bold(&server.name)
              ))
              .into(),
            ),
          );
        }
      }
    }
    // Attach the accumulated summary, finalize, and persist.
    update.push_simple_log("Rotate Server Keys", log);
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<ExecuteArgs> for RotateCoreKeys {
  /// Rotates the Core keypair and pushes the new public key to every
  /// server that is not Disabled/NotOk. Admin-only, and guarded by
  /// `global_rotate_lock`. Requires the core private key to be
  /// file-backed (rotatable). Unless `self.force` is set, aborts
  /// up-front if any server is NotOk, since unreachable servers would
  /// be left with the old key.
  #[instrument(
    "RotateCoreKeys",
    skip_all,
    fields(
      id = id.to_string(),
      operator = user.id,
      update_id = update.id,
      force = self.force,
    )
  )]
  async fn resolve(
    self,
    ExecuteArgs { user, update, id }: &ExecuteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Admin gate: non-admin callers get a 403.
    if !user.admin {
      return Err(
        anyhow!("This method is admin only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // Non-blocking acquire: fail fast if a rotation is in progress.
    let _lock = global_rotate_lock()
      .try_lock()
      .context("Key rotation already in progress...")?;
    let mut update = update.clone();
    // Persist the in-progress update before rotating.
    update_update(update.clone()).await?;
    let core_keys = core_keys();
    // Rotation requires a file-backed private key so the new key
    // can be written back.
    if !core_keys.rotatable() {
      return Err(anyhow!("Core `private_key` must be pointing to file, for example 'file:/config/keys/core.key'").into());
    };
    let server_status_cache = server_status_cache();
    // Pair each server with its cached state (NotOk when no status
    // is cached). FuturesOrdered preserves the database ordering.
    let servers =
      find_collect(&db_client().servers, Document::new(), None)
        .await
        .context("Failed to query servers from database")?
        .into_iter()
        .map(|server| async move {
          let state = server_status_cache
            .get(&server.id)
            .await
            .map(|s| s.state)
            .unwrap_or(ServerState::NotOk);
          (server, state)
        })
        .collect::<FuturesOrdered<_>>()
        .collect::<Vec<_>>()
        .await;
    // Pre-flight check: without `force`, any NotOk server aborts the
    // rotation before the core key is changed.
    if !self.force
      && let Some((server, _)) = servers
        .iter()
        .find(|(_, state)| matches!(state, ServerState::NotOk))
    {
      return Err(
        anyhow!("Server {} is NotOk, stopping key rotation. Pass `force: true` to continue anyways.", server.name).into(),
      );
    }
    // Point of no return: the core keypair is rotated here; servers
    // below must receive the new public key.
    let public_key = core_keys.rotate().await?.into_inner();
    info!("New Public Key: {public_key}");
    let mut log = format!("New Public Key: {public_key}\n");
    for (server, state) in servers {
      match state {
        ServerState::Disabled => {
          let _ = write!(
            &mut log,
            "\nSkipping {}: Server Disabled ⚙️",
            bold(&server.name)
          );
          continue;
        }
        ServerState::NotOk => {
          // Shouldn't be reached unless 'force: true'
          let _ = write!(
            &mut log,
            "\nSkipping {}: Server Not Ok ⚠️",
            bold(&server.name)
          );
          continue;
        }
        _ => {}
      }
      // NOTE(review): a failure constructing the periphery client
      // aborts the rest of the loop via `?`, while a failed request
      // below is only logged and the loop continues — confirm this
      // asymmetry is intended (servers after an abort keep the old
      // key without any log entry).
      let periphery = periphery_client(&server).await?;
      let res = periphery
        .request(api::keys::RotateCorePublicKey {
          public_key: public_key.clone(),
        })
        .await;
      match res {
        Ok(_) => {
          let _ = write!(
            &mut log,
            "\nRotated key for {} ✅",
            bold(&server.name)
          );
        }
        Err(e) => {
          update.push_error_log(
            "Key Rotation Failure",
            format_serror(
              &e.context(format!(
                "Failed to rotate for {}. The new Core public key will have to be added manually.",
                bold(&server.name)
              ))
              .into(),
            ),
          );
        }
      }
    }
    // Attach the accumulated summary, finalize, and persist.
    update.push_simple_log("Rotate Core Keys", log);
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}

View File

@@ -1,13 +1,14 @@
use std::{pin::Pin, time::Instant};
use std::pin::Pin;
use anyhow::Context;
use axum::{
Extension, Router, extract::Path, middleware, routing::post,
};
use axum_extra::{TypedHeader, headers::ContentType};
use database::mungos::by_id::find_one_by_id;
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures::future::join_all;
use futures_util::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
@@ -17,12 +18,12 @@ use komodo_client::{
user::User,
},
};
use mungos::by_id::find_one_by_id;
use resolver_api::Resolve;
use response::JsonString;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use strum::Display;
use typeshare::typeshare;
use uuid::Uuid;
@@ -37,6 +38,7 @@ mod action;
mod alerter;
mod build;
mod deployment;
mod maintenance;
mod procedure;
mod repo;
mod server;
@@ -50,6 +52,9 @@ pub use {
};
pub struct ExecuteArgs {
/// The execution id.
/// Unique for every /execute call.
pub id: Uuid,
pub user: User,
pub update: Update,
}
@@ -58,7 +63,7 @@ pub struct ExecuteArgs {
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[variant_derive(Debug)]
#[variant_derive(Debug, Display)]
#[args(ExecuteArgs)]
#[response(JsonString)]
#[error(serror::Error)]
@@ -101,6 +106,7 @@ pub enum ExecuteRequest {
UnpauseStack(UnpauseStack),
DestroyStack(DestroyStack),
BatchDestroyStack(BatchDestroyStack),
RunStackService(RunStackService),
// ==== DEPLOYMENT ====
Deploy(Deploy),
@@ -138,9 +144,17 @@ pub enum ExecuteRequest {
// ==== ALERTER ====
TestAlerter(TestAlerter),
SendAlert(SendAlert),
// ==== SYNC ====
RunSync(RunSync),
// ==== MAINTENANCE ====
ClearRepoCache(ClearRepoCache),
BackupCoreDatabase(BackupCoreDatabase),
GlobalAutoUpdate(GlobalAutoUpdate),
RotateAllServerKeys(RotateAllServerKeys),
RotateCoreKeys(RotateCoreKeys),
}
pub fn router() -> Router {
@@ -193,10 +207,12 @@ pub fn inner_handler(
>,
> {
Box::pin(async move {
let req_id = Uuid::new_v4();
let task_id = Uuid::new_v4();
// need to validate no cancel is active before any update is created.
// Need to validate no cancel is active before any update is created.
// This ensures no double update created if Cancel is called more than once for the same request.
build::validate_cancel_build(&request).await?;
repo::validate_cancel_repo_build(&request).await?;
let update = init_execution_update(&request, &user).await?;
@@ -207,28 +223,37 @@ pub fn inner_handler(
// here either.
if update.operation == Operation::None {
return Ok(ExecutionResult::Batch(
task(req_id, request, user, update).await?,
task(task_id, request, user, update).await?,
));
}
// Spawn a task for the execution which continues
// running after this method returns.
let handle =
tokio::spawn(task(req_id, request, user, update.clone()));
tokio::spawn(task(task_id, request, user, update.clone()));
// Spawns another task to monitor the first for failures,
// and add the log to Update about it (which primary task can't do because it errored out)
tokio::spawn({
let update_id = update.id.clone();
async move {
let log = match handle.await {
Ok(Err(e)) => {
warn!("/execute request {req_id} task error: {e:#}",);
Log::error("task error", format_serror(&e.into()))
warn!("/execute request {task_id} task error: {e:#}",);
Log::error("Task Error", format_serror(&e.into()))
}
Err(e) => {
warn!("/execute request {req_id} spawn error: {e:?}",);
Log::error("spawn error", format!("{e:#?}"))
warn!("/execute request {task_id} spawn error: {e:?}",);
Log::error("Spawn Error", format!("{e:#?}"))
}
_ => return,
};
let res = async {
// Nothing to do if update was never actually created,
// which is the case when the id is empty.
if update_id.is_empty() {
return Ok(());
}
let mut update =
find_one_by_id(&db_client().updates, &update_id)
.await
@@ -252,40 +277,33 @@ pub fn inner_handler(
})
}
#[instrument(
name = "ExecuteRequest",
skip(user, update),
fields(
user_id = user.id,
update_id = update.id,
request = format!("{:?}", request.extract_variant()))
)
]
async fn task(
req_id: Uuid,
id: Uuid,
request: ExecuteRequest,
user: User,
update: Update,
) -> anyhow::Result<String> {
info!("/execute request {req_id} | user: {}", user.username);
let timer = Instant::now();
let variant = request.extract_variant();
let res = match request.resolve(&ExecuteArgs { user, update }).await
{
Err(e) => Err(e.error),
Ok(JsonString::Err(e)) => Err(
anyhow::Error::from(e).context("failed to serialize response"),
),
Ok(JsonString::Ok(res)) => Ok(res),
};
info!(
"/execute request {id} | {variant} | user: {}",
user.username
);
let res =
match request.resolve(&ExecuteArgs { user, update, id }).await {
Err(e) => Err(e.error),
Ok(JsonString::Err(e)) => Err(
anyhow::Error::from(e)
.context("failed to serialize response"),
),
Ok(JsonString::Ok(res)) => Ok(res),
};
if let Err(e) = &res {
warn!("/execute request {req_id} error: {e:#}");
warn!("/execute request {id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
res
}
@@ -294,6 +312,7 @@ trait BatchExecute {
fn single_request(name: String) -> ExecuteRequest;
}
#[instrument("BatchExecute", skip(user))]
async fn batch_execute<E: BatchExecute>(
pattern: &str,
user: &User,
@@ -306,6 +325,7 @@ async fn batch_execute<E: BatchExecute>(
&[],
)
.await?;
let futures = resources.into_iter().map(|resource| {
let user = user.clone();
async move {

View File

@@ -1,5 +1,8 @@
use std::pin::Pin;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_document,
};
use formatting::{Color, bold, colored, format_serror, muted};
use komodo_client::{
api::execute::{
@@ -14,7 +17,6 @@ use komodo_client::{
user::User,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use tokio::sync::Mutex;
@@ -36,7 +38,11 @@ impl super::BatchExecute for BatchRunProcedure {
}
impl Resolve<ExecuteArgs> for BatchRunProcedure {
#[instrument(name = "BatchRunProcedure", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchRunProcedure",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
@@ -49,10 +55,19 @@ impl Resolve<ExecuteArgs> for BatchRunProcedure {
}
impl Resolve<ExecuteArgs> for RunProcedure {
#[instrument(name = "RunProcedure", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunProcedure",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
procedure = self.procedure,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
Ok(
resolve_inner(self.procedure, user.clone(), update.clone())
@@ -134,7 +149,7 @@ fn resolve_inner(
let _ = update_one_by_id(
&db_client().updates,
&update.id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await;
@@ -144,7 +159,6 @@ fn resolve_inner(
update_update(update.clone()).await?;
if !update.success && procedure.config.failure_alert {
warn!("procedure unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {

View File

@@ -1,7 +1,15 @@
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::update_one_by_id,
mongodb::{
bson::{doc, to_document},
options::FindOneOptions,
},
};
use formatting::format_serror;
use interpolate::Interpolator;
use komodo_client::{
api::{execute::*, write::RefreshRepoCache},
entities::{
@@ -14,13 +22,6 @@ use komodo_client::{
update::{Log, Update},
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::{
bson::{doc, to_document},
options::FindOneOptions,
},
};
use periphery_client::api;
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
@@ -29,16 +30,10 @@ use crate::{
alert::send_alerts,
api::write::WriteArgs,
helpers::{
builder::{cleanup_builder_instance, get_builder_periphery},
builder::{cleanup_builder_instance, connect_builder_periphery},
channel::repo_cancel_channel,
git_token,
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_string,
interpolate_variables_secrets_into_system_command,
},
periphery_client,
query::get_variables_and_secrets,
git_token, periphery_client,
query::{VariablesAndSecrets, get_variables_and_secrets},
update::update_update,
},
permission::get_check_permissions,
@@ -56,10 +51,18 @@ impl super::BatchExecute for BatchCloneRepo {
}
impl Resolve<ExecuteArgs> for BatchCloneRepo {
#[instrument(name = "BatchCloneRepo", skip( user), fields(user_id = user.id))]
#[instrument(
"BatchCloneRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchCloneRepo>(&self.pattern, user)
@@ -69,10 +72,19 @@ impl Resolve<ExecuteArgs> for BatchCloneRepo {
}
impl Resolve<ExecuteArgs> for CloneRepo {
#[instrument(name = "CloneRepo", skip( user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"CloneRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
@@ -110,7 +122,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
@@ -123,12 +135,14 @@ impl Resolve<ExecuteArgs> for CloneRepo {
git_token,
environment: repo.config.env_vars()?,
env_file_path: repo.config.env_file_path,
on_clone: repo.config.on_clone.into(),
on_pull: repo.config.on_pull.into(),
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
Ok(res) => res.logs,
Ok(res) => res.res.logs,
Err(e) => {
vec![Log::error(
"Clone Repo",
@@ -156,7 +170,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
);
};
handle_server_update_return(update).await
handle_repo_update_return(update).await
}
}
@@ -168,10 +182,18 @@ impl super::BatchExecute for BatchPullRepo {
}
impl Resolve<ExecuteArgs> for BatchPullRepo {
#[instrument(name = "BatchPullRepo", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchPullRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
@@ -181,10 +203,19 @@ impl Resolve<ExecuteArgs> for BatchPullRepo {
}
impl Resolve<ExecuteArgs> for PullRepo {
#[instrument(name = "PullRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PullRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
@@ -223,7 +254,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
@@ -236,14 +267,15 @@ impl Resolve<ExecuteArgs> for PullRepo {
git_token,
environment: repo.config.env_vars()?,
env_file_path: repo.config.env_file_path,
on_pull: repo.config.on_pull.into(),
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
Ok(res) => {
update.commit_hash = res.commit_hash.unwrap_or_default();
res.logs
update.commit_hash = res.res.commit_hash.unwrap_or_default();
res.res.logs
}
Err(e) => {
vec![Log::error(
@@ -273,12 +305,16 @@ impl Resolve<ExecuteArgs> for PullRepo {
);
};
handle_server_update_return(update).await
handle_repo_update_return(update).await
}
}
#[instrument(skip_all, fields(update_id = update.id))]
async fn handle_server_update_return(
#[instrument(
"HandleRepoEarlyReturn",
skip_all,
fields(update_id = update.id)
)]
async fn handle_repo_update_return(
update: Update,
) -> serror::Result<Update> {
// Need to manually update the update before cache refresh,
@@ -289,7 +325,7 @@ async fn handle_server_update_return(
let _ = update_one_by_id(
&db_client().updates,
&update.id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await;
@@ -299,7 +335,7 @@ async fn handle_server_update_return(
Ok(update)
}
#[instrument]
#[instrument("UpdateLastPulledTime")]
async fn update_last_pulled_time(repo_name: &str) {
let res = db_client()
.repos
@@ -323,10 +359,18 @@ impl super::BatchExecute for BatchBuildRepo {
}
impl Resolve<ExecuteArgs> for BatchBuildRepo {
#[instrument(name = "BatchBuildRepo", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchBuildRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchBuildRepo>(&self.pattern, user)
@@ -336,10 +380,19 @@ impl Resolve<ExecuteArgs> for BatchBuildRepo {
}
impl Resolve<ExecuteArgs> for BuildRepo {
#[instrument(name = "BuildRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"BuildRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
@@ -421,7 +474,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) = match get_builder_periphery(
let (periphery, cleanup_data) = match connect_builder_periphery(
repo.name.clone(),
None,
builder,
@@ -457,13 +510,15 @@ impl Resolve<ExecuteArgs> for BuildRepo {
git_token,
environment: repo.config.env_vars()?,
env_file_path: repo.config.env_file_path,
on_clone: repo.config.on_clone.into(),
on_pull: repo.config.on_pull.into(),
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect()
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(cleanup_data, &mut update)
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_builder_early_return(update, repo.id, repo.name, true).await
@@ -473,9 +528,10 @@ impl Resolve<ExecuteArgs> for BuildRepo {
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
update.logs.extend(res.logs);
update.commit_hash = res.commit_hash.unwrap_or_default();
res.commit_message.unwrap_or_default()
update.logs.extend(res.res.logs);
update.commit_hash = res.res.commit_hash.unwrap_or_default();
res.res.commit_message.unwrap_or_default()
}
Err(e) => {
update.push_error_log(
@@ -509,7 +565,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
// If building on temporary cloud server (AWS),
// this will terminate the server.
cleanup_builder_instance(cleanup_data, &mut update).await;
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
@@ -519,7 +576,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
let _ = update_one_by_id(
&db.updates,
&update.id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await;
@@ -529,7 +586,6 @@ impl Resolve<ExecuteArgs> for BuildRepo {
update_update(update.clone()).await?;
if !update.success {
warn!("repo build unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
@@ -552,7 +608,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
}
}
#[instrument(skip(update))]
#[instrument("HandleRepoBuildEarlyReturn", skip(update))]
async fn handle_builder_early_return(
mut update: Update,
repo_id: String,
@@ -568,7 +624,7 @@ async fn handle_builder_early_return(
let _ = update_one_by_id(
&db_client().updates,
&update.id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await;
@@ -576,7 +632,6 @@ async fn handle_builder_early_return(
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("repo build unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
@@ -597,7 +652,6 @@ async fn handle_builder_early_return(
Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_repo_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
@@ -647,10 +701,19 @@ pub async fn validate_cancel_repo_build(
}
impl Resolve<ExecuteArgs> for CancelRepoBuild {
#[instrument(name = "CancelRepoBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"CancelRepoBuild",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let repo = get_check_permissions::<Repo>(
&self.repo,
@@ -707,44 +770,29 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
}
}
#[instrument(
"Interpolate",
skip_all,
fields(
skip_secret_interp = repo.config.skip_secret_interp
)
)]
async fn interpolate(
repo: &mut Repo,
update: &mut Update,
) -> anyhow::Result<HashSet<(String, String)>> {
if !repo.config.skip_secret_interp {
let vars_and_secrets = get_variables_and_secrets().await?;
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut repo.config.environment,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolator
.interpolate_repo(repo)?
.push_logs(&mut update.logs);
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut repo.config.on_clone,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut repo.config.on_pull,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
update,
&global_replacers,
&secret_replacers,
);
Ok(secret_replacers)
Ok(interpolator.secret_replacers)
} else {
Ok(Default::default())
}

View File

@@ -22,10 +22,20 @@ use crate::{
use super::ExecuteArgs;
impl Resolve<ExecuteArgs> for StartContainer {
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StartContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -50,7 +60,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::StartContainer {
@@ -66,7 +76,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -76,10 +86,20 @@ impl Resolve<ExecuteArgs> for StartContainer {
}
impl Resolve<ExecuteArgs> for RestartContainer {
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RestartContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -104,7 +124,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::RestartContainer {
@@ -122,7 +142,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -132,10 +152,20 @@ impl Resolve<ExecuteArgs> for RestartContainer {
}
impl Resolve<ExecuteArgs> for PauseContainer {
#[instrument(name = "PauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PauseContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -160,7 +190,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::PauseContainer {
@@ -176,7 +206,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -186,10 +216,20 @@ impl Resolve<ExecuteArgs> for PauseContainer {
}
impl Resolve<ExecuteArgs> for UnpauseContainer {
#[instrument(name = "UnpauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"UnpauseContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -214,7 +254,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::UnpauseContainer {
@@ -232,7 +272,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -242,10 +282,22 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
}
impl Resolve<ExecuteArgs> for StopContainer {
#[instrument(name = "StopContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StopContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -270,7 +322,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::StopContainer {
@@ -288,7 +340,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -298,10 +350,22 @@ impl Resolve<ExecuteArgs> for StopContainer {
}
impl Resolve<ExecuteArgs> for DestroyContainer {
#[instrument(name = "DestroyContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DestroyContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let DestroyContainer {
server,
@@ -332,7 +396,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::RemoveContainer {
@@ -350,7 +414,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -360,10 +424,19 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
}
impl Resolve<ExecuteArgs> for StartAllContainers {
#[instrument(name = "StartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StartAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -387,7 +460,8 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::StartAllContainers {})
.await
.context("failed to start all containers on host")?;
@@ -401,7 +475,7 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
);
}
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -410,10 +484,19 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
}
impl Resolve<ExecuteArgs> for RestartAllContainers {
#[instrument(name = "RestartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RestartAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -437,7 +520,8 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::RestartAllContainers {})
.await
.context("failed to restart all containers on host")?;
@@ -453,7 +537,7 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
);
}
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -462,10 +546,19 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
}
impl Resolve<ExecuteArgs> for PauseAllContainers {
#[instrument(name = "PauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PauseAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -489,7 +582,8 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::PauseAllContainers {})
.await
.context("failed to pause all containers on host")?;
@@ -503,7 +597,7 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
);
}
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -512,10 +606,19 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
}
impl Resolve<ExecuteArgs> for UnpauseAllContainers {
#[instrument(name = "UnpauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"UnpauseAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -539,7 +642,8 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::UnpauseAllContainers {})
.await
.context("failed to unpause all containers on host")?;
@@ -555,7 +659,7 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
);
}
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -564,10 +668,19 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
}
impl Resolve<ExecuteArgs> for StopAllContainers {
#[instrument(name = "StopAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StopAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -591,7 +704,8 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::StopAllContainers {})
.await
.context("failed to stop all containers on host")?;
@@ -605,7 +719,7 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
);
}
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -614,10 +728,19 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
}
impl Resolve<ExecuteArgs> for PruneContainers {
#[instrument(name = "PruneContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -641,7 +764,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::PruneContainers {})
@@ -660,7 +783,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -670,10 +793,20 @@ impl Resolve<ExecuteArgs> for PruneContainers {
}
impl Resolve<ExecuteArgs> for DeleteNetwork {
#[instrument(name = "DeleteNetwork", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DeleteNetwork",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
network = self.name
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -686,10 +819,10 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::network::DeleteNetwork {
.request(api::docker::DeleteNetwork {
name: self.name.clone(),
})
.await
@@ -711,7 +844,7 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -721,10 +854,19 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
}
impl Resolve<ExecuteArgs> for PruneNetworks {
#[instrument(name = "PruneNetworks", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneNetworks",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -748,10 +890,10 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::network::PruneNetworks {})
.request(api::docker::PruneNetworks {})
.await
.context(format!(
"failed to prune networks on server {}",
@@ -765,7 +907,7 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -775,10 +917,20 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
}
impl Resolve<ExecuteArgs> for DeleteImage {
#[instrument(name = "DeleteImage", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DeleteImage",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
image = self.name,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -791,10 +943,10 @@ impl Resolve<ExecuteArgs> for DeleteImage {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::image::DeleteImage {
.request(api::docker::DeleteImage {
name: self.name.clone(),
})
.await
@@ -813,7 +965,7 @@ impl Resolve<ExecuteArgs> for DeleteImage {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -823,10 +975,19 @@ impl Resolve<ExecuteArgs> for DeleteImage {
}
impl Resolve<ExecuteArgs> for PruneImages {
#[instrument(name = "PruneImages", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneImages",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -850,10 +1011,10 @@ impl Resolve<ExecuteArgs> for PruneImages {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::image::PruneImages {}).await {
match periphery.request(api::docker::PruneImages {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune images",
@@ -865,7 +1026,7 @@ impl Resolve<ExecuteArgs> for PruneImages {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -875,10 +1036,20 @@ impl Resolve<ExecuteArgs> for PruneImages {
}
impl Resolve<ExecuteArgs> for DeleteVolume {
#[instrument(name = "DeleteVolume", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DeleteVolume",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
volume = self.name,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -891,10 +1062,10 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::volume::DeleteVolume {
.request(api::docker::DeleteVolume {
name: self.name.clone(),
})
.await
@@ -916,7 +1087,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -926,10 +1097,19 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
}
impl Resolve<ExecuteArgs> for PruneVolumes {
#[instrument(name = "PruneVolumes", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneVolumes",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -953,10 +1133,10 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::volume::PruneVolumes {}).await {
match periphery.request(api::docker::PruneVolumes {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune volumes",
@@ -968,7 +1148,7 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -978,10 +1158,19 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
}
impl Resolve<ExecuteArgs> for PruneDockerBuilders {
#[instrument(name = "PruneDockerBuilders", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneDockerBuilders",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -1005,7 +1194,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::build::PruneBuilders {}).await {
@@ -1020,7 +1209,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1030,10 +1219,19 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
}
impl Resolve<ExecuteArgs> for PruneBuildx {
#[instrument(name = "PruneBuildx", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneBuildx",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -1057,7 +1255,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::build::PruneBuildx {}).await {
@@ -1072,7 +1270,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;
@@ -1082,10 +1280,19 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
}
impl Resolve<ExecuteArgs> for PruneSystem {
#[instrument(name = "PruneSystem", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneSystem",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -1109,7 +1316,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery.request(api::PruneSystem {}).await {
Ok(log) => log,
@@ -1123,7 +1330,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update_cache_for_server(&server, true).await;
update.finalize();
update_update(update.clone()).await?;

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,10 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
};
use formatting::{Color, colored, format_serror};
use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
@@ -22,8 +26,6 @@ use komodo_client::{
user::sync_user,
},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::oid::ObjectId};
use resolver_api::Resolve;
use crate::{
@@ -47,10 +49,21 @@ use crate::{
use super::ExecuteArgs;
impl Resolve<ExecuteArgs> for RunSync {
#[instrument(name = "RunSync", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunSync",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
sync = self.sync,
resource_type = format!("{:?}", self.resource_type),
resources = format!("{:?}", self.resources),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let RunSync {
sync,
@@ -75,10 +88,8 @@ impl Resolve<ExecuteArgs> for RunSync {
};
// get the action state for the sync (or insert default).
let action_state = action_states()
.resource_sync
.get_or_insert_default(&sync.id)
.await;
let action_state =
action_states().sync.get_or_insert_default(&sync.id).await;
// This will set action state back to default when dropped.
// Will also check to ensure sync not already busy before updating.

View File

@@ -131,8 +131,8 @@ impl Resolve<ReadArgs> for GetActionsSummary {
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.running => {
res.running += 1;
(_, action_states) if action_states.running > 0 => {
res.running += action_states.running;
}
(ActionState::Ok, _) => res.ok += 1,
(ActionState::Failed, _) => res.failed += 1,

View File

@@ -1,22 +1,22 @@
use anyhow::Context;
use database::mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use komodo_client::{
api::read::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
entities::{
deployment::Deployment, server::Server, stack::Stack,
sync::ResourceSync,
deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, sync::ResourceSync,
},
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::{
config::core_config, permission::get_resource_ids_for_user,
config::core_config, permission::list_resource_ids_for_user,
state::db_client,
};
@@ -31,14 +31,29 @@ impl Resolve<ReadArgs> for ListAlerts {
) -> serror::Result<ListAlertsResponse> {
let mut query = self.query.unwrap_or_default();
if !user.admin && !core_config().transparent_mode {
let server_ids =
get_resource_ids_for_user::<Server>(user).await?;
let stack_ids =
get_resource_ids_for_user::<Stack>(user).await?;
let deployment_ids =
get_resource_ids_for_user::<Deployment>(user).await?;
let sync_ids =
get_resource_ids_for_user::<ResourceSync>(user).await?;
let (server_ids, stack_ids, deployment_ids, sync_ids) = tokio::try_join!(
list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
)?;
// All of the vecs will be non-none if !admin and !transparent mode.
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },

View File

@@ -1,4 +1,6 @@
use anyhow::Context;
use database::mongo_indexed::Document;
use database::mungos::mongodb::bson::doc;
use komodo_client::{
api::read::*,
entities::{
@@ -6,13 +8,13 @@ use komodo_client::{
permission::PermissionLevel,
},
};
use mongo_indexed::Document;
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags,
permission::{get_check_permissions, list_resource_ids_for_user},
resource,
state::db_client,
};
use super::ReadArgs;
@@ -82,9 +84,11 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetAlertersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Alerter,
>(user)
let query = match list_resource_ids_for_user::<Alerter>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
{
Some(ids) => doc! {

View File

@@ -2,31 +2,27 @@ use std::collections::{HashMap, HashSet};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
use futures::TryStreamExt;
use database::mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use futures_util::TryStreamExt;
use komodo_client::{
api::read::*,
entities::{
Operation,
build::{Build, BuildActionState, BuildListItem, BuildState},
config::core::CoreConfig,
permission::PermissionLevel,
update::UpdateStatus,
},
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{
action_states, build_state_cache, db_client, github_client,
},
state::{action_states, build_state_cache, db_client},
};
use super::ReadArgs;
@@ -306,81 +302,3 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
Ok(res)
}
}
// Reports whether a GitHub webhook pointing back at this Core instance
// exists and is active for the build's source repo.
// `managed: false` means Komodo cannot manage webhooks for this build
// (no github app, non-github provider, or no installation for the owner).
impl Resolve<ReadArgs> for GetBuildWebhookEnabled {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetBuildWebhookEnabledResponse> {
// Without a configured github client, webhooks cannot be managed.
let Some(github) = github_client() else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
// Read permission on the build is sufficient to query webhook status.
let build = get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Read.into(),
)
.await?;
// Only github.com repos can be managed through the github client.
if build.config.git_provider != "github.com"
|| build.config.repo.is_empty()
{
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
}
// Repo is stored as "owner/repo".
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
// The github app must have an installation for this owner.
let Some(github) = github.get(owner) else {
return Ok(GetBuildWebhookEnabledResponse {
managed: false,
enabled: false,
});
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
// Webhook urls are built against webhook_base_url when set,
// falling back to the core host.
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
let url = format!("{host}/listener/github/build/{}", build.id);
// Enabled only if an active webhook matches the expected listener url.
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: true,
});
}
}
// Managed (we could list webhooks) but no matching active hook found.
Ok(GetBuildWebhookEnabledResponse {
managed: true,
enabled: false,
})
}
}

View File

@@ -1,4 +1,6 @@
use anyhow::Context;
use database::mongo_indexed::Document;
use database::mungos::mongodb::bson::doc;
use komodo_client::{
api::read::*,
entities::{
@@ -6,13 +8,13 @@ use komodo_client::{
permission::PermissionLevel,
},
};
use mongo_indexed::Document;
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags,
permission::{get_check_permissions, list_resource_ids_for_user},
resource,
state::db_client,
};
use super::ReadArgs;
@@ -82,9 +84,11 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetBuildersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Builder,
>(user)
let query = match list_resource_ids_for_user::<Builder>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
{
Some(ids) => doc! {

View File

@@ -145,7 +145,8 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
return Ok(Log::default());
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerLog {
name,
tail: cmp::min(tail, MAX_LOG_LENGTH),
@@ -183,7 +184,8 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
return Ok(Log::default());
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerLogSearch {
name,
terms,
@@ -234,7 +236,8 @@ impl Resolve<ReadArgs> for InspectDeploymentContainer {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectContainer { name })
.await?;
Ok(res)
@@ -262,7 +265,8 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
);
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerStats { name })
.await
.context("failed to get stats from periphery")?;
@@ -321,7 +325,9 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
res.not_deployed += 1;
}
DeploymentState::Unknown => {
res.unknown += 1;
if !deployment.template {
res.unknown += 1;
}
}
_ => {
res.unhealthy += 1;

View File

@@ -1,4 +1,4 @@
use std::{collections::HashSet, sync::OnceLock, time::Instant};
use std::{collections::HashSet, time::Instant};
use anyhow::{Context, anyhow};
use axum::{
@@ -27,7 +27,9 @@ use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::auth_request, config::core_config, helpers::periphery_client,
auth::auth_request,
config::{core_config, core_keys},
helpers::periphery_client,
resource,
};
@@ -39,6 +41,7 @@ mod alerter;
mod build;
mod builder;
mod deployment;
mod onboarding_key;
mod permission;
mod procedure;
mod provider;
@@ -48,6 +51,7 @@ mod server;
mod stack;
mod sync;
mod tag;
mod terminal;
mod toml;
mod update;
mod user;
@@ -106,27 +110,31 @@ enum ReadRequest {
GetServersSummary(GetServersSummary),
GetServer(GetServer),
GetServerState(GetServerState),
GetPeripheryVersion(GetPeripheryVersion),
GetPeripheryInformation(GetPeripheryInformation),
GetServerActionState(GetServerActionState),
GetHistoricalServerStats(GetHistoricalServerStats),
ListServers(ListServers),
ListFullServers(ListFullServers),
// ==== TERMINAL ====
ListTerminals(ListTerminals),
// ==== DOCKER ====
GetDockerContainersSummary(GetDockerContainersSummary),
ListAllDockerContainers(ListAllDockerContainers),
ListDockerContainers(ListDockerContainers),
InspectDockerContainer(InspectDockerContainer),
GetResourceMatchingContainer(GetResourceMatchingContainer),
GetContainerLog(GetContainerLog),
SearchContainerLog(SearchContainerLog),
ListComposeProjects(ListComposeProjects),
ListDockerNetworks(ListDockerNetworks),
InspectDockerNetwork(InspectDockerNetwork),
ListDockerImages(ListDockerImages),
InspectDockerImage(InspectDockerImage),
ListDockerImageHistory(ListDockerImageHistory),
InspectDockerVolume(InspectDockerVolume),
GetDockerContainersSummary(GetDockerContainersSummary),
ListAllDockerContainers(ListAllDockerContainers),
ListDockerContainers(ListDockerContainers),
ListDockerNetworks(ListDockerNetworks),
ListDockerImages(ListDockerImages),
ListDockerVolumes(ListDockerVolumes),
ListComposeProjects(ListComposeProjects),
ListTerminals(ListTerminals),
InspectDockerVolume(InspectDockerVolume),
// ==== SERVER STATS ====
GetSystemInformation(GetSystemInformation),
@@ -137,7 +145,6 @@ enum ReadRequest {
GetStacksSummary(GetStacksSummary),
GetStack(GetStack),
GetStackActionState(GetStackActionState),
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
GetStackLog(GetStackLog),
SearchStackLog(SearchStackLog),
InspectStackContainer(InspectStackContainer),
@@ -166,7 +173,6 @@ enum ReadRequest {
GetBuildActionState(GetBuildActionState),
GetBuildMonthlyStats(GetBuildMonthlyStats),
ListBuildVersions(ListBuildVersions),
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
ListBuilds(ListBuilds),
ListFullBuilds(ListFullBuilds),
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
@@ -175,7 +181,6 @@ enum ReadRequest {
GetReposSummary(GetReposSummary),
GetRepo(GetRepo),
GetRepoActionState(GetRepoActionState),
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
ListRepos(ListRepos),
ListFullRepos(ListFullRepos),
@@ -183,7 +188,6 @@ enum ReadRequest {
GetResourceSyncsSummary(GetResourceSyncsSummary),
GetResourceSync(GetResourceSync),
GetResourceSyncActionState(GetResourceSyncActionState),
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
ListResourceSyncs(ListResourceSyncs),
ListFullResourceSyncs(ListFullResourceSyncs),
@@ -224,6 +228,9 @@ enum ReadRequest {
ListGitProviderAccounts(ListGitProviderAccounts),
GetDockerRegistryAccount(GetDockerRegistryAccount),
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
// ==== ONBOARDING KEY ====
ListOnboardingKeys(ListOnboardingKeys),
}
pub fn router() -> Router {
@@ -245,7 +252,6 @@ async fn variant_handler(
handler(user, Json(req)).await
}
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ReadRequest>,
@@ -273,11 +279,13 @@ impl Resolve<ReadArgs> for GetVersion {
}
}
fn core_info() -> &'static GetCoreInfoResponse {
static CORE_INFO: OnceLock<GetCoreInfoResponse> = OnceLock::new();
CORE_INFO.get_or_init(|| {
impl Resolve<ReadArgs> for GetCoreInfo {
async fn resolve(
self,
_: &ReadArgs,
) -> serror::Result<GetCoreInfoResponse> {
let config = core_config();
GetCoreInfoResponse {
let info = GetCoreInfoResponse {
title: config.title.clone(),
monitoring_interval: config.monitoring_interval,
webhook_base_url: if config.webhook_base_url.is_empty() {
@@ -290,23 +298,11 @@ fn core_info() -> &'static GetCoreInfoResponse {
disable_confirm_dialog: config.disable_confirm_dialog,
disable_non_admin_create: config.disable_non_admin_create,
disable_websocket_reconnect: config.disable_websocket_reconnect,
github_webhook_owners: config
.github_webhook_app
.installations
.iter()
.map(|i| i.namespace.to_string())
.collect(),
enable_fancy_toml: config.enable_fancy_toml,
timezone: config.timezone.clone(),
}
})
}
impl Resolve<ReadArgs> for GetCoreInfo {
async fn resolve(
self,
_: &ReadArgs,
) -> serror::Result<GetCoreInfoResponse> {
Ok(core_info().clone())
public_key: core_keys().load().public.to_string(),
};
Ok(info)
}
}
@@ -342,7 +338,8 @@ impl Resolve<ReadArgs> for ListSecrets {
};
if let Some(id) = server_id {
let server = resource::get::<Server>(&id).await?;
let more = periphery_client(&server)?
let more = periphery_client(&server)
.await?
.request(periphery_client::api::ListSecrets {})
.await
.with_context(|| {
@@ -514,7 +511,8 @@ async fn merge_git_providers_for_server(
server_id: &str,
) -> serror::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
let more = periphery_client(&server)
.await?
.request(periphery_client::api::ListGitProviders {})
.await
.with_context(|| {
@@ -552,7 +550,8 @@ async fn merge_docker_registries_for_server(
server_id: &str,
) -> serror::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
let more = periphery_client(&server)
.await?
.request(periphery_client::api::ListDockerRegistries {})
.await
.with_context(|| {

View File

@@ -0,0 +1,51 @@
use std::cmp::Ordering;
use anyhow::{Context, anyhow};
use database::mungos::find::find_collect;
use komodo_client::api::read::{
ListOnboardingKeys, ListOnboardingKeysResponse,
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{api::read::ReadArgs, state::db_client};
//
// Admin-only: list all Server onboarding keys from the database,
// with no-expiry keys first, then by expiry timestamp descending.
impl Resolve<ReadArgs> for ListOnboardingKeys {
async fn resolve(
self,
ReadArgs { user: admin }: &ReadArgs,
) -> serror::Result<ListOnboardingKeysResponse> {
// Onboarding keys grant server enrollment, so restrict to admins.
if !admin.admin {
return Err(
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
let mut keys =
find_collect(&db_client().onboarding_keys, None, None)
.await
.context(
"Failed to query database for Server onboarding keys",
)?;
// Keys with no expiry (expires == 0) sort first, followed by
// the remaining keys ordered by expiry timestamp descending.
keys.sort_by(|a, b| {
if a.expires == b.expires {
Ordering::Equal
} else if a.expires == 0 {
// `a` never expires -> `a` before `b`.
Ordering::Less
} else if b.expires == 0 {
// `b` never expires -> `b` before `a`.
Ordering::Greater
} else {
// Descending by expiry timestamp.
b.expires.cmp(&a.expires)
}
});
Ok(keys)
}
}

View File

@@ -1,4 +1,5 @@
use anyhow::{Context, anyhow};
use database::mungos::{find::find_collect, mongodb::bson::doc};
use komodo_client::{
api::read::{
GetPermission, GetPermissionResponse, ListPermissions,
@@ -7,7 +8,6 @@ use komodo_client::{
},
entities::permission::PermissionLevel,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use resolver_api::Resolve;
use crate::{

View File

@@ -1,10 +1,10 @@
use anyhow::{Context, anyhow};
use komodo_client::api::read::*;
use mongo_indexed::{Document, doc};
use mungos::{
use database::mongo_indexed::{Document, doc};
use database::mungos::{
by_id::find_one_by_id, find::find_collect,
mongodb::options::FindOptions,
};
use komodo_client::api::read::*;
use resolver_api::Resolve;
use crate::state::db_client;

View File

@@ -2,7 +2,6 @@ use anyhow::Context;
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
repo::{Repo, RepoActionState, RepoListItem, RepoState},
},
@@ -10,11 +9,10 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{action_states, github_client, repo_state_cache},
state::{action_states, repo_state_cache},
};
use super::ReadArgs;
@@ -142,7 +140,11 @@ impl Resolve<ReadArgs> for GetReposSummary {
}
(RepoState::Ok, _) => res.ok += 1,
(RepoState::Failed, _) => res.failed += 1,
(RepoState::Unknown, _) => res.unknown += 1,
(RepoState::Unknown, _) => {
if !repo.template {
res.unknown += 1
}
}
// will never come off the cache in the building state, since that comes from action states
(RepoState::Cloning, _)
| (RepoState::Pulling, _)
@@ -155,104 +157,3 @@ impl Resolve<ReadArgs> for GetReposSummary {
Ok(res)
}
}
// Reports, for each of the repo's three listener endpoints (clone /
// pull / build), whether an active GitHub webhook pointing back at
// this Core instance exists. `managed: false` means Komodo cannot
// manage webhooks for this repo (no github app, non-github provider,
// or no installation for the owner).
impl Resolve<ReadArgs> for GetRepoWebhooksEnabled {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetRepoWebhooksEnabledResponse> {
// Without a configured github client, webhooks cannot be managed.
let Some(github) = github_client() else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
};
// Read permission on the repo is sufficient to query webhook status.
let repo = get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Read.into(),
)
.await?;
// Only github.com repos can be managed through the github client.
if repo.config.git_provider != "github.com"
|| repo.config.repo.is_empty()
{
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
}
// Repo is stored as "owner/repo".
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
// The github app must have an installation for this owner.
let Some(github) = github.get(owner) else {
return Ok(GetRepoWebhooksEnabledResponse {
managed: false,
clone_enabled: false,
pull_enabled: false,
build_enabled: false,
});
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
// Webhook urls are built against webhook_base_url when set,
// falling back to the core host.
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
// One listener endpoint per action type.
let clone_url =
format!("{host}/listener/github/repo/{}/clone", repo.id);
let pull_url =
format!("{host}/listener/github/repo/{}/pull", repo.id);
let build_url =
format!("{host}/listener/github/repo/{}/build", repo.id);
let mut clone_enabled = false;
let mut pull_enabled = false;
let mut build_enabled = false;
// A single pass over the webhooks can enable any of the three flags.
for webhook in webhooks {
if !webhook.active {
continue;
}
if webhook.config.url == clone_url {
clone_enabled = true
}
if webhook.config.url == pull_url {
pull_enabled = true
}
if webhook.config.url == build_url {
build_enabled = true
}
}
Ok(GetRepoWebhooksEnabledResponse {
managed: true,
clone_enabled,
pull_enabled,
build_enabled,
})
}
}

View File

@@ -1,9 +1,12 @@
use futures::future::join_all;
use futures_util::future::join_all;
use komodo_client::{
api::read::*,
entities::{
ResourceTarget, action::Action, permission::PermissionLevel,
procedure::Procedure, resource::ResourceQuery,
ResourceTarget,
action::Action,
permission::PermissionLevel,
procedure::Procedure,
resource::{ResourceQuery, TemplatesQueryBehavior},
schedule::Schedule,
},
};
@@ -27,6 +30,7 @@ impl Resolve<ReadArgs> for ListSchedules {
list_full_for_user::<Action>(
ResourceQuery {
names: Default::default(),
templates: TemplatesQueryBehavior::Include,
tag_behavior: self.tag_behavior,
tags: self.tags.clone(),
specific: Default::default(),
@@ -38,6 +42,7 @@ impl Resolve<ReadArgs> for ListSchedules {
list_full_for_user::<Procedure>(
ResourceQuery {
names: Default::default(),
templates: TemplatesQueryBehavior::Include,
tag_behavior: self.tag_behavior,
tags: self.tags.clone(),
specific: Default::default(),

View File

@@ -8,6 +8,10 @@ use anyhow::{Context, anyhow};
use async_timing_util::{
FIFTEEN_SECONDS_MS, get_timelength_in_ms, unix_timestamp_ms,
};
use database::mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use komodo_client::{
api::read::*,
entities::{
@@ -21,37 +25,31 @@ use komodo_client::{
network::Network,
volume::Volume,
},
komodo_timestamp,
permission::PermissionLevel,
server::{
Server, ServerActionState, ServerListItem, ServerState,
TerminalInfo,
Server, ServerActionState, ServerListItem, ServerQuery,
ServerState,
},
stack::{Stack, StackServiceNames},
stats::{SystemInformation, SystemProcess},
update::Log,
},
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use periphery_client::api::{
self as periphery,
container::InspectContainer,
image::{ImageHistory, InspectImage},
network::InspectNetwork,
volume::InspectVolume,
docker::{
ImageHistory, InspectImage, InspectNetwork, InspectVolume,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use tokio::sync::Mutex;
use crate::{
helpers::{
periphery_client,
query::{get_all_tags, get_system_info},
},
permission::get_check_permissions,
helpers::{periphery_client, query::get_all_tags},
permission::{get_check_permissions, list_resources_for_user},
resource,
stack::compose_container_match_regex,
state::{action_states, db_client, server_status_cache},
@@ -71,18 +69,29 @@ impl Resolve<ReadArgs> for GetServersSummary {
&[],
)
.await?;
let core_version = env!("CARGO_PKG_VERSION");
let mut res = GetServersSummaryResponse::default();
for server in servers {
res.total += 1;
match server.info.state {
ServerState::Ok => {
res.healthy += 1;
// Check for version mismatch
if matches!(&server.info.version, Some(version) if version != core_version)
{
res.warning += 1;
} else {
res.healthy += 1;
}
}
ServerState::NotOk => {
res.unhealthy += 1;
}
ServerState::Disabled => {
res.disabled += 1;
if !server.template {
res.disabled += 1;
}
}
}
}
@@ -90,26 +99,6 @@ impl Resolve<ReadArgs> for GetServersSummary {
}
}
impl Resolve<ReadArgs> for GetPeripheryVersion {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetPeripheryVersionResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
)
.await?;
let version = server_status_cache()
.get(&server.id)
.await
.map(|s| s.version.clone())
.unwrap_or(String::from("unknown"));
Ok(GetPeripheryVersionResponse { version })
}
}
impl Resolve<ReadArgs> for GetServer {
async fn resolve(
self,
@@ -213,6 +202,29 @@ impl Resolve<ReadArgs> for GetServerActionState {
}
}
impl Resolve<ReadArgs> for GetPeripheryInformation {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetPeripheryInformationResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
)
.await?;
server_status_cache()
.get(&server.id)
.await
.context("Missing server status")?
.periphery_info
.as_ref()
.cloned()
.context("Server status missing Periphery Info. The Server may be disconnected.")
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
}
}
impl Resolve<ReadArgs> for GetSystemInformation {
async fn resolve(
self,
@@ -223,8 +235,17 @@ impl Resolve<ReadArgs> for GetSystemInformation {
user,
PermissionLevel::Read.into(),
)
.await?;
get_system_info(&server).await.map_err(Into::into)
.await
.status_code(StatusCode::BAD_REQUEST)?;
server_status_cache()
.get(&server.id)
.await
.context("Missing server status")?
.system_info
.as_ref()
.cloned()
.context("Server status missing system Info. The Server may be disconnected.")
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
}
}
@@ -239,15 +260,15 @@ impl Resolve<ReadArgs> for GetSystemStats {
PermissionLevel::Read.into(),
)
.await?;
let status =
server_status_cache().get(&server.id).await.with_context(
|| format!("did not find status for server at {}", server.id),
)?;
let stats = status
.stats
server_status_cache()
.get(&server.id)
.await
.context("Missing server status")?
.system_stats
.as_ref()
.context("server stats not available")?;
Ok(stats.clone())
.cloned()
.context("Server status missing system stats. The Server may be disconnected.")
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
}
}
@@ -277,7 +298,8 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
cached.0.clone()
}
_ => {
let stats = periphery_client(&server)?
let stats = periphery_client(&server)
.await?
.request(periphery::stats::GetSystemProcesses {})
.await?;
lock.insert(
@@ -375,18 +397,12 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListAllDockerContainersResponse> {
let servers = resource::list_for_user::<Server>(
Default::default(),
ServerQuery::builder().names(self.servers.clone()).build(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?
.into_iter()
.filter(|server| {
self.servers.is_empty()
|| self.servers.contains(&server.id)
|| self.servers.contains(&server.name)
});
.await?;
let mut containers = Vec::<ContainerListItem>::new();
@@ -394,9 +410,17 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(more_containers) = &cache.containers {
containers.extend(more_containers.clone());
}
let Some(more) = &cache.containers else {
continue;
};
let more = more
.iter()
.filter(|container| {
self.containers.is_empty()
|| self.containers.contains(&container.name)
})
.cloned();
containers.extend(more);
}
Ok(containers)
@@ -466,7 +490,8 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectContainer {
name: self.container,
})
@@ -494,7 +519,8 @@ impl Resolve<ReadArgs> for GetContainerLog {
PermissionLevel::Read.logs(),
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(periphery::container::GetContainerLog {
name: container,
tail: cmp::min(tail, MAX_LOG_LENGTH),
@@ -525,7 +551,8 @@ impl Resolve<ReadArgs> for SearchContainerLog {
PermissionLevel::Read.logs(),
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(periphery::container::GetContainerLogSearch {
name: container,
terms,
@@ -560,12 +587,12 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
}
// then check stacks
let stacks =
resource::list_full_for_user_using_document::<Stack>(
doc! { "config.server_id": &server.id },
user,
)
.await?;
let stacks = list_resources_for_user::<Stack>(
doc! { "config.server_id": &server.id },
user,
PermissionLevel::Read.into(),
)
.await?;
// check matching stack
for stack in stacks {
@@ -645,7 +672,8 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectNetwork { name: self.network })
.await?;
Ok(res)
@@ -694,7 +722,8 @@ impl Resolve<ReadArgs> for InspectDockerImage {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectImage { name: self.image })
.await?;
Ok(res)
@@ -724,7 +753,8 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(ImageHistory { name: self.image })
.await?;
Ok(res)
@@ -773,7 +803,8 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectVolume { name: self.volume })
.await?;
Ok(res)
@@ -802,65 +833,46 @@ impl Resolve<ReadArgs> for ListComposeProjects {
}
}
#[derive(Default)]
struct TerminalCacheItem {
list: Vec<TerminalInfo>,
ttl: i64,
}
// impl Resolve<ReadArgs> for ListAllTerminals {
// async fn resolve(
// self,
// args: &ReadArgs,
// ) -> Result<Self::Response, Self::Error> {
// // match self.tar
// let mut terminals = resource::list_full_for_user::<Server>(
// self.query, &args.user, &all_tags,
// )
// .await?
// .into_iter()
// .map(|server| async move {
// (
// list_terminals_inner(&server, self.fresh).await,
// (server.id, server.name),
// )
// })
// .collect::<FuturesUnordered<_>>()
// .collect::<Vec<_>>()
// .await
// .into_iter()
// .flat_map(|(terminals, server)| {
// let terminals = terminals.ok()?;
// Some((terminals, server))
// })
// .flat_map(|(terminals, (server_id, server_name))| {
// terminals.into_iter().map(move |info| {
// TerminalInfoWithServer::from_terminal_info(
// &server_id,
// &server_name,
// info,
// )
// })
// })
// .collect::<Vec<_>>();
const TERMINAL_CACHE_TIMEOUT: i64 = 30_000;
// terminals.sort_by(|a, b| {
// a.server_name.cmp(&b.server_name).then(a.name.cmp(&b.name))
// });
#[derive(Default)]
struct TerminalCache(
std::sync::Mutex<
HashMap<String, Arc<tokio::sync::Mutex<TerminalCacheItem>>>,
>,
);
impl TerminalCache {
fn get_or_insert(
&self,
server_id: String,
) -> Arc<tokio::sync::Mutex<TerminalCacheItem>> {
if let Some(cached) =
self.0.lock().unwrap().get(&server_id).cloned()
{
return cached;
}
let to_cache =
Arc::new(tokio::sync::Mutex::new(TerminalCacheItem::default()));
self.0.lock().unwrap().insert(server_id, to_cache.clone());
to_cache
}
}
fn terminals_cache() -> &'static TerminalCache {
static TERMINALS: OnceLock<TerminalCache> = OnceLock::new();
TERMINALS.get_or_init(Default::default)
}
impl Resolve<ReadArgs> for ListTerminals {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListTerminalsResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let cache = terminals_cache().get_or_insert(server.id.clone());
let mut cache = cache.lock().await;
if self.fresh || komodo_timestamp() > cache.ttl {
cache.list = periphery_client(&server)?
.request(periphery_client::api::terminal::ListTerminals {})
.await
.context("Failed to get fresh terminal list")?;
cache.ttl = komodo_timestamp() + TERMINAL_CACHE_TIMEOUT;
Ok(cache.list.clone())
} else {
Ok(cache.list.clone())
}
}
}
// Ok(terminals)
// }
// }

View File

@@ -4,7 +4,6 @@ use anyhow::{Context, anyhow};
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,
docker::container::Container,
permission::PermissionLevel,
server::{Server, ServerState},
@@ -18,15 +17,11 @@ use periphery_client::api::{
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
resource,
stack::get_stack_and_server,
state::{
action_states, github_client, server_status_cache,
stack_status_cache,
},
state::{action_states, server_status_cache, stack_status_cache},
};
use super::ReadArgs;
@@ -89,7 +84,8 @@ impl Resolve<ReadArgs> for GetStackLog {
true,
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(GetComposeLog {
project: stack.project_name(false),
services,
@@ -122,7 +118,8 @@ impl Resolve<ReadArgs> for SearchStackLog {
true,
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(GetComposeLogSearch {
project: stack.project_name(false),
services,
@@ -184,7 +181,8 @@ impl Resolve<ReadArgs> for InspectStackContainer {
"No service found matching '{service}'. Was the stack last deployed manually?"
).into());
};
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectContainer { name })
.await?;
Ok(res)
@@ -363,7 +361,11 @@ impl Resolve<ReadArgs> for GetStacksSummary {
StackState::Running => res.running += 1,
StackState::Stopped | StackState::Paused => res.stopped += 1,
StackState::Down => res.down += 1,
StackState::Unknown => res.unknown += 1,
StackState::Unknown => {
if !stack.template {
res.unknown += 1
}
}
_ => res.unhealthy += 1,
}
}
@@ -371,91 +373,3 @@ impl Resolve<ReadArgs> for GetStacksSummary {
Ok(res)
}
}
impl Resolve<ReadArgs> for GetStackWebhooksEnabled {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetStackWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetStackWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
deploy_enabled: false,
});
};
let stack = get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Read.into(),
)
.await?;
if stack.config.git_provider != "github.com"
|| stack.config.repo.is_empty()
{
return Ok(GetStackWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
deploy_enabled: false,
});
}
let mut split = stack.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetStackWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
deploy_enabled: false,
});
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
let refresh_url =
format!("{host}/listener/github/stack/{}/refresh", stack.id);
let deploy_url =
format!("{host}/listener/github/stack/{}/deploy", stack.id);
let mut refresh_enabled = false;
let mut deploy_enabled = false;
for webhook in webhooks {
if webhook.active && webhook.config.url == refresh_url {
refresh_enabled = true
}
if webhook.active && webhook.config.url == deploy_url {
deploy_enabled = true
}
}
Ok(GetStackWebhooksEnabledResponse {
managed: true,
refresh_enabled,
deploy_enabled,
})
}
}

View File

@@ -2,7 +2,6 @@ use anyhow::Context;
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,
permission::PermissionLevel,
sync::{
ResourceSync, ResourceSyncActionState, ResourceSyncListItem,
@@ -12,11 +11,8 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{action_states, github_client},
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::action_states,
};
use super::ReadArgs;
@@ -93,7 +89,7 @@ impl Resolve<ReadArgs> for GetResourceSyncActionState {
)
.await?;
let action_state = action_states()
.resource_sync
.sync
.get(&sync.id)
.await
.unwrap_or_default()
@@ -138,7 +134,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
continue;
}
if action_states
.resource_sync
.sync
.get(&resource_sync.id)
.await
.unwrap_or_default()
@@ -154,91 +150,3 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
Ok(res)
}
}
impl Resolve<ReadArgs> for GetSyncWebhooksEnabled {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetSyncWebhooksEnabledResponse> {
let Some(github) = github_client() else {
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
};
let sync = get_check_permissions::<ResourceSync>(
&self.sync,
user,
PermissionLevel::Read.into(),
)
.await?;
if sync.config.git_provider != "github.com"
|| sync.config.repo.is_empty()
{
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
}
let mut split = sync.config.repo.split('/');
let owner = split.next().context("Sync repo has no owner")?;
let Some(github) = github.get(owner) else {
return Ok(GetSyncWebhooksEnabledResponse {
managed: false,
refresh_enabled: false,
sync_enabled: false,
});
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
let refresh_url =
format!("{host}/listener/github/sync/{}/refresh", sync.id);
let sync_url =
format!("{host}/listener/github/sync/{}/sync", sync.id);
let mut refresh_enabled = false;
let mut sync_enabled = false;
for webhook in webhooks {
if webhook.active && webhook.config.url == refresh_url {
refresh_enabled = true
}
if webhook.active && webhook.config.url == sync_url {
sync_enabled = true
}
}
Ok(GetSyncWebhooksEnabledResponse {
managed: true,
refresh_enabled,
sync_enabled,
})
}
}

View File

@@ -1,10 +1,12 @@
use anyhow::Context;
use database::mongo_indexed::doc;
use database::mungos::{
find::find_collect, mongodb::options::FindOptions,
};
use komodo_client::{
api::read::{GetTag, ListTags},
entities::tag::Tag,
};
use mongo_indexed::doc;
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{helpers::query::get_tag, state::db_client};

View File

@@ -0,0 +1,247 @@
use anyhow::Context as _;
use futures_util::{
FutureExt, StreamExt as _, stream::FuturesUnordered,
};
use komodo_client::{
api::read::{ListTerminals, ListTerminalsResponse},
entities::{
deployment::Deployment,
permission::PermissionLevel,
server::Server,
stack::Stack,
terminal::{Terminal, TerminalTarget},
user::User,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::periphery_client, permission::get_check_permissions,
resource,
};
use super::ReadArgs;
//
impl Resolve<ReadArgs> for ListTerminals {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListTerminalsResponse> {
let Some(target) = self.target else {
return list_all_terminals_for_user(user, self.use_names).await;
};
match &target {
TerminalTarget::Server { server } => {
let server = server
.as_ref()
.context("Must provide 'target.params.server'")
.status_code(StatusCode::BAD_REQUEST)?;
let server = get_check_permissions::<Server>(
server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
list_terminals_on_server(&server, Some(target)).await
}
TerminalTarget::Container { server, .. } => {
let server = get_check_permissions::<Server>(
server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
list_terminals_on_server(&server, Some(target)).await
}
TerminalTarget::Stack { stack, .. } => {
let server = get_check_permissions::<Stack>(
stack,
user,
PermissionLevel::Read.terminal(),
)
.await?
.config
.server_id;
let server = resource::get::<Server>(&server).await?;
list_terminals_on_server(&server, Some(target)).await
}
TerminalTarget::Deployment { deployment } => {
let server = get_check_permissions::<Deployment>(
deployment,
user,
PermissionLevel::Read.terminal(),
)
.await?
.config
.server_id;
let server = resource::get::<Server>(&server).await?;
list_terminals_on_server(&server, Some(target)).await
}
}
}
}
async fn list_all_terminals_for_user(
user: &User,
use_names: bool,
) -> serror::Result<Vec<Terminal>> {
let (mut servers, stacks, deployments) = tokio::try_join!(
resource::list_full_for_user::<Server>(
Default::default(),
user,
PermissionLevel::Read.terminal(),
&[]
)
.map(|res| res.map(|servers| servers
.into_iter()
// true denotes user actually has permission on this Server.
.map(|server| (server, true))
.collect::<Vec<_>>())),
resource::list_full_for_user::<Stack>(
Default::default(),
user,
PermissionLevel::Read.terminal(),
&[]
),
resource::list_full_for_user::<Deployment>(
Default::default(),
user,
PermissionLevel::Read.terminal(),
&[]
),
)?;
// Ensure any missing servers are present to query
for stack in &stacks {
if !stack.config.server_id.is_empty()
&& !servers
.iter()
.any(|(server, _)| server.id == stack.config.server_id)
{
let server =
resource::get::<Server>(&stack.config.server_id).await?;
servers.push((server, false));
}
}
for deployment in &deployments {
if !deployment.config.server_id.is_empty()
&& !servers
.iter()
.any(|(server, _)| server.id == deployment.config.server_id)
{
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
servers.push((server, false));
}
}
let mut terminals = servers
.into_iter()
.map(|(server, server_permission)| async move {
(
list_terminals_on_server(&server, None).await,
(server.id, server.name, server_permission),
)
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.flat_map(
|(terminals, (server_id, server_name, server_permission))| {
let terminals = terminals
.ok()?
.into_iter()
.filter_map(|mut terminal| {
// Only keep terminals with appropriate perms.
match terminal.target.clone() {
TerminalTarget::Server { .. } => server_permission
.then(|| {
terminal.target = TerminalTarget::Server {
server: Some(if use_names {
server_name.clone()
} else {
server_id.clone()
}),
};
terminal
}),
TerminalTarget::Container { container, .. } => {
server_permission.then(|| {
terminal.target = TerminalTarget::Container {
server: if use_names {
server_name.clone()
} else {
server_id.clone()
},
container,
};
terminal
})
}
TerminalTarget::Stack { stack, service } => {
stacks.iter().find(|s| s.id == stack).map(|s| {
terminal.target = TerminalTarget::Stack {
stack: if use_names {
s.name.clone()
} else {
s.id.clone()
},
service,
};
terminal
})
}
TerminalTarget::Deployment { deployment } => {
deployments.iter().find(|d| d.id == deployment).map(
|d| {
terminal.target = TerminalTarget::Deployment {
deployment: if use_names {
d.name.clone()
} else {
d.id.clone()
},
};
terminal
},
)
}
}
})
.collect::<Vec<_>>();
Some(terminals)
},
)
.flatten()
.collect::<Vec<_>>();
terminals.sort_by(|a, b| {
a.target.cmp(&b.target).then(a.name.cmp(&b.name))
});
Ok(terminals)
}
async fn list_terminals_on_server(
server: &Server,
target: Option<TerminalTarget>,
) -> serror::Result<Vec<Terminal>> {
periphery_client(server)
.await?
.request(periphery_client::api::terminal::ListTerminals {
target,
})
.await
.with_context(|| {
format!(
"Failed to get Terminal list from Server {} ({})",
server.name, server.id
)
})
.map_err(Into::into)
}

View File

@@ -1,4 +1,5 @@
use anyhow::Context;
use database::mungos::find::find_collect;
use komodo_client::{
api::read::{
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
@@ -13,7 +14,6 @@ use komodo_client::{
sync::ResourceSync, toml::ResourcesToml, user::User,
},
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{

View File

@@ -1,6 +1,11 @@
use std::collections::HashMap;
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use komodo_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
@@ -20,16 +25,11 @@ use komodo_client::{
user::User,
},
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::{
config::core_config,
permission::{get_check_permissions, get_resource_ids_for_user},
permission::{get_check_permissions, list_resource_ids_for_user},
state::db_client,
};
@@ -45,99 +45,137 @@ impl Resolve<ReadArgs> for ListUpdates {
let query = if user.admin || core_config().transparent_mode {
self.query
} else {
let server_query = get_resource_ids_for_user::<Server>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
get_resource_ids_for_user::<Deployment>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = get_resource_ids_for_user::<Stack>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = get_resource_ids_for_user::<Build>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = get_resource_ids_for_user::<Repo>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
get_resource_ids_for_user::<Procedure>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = get_resource_ids_for_user::<Action>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = get_resource_ids_for_user::<Builder>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = get_resource_ids_for_user::<Alerter>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query = get_resource_ids_for_user::<
ResourceSync,
>(user)
let server_query = list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = list_resource_ids_for_user::<Build>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = list_resource_ids_for_user::<Repo>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query = list_resource_ids_for_user::<Procedure>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = list_resource_ids_for_user::<Action>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = list_resource_ids_for_user::<Builder>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = list_resource_ids_for_user::<Alerter>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query =
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = self.query.unwrap_or_default();
query.extend(doc! {

View File

@@ -1,4 +1,9 @@
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use komodo_client::{
api::read::{
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
@@ -8,11 +13,6 @@ use komodo_client::{
},
entities::user::{UserConfig, admin_service_user},
};
use mungos::{
by_id::find_one_by_id,
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::{helpers::query::get_user, state::db_client};

View File

@@ -1,14 +1,14 @@
use std::str::FromStr;
use anyhow::Context;
use komodo_client::api::read::*;
use mungos::{
use database::mungos::{
find::find_collect,
mongodb::{
bson::{Document, doc, oid::ObjectId},
options::FindOptions,
},
};
use komodo_client::api::read::*;
use resolver_api::Resolve;
use crate::state::db_client;

View File

@@ -1,7 +1,9 @@
use anyhow::Context;
use database::mongo_indexed::doc;
use database::mungos::{
find::find_collect, mongodb::options::FindOptions,
};
use komodo_client::api::read::*;
use mongo_indexed::doc;
use mungos::{find::find_collect, mongodb::options::FindOptions};
use resolver_api::Resolve;
use crate::{helpers::query::get_variable, state::db_client};

View File

@@ -1,27 +1,15 @@
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{
api::terminal::*,
entities::{
deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, user::User,
},
};
use komodo_client::{api::terminal::*, entities::user::User};
use serror::Json;
use uuid::Uuid;
use crate::{
auth::auth_request, helpers::periphery_client,
permission::get_check_permissions, resource::get,
state::stack_status_cache,
auth::auth_request, helpers::terminal::setup_target_for_user,
};
pub fn router() -> Router {
Router::new()
.route("/execute", post(execute_terminal))
.route("/execute/container", post(execute_container_exec))
.route("/execute/deployment", post(execute_deployment_exec))
.route("/execute/stack", post(execute_stack_exec))
.layer(middleware::from_fn(auth_request))
}
@@ -29,271 +17,34 @@ pub fn router() -> Router {
// ExecuteTerminal
// =================
async fn execute_terminal(
Extension(user): Extension<User>,
Json(request): Json<ExecuteTerminalBody>,
) -> serror::Result<axum::body::Body> {
execute_terminal_inner(Uuid::new_v4(), request, user).await
}
#[instrument(
name = "ExecuteTerminal",
skip(user),
skip_all,
fields(
user_id = user.id,
operator = user.id,
target,
terminal,
init = format!("{init:?}")
)
)]
async fn execute_terminal_inner(
req_id: Uuid,
ExecuteTerminalBody {
server,
async fn execute_terminal(
Extension(user): Extension<User>,
Json(ExecuteTerminalBody {
target,
terminal,
command,
}: ExecuteTerminalBody,
user: User,
init,
}): Json<ExecuteTerminalBody>,
) -> serror::Result<axum::body::Body> {
info!("/terminal/execute request | user: {}", user.username);
let res = async {
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let (target, terminal, periphery) =
setup_target_for_user(target, terminal, init, &user).await?;
let periphery = periphery_client(&server)?;
let stream = periphery
.execute_terminal(target, terminal, command)
.await
.context("Failed to execute command on Terminal")?;
let stream = periphery
.execute_terminal(terminal, command)
.await
.context("Failed to execute command on periphery")?;
anyhow::Ok(stream)
}
.await;
let stream = match res {
Ok(stream) => stream,
Err(e) => {
warn!("/terminal/execute request {req_id} error: {e:#}");
return Err(e.into());
}
};
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
}
// ======================
// ExecuteContainerExec
// ======================
async fn execute_container_exec(
Extension(user): Extension<User>,
Json(request): Json<ExecuteContainerExecBody>,
) -> serror::Result<axum::body::Body> {
execute_container_exec_inner(Uuid::new_v4(), request, user).await
}
#[instrument(
  name = "ExecuteContainerExec",
  skip(user),
  fields(
    user_id = user.id,
  )
)]
/// Inner handler for `/terminal/execute/container`: checks the user's
/// terminal permission on the server, then streams the exec output
/// from periphery back to the caller line by line.
async fn execute_container_exec_inner(
  req_id: Uuid,
  ExecuteContainerExecBody {
    server,
    container,
    shell,
    command,
  }: ExecuteContainerExecBody,
  user: User,
) -> serror::Result<axum::body::Body> {
  info!(
    "/terminal/execute/container request | user: {}",
    user.username
  );
  // Run permission check + periphery call in one fallible scope so a
  // single error path below can log and convert the failure.
  let result = async {
    let server = get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read.terminal(),
    )
    .await?;
    let periphery = periphery_client(&server)?;
    let exec_stream = periphery
      .execute_container_exec(container, shell, command)
      .await
      .context(
        "Failed to execute container exec command on periphery",
      )?;
    anyhow::Ok(exec_stream)
  }
  .await;
  match result {
    Ok(exec_stream) => Ok(axum::body::Body::from_stream(
      exec_stream.into_line_stream(),
    )),
    Err(e) => {
      warn!(
        "/terminal/execute/container request {req_id} error: {e:#}"
      );
      Err(e.into())
    }
  }
}
// =======================
// ExecuteDeploymentExec
// =======================
/// Axum route handler for deployment exec: assigns a fresh request id
/// and delegates to the instrumented inner handler.
async fn execute_deployment_exec(
  Extension(user): Extension<User>,
  Json(body): Json<ExecuteDeploymentExecBody>,
) -> serror::Result<axum::body::Body> {
  let req_id = Uuid::new_v4();
  execute_deployment_exec_inner(req_id, body, user).await
}
#[instrument(
  name = "ExecuteDeploymentExec",
  skip(user),
  fields(
    user_id = user.id,
  )
)]
/// Inner handler for `/terminal/execute/deployment`: checks the user's
/// terminal permission on the deployment, resolves its server, then
/// streams the container exec output back line by line. The container
/// name is the deployment name by convention.
async fn execute_deployment_exec_inner(
  req_id: Uuid,
  ExecuteDeploymentExecBody {
    deployment,
    shell,
    command,
  }: ExecuteDeploymentExecBody,
  user: User,
) -> serror::Result<axum::body::Body> {
  info!(
    "/terminal/execute/deployment request | user: {}",
    user.username
  );
  // Single fallible scope so the error path below logs once with the
  // request id and converts the failure uniformly.
  let result = async {
    let deployment = get_check_permissions::<Deployment>(
      &deployment,
      &user,
      PermissionLevel::Read.terminal(),
    )
    .await?;
    let server = get::<Server>(&deployment.config.server_id).await?;
    let periphery = periphery_client(&server)?;
    let exec_stream = periphery
      .execute_container_exec(deployment.name, shell, command)
      .await
      .context(
        "Failed to execute container exec command on periphery",
      )?;
    anyhow::Ok(exec_stream)
  }
  .await;
  match result {
    Ok(exec_stream) => Ok(axum::body::Body::from_stream(
      exec_stream.into_line_stream(),
    )),
    Err(e) => {
      warn!(
        "/terminal/execute/deployment request {req_id} error: {e:#}"
      );
      Err(e.into())
    }
  }
}
// ==================
// ExecuteStackExec
// ==================
/// Axum route handler for stack exec: assigns a fresh request id and
/// delegates to the instrumented inner handler.
async fn execute_stack_exec(
  Extension(user): Extension<User>,
  Json(body): Json<ExecuteStackExecBody>,
) -> serror::Result<axum::body::Body> {
  let req_id = Uuid::new_v4();
  execute_stack_exec_inner(req_id, body, user).await
}
#[instrument(
  name = "ExecuteStackExec",
  skip(user),
  fields(
    user_id = user.id,
  )
)]
/// Inner handler for `/terminal/execute/stack`: checks the user's
/// terminal permission on the stack, maps the requested compose
/// service to its running container via the cached stack status, then
/// streams the container exec output back line by line.
async fn execute_stack_exec_inner(
  req_id: Uuid,
  ExecuteStackExecBody {
    stack,
    service,
    shell,
    command,
  }: ExecuteStackExecBody,
  user: User,
) -> serror::Result<axum::body::Body> {
  info!("/terminal/execute/stack request | user: {}", user.username);
  let res = async {
    let stack = get_check_permissions::<Stack>(
      &stack,
      &user,
      PermissionLevel::Read.terminal(),
    )
    .await?;
    let server = get::<Server>(&stack.config.server_id).await?;
    // Resolve the running container name for the requested service
    // from the cached stack status; errors out if the service has no
    // known container (e.g. not deployed).
    let container = stack_status_cache()
      .get(&stack.id)
      .await
      .context("could not get stack status")?
      .curr
      .services
      .iter()
      .find(|s| s.service == service)
      .context("could not find service")?
      .container
      .as_ref()
      .context("could not find service container")?
      .name
      .clone();
    let periphery = periphery_client(&server)?;
    let stream = periphery
      .execute_container_exec(container, shell, command)
      .await
      .context(
        "Failed to execute container exec command on periphery",
      )?;
    anyhow::Ok(stream)
  }
  .await;
  let stream = match res {
    Ok(stream) => stream,
    Err(e) => {
      warn!("/terminal/execute/stack request {req_id} error: {e:#}");
      return Err(e.into());
    }
  };
  // Fix: the original ended with two conflicting `Ok(...)` returns
  // (merge/diff residue). Keep the line-streamed form, consistent with
  // the container and deployment exec handlers.
  Ok(axum::body::Body::from_stream(stream.into_line_stream()))
}

View File

@@ -4,13 +4,16 @@ use anyhow::{Context, anyhow};
use axum::{
Extension, Json, Router, extract::Path, middleware, routing::post,
};
use database::mongo_indexed::doc;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_bson,
};
use derive_variants::EnumVariants;
use komodo_client::entities::random_string;
use komodo_client::{
api::user::*,
entities::{api_key::ApiKey, komodo_timestamp, user::User},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
@@ -19,9 +22,7 @@ use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::{query::get_user, random_string},
state::db_client,
auth::auth_request, helpers::query::get_user, state::db_client,
};
use super::Variant;
@@ -64,7 +65,6 @@ async fn variant_handler(
handler(user, Json(req)).await
}
#[instrument(name = "UserHandler", level = "debug", skip(user))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<UserRequest>,
@@ -87,11 +87,6 @@ async fn handler(
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<UserArgs> for PushRecentlyViewed {
#[instrument(
name = "PushRecentlyViewed",
level = "debug",
skip(user)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
@@ -116,7 +111,7 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
update_one_by_id(
&db_client().users,
&user.id,
mungos::update::Update::Set(update),
database::mungos::update::Update::Set(update),
None,
)
.await
@@ -129,11 +124,6 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
}
impl Resolve<UserArgs> for SetLastSeenUpdate {
#[instrument(
name = "SetLastSeenUpdate",
level = "debug",
skip(user)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
@@ -141,7 +131,7 @@ impl Resolve<UserArgs> for SetLastSeenUpdate {
update_one_by_id(
&db_client().users,
&user.id,
mungos::update::Update::Set(doc! {
database::mungos::update::Update::Set(doc! {
"last_update_view": komodo_timestamp()
}),
None,
@@ -156,7 +146,11 @@ const SECRET_LENGTH: usize = 40;
const BCRYPT_COST: u32 = 10;
impl Resolve<UserArgs> for CreateApiKey {
#[instrument(name = "CreateApiKey", level = "debug", skip(user))]
#[instrument(
"CreateApiKey",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
@@ -186,7 +180,11 @@ impl Resolve<UserArgs> for CreateApiKey {
}
impl Resolve<UserArgs> for DeleteApiKey {
#[instrument(name = "DeleteApiKey", level = "debug", skip(user))]
#[instrument(
"DeleteApiKey",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,

View File

@@ -11,20 +11,34 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateAction {
#[instrument(name = "CreateAction", skip(user))]
#[instrument(
"CreateAction",
skip_all,
fields(
operator = user.id,
action = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Action> {
Ok(
resource::create::<Action>(&self.name, self.config, user)
.await?,
)
resource::create::<Action>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopyAction {
#[instrument(name = "CopyAction", skip(user))]
#[instrument(
"CopyAction",
skip_all,
fields(
operator = user.id,
action = self.name,
copy_action = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -35,15 +49,21 @@ impl Resolve<WriteArgs> for CopyAction {
PermissionLevel::Write.into(),
)
.await?;
Ok(
resource::create::<Action>(&self.name, config.into(), user)
.await?,
)
resource::create::<Action>(&self.name, config.into(), None, user)
.await
}
}
impl Resolve<WriteArgs> for UpdateAction {
#[instrument(name = "UpdateAction", skip(user))]
#[instrument(
"UpdateAction",
skip_all,
fields(
operator = user.id,
action = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -53,7 +73,15 @@ impl Resolve<WriteArgs> for UpdateAction {
}
impl Resolve<WriteArgs> for RenameAction {
#[instrument(name = "RenameAction", skip(user))]
#[instrument(
"RenameAction",
skip_all,
fields(
operator = user.id,
action = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -63,8 +91,18 @@ impl Resolve<WriteArgs> for RenameAction {
}
impl Resolve<WriteArgs> for DeleteAction {
#[instrument(name = "DeleteAction", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Action> {
Ok(resource::delete::<Action>(&self.id, args).await?)
#[instrument(
"DeleteAction",
skip_all,
fields(
operator = user.id,
action = self.id
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Action> {
Ok(resource::delete::<Action>(&self.id, user).await?)
}
}

View File

@@ -0,0 +1,41 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
use komodo_client::{api::write::CloseAlert, entities::NoData};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{api::write::WriteArgs, state::db_client};
impl Resolve<WriteArgs> for CloseAlert {
  #[instrument(
    "CloseAlert",
    skip_all,
    fields(
      operator = admin.id,
      alert_id = self.id,
    )
  )]
  /// Marks the alert as resolved in the database. Admin only.
  async fn resolve(
    self,
    WriteArgs { user: admin }: &WriteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Guard: non-admin callers are rejected with 403.
    if !admin.admin {
      return Err(
        anyhow!("This call is admin only")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // `?` on ObjectId parse surfaces an invalid id as an error.
    let filter = doc! { "_id": ObjectId::from_str(&self.id)? };
    let update = doc! { "$set": { "resolved": true } };
    db_client()
      .alerts
      .update_one(filter, update)
      .await
      .context("Failed to close Alert on database")?;
    Ok(NoData {})
  }
}

View File

@@ -11,20 +11,34 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateAlerter {
#[instrument(name = "CreateAlerter", skip(user))]
#[instrument(
"CreateAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Alerter> {
Ok(
resource::create::<Alerter>(&self.name, self.config, user)
.await?,
)
resource::create::<Alerter>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopyAlerter {
#[instrument(name = "CopyAlerter", skip(user))]
#[instrument(
"CopyAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.name,
copy_alerter = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -35,25 +49,38 @@ impl Resolve<WriteArgs> for CopyAlerter {
PermissionLevel::Write.into(),
)
.await?;
Ok(
resource::create::<Alerter>(&self.name, config.into(), user)
.await?,
)
resource::create::<Alerter>(&self.name, config.into(), None, user)
.await
}
}
impl Resolve<WriteArgs> for DeleteAlerter {
#[instrument(name = "DeleteAlerter", skip(args))]
#[instrument(
"DeleteAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.id,
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Alerter> {
Ok(resource::delete::<Alerter>(&self.id, args).await?)
Ok(resource::delete::<Alerter>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateAlerter {
#[instrument(name = "UpdateAlerter", skip(user))]
#[instrument(
"UpdateAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.id,
update = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -66,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateAlerter {
}
impl Resolve<WriteArgs> for RenameAlerter {
#[instrument(name = "RenameAlerter", skip(user))]
#[instrument(
"RenameAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -1,64 +1,75 @@
use std::{path::PathBuf, str::FromStr, time::Duration};
use std::{path::PathBuf, time::Duration};
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::to_document;
use database::{
mongo_indexed::doc, mungos::mongodb::bson::oid::ObjectId,
};
use formatting::format_serror;
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
CloneArgs, FileContents, NoData, Operation, all_logs_success,
build::{Build, BuildInfo, PartialBuildConfig},
FileContents, NoData, Operation, RepoExecutionArgs,
all_logs_success,
build::{Build, BuildInfo},
builder::{Builder, BuilderConfig},
config::core::CoreConfig,
permission::PermissionLevel,
repo::Repo,
server::ServerState,
update::Update,
},
};
use mongo_indexed::doc;
use mungos::mongodb::bson::to_document;
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::{
PeripheryClient,
api::build::{
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
},
use periphery_client::api::build::{
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
config::core_config,
connection::PeripheryConnectionArgs,
helpers::{
git_token, periphery_client,
query::get_server_with_state,
update::{add_update, make_update},
},
periphery::PeripheryClient,
permission::get_check_permissions,
resource,
state::{db_client, github_client},
state::db_client,
};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateBuild {
#[instrument(name = "CreateBuild", skip(user))]
#[instrument(
"CreateBuild",
skip_all,
fields(
operator = user.id,
build = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Build> {
Ok(
resource::create::<Build>(&self.name, self.config, user)
.await?,
)
resource::create::<Build>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopyBuild {
#[instrument(name = "CopyBuild", skip(user))]
#[instrument(
"CopyBuild",
skip_all,
fields(
operator = user.id,
build = self.name,
copy_build = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -71,22 +82,38 @@ impl Resolve<WriteArgs> for CopyBuild {
.await?;
// reset version to 0.0.0
config.version = Default::default();
Ok(
resource::create::<Build>(&self.name, config.into(), user)
.await?,
)
resource::create::<Build>(&self.name, config.into(), None, user)
.await
}
}
impl Resolve<WriteArgs> for DeleteBuild {
#[instrument(name = "DeleteBuild", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Build> {
Ok(resource::delete::<Build>(&self.id, args).await?)
#[instrument(
"DeleteBuild",
skip_all,
fields(
operator = user.id,
build = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Build> {
Ok(resource::delete::<Build>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateBuild {
#[instrument(name = "UpdateBuild", skip(user))]
#[instrument(
"UpdateBuild",
skip_all,
fields(
operator = user.id,
build = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -96,7 +123,15 @@ impl Resolve<WriteArgs> for UpdateBuild {
}
impl Resolve<WriteArgs> for RenameBuild {
#[instrument(name = "RenameBuild", skip(user))]
#[instrument(
"RenameBuild",
skip_all,
fields(
operator = user.id,
build = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -106,7 +141,14 @@ impl Resolve<WriteArgs> for RenameBuild {
}
impl Resolve<WriteArgs> for WriteBuildFileContents {
#[instrument(name = "WriteBuildFileContents", skip(args))]
#[instrument(
"WriteBuildFileContents",
skip_all,
fields(
operator = args.user.id,
build = self.build,
)
)]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let build = get_check_permissions::<Build>(
&self.build,
@@ -178,6 +220,7 @@ impl Resolve<WriteArgs> for WriteBuildFileContents {
}
}
#[instrument("WriteDockerfileContentsGit", skip_all)]
async fn write_dockerfile_contents_git(
req: WriteBuildFileContents,
args: &WriteArgs,
@@ -186,7 +229,9 @@ async fn write_dockerfile_contents_git(
) -> serror::Result<Update> {
let WriteBuildFileContents { build: _, contents } = req;
let mut clone_args: CloneArgs = if !build.config.files_on_host
let mut repo_args: RepoExecutionArgs = if !build
.config
.files_on_host
&& !build.config.linked_repo.is_empty()
{
(&crate::resource::get::<Repo>(&build.config.linked_repo).await?)
@@ -194,8 +239,8 @@ async fn write_dockerfile_contents_git(
} else {
(&build).into()
};
let root = clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(root.display().to_string());
let root = repo_args.unique_path(&core_config().repo_directory)?;
repo_args.destination = Some(root.display().to_string());
let build_path = build
.config
@@ -218,11 +263,11 @@ async fn write_dockerfile_contents_git(
})?;
}
let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
let access_token = if let Some(account) = &repo_args.account {
git_token(&repo_args.provider, account, |https| repo_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", repo_args.provider),
)?
} else {
None
@@ -233,7 +278,7 @@ async fn write_dockerfile_contents_git(
if !root.join(".git").exists() {
git::init_folder_as_repo(
&root,
&clone_args,
&repo_args,
access_token.as_deref(),
&mut update.logs,
)
@@ -247,20 +292,18 @@ async fn write_dockerfile_contents_git(
}
}
// Save this for later -- repo_args moved next.
let branch = repo_args.branch.clone();
// Pull latest changes to repo to ensure linear commit history
match git::pull_or_clone(
clone_args,
repo_args,
&core_config().repo_directory,
access_token,
Default::default(),
Default::default(),
Default::default(),
Default::default(),
)
.await
.context("Failed to pull latest changes before commit")
{
Ok(res) => update.logs.extend(res.logs),
Ok((res, _)) => update.logs.extend(res.logs),
Err(e) => {
update.push_error_log("Pull Repo", format_serror(&e.into()));
update.finalize();
@@ -275,8 +318,9 @@ async fn write_dockerfile_contents_git(
return Ok(update);
}
if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
if let Err(e) = secret_file::write_async(&full_path, &contents)
.await
.with_context(|| {
format!("Failed to write dockerfile contents to {full_path:?}")
})
{
@@ -300,7 +344,7 @@ async fn write_dockerfile_contents_git(
&format!("{}: Commit Dockerfile", args.user.username),
&root,
&build_path.join(&dockerfile_path),
&build.config.branch,
&branch,
)
.await;
@@ -323,11 +367,6 @@ async fn write_dockerfile_contents_git(
}
impl Resolve<WriteArgs> for RefreshBuildCache {
#[instrument(
name = "RefreshBuildCache",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -351,23 +390,28 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
None
};
let (
remote_path,
remote_contents,
remote_error,
latest_hash,
latest_message,
) = if build.config.files_on_host {
let RemoteDockerfileContents {
path,
contents,
error,
hash,
message,
} = if build.config.files_on_host {
// =============
// FILES ON HOST
// =============
match get_on_host_dockerfile(&build).await {
Ok(FileContents { path, contents }) => {
(Some(path), Some(contents), None, None, None)
}
Err(e) => {
(None, None, Some(format_serror(&e.into())), None, None)
RemoteDockerfileContents {
path: Some(path),
contents: Some(contents),
..Default::default()
}
}
Err(e) => RemoteDockerfileContents {
error: Some(format_serror(&e.into())),
..Default::default()
},
}
} else if let Some(repo) = &repo {
let Some(res) = get_git_remote(&build, repo.into()).await?
@@ -387,7 +431,7 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
// =============
// UI BASED FILE
// =============
(None, None, None, None, None)
RemoteDockerfileContents::default()
};
let info = BuildInfo {
@@ -395,11 +439,11 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
built_hash: build.info.built_hash,
built_message: build.info.built_message,
built_contents: build.info.built_contents,
remote_path,
remote_contents,
remote_error,
latest_hash,
latest_message,
remote_path: path,
remote_contents: contents,
remote_error: error,
latest_hash: hash,
latest_message: message,
};
let info = to_document(&info)
@@ -434,13 +478,26 @@ async fn get_on_host_periphery(
Err(anyhow!("Files on host doesn't work with AWS builder"))
}
BuilderConfig::Url(config) => {
// TODO: Ensure connection is actually established.
// Builder id no good because it may be active for multiple connections.
let periphery = PeripheryClient::new(
config.address,
config.passkey,
Duration::from_secs(3),
);
periphery.health_check().await?;
Ok(periphery)
PeripheryConnectionArgs::from_url_builder(
&ObjectId::new().to_hex(),
&config,
),
config.insecure_tls,
)
.await?;
// Poll for connection to be estalished
let mut err = None;
for _ in 0..10 {
tokio::time::sleep(Duration::from_secs(1)).await;
match periphery.health_check().await {
Ok(_) => return Ok(periphery),
Err(e) => err = Some(e),
};
}
Err(err.context("Missing error")?)
}
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
@@ -455,7 +512,7 @@ async fn get_on_host_periphery(
"Builder server is disabled or not reachable"
));
};
periphery_client(&server)
periphery_client(&server).await
}
}
}
@@ -477,16 +534,8 @@ async fn get_on_host_dockerfile(
async fn get_git_remote(
build: &Build,
mut clone_args: CloneArgs,
) -> anyhow::Result<
Option<(
Option<String>,
Option<String>,
Option<String>,
Option<String>,
Option<String>,
)>,
> {
mut clone_args: RepoExecutionArgs,
) -> anyhow::Result<Option<RemoteDockerfileContents>> {
if clone_args.provider.is_empty() {
// Nothing to do here
return Ok(None);
@@ -494,9 +543,6 @@ async fn get_git_remote(
let config = core_config();
let repo_path = clone_args.unique_path(&config.repo_directory)?;
clone_args.destination = Some(repo_path.display().to_string());
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
let access_token = if let Some(username) = &clone_args.account {
git_token(&clone_args.provider, username, |https| {
@@ -510,20 +556,25 @@ async fn get_git_remote(
None
};
let GitRes { hash, message, .. } = git::pull_or_clone(
let (res, _) = git::pull_or_clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.context("failed to clone build repo")?;
.context("Failed to clone Build repo")?;
let relative_path = PathBuf::from_str(&build.config.build_path)
.context("Invalid build path")?
// Ensure clone / pull successful,
// propogate error log -> 'errored' and return.
if let Some(failure) = res.logs.iter().find(|log| !log.success) {
return Ok(Some(RemoteDockerfileContents {
path: Some(format!("Failed at: {}", failure.stage)),
error: Some(failure.combined()),
..Default::default()
}));
}
let relative_path = PathBuf::from(&build.config.build_path)
.join(&build.config.dockerfile_path);
let full_path = repo_path.join(&relative_path);
@@ -534,209 +585,20 @@ async fn get_git_remote(
Ok(contents) => (Some(contents), None),
Err(e) => (None, Some(format_serror(&e.into()))),
};
Ok(Some((
Some(relative_path.display().to_string()),
Ok(Some(RemoteDockerfileContents {
path: Some(relative_path.display().to_string()),
contents,
error,
hash,
message,
)))
hash: res.commit_hash,
message: res.commit_message,
}))
}
impl Resolve<WriteArgs> for CreateBuildWebhook {
#[instrument(name = "CreateBuildWebhook", skip(args))]
async fn resolve(
self,
args: &WriteArgs,
) -> serror::Result<CreateBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(
anyhow!(
"github_webhook_app is not configured in core config toml"
)
.into(),
);
};
let WriteArgs { user } = args;
let build = get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Write.into(),
)
.await?;
if build.config.repo.is_empty() {
return Err(
anyhow!("No repo configured, can't create webhook").into(),
);
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(
anyhow!("Cannot manage repo webhooks under owner {owner}")
.into(),
);
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let webhook_secret = if build.config.webhook_secret.is_empty() {
webhook_secret
} else {
&build.config.webhook_secret
};
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo, &request)
.await
.context("failed to create webhook")?;
if !build.config.webhook_enabled {
UpdateBuild {
id: build.id,
config: PartialBuildConfig {
webhook_enabled: Some(true),
..Default::default()
},
}
.resolve(args)
.await
.map_err(|e| e.error)
.context("failed to update build to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<WriteArgs> for DeleteBuildWebhook {
#[instrument(name = "DeleteBuildWebhook", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteBuildWebhookResponse> {
let Some(github) = github_client() else {
return Err(
anyhow!(
"github_webhook_app is not configured in core config toml"
)
.into(),
);
};
let build = get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Write.into(),
)
.await?;
if build.config.git_provider != "github.com" {
return Err(
anyhow!("Can only manage github.com repo webhooks").into(),
);
}
if build.config.repo.is_empty() {
return Err(
anyhow!("No repo configured, can't delete webhook").into(),
);
}
let mut split = build.config.repo.split('/');
let owner = split.next().context("Build repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(
anyhow!("Cannot manage repo webhooks under owner {owner}")
.into(),
);
};
let repo =
split.next().context("Build repo has no repo after the /")?;
let github_repos = github.repos();
let webhooks = github_repos
.list_all_webhooks(owner, repo)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
let url = format!("{host}/listener/github/build/{}", build.id);
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
/// Dockerfile contents resolved from a remote source (files-on-host or
/// a git repo), plus any resolution error and, when available from git,
/// the latest commit info. `Default` yields the all-`None` "UI based
/// file" case.
#[derive(Default)]
pub struct RemoteDockerfileContents {
  /// Path of the dockerfile relative to the repo / host root, if resolved.
  pub path: Option<String>,
  /// The dockerfile text, when it could be read.
  pub contents: Option<String>,
  /// Serialized error when resolution or reading failed.
  pub error: Option<String>,
  /// Latest commit hash — git-sourced case only; `None` for files on host.
  pub hash: Option<String>,
  /// Latest commit message — git-sourced case only; `None` for files on host.
  pub message: Option<String>,
}

View File

@@ -11,20 +11,34 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateBuilder {
#[instrument(name = "CreateBuilder", skip(user))]
#[instrument(
"CreateBuilder",
skip_all,
fields(
operator = user.id,
builder = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Builder> {
Ok(
resource::create::<Builder>(&self.name, self.config, user)
.await?,
)
resource::create::<Builder>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopyBuilder {
#[instrument(name = "CopyBuilder", skip(user))]
#[instrument(
"CopyBuilder",
skip_all,
fields(
operator = user.id,
builder = self.name,
copy_builder = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -35,25 +49,38 @@ impl Resolve<WriteArgs> for CopyBuilder {
PermissionLevel::Write.into(),
)
.await?;
Ok(
resource::create::<Builder>(&self.name, config.into(), user)
.await?,
)
resource::create::<Builder>(&self.name, config.into(), None, user)
.await
}
}
impl Resolve<WriteArgs> for DeleteBuilder {
#[instrument(name = "DeleteBuilder", skip(args))]
#[instrument(
"DeleteBuilder",
skip_all,
fields(
operator = user.id,
builder = self.id,
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Builder> {
Ok(resource::delete::<Builder>(&self.id, args).await?)
Ok(resource::delete::<Builder>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateBuilder {
#[instrument(name = "UpdateBuilder", skip(user))]
#[instrument(
"UpdateBuilder",
skip_all,
fields(
operator = user.id,
builder = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -66,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateBuilder {
}
impl Resolve<WriteArgs> for RenameBuilder {
#[instrument(name = "RenameBuilder", skip(user))]
#[instrument(
"RenameBuilder",
skip_all,
fields(
operator = user.id,
builder = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -1,4 +1,5 @@
use anyhow::{Context, anyhow};
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use komodo_client::{
api::write::*,
entities::{
@@ -15,7 +16,6 @@ use komodo_client::{
update::Update,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api::{self, container::InspectContainer};
use resolver_api::Resolve;
@@ -33,20 +33,39 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateDeployment {
#[instrument(name = "CreateDeployment", skip(user))]
#[instrument(
"CreateDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
Ok(
resource::create::<Deployment>(&self.name, self.config, user)
.await?,
resource::create::<Deployment>(
&self.name,
self.config,
None,
user,
)
.await
}
}
impl Resolve<WriteArgs> for CopyDeployment {
#[instrument(name = "CopyDeployment", skip(user))]
#[instrument(
"CopyDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.name,
copy_deployment = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -58,15 +77,26 @@ impl Resolve<WriteArgs> for CopyDeployment {
PermissionLevel::Read.into(),
)
.await?;
Ok(
resource::create::<Deployment>(&self.name, config.into(), user)
.await?,
resource::create::<Deployment>(
&self.name,
config.into(),
None,
user,
)
.await
}
}
impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
#[instrument(name = "CreateDeploymentFromContainer", skip(user))]
#[instrument(
"CreateDeploymentFromContainer",
skip_all,
fields(
operator = user.id,
server = self.server,
deployment = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -89,7 +119,8 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
.into(),
);
}
let container = periphery_client(&server)?
let container = periphery_client(&server)
.await?
.request(InspectContainer {
name: self.name.clone(),
})
@@ -153,25 +184,38 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
});
}
Ok(
resource::create::<Deployment>(&self.name, config, user)
.await?,
)
resource::create::<Deployment>(&self.name, config, None, user)
.await
}
}
impl Resolve<WriteArgs> for DeleteDeployment {
#[instrument(name = "DeleteDeployment", skip(args))]
#[instrument(
"DeleteDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.id
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
Ok(resource::delete::<Deployment>(&self.id, args).await?)
Ok(resource::delete::<Deployment>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateDeployment {
#[instrument(name = "UpdateDeployment", skip(user))]
#[instrument(
"UpdateDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -184,7 +228,15 @@ impl Resolve<WriteArgs> for UpdateDeployment {
}
impl Resolve<WriteArgs> for RenameDeployment {
#[instrument(name = "RenameDeployment", skip(user))]
#[instrument(
"RenameDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -227,7 +279,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
update_one_by_id(
&db_client().deployments,
&deployment.id,
mungos::update::Update::Set(
database::mungos::update::Update::Set(
doc! { "name": &name, "updated_at": komodo_timestamp() },
),
None,
@@ -238,7 +290,8 @@ impl Resolve<WriteArgs> for RenameDeployment {
if container_state != DeploymentState::NotDeployed {
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
let log = periphery_client(&server)?
let log = periphery_client(&server)
.await?
.request(api::container::RenameContainer {
curr_name: deployment.name.clone(),
new_name: name.clone(),

View File

@@ -1,114 +0,0 @@
use anyhow::anyhow;
use komodo_client::{
api::write::{UpdateDescription, UpdateDescriptionResponse},
entities::{
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
},
};
use resolver_api::Resolve;
use crate::resource;
use super::WriteArgs;
impl Resolve<WriteArgs> for UpdateDescription {
#[instrument(name = "UpdateDescription", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateDescriptionResponse> {
match self.target {
ResourceTarget::System(_) => {
return Err(
anyhow!(
"cannot update description of System resource target"
)
.into(),
);
}
ResourceTarget::Server(id) => {
resource::update_description::<Server>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Deployment(id) => {
resource::update_description::<Deployment>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Build(id) => {
resource::update_description::<Build>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Repo(id) => {
resource::update_description::<Repo>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Builder(id) => {
resource::update_description::<Builder>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Alerter(id) => {
resource::update_description::<Alerter>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Procedure(id) => {
resource::update_description::<Procedure>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Action(id) => {
resource::update_description::<Action>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
resource::update_description::<ResourceSync>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::Stack(id) => {
resource::update_description::<Stack>(
&id,
&self.description,
user,
)
.await?;
}
}
Ok(UpdateDescriptionResponse {})
}
}

View File

@@ -1,5 +1,3 @@
use std::time::Instant;
use anyhow::Context;
use axum::{
Extension, Router, extract::Path, middleware, routing::post,
@@ -11,6 +9,7 @@ use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use strum::Display;
use typeshare::typeshare;
use uuid::Uuid;
@@ -19,20 +18,23 @@ use crate::auth::auth_request;
use super::Variant;
mod action;
mod alert;
mod alerter;
mod build;
mod builder;
mod deployment;
mod description;
mod onboarding_key;
mod permissions;
mod procedure;
mod provider;
mod repo;
mod resource;
mod server;
mod service_user;
mod stack;
mod sync;
mod tag;
mod terminal;
mod user;
mod user_group;
mod variable;
@@ -45,13 +47,14 @@ pub struct WriteArgs {
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[variant_derive(Debug)]
#[variant_derive(Debug, Display)]
#[args(WriteArgs)]
#[response(Response)]
#[error(serror::Error)]
#[serde(tag = "type", content = "params")]
pub enum WriteRequest {
// ==== USER ====
CreateLocalUser(CreateLocalUser),
UpdateUserUsername(UpdateUserUsername),
UpdateUserPassword(UpdateUserPassword),
DeleteUser(DeleteUser),
@@ -77,18 +80,18 @@ pub enum WriteRequest {
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
// ==== DESCRIPTION ====
UpdateDescription(UpdateDescription),
// ==== RESOURCE ====
UpdateResourceMeta(UpdateResourceMeta),
// ==== SERVER ====
CreateServer(CreateServer),
CopyServer(CopyServer),
DeleteServer(DeleteServer),
UpdateServer(UpdateServer),
RenameServer(RenameServer),
CreateNetwork(CreateNetwork),
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
UpdateServerPublicKey(UpdateServerPublicKey),
RotateServerKeys(RotateServerKeys),
// ==== STACK ====
CreateStack(CreateStack),
@@ -98,8 +101,6 @@ pub enum WriteRequest {
RenameStack(RenameStack),
WriteStackFileContents(WriteStackFileContents),
RefreshStackCache(RefreshStackCache),
CreateStackWebhook(CreateStackWebhook),
DeleteStackWebhook(DeleteStackWebhook),
// ==== DEPLOYMENT ====
CreateDeployment(CreateDeployment),
@@ -117,8 +118,6 @@ pub enum WriteRequest {
RenameBuild(RenameBuild),
WriteBuildFileContents(WriteBuildFileContents),
RefreshBuildCache(RefreshBuildCache),
CreateBuildWebhook(CreateBuildWebhook),
DeleteBuildWebhook(DeleteBuildWebhook),
// ==== BUILDER ====
CreateBuilder(CreateBuilder),
@@ -134,8 +133,6 @@ pub enum WriteRequest {
UpdateRepo(UpdateRepo),
RenameRepo(RenameRepo),
RefreshRepoCache(RefreshRepoCache),
CreateRepoWebhook(CreateRepoWebhook),
DeleteRepoWebhook(DeleteRepoWebhook),
// ==== ALERTER ====
CreateAlerter(CreateAlerter),
@@ -167,15 +164,18 @@ pub enum WriteRequest {
WriteSyncFileContents(WriteSyncFileContents),
CommitSync(CommitSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),
// ==== TERMINAL ====
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
// ==== TAG ====
CreateTag(CreateTag),
DeleteTag(DeleteTag),
RenameTag(RenameTag),
UpdateTagColor(UpdateTagColor),
UpdateTagsOnResource(UpdateTagsOnResource),
// ==== VARIABLE ====
CreateVariable(CreateVariable),
@@ -184,13 +184,21 @@ pub enum WriteRequest {
UpdateVariableIsSecret(UpdateVariableIsSecret),
DeleteVariable(DeleteVariable),
// ==== PROVIDERS ====
// ==== PROVIDER ====
CreateGitProviderAccount(CreateGitProviderAccount),
UpdateGitProviderAccount(UpdateGitProviderAccount),
DeleteGitProviderAccount(DeleteGitProviderAccount),
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
// ==== ONBOARDING KEY ====
CreateOnboardingKey(CreateOnboardingKey),
UpdateOnboardingKey(UpdateOnboardingKey),
DeleteOnboardingKey(DeleteOnboardingKey),
// ==== ALERT ====
CloseAlert(CloseAlert),
}
pub fn router() -> Router {
@@ -225,31 +233,22 @@ async fn handler(
res?
}
#[instrument(
name = "WriteRequest",
skip(user, request),
fields(
user_id = user.id,
request = format!("{:?}", request.extract_variant())
)
)]
async fn task(
req_id: Uuid,
request: WriteRequest,
user: User,
) -> serror::Result<axum::response::Response> {
info!("/write request | user: {}", user.username);
let timer = Instant::now();
let variant = request.extract_variant();
info!("/write request | {variant} | user: {}", user.username);
let res = request.resolve(&WriteArgs { user }).await;
if let Err(e) = &res {
warn!("/write request {req_id} error: {:#}", e.error);
warn!(
"/write request {req_id} | {variant} | error: {:#}",
e.error
);
}
let elapsed = timer.elapsed();
debug!("/write request {req_id} | resolve time: {elapsed:?}");
res.map(|res| res.0)
}

View File

@@ -0,0 +1,200 @@
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::{Document, doc};
use komodo_client::{
api::write::{
CreateOnboardingKey, CreateOnboardingKeyResponse,
DeleteOnboardingKey, DeleteOnboardingKeyResponse,
UpdateOnboardingKey, UpdateOnboardingKeyResponse,
},
entities::{
komodo_timestamp, onboarding_key::OnboardingKey, random_string,
},
};
use noise::key::EncodedKeyPair;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::{AddStatusCode, AddStatusCodeError};
use crate::{api::write::WriteArgs, state::db_client};
//
impl Resolve<WriteArgs> for CreateOnboardingKey {
#[instrument(
"CreateOnboardingKey",
skip_all,
fields(
operator = admin.id,
name = self.name,
expires = self.expires,
tags = format!("{:?}", self.tags),
copy_server = self.copy_server,
create_builder = self.create_builder,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<CreateOnboardingKeyResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
let private_key = if let Some(private_key) = self.private_key {
private_key
} else {
format!("O-{}", random_string(30))
};
let public_key = EncodedKeyPair::from_private_key(&private_key)?
.public
.into_inner();
let onboarding_key = OnboardingKey {
public_key,
name: self.name,
enabled: true,
onboarded: Default::default(),
created_at: komodo_timestamp(),
expires: self.expires,
tags: self.tags,
copy_server: self.copy_server,
create_builder: self.create_builder,
};
let db = db_client();
// Create the key
db.onboarding_keys
.insert_one(&onboarding_key)
.await
.context(
"Failed to create Server onboarding key on database",
)?;
let created = db
.onboarding_keys
.find_one(doc! { "public_key": &onboarding_key.public_key })
.await
.context("Failed to query database for Server onboarding keys")?
.context(
"No Server onboarding key found on database after create",
)?;
Ok(CreateOnboardingKeyResponse {
private_key,
created,
})
}
}
//
impl Resolve<WriteArgs> for UpdateOnboardingKey {
#[instrument(
"UpdateOnboardingKey",
skip_all,
fields(
operator = admin.id,
public_key = self.public_key,
update = format!("{:?}", self),
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<UpdateOnboardingKeyResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
let query = doc! { "public_key": &self.public_key };
// No changes
if self.is_none() {
return db_client()
.onboarding_keys
.find_one(query)
.await
.context("Failed to query database for onboarding key")?
.context("No matching onboarding key found")
.status_code(StatusCode::NOT_FOUND);
}
let mut update = Document::new();
if let Some(enabled) = self.enabled {
update.insert("enabled", enabled);
}
if let Some(name) = self.name {
update.insert("name", name);
}
if let Some(expires) = self.expires {
update.insert("expires", expires);
}
if let Some(tags) = self.tags {
update.insert("tags", tags);
}
if let Some(copy_server) = self.copy_server {
update.insert("copy_server", copy_server);
}
if let Some(create_builder) = self.create_builder {
update.insert("create_builder", create_builder);
}
db_client()
.onboarding_keys
.update_one(query.clone(), doc! { "$set": update })
.await
.context("Failed to update onboarding key on database")?;
db_client()
.onboarding_keys
.find_one(query)
.await
.context("Failed to query database for onboarding key")?
.context("No matching onboarding key found")
.status_code(StatusCode::NOT_FOUND)
}
}
//
impl Resolve<WriteArgs> for DeleteOnboardingKey {
#[instrument(
"DeleteOnboardingKey",
skip_all,
fields(
operator = admin.id,
public_key = self.public_key,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<DeleteOnboardingKeyResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
let db = db_client();
let query = doc! { "public_key": &self.public_key };
let creation_key = db
.onboarding_keys
.find_one(query.clone())
.await
.context("Failed to query database for Server onboarding keys")?
.context("Server onboarding key matching provided public key not found")
.status_code(StatusCode::NOT_FOUND)?;
db.onboarding_keys.delete_one(query).await.context(
"Failed to delete Server onboarding key from database",
)?;
Ok(creation_key)
}
}

View File

@@ -1,6 +1,14 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::{find_one_by_id, update_one_by_id},
mongodb::{
bson::{Document, doc, oid::ObjectId, to_bson},
options::UpdateOptions,
},
};
use derive_variants::ExtractVariant as _;
use komodo_client::{
api::write::*,
entities::{
@@ -8,13 +16,6 @@ use komodo_client::{
permission::{UserTarget, UserTargetVariant},
},
};
use mungos::{
by_id::{find_one_by_id, update_one_by_id},
mongodb::{
bson::{Document, doc, oid::ObjectId, to_bson},
options::UpdateOptions,
},
};
use resolver_api::Resolve;
use crate::{helpers::query::get_user, state::db_client};
@@ -22,7 +23,15 @@ use crate::{helpers::query::get_user, state::db_client};
use super::WriteArgs;
impl Resolve<WriteArgs> for UpdateUserAdmin {
#[instrument(name = "UpdateUserAdmin", skip(super_admin))]
#[instrument(
"UpdateUserAdmin",
skip_all,
fields(
operator = super_admin.id,
target_user = self.user_id,
admin = self.admin,
)
)]
async fn resolve(
self,
WriteArgs { user: super_admin }: &WriteArgs,
@@ -60,7 +69,17 @@ impl Resolve<WriteArgs> for UpdateUserAdmin {
}
impl Resolve<WriteArgs> for UpdateUserBasePermissions {
#[instrument(name = "UpdateUserBasePermissions", skip(admin))]
#[instrument(
"UpdateUserBasePermissions",
skip_all,
fields(
operator = admin.id,
target_user = self.user_id,
enabled = self.enabled,
create_servers = self.create_servers,
create_builds = self.create_builds,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -107,7 +126,7 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
update_one_by_id(
&db_client().users,
&user_id,
mungos::update::Update::Set(update_doc),
database::mungos::update::Update::Set(update_doc),
None,
)
.await?;
@@ -117,7 +136,16 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
}
impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
#[instrument(name = "UpdatePermissionOnResourceType", skip(admin))]
#[instrument(
"UpdatePermissionOnResourceType",
skip_all,
fields(
operator = admin.id,
user_target = format!("{:?}", self.user_target),
resource_type = self.resource_type.to_string(),
permission = format!("{:?}", self.permission),
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -185,7 +213,17 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
}
impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
#[instrument(name = "UpdatePermissionOnTarget", skip(admin))]
#[instrument(
"UpdatePermissionOnTarget",
skip_all,
fields(
operator = admin.id,
user_target = format!("{:?}", self.user_target),
resource_type = self.resource_target.extract_variant().to_string(),
resource_id = self.resource_target.extract_variant_id().1,
permission = format!("{:?}", self.permission),
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,

View File

@@ -11,20 +11,34 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateProcedure {
#[instrument(name = "CreateProcedure", skip(user))]
#[instrument(
"CreateProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.name,
config = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<CreateProcedureResponse> {
Ok(
resource::create::<Procedure>(&self.name, self.config, user)
.await?,
)
resource::create::<Procedure>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopyProcedure {
#[instrument(name = "CopyProcedure", skip(user))]
#[instrument(
"CopyProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.name,
copy_procedure = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -36,15 +50,26 @@ impl Resolve<WriteArgs> for CopyProcedure {
PermissionLevel::Write.into(),
)
.await?;
Ok(
resource::create::<Procedure>(&self.name, config.into(), user)
.await?,
resource::create::<Procedure>(
&self.name,
config.into(),
None,
user,
)
.await
}
}
impl Resolve<WriteArgs> for UpdateProcedure {
#[instrument(name = "UpdateProcedure", skip(user))]
#[instrument(
"UpdateProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -57,7 +82,15 @@ impl Resolve<WriteArgs> for UpdateProcedure {
}
impl Resolve<WriteArgs> for RenameProcedure {
#[instrument(name = "RenameProcedure", skip(user))]
#[instrument(
"RenameProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -70,11 +103,18 @@ impl Resolve<WriteArgs> for RenameProcedure {
}
impl Resolve<WriteArgs> for DeleteProcedure {
#[instrument(name = "DeleteProcedure", skip(args))]
#[instrument(
"DeleteProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.id
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteProcedureResponse> {
Ok(resource::delete::<Procedure>(&self.id, args).await?)
Ok(resource::delete::<Procedure>(&self.id, user).await?)
}
}

View File

@@ -1,4 +1,8 @@
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
mongodb::bson::{doc, to_document},
};
use komodo_client::{
api::write::*,
entities::{
@@ -6,11 +10,9 @@ use komodo_client::{
provider::{DockerRegistryAccount, GitProviderAccount},
},
};
use mungos::{
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
mongodb::bson::{doc, to_document},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
helpers::update::{add_update, make_update},
@@ -20,25 +22,41 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateGitProviderAccount {
#[instrument(
"CreateGitProviderAccount",
skip_all,
fields(
operator = user.id,
domain = self.account.domain,
username = self.account.username,
https = self.account.https.unwrap_or(true),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<CreateGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can create git provider accounts")
.into(),
anyhow!("Only admins can create git provider accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
let mut account: GitProviderAccount = self.account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string.").into());
return Err(
anyhow!("Domain cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string.").into());
return Err(
anyhow!("Username cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
let mut update = make_update(
@@ -51,14 +69,14 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
.git_accounts
.insert_one(&account)
.await
.context("failed to create git provider account on db")?
.context("Failed to create git provider account on db")?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.context("Inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create git provider account",
"Create git provider account",
format!(
"Created git provider account for {} with username {}",
account.domain, account.username
@@ -70,7 +88,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create git provider account | {e:#}")
error!("Failed to add update for create git provider account | {e:#}")
})
.ok();
@@ -79,33 +97,44 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
}
impl Resolve<WriteArgs> for UpdateGitProviderAccount {
#[instrument(
"UpdateGitProviderAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
domain = self.account.domain,
username = self.account.username,
https = self.account.https.unwrap_or(true),
)
)]
async fn resolve(
mut self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can update git provider accounts")
.into(),
anyhow!("Only admins can update git provider accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
if let Some(domain) = &self.account.domain {
if domain.is_empty() {
return Err(
anyhow!("cannot update git provider with empty domain")
.into(),
);
}
if let Some(domain) = &self.account.domain
&& domain.is_empty()
{
return Err(
anyhow!("Cannot update git provider with empty domain")
.status_code(StatusCode::BAD_REQUEST),
);
}
if let Some(username) = &self.account.username {
if username.is_empty() {
return Err(
anyhow!("cannot update git provider with empty username")
.into(),
);
}
if let Some(username) = &self.account.username
&& username.is_empty()
{
return Err(
anyhow!("Cannot update git provider with empty username")
.status_code(StatusCode::BAD_REQUEST),
);
}
// Ensure update does not change id
@@ -118,7 +147,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
);
let account = to_document(&self.account).context(
"failed to serialize partial git provider account to bson",
"Failed to serialize partial git provider account to bson",
)?;
let db = db_client();
update_one_by_id(
@@ -128,17 +157,17 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
None,
)
.await
.context("failed to update git provider account on db")?;
.context("Failed to update git provider account on db")?;
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
.await
.context("failed to query db for git accounts")?
.context("Failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(anyhow!("No account found with given id").into());
};
update.push_simple_log(
"update git provider account",
"Update git provider account",
format!(
"Updated git provider account for {} with username {}",
account.domain, account.username
@@ -150,7 +179,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update git provider account | {e:#}")
error!("Failed to add update for update git provider account | {e:#}")
})
.ok();
@@ -159,14 +188,22 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
}
impl Resolve<WriteArgs> for DeleteGitProviderAccount {
#[instrument(
"DeleteGitProviderAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can delete git provider accounts")
.into(),
anyhow!("Only admins can delete git provider accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -179,16 +216,19 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
let db = db_client();
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
.await
.context("failed to query db for git accounts")?
.context("Failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(
anyhow!("No account found with given id")
.status_code(StatusCode::BAD_REQUEST),
);
};
delete_one_by_id(&db.git_accounts, &self.id, None)
.await
.context("failed to delete git account on db")?;
update.push_simple_log(
"delete git provider account",
"Delete git provider account",
format!(
"Deleted git provider account for {} with username {}",
account.domain, account.username
@@ -200,7 +240,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete git provider account | {e:#}")
error!("Failed to add update for delete git provider account | {e:#}")
})
.ok();
@@ -209,6 +249,15 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
}
impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
#[instrument(
"CreateDockerRegistryAccount",
skip_all,
fields(
operator = user.id,
domain = self.account.domain,
username = self.account.username,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -216,20 +265,26 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
if !user.admin {
return Err(
anyhow!(
"only admins can create docker registry account accounts"
"Only admins can create docker registry account accounts"
)
.into(),
.status_code(StatusCode::FORBIDDEN),
);
}
let mut account: DockerRegistryAccount = self.account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string.").into());
return Err(
anyhow!("Domain cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string.").into());
return Err(
anyhow!("Username cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
let mut update = make_update(
@@ -243,15 +298,15 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
.insert_one(&account)
.await
.context(
"failed to create docker registry account account on db",
"Failed to create docker registry account account on db",
)?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.context("Inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create docker registry account",
"Create docker registry account",
format!(
"Created docker registry account account for {} with username {}",
account.domain, account.username
@@ -263,7 +318,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create docker registry account | {e:#}")
error!("Failed to add update for create docker registry account | {e:#}")
})
.ok();
@@ -272,37 +327,47 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
}
impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
#[instrument(
"UpdateDockerRegistryAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
domain = self.account.domain,
username = self.account.username,
)
)]
async fn resolve(
mut self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can update docker registry accounts")
.into(),
anyhow!("Only admins can update docker registry accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
if let Some(domain) = &self.account.domain {
if domain.is_empty() {
return Err(
anyhow!(
"cannot update docker registry account with empty domain"
)
.into(),
);
}
if let Some(domain) = &self.account.domain
&& domain.is_empty()
{
return Err(
anyhow!(
"Cannot update docker registry account with empty domain"
)
.status_code(StatusCode::BAD_REQUEST),
);
}
if let Some(username) = &self.account.username {
if username.is_empty() {
return Err(
anyhow!(
"cannot update docker registry account with empty username"
)
.into(),
);
}
if let Some(username) = &self.account.username
&& username.is_empty()
{
return Err(
anyhow!(
"Cannot update docker registry account with empty username"
)
.status_code(StatusCode::BAD_REQUEST),
);
}
self.account.id = None;
@@ -314,7 +379,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
);
let account = to_document(&self.account).context(
"failed to serialize partial docker registry account account to bson",
"Failed to serialize partial docker registry account account to bson",
)?;
let db = db_client();
@@ -326,19 +391,19 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
)
.await
.context(
"failed to update docker registry account account on db",
"Failed to update docker registry account account on db",
)?;
let Some(account) =
find_one_by_id(&db.registry_accounts, &self.id)
.await
.context("failed to query db for registry accounts")?
.context("Failed to query db for registry accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(anyhow!("No account found with given id").into());
};
update.push_simple_log(
"update docker registry account",
"Update docker registry account",
format!(
"Updated docker registry account account for {} with username {}",
account.domain, account.username
@@ -350,7 +415,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update docker registry account | {e:#}")
error!("Failed to add update for update docker registry account | {e:#}")
})
.ok();
@@ -359,14 +424,22 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
}
impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
#[instrument(
"DeleteDockerRegistryAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can delete docker registry accounts")
.into(),
anyhow!("Only admins can delete docker registry accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -380,16 +453,19 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
let Some(account) =
find_one_by_id(&db.registry_accounts, &self.id)
.await
.context("failed to query db for git accounts")?
.context("Failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(
anyhow!("No account found with given id")
.status_code(StatusCode::BAD_REQUEST),
);
};
delete_one_by_id(&db.registry_accounts, &self.id, None)
.await
.context("failed to delete registry account on db")?;
.context("Failed to delete registry account on db")?;
update.push_simple_log(
"delete registry account",
"Delete registry account",
format!(
"Deleted registry account for {} with username {}",
account.domain, account.username
@@ -401,7 +477,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete docker registry account | {e:#}")
error!("Failed to add update for delete docker registry account | {e:#}")
})
.ok();

View File

@@ -1,24 +1,20 @@
use anyhow::{Context, anyhow};
use anyhow::Context;
use database::mongo_indexed::doc;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_document,
};
use formatting::format_serror;
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
CloneArgs, NoData, Operation,
config::core::CoreConfig,
komodo_timestamp,
NoData, Operation, RepoExecutionArgs, komodo_timestamp,
permission::PermissionLevel,
repo::{PartialRepoConfig, Repo, RepoInfo},
repo::{Repo, RepoInfo},
server::Server,
to_path_compatible_name,
update::{Log, Update},
},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::api;
use resolver_api::Resolve;
@@ -30,23 +26,40 @@ use crate::{
},
permission::get_check_permissions,
resource,
state::{action_states, db_client, github_client},
state::{action_states, db_client},
};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateRepo {
#[instrument(name = "CreateRepo", skip(user))]
#[instrument(
"CreateRepo",
skip_all,
fields(
operator = user.id,
repo = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Repo> {
Ok(resource::create::<Repo>(&self.name, self.config, user).await?)
resource::create::<Repo>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopyRepo {
#[instrument(name = "CopyRepo", skip(user))]
#[instrument(
"CopyRepo",
skip_all,
fields(
operator = user.id,
repo = self.name,
copy_repo = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -57,22 +70,38 @@ impl Resolve<WriteArgs> for CopyRepo {
PermissionLevel::Read.into(),
)
.await?;
Ok(
resource::create::<Repo>(&self.name, config.into(), user)
.await?,
)
resource::create::<Repo>(&self.name, config.into(), None, user)
.await
}
}
impl Resolve<WriteArgs> for DeleteRepo {
#[instrument(name = "DeleteRepo", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Repo> {
Ok(resource::delete::<Repo>(&self.id, args).await?)
#[instrument(
"DeleteRepo",
skip_all,
fields(
operator = user.id,
repo = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Repo> {
Ok(resource::delete::<Repo>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateRepo {
#[instrument(name = "UpdateRepo", skip(user))]
#[instrument(
"UpdateRepo",
skip_all,
fields(
operator = user.id,
repo = self.id,
update = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -82,7 +111,15 @@ impl Resolve<WriteArgs> for UpdateRepo {
}
impl Resolve<WriteArgs> for RenameRepo {
#[instrument(name = "RenameRepo", skip(user))]
#[instrument(
"RenameRepo",
skip_all,
fields(
operator = user.id,
repo = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -118,7 +155,7 @@ impl Resolve<WriteArgs> for RenameRepo {
update_one_by_id(
&db_client().repos,
&repo.id,
mungos::update::Update::Set(
database::mungos::update::Update::Set(
doc! { "name": &name, "updated_at": komodo_timestamp() },
),
None,
@@ -129,7 +166,8 @@ impl Resolve<WriteArgs> for RenameRepo {
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::git::RenameRepo {
curr_name: to_path_compatible_name(&repo.name),
new_name: name.clone(),
@@ -158,11 +196,6 @@ impl Resolve<WriteArgs> for RenameRepo {
}
impl Resolve<WriteArgs> for RefreshRepoCache {
#[instrument(
name = "RefreshRepoCache",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -183,13 +216,10 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
return Ok(NoData {});
}
let mut clone_args: CloneArgs = (&repo).into();
let mut clone_args: RepoExecutionArgs = (&repo).into();
let repo_path =
clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(repo_path.display().to_string());
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
let access_token = if let Some(username) = &clone_args.account {
git_token(&clone_args.provider, username, |https| {
@@ -203,14 +233,10 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
None
};
let GitRes { hash, message, .. } = git::pull_or_clone(
let (res, _) = git::pull_or_clone(
clone_args,
&core_config().repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.with_context(|| {
@@ -222,8 +248,8 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
last_built_at: repo.info.last_built_at,
built_hash: repo.info.built_hash,
built_message: repo.info.built_message,
latest_hash: hash,
latest_message: message,
latest_hash: res.commit_hash,
latest_message: res.commit_message,
};
let info = to_document(&info)
@@ -241,220 +267,3 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
Ok(NoData {})
}
}
impl Resolve<WriteArgs> for CreateRepoWebhook {
#[instrument(name = "CreateRepoWebhook", skip(args))]
async fn resolve(
self,
args: &WriteArgs,
) -> serror::Result<CreateRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(
anyhow!(
"github_webhook_app is not configured in core config toml"
)
.into(),
);
};
let repo = get_check_permissions::<Repo>(
&self.repo,
&args.user,
PermissionLevel::Write.into(),
)
.await?;
if repo.config.repo.is_empty() {
return Err(
anyhow!("No repo configured, can't create webhook").into(),
);
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(
anyhow!("Cannot manage repo webhooks under owner {owner}")
.into(),
);
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
webhook_secret,
..
} = core_config();
let webhook_secret = if repo.config.webhook_secret.is_empty() {
webhook_secret
} else {
&repo.config.webhook_secret
};
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
let url = match self.action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
RepoWebhookAction::Build => {
format!("{host}/listener/github/repo/{}/build", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
return Ok(NoData {});
}
}
// Now good to create the webhook
let request = ReposCreateWebhookRequest {
active: Some(true),
config: Some(ReposCreateWebhookRequestConfig {
url,
secret: webhook_secret.to_string(),
content_type: String::from("json"),
insecure_ssl: None,
digest: Default::default(),
token: Default::default(),
}),
events: vec![String::from("push")],
name: String::from("web"),
};
github_repos
.create_webhook(owner, repo_name, &request)
.await
.context("failed to create webhook")?;
if !repo.config.webhook_enabled {
UpdateRepo {
id: repo.id,
config: PartialRepoConfig {
webhook_enabled: Some(true),
..Default::default()
},
}
.resolve(args)
.await
.map_err(|e| e.error)
.context("failed to update repo to enable webhook")?;
}
Ok(NoData {})
}
}
impl Resolve<WriteArgs> for DeleteRepoWebhook {
#[instrument(name = "DeleteRepoWebhook", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteRepoWebhookResponse> {
let Some(github) = github_client() else {
return Err(
anyhow!(
"github_webhook_app is not configured in core config toml"
)
.into(),
);
};
let repo = get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Write.into(),
)
.await?;
if repo.config.git_provider != "github.com" {
return Err(
anyhow!("Can only manage github.com repo webhooks").into(),
);
}
if repo.config.repo.is_empty() {
return Err(
anyhow!("No repo configured, can't create webhook").into(),
);
}
let mut split = repo.config.repo.split('/');
let owner = split.next().context("Repo repo has no owner")?;
let Some(github) = github.get(owner) else {
return Err(
anyhow!("Cannot manage repo webhooks under owner {owner}")
.into(),
);
};
let repo_name =
split.next().context("Repo repo has no repo after the /")?;
let github_repos = github.repos();
// First make sure the webhook isn't already created (inactive ones are ignored)
let webhooks = github_repos
.list_all_webhooks(owner, repo_name)
.await
.context("failed to list all webhooks on repo")?
.body;
let CoreConfig {
host,
webhook_base_url,
..
} = core_config();
let host = if webhook_base_url.is_empty() {
host
} else {
webhook_base_url
};
let url = match self.action {
RepoWebhookAction::Clone => {
format!("{host}/listener/github/repo/{}/clone", repo.id)
}
RepoWebhookAction::Pull => {
format!("{host}/listener/github/repo/{}/pull", repo.id)
}
RepoWebhookAction::Build => {
format!("{host}/listener/github/repo/{}/build", repo.id)
}
};
for webhook in webhooks {
if webhook.active && webhook.config.url == url {
github_repos
.delete_webhook(owner, repo_name, webhook.id)
.await
.context("failed to delete webhook")?;
return Ok(NoData {});
}
}
// No webhook to delete, all good
Ok(NoData {})
}
}

View File

@@ -0,0 +1,82 @@
use anyhow::anyhow;
use derive_variants::ExtractVariant as _;
use komodo_client::{
api::write::{UpdateResourceMeta, UpdateResourceMetaResponse},
entities::{
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::resource::{self, ResourceMetaUpdate};
use super::WriteArgs;
impl Resolve<WriteArgs> for UpdateResourceMeta {
#[instrument(
"UpdateResourceMeta",
skip_all,
fields(
operator = args.user.id,
resource_type = self.target.extract_variant().to_string(),
resource_id = self.target.extract_variant_id().1,
description = self.description,
template = self.template,
tags = format!("{:?}", self.tags),
)
)]
async fn resolve(
self,
args: &WriteArgs,
) -> serror::Result<UpdateResourceMetaResponse> {
let meta = ResourceMetaUpdate {
description: self.description,
template: self.template,
tags: self.tags,
};
match self.target {
ResourceTarget::System(_) => {
return Err(
anyhow!("cannot update meta of System resource target")
.status_code(StatusCode::BAD_REQUEST),
);
}
ResourceTarget::Server(id) => {
resource::update_meta::<Server>(&id, meta, args).await?;
}
ResourceTarget::Deployment(id) => {
resource::update_meta::<Deployment>(&id, meta, args).await?;
}
ResourceTarget::Build(id) => {
resource::update_meta::<Build>(&id, meta, args).await?;
}
ResourceTarget::Repo(id) => {
resource::update_meta::<Repo>(&id, meta, args).await?;
}
ResourceTarget::Builder(id) => {
resource::update_meta::<Builder>(&id, meta, args).await?;
}
ResourceTarget::Alerter(id) => {
resource::update_meta::<Alerter>(&id, meta, args).await?;
}
ResourceTarget::Procedure(id) => {
resource::update_meta::<Procedure>(&id, meta, args).await?;
}
ResourceTarget::Action(id) => {
resource::update_meta::<Action>(&id, meta, args).await?;
}
ResourceTarget::ResourceSync(id) => {
resource::update_meta::<ResourceSync>(&id, meta, args)
.await?;
}
ResourceTarget::Stack(id) => {
resource::update_meta::<Stack>(&id, meta, args).await?;
}
}
Ok(UpdateResourceMetaResponse {})
}
}

View File

@@ -1,11 +1,11 @@
use anyhow::Context;
use formatting::format_serror;
use formatting::{bold, format_serror};
use komodo_client::{
api::write::*,
entities::{
NoData, Operation,
Operation,
permission::PermissionLevel,
server::Server,
server::{Server, ServerInfo},
to_docker_compatible_name,
update::{Update, UpdateStatus},
},
@@ -19,33 +19,99 @@ use crate::{
update::{add_update, make_update, update_update},
},
permission::get_check_permissions,
resource,
resource::{self, update_server_public_key},
};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateServer {
#[instrument(name = "CreateServer", skip(user))]
#[instrument(
"CreateServer",
skip_all,
fields(
operator = user.id,
server = self.name,
config = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Server> {
Ok(
resource::create::<Server>(&self.name, self.config, user)
.await?,
resource::create::<Server>(
&self.name,
self.config,
self.public_key.map(|public_key| ServerInfo {
public_key,
..Default::default()
}),
user,
)
.await
}
}
impl Resolve<WriteArgs> for CopyServer {
#[instrument(
"CopyServer",
skip_all,
fields(
operator = user.id,
server = self.name,
copy_server = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Server> {
let Server { config, .. } = get_check_permissions::<Server>(
&self.id,
user,
PermissionLevel::Read.into(),
)
.await?;
resource::create::<Server>(
&self.name,
config.into(),
self.public_key.map(|public_key| ServerInfo {
public_key,
..Default::default()
}),
user,
)
.await
}
}
impl Resolve<WriteArgs> for DeleteServer {
#[instrument(name = "DeleteServer", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Server> {
Ok(resource::delete::<Server>(&self.id, args).await?)
#[instrument(
"DeleteServer",
skip_all,
fields(
operator = user.id,
server = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Server> {
Ok(resource::delete::<Server>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateServer {
#[instrument(name = "UpdateServer", skip(user))]
#[instrument(
"UpdateServer",
skip_all,
fields(
operator = user.id,
server = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -55,7 +121,15 @@ impl Resolve<WriteArgs> for UpdateServer {
}
impl Resolve<WriteArgs> for RenameServer {
#[instrument(name = "RenameServer", skip(user))]
#[instrument(
"RenameServer",
skip_all,
fields(
operator = user.id,
server = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -65,7 +139,15 @@ impl Resolve<WriteArgs> for RenameServer {
}
impl Resolve<WriteArgs> for CreateNetwork {
#[instrument(name = "CreateNetwork", skip(user))]
#[instrument(
"CreateNetwork",
skip_all,
fields(
operator = user.id,
server = self.server,
network = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -77,7 +159,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
)
.await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let mut update =
make_update(&server, Operation::CreateNetwork, user);
@@ -85,7 +167,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
update.id = add_update(update.clone()).await?;
match periphery
.request(api::network::CreateNetwork {
.request(api::docker::CreateNetwork {
name: to_docker_compatible_name(&self.name),
driver: None,
})
@@ -94,7 +176,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"create network",
format_serror(&e.context("failed to create network").into()),
format_serror(&e.context("Failed to create network").into()),
),
};
@@ -105,80 +187,80 @@ impl Resolve<WriteArgs> for CreateNetwork {
}
}
impl Resolve<WriteArgs> for CreateTerminal {
#[instrument(name = "CreateTerminal", skip(user))]
//
impl Resolve<WriteArgs> for UpdateServerPublicKey {
#[instrument(
"UpdateServerPublicKey",
skip_all,
fields(
operator = args.user.id,
server = self.server,
public_key = self.public_key,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
args: &WriteArgs,
) -> Result<Self::Response, Self::Error> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Write.terminal(),
&args.user,
PermissionLevel::Write.into(),
)
.await?;
let periphery = periphery_client(&server)?;
update_server_public_key(&server.id, &self.public_key).await?;
periphery
.request(api::terminal::CreateTerminal {
name: self.name,
command: self.command,
recreate: self.recreate,
})
.await
.context("Failed to create terminal on periphery")?;
let mut update =
make_update(&server, Operation::UpdateServerKey, &args.user);
Ok(NoData {})
update.push_simple_log(
"Update Server Public Key",
format!("Public key updated to {}", bold(&self.public_key)),
);
update.finalize();
update.id = add_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<WriteArgs> for DeleteTerminal {
#[instrument(name = "DeleteTerminal", skip(user))]
//
impl Resolve<WriteArgs> for RotateServerKeys {
#[instrument(
"RotateServerKeys",
skip_all,
fields(
operator = args.user.id,
server = self.server,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
args: &WriteArgs,
) -> Result<Self::Response, Self::Error> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Write.terminal(),
&args.user,
PermissionLevel::Write.into(),
)
.await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteTerminal {
terminal: self.terminal,
})
let public_key = periphery
.request(api::keys::RotatePrivateKey {})
.await
.context("Failed to delete terminal on periphery")?;
.context("Failed to rotate Periphery private key")?
.public_key;
Ok(NoData {})
}
}
impl Resolve<WriteArgs> for DeleteAllTerminals {
#[instrument(name = "DeleteAllTerminals", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Write.terminal(),
)
.await?;
let periphery = periphery_client(&server)?;
periphery
.request(api::terminal::DeleteAllTerminals {})
.await
.context("Failed to delete all terminals on periphery")?;
Ok(NoData {})
UpdateServerPublicKey {
server: server.id,
public_key,
}
.resolve(args)
.await
}
}

View File

@@ -1,6 +1,10 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::find_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
};
use komodo_client::{
api::{user::CreateApiKey, write::*},
entities::{
@@ -8,10 +12,6 @@ use komodo_client::{
user::{User, UserConfig},
},
};
use mungos::{
by_id::find_one_by_id,
mongodb::bson::{doc, oid::ObjectId},
};
use resolver_api::Resolve;
use crate::{api::user::UserArgs, state::db_client};
@@ -19,7 +19,15 @@ use crate::{api::user::UserArgs, state::db_client};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateServiceUser {
#[instrument(name = "CreateServiceUser", skip(user))]
#[instrument(
"CreateServiceUser",
skip_all,
fields(
operator = user.id,
username = self.username,
description = self.description,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -63,7 +71,15 @@ impl Resolve<WriteArgs> for CreateServiceUser {
}
impl Resolve<WriteArgs> for UpdateServiceUserDescription {
#[instrument(name = "UpdateServiceUserDescription", skip(user))]
#[instrument(
"UpdateServiceUserDescription",
skip_all,
fields(
operator = user.id,
username = self.username,
description = self.description,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -99,7 +115,16 @@ impl Resolve<WriteArgs> for UpdateServiceUserDescription {
}
impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
#[instrument(name = "CreateApiKeyForServiceUser", skip(user))]
#[instrument(
"CreateApiKeyForServiceUser",
skip_all,
fields(
operator = user.id,
service_user = self.user_id,
name = self.name,
expires = self.expires,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -125,7 +150,14 @@ impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
}
impl Resolve<WriteArgs> for DeleteApiKeyForServiceUser {
#[instrument(name = "DeleteApiKeyForServiceUser", skip(user))]
#[instrument(
"DeleteApiKeyForServiceUser",
skip_all,
fields(
operator = user.id,
key = self.key,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -1,30 +1,28 @@
use std::path::PathBuf;
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::{doc, to_document};
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
FileContents, NoData, Operation,
config::core::CoreConfig,
FileContents, NoData, Operation, RepoExecutionArgs,
all_logs_success,
permission::PermissionLevel,
repo::Repo,
server::ServerState,
stack::{PartialStackConfig, Stack, StackInfo},
stack::{Stack, StackInfo},
update::Update,
user::stack_user,
},
};
use mungos::mongodb::bson::{doc, to_document};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::api::compose::{
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
WriteCommitComposeContents, WriteComposeContentsToHost,
WriteComposeContentsToHost,
};
use resolver_api::Resolve;
use crate::{
api::execute::pull_stack_inner,
config::core_config,
helpers::{
periphery_client,
@@ -35,30 +33,43 @@ use crate::{
permission::get_check_permissions,
resource,
stack::{
get_stack_and_server,
remote::{RemoteComposeContents, get_repo_compose_contents},
services::extract_services_into_res,
},
state::{db_client, github_client},
state::db_client,
};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateStack {
#[instrument(name = "CreateStack", skip(user))]
#[instrument(
"CreateStack",
skip_all,
fields(
operator = user.id,
stack = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Stack> {
Ok(
resource::create::<Stack>(&self.name, self.config, user)
.await?,
)
resource::create::<Stack>(&self.name, self.config, None, user)
.await
}
}
impl Resolve<WriteArgs> for CopyStack {
#[instrument(name = "CopyStack", skip(user))]
#[instrument(
"CopyStack",
skip_all,
fields(
operator = user.id,
stack = self.name,
copy_stack = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -69,22 +80,39 @@ impl Resolve<WriteArgs> for CopyStack {
PermissionLevel::Read.into(),
)
.await?;
Ok(
resource::create::<Stack>(&self.name, config.into(), user)
.await?,
)
resource::create::<Stack>(&self.name, config.into(), None, user)
.await
}
}
impl Resolve<WriteArgs> for DeleteStack {
#[instrument(name = "DeleteStack", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Stack> {
Ok(resource::delete::<Stack>(&self.id, args).await?)
#[instrument(
"DeleteStack",
skip_all,
fields(
operator = user.id,
stack = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Stack> {
Ok(resource::delete::<Stack>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateStack {
#[instrument(name = "UpdateStack", skip(user))]
#[instrument(
"UpdateStack",
skip_all,
fields(
operator = user.id,
stack = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -94,7 +122,15 @@ impl Resolve<WriteArgs> for UpdateStack {
}
impl Resolve<WriteArgs> for RenameStack {
#[instrument(name = "RenameStack", skip(user))]
#[instrument(
"RenameStack",
skip_all,
fields(
operator = user.id,
stack = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -104,7 +140,15 @@ impl Resolve<WriteArgs> for RenameStack {
}
impl Resolve<WriteArgs> for WriteStackFileContents {
#[instrument(name = "WriteStackFileContents", skip(user))]
#[instrument(
"WriteStackFileContents",
skip_all,
fields(
operator = user.id,
stack = self.stack,
path = self.file_path,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -114,24 +158,13 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
file_path,
contents,
} = self;
let (mut stack, server) = get_stack_and_server(
let stack = get_check_permissions::<Stack>(
&stack,
user,
PermissionLevel::Write.into(),
true,
)
.await?;
let mut repo = if !stack.config.files_on_host
&& !stack.config.linked_repo.is_empty()
{
crate::resource::get::<Repo>(&stack.config.linked_repo)
.await?
.into()
} else {
None
};
if !stack.config.files_on_host
&& stack.config.repo.is_empty()
&& stack.config.linked_repo.is_empty()
@@ -146,85 +179,239 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
update.push_simple_log("File contents to write", &contents);
let stack_id = stack.id.clone();
if stack.config.files_on_host {
match periphery_client(&server)?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
file_path,
contents,
})
.await
.context("Failed to write contents to host")
{
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update.push_error_log(
"Write File Contents",
format_serror(&e.into()),
);
}
};
} else {
let git_token =
stack_git_token(&mut stack, repo.as_mut()).await?;
match periphery_client(&server)?
.request(WriteCommitComposeContents {
stack,
repo,
username: Some(user.username.clone()),
file_path,
contents,
git_token,
})
.await
.context("Failed to write contents to host")
{
Ok(res) => {
update.logs.extend(res.logs);
}
Err(e) => {
update.push_error_log(
"Write File Contents",
format_serror(&e.into()),
);
}
};
}
if let Err(e) = (RefreshStackCache { stack: stack_id })
.resolve(&WriteArgs {
user: stack_user().to_owned(),
})
.await
.map_err(|e| e.error)
.context(
"Failed to refresh stack cache after writing file contents",
write_stack_file_contents_on_host(
stack, file_path, contents, update,
)
{
update.push_error_log(
"Refresh stack cache",
format_serror(&e.into()),
);
.await
} else {
write_stack_file_contents_git(
stack,
&file_path,
&contents,
&user.username,
update,
)
.await
}
update.finalize();
update.id = add_update(update.clone()).await?;
Ok(update)
}
}
#[instrument("WriteStackFileContentsOnHost", skip_all)]
async fn write_stack_file_contents_on_host(
stack: Stack,
file_path: String,
contents: String,
mut update: Update,
) -> serror::Result<Update> {
if stack.config.server_id.is_empty() {
return Err(anyhow!(
"Cannot write file, Files on host Stack has not configured a Server"
).into());
}
let (server, state) =
get_server_with_state(&stack.config.server_id).await?;
if state != ServerState::Ok {
return Err(
anyhow!(
"Cannot write file when server is unreachable or disabled"
)
.into(),
);
}
match periphery_client(&server)
.await?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
file_path,
contents,
})
.await
.context("Failed to write contents to host")
{
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update.push_error_log(
"Write File Contents",
format_serror(&e.into()),
);
}
};
if !all_logs_success(&update.logs) {
update.finalize();
update.id = add_update(update.clone()).await?;
return Ok(update);
}
// Finish with a cache refresh
if let Err(e) = (RefreshStackCache { stack: stack.id })
.resolve(&WriteArgs {
user: stack_user().to_owned(),
})
.await
.map_err(|e| e.error)
.context(
"Failed to refresh stack cache after writing file contents",
)
{
update.push_error_log(
"Refresh stack cache",
format_serror(&e.into()),
);
}
update.finalize();
update.id = add_update(update.clone()).await?;
Ok(update)
}
#[instrument("WriteStackFileContentsGit", skip_all)]
async fn write_stack_file_contents_git(
mut stack: Stack,
file_path: &str,
contents: &str,
username: &str,
mut update: Update,
) -> serror::Result<Update> {
let mut repo = if !stack.config.linked_repo.is_empty() {
crate::resource::get::<Repo>(&stack.config.linked_repo)
.await?
.into()
} else {
None
};
let git_token = stack_git_token(&mut stack, repo.as_mut()).await?;
let mut repo_args: RepoExecutionArgs = if let Some(repo) = &repo {
repo.into()
} else {
(&stack).into()
};
let root = repo_args.unique_path(&core_config().repo_directory)?;
repo_args.destination = Some(root.display().to_string());
let file_path = stack
.config
.run_directory
.parse::<PathBuf>()
.context("Run directory is not a valid path")?
.join(file_path);
let full_path =
root.join(&file_path).components().collect::<PathBuf>();
if let Some(parent) = full_path.parent() {
tokio::fs::create_dir_all(parent).await.with_context(|| {
format!(
"Failed to initialize stack file parent directory {parent:?}"
)
})?;
}
// Ensure the folder is initialized as git repo.
// This allows a new file to be committed on a branch that may not exist.
if !root.join(".git").exists() {
git::init_folder_as_repo(
&root,
&repo_args,
git_token.as_deref(),
&mut update.logs,
)
.await;
if !all_logs_success(&update.logs) {
update.finalize();
update.id = add_update(update.clone()).await?;
return Ok(update);
}
}
// Save this for later -- repo_args moved next.
let branch = repo_args.branch.clone();
// Pull latest changes to repo to ensure linear commit history
match git::pull_or_clone(
repo_args,
&core_config().repo_directory,
git_token,
)
.await
.context("Failed to pull latest changes before commit")
{
Ok((res, _)) => update.logs.extend(res.logs),
Err(e) => {
update.push_error_log("Pull Repo", format_serror(&e.into()));
update.finalize();
return Ok(update);
}
};
if !all_logs_success(&update.logs) {
update.finalize();
update.id = add_update(update.clone()).await?;
return Ok(update);
}
if let Err(e) = tokio::fs::write(&full_path, &contents)
.await
.with_context(|| {
format!(
"Failed to write compose file contents to {full_path:?}"
)
})
{
update.push_error_log("Write File", format_serror(&e.into()));
} else {
update.push_simple_log(
"Write File",
format!("File written to {full_path:?}"),
);
};
if !all_logs_success(&update.logs) {
update.finalize();
update.id = add_update(update.clone()).await?;
return Ok(update);
}
let commit_res = git::commit_file(
&format!("{username}: Write Stack File"),
&root,
&file_path,
&branch,
)
.await;
update.logs.extend(commit_res.logs);
// Finish with a cache refresh
if let Err(e) = (RefreshStackCache { stack: stack.id })
.resolve(&WriteArgs {
user: stack_user().to_owned(),
})
.await
.map_err(|e| e.error)
.context(
"Failed to refresh stack cache after writing file contents",
)
{
update.push_error_log(
"Refresh stack cache",
format_serror(&e.into()),
);
}
update.finalize();
update.id = add_update(update.clone()).await?;
Ok(update)
}
impl Resolve<WriteArgs> for RefreshStackCache {
#[instrument(
name = "RefreshStackCache",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -283,9 +470,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
(vec![], None, None, None, None)
} else if let Some(server) = server {
let GetComposeContentsOnHostResponse { contents, errors } =
match periphery_client(&server)?
match periphery_client(&server)
.await?
.request(GetComposeContentsOnHost {
file_paths: stack.file_paths().to_vec(),
file_paths: stack.all_file_dependencies(),
name: stack.name.clone(),
run_directory: stack.config.run_directory.clone(),
})
@@ -307,6 +495,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
let mut services = Vec::new();
for contents in &contents {
// Don't include additional files in service parsing
if !stack.is_compose_file(&contents.path) {
continue;
}
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
@@ -345,6 +537,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
let mut services = Vec::new();
for contents in &remote_contents {
// Don't include additional files in service parsing
if !stack.is_compose_file(&contents.path) {
continue;
}
if let Err(e) = extract_services_into_res(
&project_name,
&contents.contents,
@@ -411,237 +607,6 @@ impl Resolve<WriteArgs> for RefreshStackCache {
.await
.context("failed to update stack info on db")?;
if (stack.config.poll_for_updates || stack.config.auto_update)
&& !stack.config.server_id.is_empty()
{
let (server, state) =
get_server_with_state(&stack.config.server_id).await?;
if state == ServerState::Ok {
let name = stack.name.clone();
if let Err(e) =
pull_stack_inner(stack, Vec::new(), &server, repo, None)
.await
{
warn!(
"Failed to pull latest images for Stack {name} | {e:#}",
);
}
}
}
Ok(NoData {})
}
}
impl Resolve<WriteArgs> for CreateStackWebhook {
  // Creates a github.com push webhook on the Stack's configured repo,
  // pointing at this Core's listener endpoint for the requested
  // action (Refresh or Deploy). Requires Write permission on the
  // Stack. Idempotent: returns early if an active webhook with the
  // same callback url already exists.
  #[instrument(name = "CreateStackWebhook", skip(args))]
  async fn resolve(
    self,
    args: &WriteArgs,
  ) -> serror::Result<CreateStackWebhookResponse> {
    let WriteArgs { user } = args;
    // Webhook management requires the github webhook app to be
    // configured in the core config.
    let Some(github) = github_client() else {
      return Err(
        anyhow!(
          "github_webhook_app is not configured in core config toml"
        )
        .into(),
      );
    };
    let stack = get_check_permissions::<Stack>(
      &self.stack,
      user,
      PermissionLevel::Write.into(),
    )
    .await?;
    // NOTE(review): unlike DeleteStackWebhook, there is no
    // `git_provider == "github.com"` guard here — confirm whether
    // non-github providers should be rejected before creation.
    if stack.config.repo.is_empty() {
      return Err(
        anyhow!("No repo configured, can't create webhook").into(),
      );
    }
    // Repo is formatted "<owner>/<repo>".
    let mut split = stack.config.repo.split('/');
    let owner = split.next().context("Stack repo has no owner")?;
    // The app must be installed for this owner to manage webhooks.
    let Some(github) = github.get(owner) else {
      return Err(
        anyhow!("Cannot manage repo webhooks under owner {owner}")
          .into(),
      );
    };
    let repo =
      split.next().context("Stack repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      webhook_secret,
      ..
    } = core_config();
    // Per-stack webhook secret overrides the global one.
    let webhook_secret = if stack.config.webhook_secret.is_empty() {
      webhook_secret
    } else {
      &stack.config.webhook_secret
    };
    // webhook_base_url overrides host when configured.
    let host = if webhook_base_url.is_empty() {
      host
    } else {
      webhook_base_url
    };
    let url = match self.action {
      StackWebhookAction::Refresh => {
        format!("{host}/listener/github/stack/{}/refresh", stack.id)
      }
      StackWebhookAction::Deploy => {
        format!("{host}/listener/github/stack/{}/deploy", stack.id)
      }
    };
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        return Ok(NoData {});
      }
    }
    // Now good to create the webhook
    let request = ReposCreateWebhookRequest {
      active: Some(true),
      config: Some(ReposCreateWebhookRequestConfig {
        url,
        secret: webhook_secret.to_string(),
        content_type: String::from("json"),
        insecure_ssl: None,
        digest: Default::default(),
        token: Default::default(),
      }),
      events: vec![String::from("push")],
      name: String::from("web"),
    };
    github_repos
      .create_webhook(owner, repo, &request)
      .await
      .context("failed to create webhook")?;
    // Flip webhook_enabled on so incoming deliveries are honored.
    if !stack.config.webhook_enabled {
      UpdateStack {
        id: stack.id,
        config: PartialStackConfig {
          webhook_enabled: Some(true),
          ..Default::default()
        },
      }
      .resolve(args)
      .await
      .map_err(|e| e.error)
      .context("failed to update stack to enable webhook")?;
    }
    Ok(NoData {})
  }
}
impl Resolve<WriteArgs> for DeleteStackWebhook {
  // Deletes the github.com push webhook on the Stack's repo that
  // targets this Core's listener endpoint for the given action.
  // Succeeds (NoData) even when no matching active webhook exists.
  // Requires Write permission on the Stack.
  #[instrument(name = "DeleteStackWebhook", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<DeleteStackWebhookResponse> {
    // Webhook management requires the github webhook app config.
    let Some(github) = github_client() else {
      return Err(
        anyhow!(
          "github_webhook_app is not configured in core config toml"
        )
        .into(),
      );
    };
    let stack = get_check_permissions::<Stack>(
      &self.stack,
      user,
      PermissionLevel::Write.into(),
    )
    .await?;
    if stack.config.git_provider != "github.com" {
      return Err(
        anyhow!("Can only manage github.com repo webhooks").into(),
      );
    }
    // NOTE(review): message says "create" in the delete handler —
    // likely copy-paste; left unchanged pending confirmation.
    if stack.config.repo.is_empty() {
      return Err(
        anyhow!("No repo configured, can't create webhook").into(),
      );
    }
    // Repo is formatted "<owner>/<repo>".
    let mut split = stack.config.repo.split('/');
    let owner = split.next().context("Stack repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Err(
        anyhow!("Cannot manage repo webhooks under owner {owner}")
          .into(),
      );
    };
    // Fix: this context previously said "Sync repo", a copy-paste
    // from the sync webhook handler. This is the Stack handler.
    let repo =
      split.next().context("Stack repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      ..
    } = core_config();
    // webhook_base_url overrides host when configured.
    let host = if webhook_base_url.is_empty() {
      host
    } else {
      webhook_base_url
    };
    let url = match self.action {
      StackWebhookAction::Refresh => {
        format!("{host}/listener/github/stack/{}/refresh", stack.id)
      }
      StackWebhookAction::Deploy => {
        format!("{host}/listener/github/stack/{}/deploy", stack.id)
      }
    };
    // Delete the first active webhook whose callback url matches.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        github_repos
          .delete_webhook(owner, repo, webhook.id)
          .await
          .context("failed to delete webhook")?;
        return Ok(NoData {});
      }
    }
    // No webhook to delete, all good
    Ok(NoData {})
  }
}

View File

@@ -4,18 +4,21 @@ use std::{
};
use anyhow::{Context, anyhow};
use database::mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use formatting::format_serror;
use komodo_client::{
api::{read::ExportAllResourcesToToml, write::*},
entities::{
self, CloneArgs, NoData, Operation, ResourceTarget,
self, Operation, RepoExecutionArgs, ResourceTarget,
action::Action,
alert::{Alert, AlertData, SeverityLevel},
alerter::Alerter,
all_logs_success,
build::Build,
builder::Builder,
config::core::CoreConfig,
deployment::Deployment,
komodo_timestamp,
permission::PermissionLevel,
@@ -23,24 +26,14 @@ use komodo_client::{
repo::Repo,
server::Server,
stack::Stack,
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
SyncDeployUpdate,
},
sync::{ResourceSync, ResourceSyncInfo, SyncDeployUpdate},
to_path_compatible_name,
update::{Log, Update},
user::sync_user,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use tokio::fs;
use tracing::Instrument;
use crate::{
alert::send_alerts,
@@ -54,7 +47,7 @@ use crate::{
},
permission::get_check_permissions,
resource,
state::{db_client, github_client},
state::db_client,
sync::{
deploy::SyncDeployParams, remote::RemoteResources,
view::push_updates_for_view,
@@ -64,20 +57,39 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateResourceSync {
#[instrument(name = "CreateResourceSync", skip(user))]
#[instrument(
"CreateResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<ResourceSync> {
Ok(
resource::create::<ResourceSync>(&self.name, self.config, user)
.await?,
resource::create::<ResourceSync>(
&self.name,
self.config,
None,
user,
)
.await
}
}
impl Resolve<WriteArgs> for CopyResourceSync {
#[instrument(name = "CopyResourceSync", skip(user))]
#[instrument(
"CopyResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.name,
copy_sync = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -89,29 +101,43 @@ impl Resolve<WriteArgs> for CopyResourceSync {
PermissionLevel::Write.into(),
)
.await?;
Ok(
resource::create::<ResourceSync>(
&self.name,
config.into(),
user,
)
.await?,
resource::create::<ResourceSync>(
&self.name,
config.into(),
None,
user,
)
.await
}
}
impl Resolve<WriteArgs> for DeleteResourceSync {
#[instrument(name = "DeleteResourceSync", skip(args))]
#[instrument(
"DeleteResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.id,
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<ResourceSync> {
Ok(resource::delete::<ResourceSync>(&self.id, args).await?)
Ok(resource::delete::<ResourceSync>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateResourceSync {
#[instrument(name = "UpdateResourceSync", skip(user))]
#[instrument(
"UpdateResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -124,7 +150,15 @@ impl Resolve<WriteArgs> for UpdateResourceSync {
}
impl Resolve<WriteArgs> for RenameResourceSync {
#[instrument(name = "RenameResourceSync", skip(user))]
#[instrument(
"RenameResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -137,7 +171,16 @@ impl Resolve<WriteArgs> for RenameResourceSync {
}
impl Resolve<WriteArgs> for WriteSyncFileContents {
#[instrument(name = "WriteSyncFileContents", skip(args))]
#[instrument(
"WriteSyncFileContents",
skip_all,
fields(
operator = args.user.id,
sync = self.sync,
resource_path = self.resource_path,
file_path = self.file_path,
)
)]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let sync = get_check_permissions::<ResourceSync>(
&self.sync,
@@ -182,6 +225,7 @@ impl Resolve<WriteArgs> for WriteSyncFileContents {
}
}
#[instrument("WriteSyncFileContentsOnHost", skip_all)]
async fn write_sync_file_contents_on_host(
req: WriteSyncFileContents,
args: &WriteArgs,
@@ -205,16 +249,9 @@ async fn write_sync_file_contents_on_host(
.context("Invalid resource path")?;
let full_path = root.join(&resource_path).join(&file_path);
if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent).await.with_context(|| {
format!(
"Failed to initialize resource file parent directory {parent:?}"
)
})?;
}
if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
if let Err(e) = secret_file::write_async(&full_path, &contents)
.await
.with_context(|| {
format!(
"Failed to write resource file contents to {full_path:?}"
)
@@ -251,6 +288,7 @@ async fn write_sync_file_contents_on_host(
Ok(update)
}
#[instrument("WriteSyncFileContentsGit", skip_all)]
async fn write_sync_file_contents_git(
req: WriteSyncFileContents,
args: &WriteArgs,
@@ -265,29 +303,32 @@ async fn write_sync_file_contents_git(
contents,
} = req;
let mut clone_args: CloneArgs = if let Some(repo) = &repo {
let mut repo_args: RepoExecutionArgs = if let Some(repo) = &repo {
repo.into()
} else {
(&sync).into()
};
let root = clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(root.display().to_string());
let root = repo_args.unique_path(&core_config().repo_directory)?;
repo_args.destination = Some(root.display().to_string());
let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
let git_token = if let Some(account) = &repo_args.account {
git_token(&repo_args.provider, account, |https| repo_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", repo_args.provider),
)?
} else {
None
};
let file_path =
file_path.parse::<PathBuf>().context("Invalid file path")?;
let resource_path = resource_path
.parse::<PathBuf>()
.context("Invalid resource path")?;
file_path.parse::<PathBuf>().with_context(|| {
format!("File path is not a valid path: {file_path}")
})?;
let resource_path =
resource_path.parse::<PathBuf>().with_context(|| {
format!("Resource path is not a valid path: {resource_path}")
})?;
let full_path = root
.join(&resource_path)
.join(&file_path)
@@ -295,7 +336,7 @@ async fn write_sync_file_contents_git(
.collect::<PathBuf>();
if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent).await.with_context(|| {
tokio::fs::create_dir_all(parent).await.with_context(|| {
format!(
"Failed to initialize resource file parent directory {parent:?}"
)
@@ -307,8 +348,8 @@ async fn write_sync_file_contents_git(
if !root.join(".git").exists() {
git::init_folder_as_repo(
&root,
&clone_args,
access_token.as_deref(),
&repo_args,
git_token.as_deref(),
&mut update.logs,
)
.await;
@@ -320,20 +361,18 @@ async fn write_sync_file_contents_git(
}
}
// Save this for later -- repo_args moved next.
let branch = repo_args.branch.clone();
// Pull latest changes to repo to ensure linear commit history
match git::pull_or_clone(
clone_args,
repo_args,
&core_config().repo_directory,
access_token,
Default::default(),
Default::default(),
Default::default(),
Default::default(),
git_token,
)
.await
.context("Failed to pull latest changes before commit")
{
Ok(res) => update.logs.extend(res.logs),
Ok((res, _)) => update.logs.extend(res.logs),
Err(e) => {
update.push_error_log("Pull Repo", format_serror(&e.into()));
update.finalize();
@@ -347,8 +386,9 @@ async fn write_sync_file_contents_git(
return Ok(update);
}
if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
if let Err(e) = tokio::fs::write(&full_path, &contents)
.await
.with_context(|| {
format!(
"Failed to write resource file contents to {full_path:?}"
)
@@ -373,7 +413,7 @@ async fn write_sync_file_contents_git(
&format!("{}: Commit Resource File", args.user.username),
&root,
&resource_path.join(&file_path),
&sync.config.branch,
&branch,
)
.await;
@@ -382,10 +422,14 @@ async fn write_sync_file_contents_git(
if let Err(e) = (RefreshResourceSyncPending { sync: sync.name })
.resolve(args)
.await
.map_err(|e| e.error)
.context(
"Failed to refresh sync pending after writing file contents",
)
{
update.push_error_log(
"Refresh sync pending",
format_serror(&e.error.into()),
format_serror(&e.into()),
);
}
@@ -396,7 +440,14 @@ async fn write_sync_file_contents_git(
}
impl Resolve<WriteArgs> for CommitSync {
#[instrument(name = "CommitSync", skip(args))]
#[instrument(
"CommitSync",
skip_all,
fields(
operator = args.user.id,
sync = self.sync,
)
)]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let WriteArgs { user } = args;
@@ -483,12 +534,9 @@ impl Resolve<WriteArgs> for CommitSync {
.sync_directory
.join(to_path_compatible_name(&sync.name))
.join(&resource_path);
if let Some(parent) = file_path.parent() {
fs::create_dir_all(parent)
.await
.with_context(|| format!("Failed to initialize resource file parent directory {parent:?}"))?;
};
if let Err(e) = tokio::fs::write(&file_path, &res.toml)
let span = info_span!("CommitSyncOnHost");
if let Err(e) = secret_file::write_async(&file_path, &res.toml)
.instrument(span)
.await
.with_context(|| {
format!("Failed to write resource file to {file_path:?}",)
@@ -512,7 +560,7 @@ impl Resolve<WriteArgs> for CommitSync {
// Resource path checked above for repo mode.
unreachable!()
};
let args: CloneArgs = repo.into();
let args: RepoExecutionArgs = repo.into();
if let Err(e) =
commit_git_sync(args, &resource_path, &res.toml, &mut update)
.await
@@ -530,7 +578,7 @@ impl Resolve<WriteArgs> for CommitSync {
// Resource path checked above for repo mode.
unreachable!()
};
let args: CloneArgs = (&sync).into();
let args: RepoExecutionArgs = (&sync).into();
if let Err(e) =
commit_git_sync(args, &resource_path, &res.toml, &mut update)
.await
@@ -581,8 +629,9 @@ impl Resolve<WriteArgs> for CommitSync {
}
}
#[instrument("CommitSyncGit", skip_all)]
async fn commit_git_sync(
mut args: CloneArgs,
mut args: RepoExecutionArgs,
resource_path: &Path,
toml: &str,
update: &mut Update,
@@ -600,18 +649,13 @@ async fn commit_git_sync(
None
};
let pull = git::pull_or_clone(
let (pull_res, _) = git::pull_or_clone(
args.clone(),
&core_config().repo_directory,
access_token,
Default::default(),
Default::default(),
Default::default(),
Default::default(),
)
.await?;
update.logs.extend(pull.logs);
update.logs.extend(pull_res.logs);
if !all_logs_success(&update.logs) {
return Ok(());
}
@@ -630,11 +674,6 @@ async fn commit_git_sync(
}
impl Resolve<WriteArgs> for RefreshResourceSyncPending {
#[instrument(
name = "RefreshResourceSyncPending",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -979,215 +1018,3 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
Ok(crate::resource::get::<ResourceSync>(&sync.id).await?)
}
}
impl Resolve<WriteArgs> for CreateSyncWebhook {
  // Creates a github.com push webhook on the ResourceSync's repo,
  // pointing at this Core's listener endpoint for the requested
  // action (Refresh or Sync). Requires Write permission on the Sync.
  // Idempotent: returns early if an active webhook with the same
  // callback url already exists.
  #[instrument(name = "CreateSyncWebhook", skip(args))]
  async fn resolve(
    self,
    args: &WriteArgs,
  ) -> serror::Result<CreateSyncWebhookResponse> {
    let WriteArgs { user } = args;
    // Webhook management requires the github webhook app config.
    let Some(github) = github_client() else {
      return Err(
        anyhow!(
          "github_webhook_app is not configured in core config toml"
        )
        .into(),
      );
    };
    let sync = get_check_permissions::<ResourceSync>(
      &self.sync,
      user,
      PermissionLevel::Write.into(),
    )
    .await?;
    if sync.config.repo.is_empty() {
      return Err(
        anyhow!("No repo configured, can't create webhook").into(),
      );
    }
    // Repo is formatted "<owner>/<repo>".
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Err(
        anyhow!("Cannot manage repo webhooks under owner {owner}")
          .into(),
      );
    };
    // Fix: this context previously said "Repo repo", a copy-paste
    // slip — this handler operates on the Sync's repo, matching the
    // wording used in DeleteSyncWebhook.
    let repo =
      split.next().context("Sync repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      webhook_secret,
      ..
    } = core_config();
    // Per-sync webhook secret overrides the global one.
    let webhook_secret = if sync.config.webhook_secret.is_empty() {
      webhook_secret
    } else {
      &sync.config.webhook_secret
    };
    // webhook_base_url overrides host when configured.
    let host = if webhook_base_url.is_empty() {
      host
    } else {
      webhook_base_url
    };
    let url = match self.action {
      SyncWebhookAction::Refresh => {
        format!("{host}/listener/github/sync/{}/refresh", sync.id)
      }
      SyncWebhookAction::Sync => {
        format!("{host}/listener/github/sync/{}/sync", sync.id)
      }
    };
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        return Ok(NoData {});
      }
    }
    // Now good to create the webhook
    let request = ReposCreateWebhookRequest {
      active: Some(true),
      config: Some(ReposCreateWebhookRequestConfig {
        url,
        secret: webhook_secret.to_string(),
        content_type: String::from("json"),
        insecure_ssl: None,
        digest: Default::default(),
        token: Default::default(),
      }),
      events: vec![String::from("push")],
      name: String::from("web"),
    };
    github_repos
      .create_webhook(owner, repo, &request)
      .await
      .context("failed to create webhook")?;
    // Flip webhook_enabled on so incoming deliveries are honored.
    if !sync.config.webhook_enabled {
      UpdateResourceSync {
        id: sync.id,
        config: PartialResourceSyncConfig {
          webhook_enabled: Some(true),
          ..Default::default()
        },
      }
      .resolve(args)
      .await
      .map_err(|e| e.error)
      .context("failed to update sync to enable webhook")?;
    }
    Ok(NoData {})
  }
}
impl Resolve<WriteArgs> for DeleteSyncWebhook {
  // Deletes the github.com push webhook on the ResourceSync's repo
  // that targets this Core's listener endpoint for the given action.
  // Succeeds (NoData) even when no matching active webhook exists.
  // Requires Write permission on the Sync.
  #[instrument(name = "DeleteSyncWebhook", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<DeleteSyncWebhookResponse> {
    // Webhook management requires the github webhook app config.
    let Some(github) = github_client() else {
      return Err(
        anyhow!(
          "github_webhook_app is not configured in core config toml"
        )
        .into(),
      );
    };
    let sync = get_check_permissions::<ResourceSync>(
      &self.sync,
      user,
      PermissionLevel::Write.into(),
    )
    .await?;
    if sync.config.git_provider != "github.com" {
      return Err(
        anyhow!("Can only manage github.com repo webhooks").into(),
      );
    }
    // NOTE(review): message says "create" in the delete handler —
    // likely copy-paste; confirm before changing user-facing text.
    if sync.config.repo.is_empty() {
      return Err(
        anyhow!("No repo configured, can't create webhook").into(),
      );
    }
    // Repo is formatted "<owner>/<repo>".
    let mut split = sync.config.repo.split('/');
    let owner = split.next().context("Sync repo has no owner")?;
    let Some(github) = github.get(owner) else {
      return Err(
        anyhow!("Cannot manage repo webhooks under owner {owner}")
          .into(),
      );
    };
    let repo =
      split.next().context("Sync repo has no repo after the /")?;
    let github_repos = github.repos();
    // First make sure the webhook isn't already created (inactive ones are ignored)
    let webhooks = github_repos
      .list_all_webhooks(owner, repo)
      .await
      .context("failed to list all webhooks on repo")?
      .body;
    let CoreConfig {
      host,
      webhook_base_url,
      ..
    } = core_config();
    // webhook_base_url overrides host when configured.
    let host = if webhook_base_url.is_empty() {
      host
    } else {
      webhook_base_url
    };
    let url = match self.action {
      SyncWebhookAction::Refresh => {
        format!("{host}/listener/github/sync/{}/refresh", sync.id)
      }
      SyncWebhookAction::Sync => {
        format!("{host}/listener/github/sync/{}/sync", sync.id)
      }
    };
    // Delete the first active webhook whose callback url matches.
    for webhook in webhooks {
      if webhook.active && webhook.config.url == url {
        github_repos
          .delete_webhook(owner, repo, webhook.id)
          .await
          .context("failed to delete webhook")?;
        return Ok(NoData {});
      }
    }
    // No webhook to delete, all good
    Ok(NoData {})
  }
}

View File

@@ -1,36 +1,25 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::{
CreateTag, DeleteTag, RenameTag, UpdateTagColor,
UpdateTagsOnResource, UpdateTagsOnResourceResponse,
},
entities::{
ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
stack::Stack,
sync::ResourceSync,
tag::{Tag, TagColor},
},
};
use mungos::{
use database::mungos::{
by_id::{delete_one_by_id, update_one_by_id},
mongodb::bson::{doc, oid::ObjectId},
};
use komodo_client::{
api::write::{CreateTag, DeleteTag, RenameTag, UpdateTagColor},
entities::{
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, stack::Stack, sync::ResourceSync, tag::Tag,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
config::core_config,
helpers::query::{get_tag, get_tag_check_owner},
permission::get_check_permissions,
resource,
state::db_client,
};
@@ -38,19 +27,37 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateTag {
#[instrument(name = "CreateTag", skip(user))]
#[instrument(
"CreateTag",
skip_all,
fields(
operator = user.id,
tag = self.name,
color = format!("{:?}", self.color),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Tag> {
if core_config().disable_non_admin_create && !user.admin {
return Err(
anyhow!("Non admins cannot create tags")
.status_code(StatusCode::FORBIDDEN),
);
}
if ObjectId::from_str(&self.name).is_ok() {
return Err(anyhow!("tag name cannot be ObjectId").into());
return Err(
anyhow!("Tag name cannot be ObjectId")
.status_code(StatusCode::BAD_REQUEST),
);
}
let mut tag = Tag {
id: Default::default(),
name: self.name,
color: TagColor::Slate,
color: self.color.unwrap_or_default(),
owner: user.id.clone(),
};
@@ -69,7 +76,15 @@ impl Resolve<WriteArgs> for CreateTag {
}
impl Resolve<WriteArgs> for RenameTag {
#[instrument(name = "RenameTag", skip(user))]
#[instrument(
"RenameTag",
skip_all,
fields(
operator = user.id,
tag = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -94,7 +109,15 @@ impl Resolve<WriteArgs> for RenameTag {
}
impl Resolve<WriteArgs> for UpdateTagColor {
#[instrument(name = "UpdateTagColor", skip(user))]
#[instrument(
"UpdateTagColor",
skip_all,
fields(
operator = user.id,
tag = self.tag,
color = format!("{:?}", self.color),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -115,7 +138,14 @@ impl Resolve<WriteArgs> for UpdateTagColor {
}
impl Resolve<WriteArgs> for DeleteTag {
#[instrument(name = "DeleteTag", skip(user))]
#[instrument(
"DeleteTag",
skip_all,
fields(
operator = user.id,
tag_id = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -124,13 +154,15 @@ impl Resolve<WriteArgs> for DeleteTag {
tokio::try_join!(
resource::remove_tag_from_all::<Server>(&self.id),
resource::remove_tag_from_all::<Deployment>(&self.id),
resource::remove_tag_from_all::<Stack>(&self.id),
resource::remove_tag_from_all::<Deployment>(&self.id),
resource::remove_tag_from_all::<Build>(&self.id),
resource::remove_tag_from_all::<Repo>(&self.id),
resource::remove_tag_from_all::<Procedure>(&self.id),
resource::remove_tag_from_all::<Action>(&self.id),
resource::remove_tag_from_all::<ResourceSync>(&self.id),
resource::remove_tag_from_all::<Builder>(&self.id),
resource::remove_tag_from_all::<Alerter>(&self.id),
resource::remove_tag_from_all::<Procedure>(&self.id),
)?;
delete_one_by_id(&db_client().tags, &self.id, None).await?;
@@ -138,112 +170,3 @@ impl Resolve<WriteArgs> for DeleteTag {
Ok(tag)
}
}
impl Resolve<WriteArgs> for UpdateTagsOnResource {
#[instrument(name = "UpdateTagsOnResource", skip(args))]
async fn resolve(
self,
args: &WriteArgs,
) -> serror::Result<UpdateTagsOnResourceResponse> {
let WriteArgs { user } = args;
match self.target {
ResourceTarget::System(_) => {
return Err(anyhow!("Invalid target type: System").into());
}
ResourceTarget::Build(id) => {
get_check_permissions::<Build>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Build>(&id, self.tags, args).await?;
}
ResourceTarget::Builder(id) => {
get_check_permissions::<Builder>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Builder>(&id, self.tags, args).await?
}
ResourceTarget::Deployment(id) => {
get_check_permissions::<Deployment>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Deployment>(&id, self.tags, args)
.await?
}
ResourceTarget::Server(id) => {
get_check_permissions::<Server>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Server>(&id, self.tags, args).await?
}
ResourceTarget::Repo(id) => {
get_check_permissions::<Repo>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Repo>(&id, self.tags, args).await?
}
ResourceTarget::Alerter(id) => {
get_check_permissions::<Alerter>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Alerter>(&id, self.tags, args).await?
}
ResourceTarget::Procedure(id) => {
get_check_permissions::<Procedure>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Procedure>(&id, self.tags, args)
.await?
}
ResourceTarget::Action(id) => {
get_check_permissions::<Action>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Action>(&id, self.tags, args).await?
}
ResourceTarget::ResourceSync(id) => {
get_check_permissions::<ResourceSync>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<ResourceSync>(&id, self.tags, args)
.await?
}
ResourceTarget::Stack(id) => {
get_check_permissions::<Stack>(
&id,
user,
PermissionLevel::Write.into(),
)
.await?;
resource::update_tags::<Stack>(&id, self.tags, args).await?
}
};
Ok(UpdateTagsOnResourceResponse {})
}
}

View File

@@ -0,0 +1,309 @@
use anyhow::Context as _;
use futures_util::{StreamExt as _, stream::FuturesUnordered};
use komodo_client::{
api::write::*,
entities::{
NoData, deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, terminal::TerminalTarget,
user::User,
},
};
use periphery_client::api;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::{
periphery_client,
query::get_all_tags,
terminal::{
create_container_terminal_inner,
get_deployment_periphery_container,
get_stack_service_periphery_container,
},
},
permission::get_check_permissions,
resource,
};
use super::WriteArgs;
//
impl Resolve<WriteArgs> for CreateTerminal {
  #[instrument(
    "CreateTerminal",
    skip_all,
    fields(
      operator = user.id,
      terminal = self.name,
      target = format!("{:?}", self.target),
      command = self.command,
      mode = format!("{:?}", self.mode),
      recreate = format!("{:?}", self.recreate),
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    // Dispatch on the target variant. The target is cloned first so
    // `self` can still be passed whole into the helper functions.
    let target = self.target.clone();
    match target {
      TerminalTarget::Server { server } => {
        // The server id/name is required for a Server terminal.
        let server = server
          .context("Must provide 'target.params.server'")
          .status_code(StatusCode::BAD_REQUEST)?;
        create_server_terminal(self, server, user).await?
      }
      TerminalTarget::Container { server, container } => {
        create_container_terminal(self, server, container, user)
          .await?
      }
      TerminalTarget::Stack { stack, service } => {
        // The service name is required for a Stack terminal.
        let service = service
          .context("Must provide 'target.params.service'")
          .status_code(StatusCode::BAD_REQUEST)?;
        create_stack_service_terminal(self, stack, service, user)
          .await?
      }
      TerminalTarget::Deployment { deployment } => {
        create_deployment_terminal(self, deployment, user).await?
      }
    }
    Ok(NoData {})
  }
}
/// Create a terminal session directly on a Server host.
/// Requires Read + terminal permission on the Server.
async fn create_server_terminal(
  req: CreateTerminal,
  server: String,
  user: &User,
) -> anyhow::Result<()> {
  let server = get_check_permissions::<Server>(
    &server,
    user,
    PermissionLevel::Read.terminal(),
  )
  .await?;
  let periphery = periphery_client(&server).await?;
  // Only name / command / recreate are forwarded to Periphery;
  // the target and mode were already consumed by the dispatcher.
  let CreateTerminal {
    name,
    command,
    recreate,
    ..
  } = req;
  periphery
    .request(api::terminal::CreateServerTerminal {
      name,
      command,
      recreate,
    })
    .await
    .context("Failed to create Server Terminal on Periphery")?;
  Ok(())
}
/// Create a terminal attached to an arbitrary container on a Server.
/// Requires Read + terminal permission on the Server.
async fn create_container_terminal(
  req: CreateTerminal,
  server: String,
  container: String,
  user: &User,
) -> anyhow::Result<()> {
  let server = get_check_permissions::<Server>(
    &server,
    user,
    PermissionLevel::Read.terminal(),
  )
  .await?;
  // Delegate to the shared container-terminal helper.
  let periphery = periphery_client(&server).await?;
  create_container_terminal_inner(req, &periphery, container).await
}
/// Create a terminal attached to a Stack service's container.
/// Permission checking happens inside the lookup helper.
async fn create_stack_service_terminal(
  req: CreateTerminal,
  stack: String,
  service: String,
  user: &User,
) -> anyhow::Result<()> {
  // Resolve the Stack service down to its Periphery client and
  // concrete container name, then reuse the container path.
  let (_stack, periphery, container) =
    get_stack_service_periphery_container(&stack, &service, user)
      .await?;
  create_container_terminal_inner(req, &periphery, container).await
}
/// Create a terminal attached to a Deployment's container.
/// Permission checking happens inside the lookup helper.
async fn create_deployment_terminal(
  req: CreateTerminal,
  deployment: String,
  user: &User,
) -> anyhow::Result<()> {
  // Resolve the Deployment down to its Periphery client and
  // concrete container name, then reuse the container path.
  let (_deployment, periphery, container) =
    get_deployment_periphery_container(&deployment, user).await?;
  create_container_terminal_inner(req, &periphery, container).await
}
//
impl Resolve<WriteArgs> for DeleteTerminal {
  #[instrument(
    "DeleteTerminal",
    skip_all,
    fields(
      operator = user.id,
      target = format!("{:?}", self.target),
      terminal = self.terminal,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    // Resolve which Server hosts the terminal, enforcing terminal
    // permission on whichever resource variant is targeted.
    let server = match &self.target {
      TerminalTarget::Server { server } => {
        let id = server
          .as_ref()
          .context("Must provide 'target.params.server'")
          .status_code(StatusCode::BAD_REQUEST)?;
        get_check_permissions::<Server>(
          id,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
      }
      TerminalTarget::Container { server, .. } => {
        get_check_permissions::<Server>(
          server,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
      }
      TerminalTarget::Stack { stack, .. } => {
        // Permission is checked on the Stack; its configured
        // server_id is then resolved to the Server.
        let stack = get_check_permissions::<Stack>(
          stack,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?;
        resource::get::<Server>(&stack.config.server_id).await?
      }
      TerminalTarget::Deployment { deployment } => {
        // Permission is checked on the Deployment; its configured
        // server_id is then resolved to the Server.
        let deployment = get_check_permissions::<Deployment>(
          deployment,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?;
        resource::get::<Server>(&deployment.config.server_id).await?
      }
    };
    let periphery = periphery_client(&server).await?;
    periphery
      .request(api::terminal::DeleteTerminal {
        target: self.target,
        terminal: self.terminal,
      })
      .await
      .context("Failed to delete terminal on Periphery")?;
    Ok(NoData {})
  }
}
//
impl Resolve<WriteArgs> for DeleteAllTerminals {
  #[instrument(
    "DeleteAllTerminals",
    skip_all,
    fields(
      operator = user.id,
      server = self.server,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    // Permission gate: terminal access on the target Server.
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Read.terminal(),
    )
    .await?;
    // Forward the delete-all to the Server's Periphery agent.
    periphery_client(&server)
      .await?
      .request(api::terminal::DeleteAllTerminals {})
      .await
      .context("Failed to delete all terminals on Periphery")?;
    Ok(NoData {})
  }
}
//
impl Resolve<WriteArgs> for BatchDeleteAllTerminals {
  #[instrument(
    "BatchDeleteAllTerminals",
    skip_all,
    fields(
      operator = user.id,
      query = format!("{:?}", self.query),
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Tags only need resolving when the query filters by tag.
    let all_tags = match self.query.tags.is_empty() {
      true => vec![],
      false => get_all_tags(None).await?,
    };
    let servers = resource::list_full_for_user::<Server>(
      self.query,
      user,
      PermissionLevel::Read.terminal(),
      &all_tags,
    )
    .await?;
    // Fan out one delete-all request per matched server and run them
    // concurrently. Per-server failures are logged but do not fail
    // the batch as a whole.
    let deletes = servers.into_iter().map(|server| async move {
      let res = async {
        periphery_client(&server)
          .await?
          .request(api::terminal::DeleteAllTerminals {})
          .await
          .context("Failed to delete all terminals on Periphery")?;
        anyhow::Ok(())
      }
      .await;
      if let Err(e) = res {
        warn!(
          "Failed to delete all terminals on {} ({}) | {e:#}",
          server.name, server.id
        )
      }
    });
    deletes
      .collect::<FuturesUnordered<_>>()
      .collect::<Vec<_>>()
      .await;
    Ok(NoData {})
  }
}

View File

@@ -1,26 +1,121 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use async_timing_util::unix_timestamp_ms;
use database::{
hash_password,
mungos::mongodb::bson::{doc, oid::ObjectId},
};
use komodo_client::{
api::write::{
DeleteUser, DeleteUserResponse, UpdateUserPassword,
UpdateUserPasswordResponse, UpdateUserUsername,
UpdateUserUsernameResponse,
api::write::*,
entities::{
NoData,
user::{User, UserConfig},
},
entities::{NoData, user::UserConfig},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
config::core_config, helpers::hash_password, state::db_client,
};
use crate::{config::core_config, state::db_client};
use super::WriteArgs;
//
impl Resolve<WriteArgs> for CreateLocalUser {
  // Admin-only: provisions a new local (username + password) user.
  // Validates the username, hashes the password before storage, and
  // returns the created user with sensitive fields stripped.
  #[instrument(
    "CreateLocalUser",
    skip_all,
    fields(
      admin_id = admin.id,
      username = self.username
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user: admin }: &WriteArgs,
  ) -> serror::Result<CreateLocalUserResponse> {
    // Only admins may provision local users.
    if !admin.admin {
      return Err(
        anyhow!("This method is admin-only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    if self.username.is_empty() {
      return Err(anyhow!("Username cannot be empty.").into());
    }
    // Usernames that parse as ObjectIds would be ambiguous with
    // user ids in lookups.
    if ObjectId::from_str(&self.username).is_ok() {
      return Err(
        anyhow!("Username cannot be valid ObjectId").into(),
      );
    }
    if self.password.is_empty() {
      return Err(anyhow!("Password cannot be empty.").into());
    }
    let db = db_client();
    // Enforce username uniqueness before inserting.
    if db
      .users
      .find_one(doc! { "username": &self.username })
      .await
      .context("Failed to query for existing users")?
      .is_some()
    {
      return Err(anyhow!("Username already taken.").into());
    }
    let ts = unix_timestamp_ms() as i64;
    // Only the hash of the password is stored, never the plaintext.
    let hashed_password = hash_password(self.password)?;
    let mut user = User {
      id: Default::default(),
      username: self.username,
      enabled: true,
      admin: false,
      super_admin: false,
      create_server_permissions: false,
      create_build_permissions: false,
      updated_at: ts,
      last_update_view: 0,
      recents: Default::default(),
      all: Default::default(),
      config: UserConfig::Local {
        password: hashed_password,
      },
    };
    // Reuse the `db` handle acquired above rather than calling
    // db_client() a second time (was a redundant second call).
    user.id = db
      .users
      .insert_one(&user)
      .await
      .context("failed to create user")?
      .inserted_id
      .as_object_id()
      .context("inserted_id is not ObjectId")?
      .to_string();
    // Strip sensitive fields (e.g. the password hash) before
    // returning the user to the caller.
    user.sanitize();
    Ok(user)
  }
}
//
impl Resolve<WriteArgs> for UpdateUserUsername {
#[instrument(
"UpdateUserUsername",
skip_all,
fields(
operator = user.id,
new_username = self.username,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -38,6 +133,13 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
if self.username.is_empty() {
return Err(anyhow!("Username cannot be empty.").into());
}
if ObjectId::from_str(&self.username).is_ok() {
return Err(
anyhow!("Username cannot be valid ObjectId").into(),
);
}
let db = db_client();
if db
.users
@@ -64,6 +166,11 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
//
impl Resolve<WriteArgs> for UpdateUserPassword {
#[instrument(
"UpdateUserPassword",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -78,25 +185,7 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
);
}
}
let UserConfig::Local { .. } = user.config else {
return Err(anyhow!("User is not local user").into());
};
if self.password.is_empty() {
return Err(anyhow!("Password cannot be empty.").into());
}
let id = ObjectId::from_str(&user.id)
.context("User id not valid ObjectId.")?;
let hashed_password = hash_password(self.password)?;
db_client()
.users
.update_one(
doc! { "_id": id },
doc! { "$set": {
"config.data.password": hashed_password
} },
)
.await
.context("Failed to update user password on database.")?;
db_client().set_user_password(user, &self.password).await?;
Ok(NoData {})
}
}
@@ -104,12 +193,23 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
//
impl Resolve<WriteArgs> for DeleteUser {
#[instrument(
"DeleteUser",
skip_all,
fields(
admin_id = admin.id,
user_to_delete = self.user
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<DeleteUserResponse> {
if !admin.admin {
return Err(anyhow!("Calling user is not admin.").into());
return Err(
anyhow!("This method is admin-only.")
.status_code(StatusCode::FORBIDDEN),
);
}
if admin.username == self.user || admin.id == self.user {
return Err(anyhow!("User cannot delete themselves.").into());
@@ -145,6 +245,14 @@ impl Resolve<WriteArgs> for DeleteUser {
.delete_one(query)
.await
.context("Failed to delete user from database")?;
// Also remove user id from all user groups
if let Err(e) = db
.user_groups
.update_many(doc! {}, doc! { "$pull": { "users": &user.id } })
.await
{
warn!("Failed to remove deleted user from user groups | {e:?}");
};
Ok(user)
}
}

Some files were not shown because too many files have changed in this diff Show More