Compare commits

...

255 Commits
v0.1.10 ... v0

Author SHA1 Message Date
mbecker20
9c0be07ae1 refactor caching to use custom Cache struct 2023-05-28 08:31:18 +00:00
mbecker20
ab945aadde fix set termination timeout 0 2023-05-25 20:18:39 +00:00
mbecker20
c1c461c273 add command crud and run api 2023-05-24 07:45:52 +00:00
mbecker20
336742ee69 update author and clap args 2023-05-24 05:28:46 +00:00
beckerinj
405dacce1c sanitize container logs for any script tags 2023-05-13 02:23:05 -04:00
beckerinj
9acd45aa93 fix log whitespace non preservation issue 2023-05-13 02:14:14 -04:00
beckerinj
c889c2cc03 clean up log component imports 2023-05-13 01:49:06 -04:00
mbecker20
7ac91ef416 view images on server 2023-05-12 07:22:56 +00:00
mbecker20
8e28669aa1 potentially fix deployment update getting crossed with another deployment 2023-05-09 21:17:10 +00:00
mbecker20
6cdb91f8b8 more readable container state in header 2023-05-04 01:12:55 +00:00
mbecker20
e892474713 modify create deployment initializer 2023-05-03 21:03:57 +00:00
mbecker20
abdae98816 core handle term signal 2023-05-03 20:02:37 +00:00
mbecker20
ab4fe49f33 deployment / build config reset 2023-05-03 19:31:59 +00:00
mbecker20
1ace35103b fix ansi-to-html install 2023-05-03 07:21:38 +00:00
beckerinj
dbee729eee show ansi colors in the logs correctly 2023-05-03 03:13:42 -04:00
mbecker20
792576ce59 add auto redeploy user 2023-05-02 17:21:25 +00:00
mbecker20
a07624e9b9 0.3.4 fix docker stop --signal on older docker versions 2023-05-01 21:03:25 +00:00
mbecker20
bb8054af8a log version first 2023-05-01 08:43:00 +00:00
mbecker20
7738f3e066 core logs version on startup 2023-05-01 08:34:20 +00:00
mbecker20
5dee16a100 0.3.3 add default term signal and timeout to deployment 2023-05-01 08:28:12 +00:00
mbecker20
35f3bcdf2f update core version 2023-05-01 03:05:51 +00:00
mbecker20
130ca8e1f1 bump versions to 0.3.2 2023-05-01 01:53:58 +00:00
mbecker20
ced4c21688 update monitor client to 0.3.1 2023-05-01 01:41:23 +00:00
mbecker20
6ec7078024 custom termination signals 2023-04-30 06:52:27 +00:00
mbecker20
b28d8f2506 update frontend types 2023-04-30 03:26:01 +00:00
mbecker20
c88a9291a0 support auto redeploy and custom stop signals 2023-04-30 00:10:59 +00:00
mbecker20
1e82d19306 build summary defaults to time view 2023-04-21 16:34:03 +00:00
mbecker20
dd87e50cb2 build stats summary 2023-04-21 08:52:17 +00:00
mbecker20
4c8f96a30f build stats card 2023-04-21 08:08:15 +00:00
mbecker20
c4f45e05f1 finish build stats api 2023-04-21 08:08:01 +00:00
mbecker20
6aa382c7c1 finish build stats api 2023-04-20 16:34:14 +00:00
mbecker20
ccb9f059e6 get build stats api 2023-04-20 07:34:49 +00:00
mbecker20
1cdcea0771 start on route to get daily build stats (time, count) 2023-04-19 07:02:21 +00:00
mbecker20
88dda0de80 update rename deployment to check whether deployment has repo attached, and if so, reclone it to account for name change. 2023-04-19 06:44:58 +00:00
mbecker20
30ed99e2b0 publish monitor client 0.3.1 with Readme 2023-04-18 07:56:30 +00:00
mbecker20
e5953b7541 monitor client readme 2023-04-18 07:55:05 +00:00
mbecker20
1f9d01c59f new home servers png 2023-04-18 06:31:23 +00:00
mbecker20
cc5210a3d8 fix server children add new button 2023-04-18 06:17:42 +00:00
mbecker20
26559e2d3b delete builds screenshots 2023-04-18 03:20:20 +00:00
mbecker20
7eeddb300f add link to screenshots docsite 2023-04-18 02:59:37 +00:00
mbecker20
1e01bae16b add screenshots to monitor readme 2023-04-18 02:47:46 +00:00
mbecker20
87c03924e5 remove second universal search 2023-04-18 02:46:46 +00:00
mbecker20
f0998b1d43 add universal search screenshot 2023-04-18 02:42:21 +00:00
mbecker20
1995a04244 add screenshots 2023-04-18 02:38:07 +00:00
mbecker20
420fe6bcd5 add build time to version selector 2023-04-17 08:26:34 +00:00
mbecker20
d4e26c0553 fix docker repo reference 2023-04-16 19:33:28 +00:00
mbecker20
5f5e7cb45e add note about oauth 2023-04-16 07:44:34 +00:00
beckerinj
8aa0304738 core setup doc 2023-04-16 03:32:06 -04:00
beckerinj
8ec98c33a4 first user is auto enabled and made admin 2023-04-16 03:23:30 -04:00
beckerinj
2667182ca3 update core example config 2023-04-16 02:49:00 -04:00
mbecker20
1cd0018b93 0.3.0 check whether pre_build / on_clone / on_pull are non empty before running 2023-04-14 22:58:10 +00:00
beckerinj
359789ee29 update aws sdk version 2023-04-14 12:52:52 -04:00
mbecker20
e79c860c0f make update hover 2023-04-14 15:06:05 +00:00
mbecker20
765f53f30e types doc 2023-04-14 06:14:33 +00:00
beckerinj
3c3c21d7f5 move header to top. redesign build 2023-04-14 01:57:10 -04:00
mbecker20
eb700cb500 improve search functionality 2023-04-14 03:53:21 +00:00
beckerinj
b3b723a717 build show unknown if builds[id] cannot be found 2023-04-13 11:19:51 -04:00
beckerinj
555c230d2e update menu stays open through updates 2023-04-13 03:07:31 -04:00
beckerinj
adf4b97aef lots of api docs 2023-04-13 02:23:40 -04:00
beckerinj
32c38d796b docs 2023-04-12 18:19:13 -04:00
beckerinj
c8829e15ed get most of servers api docs 2023-04-10 17:58:52 -04:00
beckerinj
453df417d0 finish deployment api doc 2023-04-10 04:39:47 -04:00
mbecker20
02a7741a9c don't show group editing ui if user doesn't have permissions 2023-04-09 22:55:30 +00:00
mbecker20
96fc5b0ca8 group page / edit work with non admin users 2023-04-09 22:38:36 +00:00
mbecker20
b13e624a66 support manage user permissions on groups 2023-04-09 19:24:07 +00:00
beckerinj
6a8f66f272 work on deployment api docs 2023-04-09 02:15:08 -04:00
mbecker20
0c638a08fd fix problems with build config page breaking 2023-04-07 20:15:21 +00:00
mbecker20
b07f8af8e5 deployment extra args fix as well 2023-04-07 19:50:16 +00:00
mbecker20
3bbb2a985f extra args frontend needs to account for when they don't exist 2023-04-07 19:50:06 +00:00
beckerinj
afdf71c545 work on api docs 2023-04-07 11:12:44 -04:00
beckerinj
8de8d2df9a work on API docs 2023-04-06 01:03:01 -04:00
mbecker20
1dffdbddc2 sort by ts increasing 2023-04-05 18:40:12 +00:00
mbecker20
11fff633b0 don't use $mod in stats query 2023-04-05 18:17:56 +00:00
mbecker20
61bc44d1f4 add hover class to home tree build 2023-04-04 21:18:13 +00:00
beckerinj
e8fabb8cfa Update index.mdx 2023-04-03 16:40:15 -04:00
mbecker20
7a50885847 exit group view with Escape in addition to ArrowLeft 2023-04-03 16:35:44 +00:00
beckerinj
6239da45f4 delete old docs and provide link to docsite 2023-04-03 11:28:50 -04:00
mbecker20
af597eb3c7 clean up builds 2023-04-03 05:55:37 +00:00
mbecker20
d66cda068c connect up delete group 2023-04-03 05:52:58 +00:00
beckerinj
91fcd07c1c make group behavior more sensible frontend 2023-04-03 01:50:45 -04:00
mbecker20
85aa470da1 publish docs 2023-04-03 02:48:00 +00:00
mbecker20
6f0d5f37a5 update home page description for sso 2023-04-02 22:30:37 +00:00
beckerinj
1b4d604404 deploymentBranch 2023-04-02 18:22:04 -04:00
beckerinj
a7f6cbe0b9 small docsite fixes 2023-04-02 12:44:35 -04:00
beckerinj
9cf28bf123 improve docs 2023-04-02 04:15:06 -04:00
beckerinj
c92e04294a monitor docs site working 2023-04-02 04:03:01 -04:00
beckerinj
36f059b455 docsite 2023-04-02 01:40:56 -04:00
mbecker20
4aac301852 0.2.14 only try to merge files, filter nested directories 2023-04-01 19:35:04 +00:00
mbecker20
b375708bbd 0.2.13 support config directories 2023-04-01 19:02:43 +00:00
mbecker20
10b6a9482b update aws sdk version and implement merge_config_files 2023-04-01 07:06:17 +00:00
mbecker20
84d45c5df8 0.2.12 fix docker build command interp 2023-03-31 18:06:18 +00:00
mbecker20
c6559814b1 frontend for docker build extra args and use buildx 2023-03-31 17:31:39 +00:00
mbecker20
c8c080183f remove publish for cli 2023-03-31 17:04:15 +00:00
mbecker20
597b67f799 0.2.11 support buildx and arbitrary extra args 2023-03-31 17:03:38 +00:00
mbecker20
ec52d5f422 support docker buildx build and passing arbitrary extra args 2023-03-31 16:57:02 +00:00
mbecker20
34806304d6 add center menu title bottom border and adjust copy menu 2023-03-31 05:41:35 +00:00
beckerinj
87953d5495 menu padding 2rem 2023-03-31 01:27:17 -04:00
beckerinj
b6c7c80c95 full width input for copy menu 2023-03-31 01:26:19 -04:00
beckerinj
77e568d5c3 small 2023-03-27 12:41:59 -04:00
mbecker20
699fc51cf7 link to build if click on image deployment header 2023-03-27 15:30:11 +00:00
mbecker20
21029c90b7 info page on stats page 2023-03-27 05:13:12 +00:00
mbecker20
6b0530eb7f brush up server stats page 2023-03-26 23:15:58 +00:00
beckerinj
f7061c7225 toggle to show absolutes for mem and disk stat graphs 2023-03-26 18:47:21 -04:00
mbecker20
750f698369 updates page 2023-03-26 02:20:39 +00:00
mbecker20
ec5ef42298 add max height / scrolling to copy menu target selector 2023-03-24 00:45:47 +00:00
beckerinj
46820b0044 increase the tab title padding 2023-03-23 20:36:31 -04:00
beckerinj
425a6648f7 improve summary styling 2023-03-23 03:13:19 -04:00
mbecker20
349fc297ce 0.2.10 add renaming functionality 2023-03-22 20:33:26 +00:00
mbecker20
5ad87c03ed show none when none 2023-03-22 07:16:04 +00:00
mbecker20
d16006f28f improve design 2023-03-22 07:03:28 +00:00
beckerinj
7f0452a5f5 improve pie chart home page 2023-03-22 02:59:29 -04:00
mbecker20
c605b2f6fc implement pie chart summary 2023-03-22 06:41:57 +00:00
beckerinj
6c2d8a8494 unnecessary import 2023-03-21 23:10:01 -07:00
mbecker20
874691f729 add a pie chart component 2023-03-21 09:44:00 +00:00
beckerinj
cdf702e17d orange 2023-03-21 00:52:25 -07:00
mbecker20
25fdb32627 rename deployments 2023-03-19 08:14:54 +00:00
mbecker20
e976ea0a3a improve the behavior 2023-03-17 20:55:37 +00:00
mbecker20
34e6b4fc69 rename server working 2023-03-17 20:40:19 +00:00
mbecker20
a2d77567b3 dont need to 'to_monitor_name' servers 2023-03-15 07:35:54 +00:00
mbecker20
ecb460f9b5 add rename deployment to monitor client 2023-03-14 20:15:27 +00:00
mbecker20
63444b089c rename deployment func 2023-03-12 23:36:20 +00:00
mbecker20
c787984b77 initialize mongo with builder 2023-03-12 22:03:31 +00:00
mbecker20
bf3d03e801 fix problem of repeated query for docker accounts, secrets, etc 2023-03-12 05:07:55 +00:00
mbecker20
bc2e69b975 use resource to load stuff 2023-03-12 03:45:49 +00:00
mbecker20
7b94fcf3da 0.2.9 finish implement secret helpers on frontend 2023-03-12 00:48:18 +00:00
mbecker20
9cf03b8b88 add route to get available secret keys 2023-03-12 00:16:03 +00:00
mbecker20
a288edcf61 0.2.8 implement secret interpolation on builds and deployments 2023-03-11 23:34:17 +00:00
mbecker20
89cc18ad37 update tokio version 2023-03-10 19:27:40 +00:00
mbecker20
ffa3b671e1 change default alerting thresholds 2023-03-09 07:08:38 +00:00
beckerinj
f32eeb413b add label to home sort by 2023-03-08 16:10:23 -05:00
mbecker20
b5a5103cfc move core dockerfile 2023-03-08 18:26:42 +00:00
mbecker20
c5697e59f3 delete sample file 2023-03-08 18:24:15 +00:00
mbecker20
f030667ff4 update image in deployment header as well 2023-03-07 17:41:00 +00:00
mbecker20
e9fef5d97c change get_deployment_deployed_version to 'unknown' if not known 2023-03-07 17:39:59 +00:00
beckerinj
f5818ac7ea actually return image 2023-03-07 12:37:45 -05:00
mbecker20
c85ab4110d show derived image if container.image is sha256: 2023-03-07 16:30:59 +00:00
mbecker20
9690ea35b8 make description text area larger 2023-03-07 08:44:31 +00:00
mbecker20
6300c8011b fix modify global user permissions operator - make operator the admin, instead of the target 2023-03-06 17:09:12 +00:00
mbecker20
97f582b381 customizable page title 2023-03-06 02:07:08 +00:00
mbecker20
5135a9c228 show server name under deployment on admin user manage page 2023-03-06 01:46:40 +00:00
mbecker20
b7d1212a82 make resources links in account page 2023-03-05 21:50:15 +00:00
mbecker20
7d9d0a9fc4 add view of resources you can access on account page 2023-03-05 21:42:11 +00:00
beckerinj
ed9aef4321 add resources to account page 2023-03-05 16:33:40 -05:00
mbecker20
0aa638bdf4 only do daily update if servers not empty 2023-03-05 20:19:06 +00:00
mbecker20
0ec39d793d one page to view all permissions for user 2023-03-05 09:24:59 +00:00
mbecker20
5579ba869c v0.2.7 remove passkeys from periphery startup log 2023-03-03 17:27:42 +00:00
mbecker20
210940038c hide passkeys on periphery startup config log 2023-03-03 17:24:28 +00:00
mbecker20
98a1a60362 /home/ubuntu/example 2023-03-03 08:15:17 +00:00
mbecker20
86cf9116ba update builds and deployments docs with link to file paths doc 2023-03-03 08:09:41 +00:00
mbecker20
8b2defe0d9 add doc about file paths 2023-03-03 07:58:09 +00:00
mbecker20
50b14b3ce5 0.2.6 store ami name instead of ami_id (because the id has to change sometimes) 2023-03-03 07:11:55 +00:00
mbecker20
1bfb17cb5d handle setting default ami id correctly 2023-03-02 21:32:22 +00:00
mbecker20
b90acb66c7 0.2.5 stop leaking github token 2023-03-02 21:25:46 +00:00
mbecker20
7648b0dd10 don't let github access token leak when clone fails 2023-03-02 21:21:07 +00:00
mbecker20
2d69f1791a default builds to use aws config on create 2023-03-02 17:11:23 +00:00
mbecker20
5ba887095a allow select "none" for docker organization 2023-03-02 17:00:40 +00:00
mbecker20
19b7405562 show organization immediately if it exists on build 2023-03-01 23:20:54 +00:00
mbecker20
f5c5f734e1 clean deployment / build config before update 2023-03-01 21:18:40 +00:00
mbecker20
8d1639bcaf fix build permissions 2023-03-01 10:18:58 +00:00
mbecker20
e2446af00e remove print 2023-03-01 10:12:26 +00:00
mbecker20
1b39aaaa38 implement description 2023-03-01 09:46:50 +00:00
mbecker20
5a2a1a3d98 0.2.4: add description and update description 2023-03-01 08:13:07 +00:00
mbecker20
39eceb745b v0.2.2: configure docker organizations for builds 2023-03-01 07:18:49 +00:00
beckerinj
4c1ec5db33 edit permissions.md 2023-02-28 02:26:57 -05:00
beckerinj
8b68b9481e permissions.md 2023-02-28 02:21:46 -05:00
beckerinj
14843f83c6 add core setup link to table of contents 2023-02-28 01:56:05 -05:00
beckerinj
e67d87e885 even 2023-02-28 01:53:54 -05:00
beckerinj
7d4d865d58 elaborate on networks 2023-02-28 01:53:04 -05:00
beckerinj
1e4aaff23c if to is 2023-02-28 01:37:43 -05:00
beckerinj
df3f4a5f4a improve builds.md 2023-02-28 01:36:35 -05:00
beckerinj
1f8557300d fix type 2023-02-28 01:33:55 -05:00
beckerinj
bf17d705f0 fix typo 2023-02-28 01:33:09 -05:00
beckerinj
0d24b792c6 container lifetime management 2023-02-28 01:30:43 -05:00
mbecker20
fb61e36417 remove download log button, its kind of unsafe if the log is long 2023-02-28 06:18:42 +00:00
beckerinj
c39869d2f8 deployments.md 2023-02-28 01:18:11 -05:00
mbecker20
750e0274da #example 2023-02-28 05:54:43 +00:00
beckerinj
a9d37ab667 add placeholders to show how to pass env 2023-02-28 00:52:09 -05:00
mbecker20
eacb549d5e update core config example with github_webhook_base_url 2023-02-28 05:08:05 +00:00
mbecker20
ce7cb8fe45 improve confirm menu with copy button 2023-02-28 04:58:39 +00:00
mbecker20
f9fe4e32b4 restyle builds and deployments 2023-02-28 04:24:15 +00:00
mbecker20
2c9fc2bad4 always show docker account 2023-02-28 03:57:35 +00:00
mbecker20
94949291c2 fix notifications, add dynamic listener url 2023-02-28 03:41:25 +00:00
beckerinj
2944ba6ef9 cli v0.2.3 2023-02-27 22:18:04 -05:00
beckerinj
997e68a31d dynamic github webhook base url 2023-02-27 22:17:37 -05:00
beckerinj
bfb9d9e34d add periphery version in builder connected logs 2023-02-27 21:46:43 -05:00
mbecker20
3b9219b586 fix updates selector style 2023-02-27 05:55:33 +00:00
mbecker20
7bf2a88ab1 finish build args section 2023-02-27 05:53:56 +00:00
mbecker20
d21ed093dc fix build args gap 2023-02-27 05:52:13 +00:00
mbecker20
6e89671e91 switch cli build and build args build config 2023-02-27 05:47:10 +00:00
beckerinj
ee1128a666 Update builds.md 2023-02-27 00:45:16 -05:00
beckerinj
63b5deecd7 Update servers.md 2023-02-27 00:44:01 -05:00
mbecker20
f4f97ce1a7 finish builds / servers 2023-02-27 05:42:22 +00:00
mbecker20
a666df099f use image on deployment container 2023-02-26 06:55:31 +00:00
mbecker20
21dd0ee072 cli should be 0.2.2 2023-02-26 06:48:38 +00:00
mbecker20
bd2a1d4236 v0.2.1 merge multiple config files 2023-02-26 06:25:26 +00:00
mbecker20
7acdbcfd8f improve updates selector - add class 2023-02-25 22:34:37 +00:00
mbecker20
58514c5c93 fix height of builder config when no builder type chosen 2023-02-25 22:07:05 +00:00
mbecker20
580e800923 fix clap args with - 2023-02-23 23:14:02 +00:00
mbecker20
29f6b19f33 cli 0.2.0. fix starting mongo when no existing container present 2023-02-23 22:46:17 +00:00
mbecker20
e090247723 fix error when user doesn't have access to build on deployment 2023-02-23 08:00:55 +00:00
mbecker20
1374c26cd8 0.2.0 cleanup 2023-02-23 07:28:11 +00:00
mbecker20
5467b40b2e fix to git clone <TOKEN> splice 2023-02-23 07:22:32 +00:00
mbecker20
165b9012da improve users responsiveness 2023-02-23 07:17:18 +00:00
mbecker20
22630f665e update the manage users page 2023-02-23 07:09:33 +00:00
mbecker20
3d867084ba log poll default to false 2023-02-23 06:44:55 +00:00
mbecker20
171dd2d9e0 remove menu animation, change builder type to selector 2023-02-23 06:38:36 +00:00
mbecker20
9709239f88 build version h2 2023-02-22 22:45:05 +00:00
mbecker20
60d457b285 improve deployment in tree and display deployed version in header 2023-02-22 22:27:54 +00:00
mbecker20
8b1d4793a7 0.1.17 support building with ec2 instances 2023-02-22 21:05:03 +00:00
mbecker20
f2166c8435 configure aws config on builds 2023-02-22 20:49:56 +00:00
mbecker20
07d723a748 more prog on frontend, some api etc 2023-02-22 06:39:32 +00:00
mbecker20
b36f485287 put server / aws build on build header 2023-02-21 23:05:49 +00:00
mbecker20
a121ae0828 begin frontend refactor for ephemeral build support 2023-02-21 18:11:43 +00:00
mbecker20
e2b5a02008 building works 2023-02-21 05:22:26 +00:00
mbecker20
575aa62625 update versions to 0.1.16 2023-02-21 04:32:41 +00:00
mbecker20
ac88a2c4ed testing and fixes for aws build 2023-02-21 04:27:30 +00:00
mbecker20
f1dcb71a8a poll periphery on build instance to ensure connectivity before moving on 2023-02-20 22:56:22 +00:00
mbecker20
30d04bc201 support building on ephemeral ec2 2023-02-20 09:41:15 +00:00
mbecker20
33a00bb1a2 poll when instance running 2023-02-20 04:55:44 +00:00
mbecker20
ccca44ea89 start working on build instance spawn on aws 2023-02-20 01:19:07 +00:00
mbecker20
ae5f36fe51 0.1.15 use passkey for addition core - periphery auth layer 2023-02-17 18:29:13 +00:00
beckerinj
69ce1e4f36 start adding passkey auth to core periphery communication 2023-02-17 12:53:09 -05:00
mbecker20
6e444b9032 temp remove deployment state polling 2023-02-14 18:33:32 +00:00
beckerinj
73eff72da4 work on docs 2023-02-13 11:16:27 -05:00
beckerinj
698e3c214b opinionated 2023-02-12 14:48:28 -05:00
beckerinj
9da77667dc add next page 2023-02-12 04:14:06 -05:00
beckerinj
c30793fb8f to 2023-02-12 04:12:49 -05:00
beckerinj
84fdaab24d close intro better 2023-02-12 04:10:10 -05:00
beckerinj
cbd67bb609 back to table 2023-02-12 04:05:35 -05:00
beckerinj
00f58e9008 architecture 2023-02-12 03:59:26 -05:00
beckerinj
7738fab351 intro 2023-02-12 03:57:55 -05:00
beckerinj
06e8f6589b links 2023-02-12 00:55:14 -05:00
beckerinj
57d9287724 start docs 2023-02-12 00:51:05 -05:00
mbecker20
2cc65595ee log only polls when container is running 2023-02-11 20:29:25 +00:00
mbecker20
3dd2b97873 add polling for deployment state and logs 2023-02-11 20:17:13 +00:00
mbecker20
3c805ebbf7 fix deployment action build A tag styling 2023-02-11 19:36:36 +00:00
beckerinj
a854160018 Update periphery.config.example.toml 2023-02-08 13:40:00 -05:00
mbecker20
a99d9e5969 make update menu wider 2023-02-08 05:56:09 +00:00
mbecker20
813b6c1182 confirm menu 2023-02-08 05:37:27 +00:00
mbecker20
2958f9589b deployment / build delete wont fail if server disabled / unreachable. 2023-02-08 05:37:12 +00:00
beckerinj
69b4e26176 ConfirmMenuButton 2023-02-07 23:49:27 -05:00
beckerinj
78b00f139d delete server side effects 2023-02-07 23:23:46 -05:00
beckerinj
dc1e8de851 widen env 2023-02-07 23:23:18 -05:00
mbecker20
3187b335a3 cli 0.1.20 2023-02-07 20:12:20 +00:00
beckerinj
54b5a2b420 add logrotate to mongo startup 2023-02-07 15:10:36 -05:00
beckerinj
14c6bd00a8 use the scroller class for LogContainer 2023-02-02 16:14:11 -05:00
mbecker20
e9c3646450 0.1.14 fix deployment builder defaults 2023-01-30 17:45:34 +00:00
mbecker20
4f20257479 0.1.13 improve Builder structs with defaults 2023-01-30 17:24:03 +00:00
beckerinj
65749991de 0.1.12 MonitorClient::new_from_env 2023-01-30 11:50:48 -05:00
mbecker20
237a1d802d handle stuff when server disabled / unreachable 2023-01-28 09:03:00 +00:00
mbecker20
e4336f19f3 default copy server to curr server 2023-01-25 23:08:31 +00:00
mbecker20
c895e5e67f fix selector search behavior when using itemMap 2023-01-25 23:01:11 +00:00
mbecker20
4e4e210736 implement build / deployment copy 2023-01-25 22:53:27 +00:00
beckerinj
09dfc8faa3 cli 0.1.19 add Restart=on-failure to service unit file 2023-01-25 14:58:54 -05:00
mbecker20
3c4f77cc78 0.1.11 custom DockerRunArgs Default implementation 2023-01-25 06:29:09 +00:00
283 changed files with 21834 additions and 3947 deletions

25
.vscode/tasks.json vendored
View File

@@ -92,15 +92,6 @@
"cwd": "${workspaceFolder}/lib/types"
}
},
{
"type": "cargo",
"command": "publish",
"args": ["--allow-dirty"],
"label": "publish monitor helpers",
"options": {
"cwd": "${workspaceFolder}/lib/helpers"
}
},
{
"type": "cargo",
"command": "publish",
@@ -109,22 +100,6 @@
"cwd": "${workspaceFolder}/lib/monitor_client"
}
},
{
"type": "cargo",
"command": "publish",
"label": "publish monitor periphery",
"options": {
"cwd": "${workspaceFolder}/periphery"
}
},
{
"type": "cargo",
"command": "publish",
"label": "publish monitor cli",
"options": {
"cwd": "${workspaceFolder}/cli"
}
},
{
"type": "shell",
"command": "docker compose up -d",

1504
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -1,22 +0,0 @@
FROM rust:latest as builder
WORKDIR /builder
COPY ./periphery ./periphery
COPY ./lib/types ./lib/types
COPY ./lib/helpers ./lib/helpers
RUN cd periphery && cargo build --release
FROM debian:stable-slim
ARG DEPS_INSTALLER
COPY ./${DEPS_INSTALLER}.sh ./
RUN sh ./${DEPS_INSTALLER}.sh
COPY --from=builder /builder/periphery/target/release/periphery /usr/local/bin/periphery
EXPOSE 8000
CMD "periphery"

View File

@@ -1,6 +1,6 @@
[package]
name = "monitor_cli"
version = "0.1.17"
version = "0.3.4"
edition = "2021"
authors = ["MoghTech"]
description = "monitor cli | tools to setup monitor system"
@@ -13,12 +13,13 @@ path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
monitor_types = { path = "../lib/types" }
clap = "4.0"
async_timing_util = "0.1.14"
rand = "0.8"
serde = "1.0"
serde_derive = "1.0"
toml = "0.5"
toml = "0.7"
run_command = "0.0.5"
colored = "2"
strum = "0.24"

View File

@@ -7,15 +7,13 @@ use std::{
str::FromStr,
};
use async_timing_util::Timelength;
use clap::ArgMatches;
use colored::Colorize;
use monitor_types::{CoreConfig, MongoConfig, PeripheryConfig, RestartMode, Timelength};
use rand::{distributions::Alphanumeric, Rng};
use run_command::run_command_pipe_to_terminal;
use serde::Serialize;
use crate::types::{CoreConfig, MongoConfig, PeripheryConfig, RestartMode};
const CORE_IMAGE_NAME: &str = "mbecker2020/monitor_core";
const PERIPHERY_IMAGE_NAME: &str = "mbecker2020/monitor_periphery";
const PERIPHERY_CRATE: &str = "monitor_periphery";
@@ -64,6 +62,7 @@ pub fn gen_core_config(sub_matches: &ArgMatches) {
.map(|p| p.to_owned());
let config = CoreConfig {
title: String::from("monitor"),
host,
port,
jwt_valid_for,
@@ -74,6 +73,8 @@ pub fn gen_core_config(sub_matches: &ArgMatches) {
local_auth: true,
github_oauth: Default::default(),
google_oauth: Default::default(),
aws: Default::default(),
docker_organizations: Default::default(),
mongo: MongoConfig {
uri: mongo_uri,
db_name: mongo_db_name,
@@ -81,6 +82,8 @@ pub fn gen_core_config(sub_matches: &ArgMatches) {
},
jwt_secret: generate_secret(40),
github_webhook_secret: generate_secret(30),
github_webhook_base_url: None,
passkey: generate_secret(30),
};
write_to_toml(&path, &config);
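The generated config fills `jwt_secret`, `github_webhook_secret`, and the new `passkey` field via `generate_secret`, whose body is outside this diff. Given the `rand::{distributions::Alphanumeric, Rng}` import above, a minimal sketch of what such a helper could look like (a reconstruction, not the file's actual code):

```rust
use rand::{distributions::Alphanumeric, Rng};

// hypothetical reconstruction of the `generate_secret` helper called above:
// `length` random alphanumeric characters drawn from the thread rng
fn generate_secret(length: usize) -> String {
    rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(length)
        .map(char::from)
        .collect()
}
```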
@@ -176,7 +179,10 @@ pub fn start_mongo(sub_matches: &ArgMatches) {
}
}
let command = format!("docker stop {name} && docker container rm {name} && docker run -d --name {name} -p {port}:27017 --network {network} -v {mount}:/data/db{env} --restart {restart} mongo --quiet");
let stop =
run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));
let command = format!("docker run -d --name {name} -p {port}:27017 --network {network} -v {mount}:/data/db{env} --restart {restart} --log-opt max-size=15m --log-opt max-file=3 mongo --quiet");
let output = run_command_pipe_to_terminal(&command);
@@ -314,13 +320,16 @@ pub fn gen_periphery_config(sub_matches: &ArgMatches) {
.map(|p| p.as_str())
.unwrap_or("~/.monitor/repos")
.to_string()
.replace("~", env::var("HOME").unwrap().as_str());
.replace("~", env::var("HOME").unwrap().as_str())
.parse()
.expect("failed to parse --repo_dir as path");
let config = PeripheryConfig {
port,
repo_dir,
stats_polling_rate,
allowed_ips,
repo_dir,
passkeys: vec![],
secrets: Default::default(),
github_accounts: Default::default(),
docker_accounts: Default::default(),
@@ -588,6 +597,7 @@ Description=agent to connect with monitor core
[Service]
ExecStart={home}/.monitor/bin/periphery --config-path {config_path} --home-dir {home}
Restart=on-failure
TimeoutStartSec=0
[Install]

View File

@@ -3,7 +3,6 @@
use clap::{arg, Arg, Command};
mod helpers;
mod types;
use helpers::*;
@@ -36,19 +35,19 @@ fn cli() -> Command {
.required(false)
)
.arg(
arg!(--mongo-uri <URI> "sets the mongo uri to use. default is 'mongodb://monitor-mongo'")
arg!(--"mongo-uri" <URI> "sets the mongo uri to use. default is 'mongodb://monitor-mongo'")
.required(false)
)
.arg(
arg!(--mongo-db-name <NAME> "sets the db name to use. default is 'monitor'")
arg!(--"mongo-db-name" <NAME> "sets the db name to use. default is 'monitor'")
.required(false)
)
.arg(
arg!(--jwt-valid-for <TIMELENGTH> "sets the length of time jwt stays valid for. default is 1-wk (one week)")
arg!(--"jwt-valid-for" <TIMELENGTH> "sets the length of time jwt stays valid for. default is 1-wk (one week)")
.required(false)
)
.arg(
arg!(--slack-url <URL> "sets the slack url to use for slack notifications")
arg!(--"slack-url" <URL> "sets the slack url to use for slack notifications")
.required(false)
),
)
@@ -96,7 +95,7 @@ fn cli() -> Command {
arg!(--name <NAME> "specify the name of the monitor core container. default is monitor-core")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/core.config.toml")
arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/core.config.toml")
.required(false)
)
.arg(
@@ -111,7 +110,7 @@ fn cli() -> Command {
arg!(--restart <RESTART> "sets docker restart mode of monitor core container. default is unless-stopped")
)
.arg(
arg!(--add-internal-host "adds the docker flag '--add-host=host.docker.internal:host-gateway'. default is true")
arg!(--"add-internal-host" "adds the docker flag '--add-host=host.docker.internal:host-gateway'. default is true")
)
),
)
@@ -133,15 +132,15 @@ fn cli() -> Command {
.required(false)
)
.arg(
arg!(--stats-polling-rate <INTERVAL> "sets stats polling rate to control granularity of system stats returned. default is 5-sec. options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min")
arg!(--"stats-polling-rate" <INTERVAL> "sets stats polling rate to control granularity of system stats returned. default is 5-sec. options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min")
.required(false)
)
.arg(
arg!(--allowed-ips <IPS> "used to only accept requests from known ips. give ips as comma separated list, like '--allowed_ips 127.0.0.1,10.20.30.43'. default is empty, which will not block any ip.")
arg!(--"allowed-ips" <IPS> "used to only accept requests from known ips. give ips as comma separated list, like '--allowed_ips 127.0.0.1,10.20.30.43'. default is empty, which will not block any ip.")
.required(false)
)
.arg(
arg!(--repo-dir <PATH> "if running in container, this should be '/repos'. default is ~/.monitor/repos").required(false)
arg!(--"repo-dir" <PATH> "if running in container, this should be '/repos'. default is ~/.monitor/repos").required(false)
)
)
.subcommand(
@@ -157,7 +156,7 @@ fn cli() -> Command {
arg!(--install "specify this to install periphery from crates.io")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
.required(false)
)
)
@@ -171,7 +170,7 @@ fn cli() -> Command {
arg!(--install "specify this to install periphery from crates.io")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
.required(false)
)
.arg(
@@ -183,32 +182,32 @@ fn cli() -> Command {
.required(false)
)
)
.subcommand(
Command::new("container")
.about("start up monitor periphery in docker container")
.arg(
arg!(--yes "used in scripts to skip 'enter to continue' step")
)
.arg(
arg!(--name <NAME> "specify the name of the monitor periphery container. default is monitor-periphery")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
.required(false)
)
.arg(arg!(--repo-dir <PATH> "specify the folder on host to clone repos into. default is ~/.monitor/repos").required(false))
.arg(
arg!(--port <PORT> "sets port monitor periphery will run on. default is 8000")
.required(false)
)
.arg(
arg!(--network <NETWORK> "sets docker network of monitor periphery container. default is bridge")
.required(false)
)
.arg(
arg!(--restart <RESTART> "sets docker restart mode of monitor periphery container. default is unless-stopped")
)
)
// .subcommand(
// Command::new("container")
// .about("start up monitor periphery in docker container")
// .arg(
// arg!(--yes "used in scripts to skip 'enter to continue' step")
// )
// .arg(
// arg!(--name <NAME> "specify the name of the monitor periphery container. default is monitor-periphery")
// )
// .arg(
// arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
// .required(false)
// )
// .arg(arg!(--"repo-dir" <PATH> "specify the folder on host to clone repos into. default is ~/.monitor/repos").required(false))
// .arg(
// arg!(--port <PORT> "sets port monitor periphery will run on. default is 8000")
// .required(false)
// )
// .arg(
// arg!(--network <NETWORK> "sets docker network of monitor periphery container. default is bridge")
// .required(false)
// )
// .arg(
// arg!(--restart <RESTART> "sets docker restart mode of monitor periphery container. default is unless-stopped")
// )
// )
),
)
}
@@ -239,7 +238,7 @@ fn main() {
match periphery_start_command {
("systemd", sub_matches) => start_periphery_systemd(sub_matches),
("daemon", sub_matches) => start_periphery_daemon(sub_matches),
("container", sub_matches) => start_periphery_container(sub_matches),
// ("container", sub_matches) => start_periphery_container(sub_matches),
_ => println!("\n❌ invalid call, should be 'monitor periphery start <daemon, container> <flags>' ❌\n")
}
}
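The recurring edit in this file quotes hyphenated long flags inside clap's `arg!` macro (`--mongo-uri` becomes `--"mongo-uri"`), matching the "fix clap args with -" commit; in clap 4 a long flag containing `-` must be given as a string literal. A small self-contained sketch of the quoted form (the flag name and value here are illustrative):

```rust
use clap::{arg, Command};

fn cli() -> Command {
    Command::new("monitor")
        // hyphenated long flags are written as string literals in `arg!`
        .arg(arg!(--"mongo-uri" <URI> "sets the mongo uri to use").required(false))
}

fn main() {
    let matches = cli().get_matches_from(["monitor", "--mongo-uri", "mongodb://monitor-mongo"]);
    assert_eq!(
        matches.get_one::<String>("mongo-uri").map(String::as_str),
        Some("mongodb://monitor-mongo")
    );
}
```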

View File

@@ -1,140 +0,0 @@
use std::{collections::HashMap, net::IpAddr};
use async_timing_util::Timelength;
use serde_derive::{Deserialize, Serialize};
use strum_macros::{Display, EnumString};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CoreConfig {
// the host to use with oauth redirect url, whatever host the user hits to access monitor. eg 'https://monitor.mogh.tech'
pub host: String,
// port the core web server runs on
#[serde(default = "default_core_port")]
pub port: u16,
// daily utc offset in hours to run daily update. eg 8:00 eastern time is 13:00 UTC, so offset should be 13. default of 0 runs at UTC midnight.
#[serde(default)]
pub daily_offset_hours: u8,
// number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
#[serde(default)]
pub keep_stats_for_days: u64, // 0 means never prune
// jwt config
pub jwt_secret: String,
#[serde(default = "default_jwt_valid_for")]
pub jwt_valid_for: Timelength,
// interval at which to collect server stats and alert for out of bounds
pub monitoring_interval: Timelength,
// used to verify validity from github webhooks
pub github_webhook_secret: String,
// integration with slack app
pub slack_url: Option<String>,
// enable login with local auth
pub local_auth: bool,
// github integration
pub github_oauth: OauthCredentials,
// google integration
pub google_oauth: OauthCredentials,
// mongo config
pub mongo: MongoConfig,
}
fn default_core_port() -> u16 {
9000
}
fn default_jwt_valid_for() -> Timelength {
Timelength::OneWeek
}
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct OauthCredentials {
#[serde(default)]
pub enabled: bool,
#[serde(default)]
pub id: String,
#[serde(default)]
pub secret: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct MongoConfig {
pub uri: String,
#[serde(default = "default_core_mongo_app_name")]
pub app_name: String,
#[serde(default = "default_core_mongo_db_name")]
pub db_name: String,
}
fn default_core_mongo_app_name() -> String {
"monitor_core".to_string()
}
fn default_core_mongo_db_name() -> String {
"monitor".to_string()
}
pub type GithubUsername = String;
pub type GithubToken = String;
pub type GithubAccounts = HashMap<GithubUsername, GithubToken>;
pub type DockerUsername = String;
pub type DockerToken = String;
pub type DockerAccounts = HashMap<DockerUsername, DockerToken>;
pub type SecretsMap = HashMap<String, String>;
#[derive(Serialize, Deserialize, Debug)]
pub struct PeripheryConfig {
#[serde(default = "default_periphery_port")]
pub port: u16,
#[serde(default = "default_repo_dir")]
pub repo_dir: String,
#[serde(default = "default_stats_refresh_interval")]
pub stats_polling_rate: Timelength,
#[serde(default)]
pub allowed_ips: Vec<IpAddr>,
#[serde(default)]
pub secrets: SecretsMap,
#[serde(default)]
pub github_accounts: GithubAccounts,
#[serde(default)]
pub docker_accounts: DockerAccounts,
}
fn default_periphery_port() -> u16 {
8000
}
fn default_repo_dir() -> String {
"/repos".to_string()
}
fn default_stats_refresh_interval() -> Timelength {
Timelength::FiveSeconds
}
#[derive(Serialize, Deserialize, Debug, Display, EnumString, PartialEq, Hash, Eq, Clone, Copy)]
pub enum RestartMode {
#[serde(rename = "no")]
#[strum(serialize = "no")]
NoRestart,
#[serde(rename = "on-failure")]
#[strum(serialize = "on-failure")]
OnFailure,
#[serde(rename = "always")]
#[strum(serialize = "always")]
Always,
#[serde(rename = "unless-stopped")]
#[strum(serialize = "unless-stopped")]
UnlessStopped,
}
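This deleted `types.rs` pairs `serde(rename = ...)` with `strum(serialize = ...)` so the same docker-facing strings drive both config (de)serialization and command construction. A self-contained round-trip of that pattern, trimmed to two variants:

```rust
use std::str::FromStr;
use strum_macros::{Display, EnumString};

#[derive(Display, EnumString, Debug, PartialEq)]
enum RestartMode {
    #[strum(serialize = "on-failure")]
    OnFailure,
    #[strum(serialize = "unless-stopped")]
    UnlessStopped,
}

fn main() {
    // EnumString parses the docker string, Display reproduces it, so the
    // mode can be spliced straight into a `docker run --restart ...` command
    let mode = RestartMode::from_str("unless-stopped").unwrap();
    assert_eq!(mode, RestartMode::UnlessStopped);
    assert_eq!(format!("--restart {mode}"), "--restart unless-stopped");
}
```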

View File

@@ -1,5 +1,8 @@
# optional. this will be the document title on the web page (shows up as text in the browser tab). default is 'monitor'
title = "monitor"
# this should be the url used to access monitor in browser, potentially behind DNS, eg https://monitor.mogh.tech or http://12.34.56.78:9000
host = "http://localhost:9000"
host = "https://monitor.mogh.tech"
# the port the core system will run on. if running core in a docker container, leave this port as 9000 and use a port bind eg. -p 9001:9000
port = 9000
@@ -7,36 +10,64 @@ port = 9000
# daily utc offset in hours to send daily update. eg 8:00 eastern time is 13:00 UTC, so offset should be 13. default of 0 runs at UTC midnight.
daily_offset_hours = 13
# number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
keep_stats_for_days = 120
# secret used to generate the jwt. should be some randomly generated hash.
jwt_secret = "your_jwt_secret"
# can be 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day
jwt_valid_for = "1-wk"
# webhook url given by slack app
# webhook url given by slack app that monitor will send alerts and a daily update to
slack_url = "your_slack_app_webhook_url"
# token that has to be given to github during webhook config as the Secret
# token that has to be given to github during repo webhook config as the secret
github_webhook_secret = "your_random_webhook_secret"
# can be 30-sec, 1-min, 2-min, 5-min
# optional. an alternate base url that is used to receive github webhook requests. if not provided, will use 'host' address as base
github_webhook_base_url = "https://monitor-github-webhook.mogh.tech"
# token used to authenticate core requests to periphery
passkey = "your_random_passkey"
# controls the granularity of the system stats collection by monitor core
# can be 15-sec, 30-sec, 1-min, 2-min, 5-min
monitoring_interval = "1-min"
# number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
keep_stats_for_days = 14
# these will be used by the GUI to attach to builds. New build docker orgs will default to first org (or none if empty).
# when attached to build, image will be pushed to repo under the specified organization
docker_organizations = ["your_docker_org1", "your_docker_org_2"]
# allow or deny user login with username / password
local_auth = true
[github_oauth]
enabled = true
id = "your_github_client_id"
secret = "your_github_client_secret"
[google_oauth]
enabled = true
id = "your_google_client_id"
secret = "your_google_client_secret"
[mongo]
uri = "your_mongo_uri"
app_name = "monitor_core"
db_name = "monitor"
db_name = "monitor" # this is the name of the mongo database that monitor will create its collections in.
[aws]
access_key_id = "your_aws_key_id"
secret_access_key = "your_aws_secret_key"
default_region = "us-east-1"
default_ami_name = "your_ami_name" # must be defined below in [aws.available_ami_accounts]
default_instance_type = "m5.2xlarge"
default_volume_gb = 8
default_subnet_id = "your_default_subnet_id"
default_security_group_ids = ["sg_id_1", "sg_id_2"]
default_key_pair_name = "your_default_key_pair_name"
default_assign_public_ip = false
[aws.available_ami_accounts]
your_ami_name = { ami_id = "ami-1234567890", github = ["github_username"], docker = ["docker_username"] }

View File

@@ -1,7 +1,8 @@
port = 9001 # optional. 9001 is default
port = 8000 # optional. 8000 is default
repo_dir = "/repos" # optional. /repos is default. no reason to change if running the docker container, just mount your desired repo dir to /repos in the container
stats_polling_rate = "5-sec" # optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded
allowed_ips = ["127.0.0.1"] # optional. default is empty, which will not block any request by ip.
passkeys = ["abcdefghijk"] # optional. default is empty, which will not require any passkey to be passed by core.
[secrets] # optional. can inject these values into your deployments configuration.
secret_variable = "secret_value"
@@ -12,4 +13,4 @@ github_username2 = "github_token2"
[docker_accounts] # optional
docker_username1 = "docker_token1"
docker_username2 = "docker_token2"
docker_username2 = "docker_token2"
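As the comment on `allowed_ips` says, an empty list blocks nothing; otherwise only listed addresses should be accepted. A minimal sketch of that check (illustrative only, not the actual periphery middleware):

```rust
use std::net::IpAddr;

// empty allow-list admits every caller; otherwise the peer must match exactly
fn ip_allowed(allowed_ips: &[IpAddr], peer: IpAddr) -> bool {
    allowed_ips.is_empty() || allowed_ips.contains(&peer)
}

fn main() {
    let allowed: Vec<IpAddr> = vec!["127.0.0.1".parse().unwrap()];
    assert!(ip_allowed(&allowed, "127.0.0.1".parse().unwrap()));
    assert!(!ip_allowed(&allowed, "10.20.30.43".parse().unwrap()));
    assert!(ip_allowed(&[], "10.20.30.43".parse().unwrap())); // empty list: allow all
}
```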

View File

@@ -1,39 +1,39 @@
[package]
name = "core"
version = "0.1.10"
version = "0.3.4"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# helpers = { package = "monitor_helpers", path = "../lib/helpers" }
# types = { package = "monitor_types", path = "../lib/types" }
helpers = { package = "monitor_helpers", version = "0.1.10" }
types = { package = "monitor_types", version = "0.1.10" }
helpers = { package = "monitor_helpers", path = "../lib/helpers" }
types = { package = "monitor_types", path = "../lib/types" }
db = { package = "db_client", path = "../lib/db_client" }
periphery = { package = "periphery_client", path = "../lib/periphery_client" }
axum_oauth2 = { path = "../lib/axum_oauth2" }
tokio = { version = "1.24", features = ["full"] }
tokio-tungstenite = { version = "0.18", features=["native-tls"] }
tokio-util = "0.7"
tokio = { version = "1.28", features = ["full"] }
tokio-tungstenite = { version = "0.19", features=["native-tls"] }
tokio-util = { version = "0.7"}
axum = { version = "0.6", features = ["ws", "json"] }
axum-extra = { version = "0.4", features = ["spa"] }
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["cors"] }
tower = { version = "0.4", features = ["timeout"] }
tower-http = { version = "0.4", features = ["fs", "cors"] }
slack = { package = "slack_client_rs", version = "0.0.8" }
mungos = "0.3.3"
futures-util = "0.3"
mungos = "0.3.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
dotenv = "0.15"
envy = "0.4"
anyhow = "1.0"
bcrypt = "0.13"
bcrypt = "0.14"
jwt = "0.16"
hmac = "0.12"
sha2 = "0.10"
async_timing_util = "0.1.14"
futures-util = "0.3"
diff-struct = "0.5"
typeshare = "1.0.0"
hex = "0.4"
typeshare = "1.0.1"
hex = "0.4"
aws-config = "0.55.2"
aws-sdk-ec2 = "0.27.0"
merge_config_files = "0.1.3"
termination_signal = "0.1.2"

View File

@@ -3,8 +3,8 @@ WORKDIR /builder
COPY ./core ./core
# COPY ./lib/types ./lib/types
# COPY ./lib/helpers ./lib/helpers
COPY ./lib/types ./lib/types
COPY ./lib/helpers ./lib/helpers
COPY ./lib/db_client ./lib/db_client
COPY ./lib/periphery_client ./lib/periphery_client

View File

@@ -1,19 +1,28 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use aws_sdk_ec2::Client;
use diff::Diff;
use futures_util::future::join_all;
use helpers::{all_logs_success, to_monitor_name};
use mungos::{doc, to_bson};
use mungos::mongodb::bson::{doc, to_bson};
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Build, Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget,
monitor_timestamp, traits::Permissioned, AwsBuilderBuildConfig, Build, DockerContainerState,
Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget, Version,
};
use crate::{
auth::RequestUser,
helpers::{any_option_diff_is_some, option_diff_is_some},
cloud::aws::{
create_ec2_client, create_instance_with_ami, terminate_ec2_instance, Ec2Instance,
},
helpers::empty_or_only_spaces,
state::State,
};
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
impl State {
pub async fn get_build_check_permissions(
&self,
@@ -32,25 +41,19 @@ impl State {
}
}
pub async fn build_busy(&self, id: &str) -> bool {
match self.build_action_states.lock().await.get(id) {
Some(a) => a.busy(),
None => false,
pub async fn create_build(&self, name: &str, user: &RequestUser) -> anyhow::Result<Build> {
if !user.is_admin && !user.create_build_permissions {
return Err(anyhow!("user does not have permission to create builds"));
}
}
pub async fn create_build(
&self,
name: &str,
server_id: String,
user: &RequestUser,
) -> anyhow::Result<Build> {
self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let build = Build {
name: to_monitor_name(name),
server_id,
docker_organization: self
.config
.docker_organizations
.get(0)
.map(|d| d.to_string()),
aws_config: Some(AwsBuilderBuildConfig::default()),
permissions: [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
.collect(),
@@ -84,10 +87,7 @@ impl State {
mut build: Build,
user: &RequestUser,
) -> anyhow::Result<Build> {
build.id = self
.create_build(&build.name, build.server_id.clone(), user)
.await?
.id;
build.id = self.create_build(&build.name, user).await?.id;
let build = self.update_build(build, user).await?;
Ok(build)
}
@@ -96,32 +96,25 @@ impl State {
&self,
target_id: &str,
new_name: String,
new_server_id: String,
user: &RequestUser,
) -> anyhow::Result<Build> {
let mut build = self
.get_build_check_permissions(target_id, user, PermissionLevel::Update)
.await?;
build.name = new_name;
build.server_id = new_server_id;
build.version = Version::default();
let build = self.create_full_build(build, user).await?;
Ok(build)
}
pub async fn delete_build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Build> {
if self.build_busy(build_id).await {
if self.build_action_states.busy(build_id).await {
return Err(anyhow!("build busy"));
}
let build = self
.get_build_check_permissions(build_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let server = self.db.get_server(&build.server_id).await?;
let delete_repo_log = self
.periphery
.delete_repo(&server, &build.name)
.await
.context("failed at deleting repo")?;
self.db.builds.delete_one(build_id).await?;
let update = Update {
target: UpdateTarget::Build(build_id.to_string()),
@@ -129,13 +122,10 @@ impl State {
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
logs: vec![
delete_repo_log,
Log::simple(
"delete build",
format!("deleted build {} on server {}", build.name, server.name),
),
],
logs: vec![Log::simple(
"delete build",
format!("deleted build {}", build.name),
)],
success: true,
..Default::default()
};
@@ -148,21 +138,23 @@ impl State {
new_build: Build,
user: &RequestUser,
) -> anyhow::Result<Build> {
if self.build_busy(&new_build.id).await {
if self.build_action_states.busy(&new_build.id).await {
return Err(anyhow!("build busy"));
}
let id = new_build.id.clone();
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(id.clone()).or_default();
entry.updating = true;
}
self.build_action_states
.update_entry(id.clone(), |entry| {
entry.updating = true;
})
.await;
let res = self.update_build_inner(new_build, user).await;
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(id).or_default();
entry.updating = false;
}
self.build_action_states
.update_entry(id.clone(), |entry| {
entry.updating = false;
})
.await;
res
}
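The hunks above swap manual `lock().await` / `entry().or_default()` blocks for `build_action_states.busy(..)` and `.update_entry(..)`, per the "refactor caching to use custom Cache struct" commit. The Cache type itself is not part of this diff; a plausible minimal shape, assuming a `tokio::sync::Mutex<HashMap>` underneath and illustrative action-state fields:

```rust
use std::collections::HashMap;
use tokio::sync::Mutex;

#[derive(Default)]
struct BuildActionState {
    building: bool,
    updating: bool,
}

impl BuildActionState {
    fn busy(&self) -> bool {
        self.building || self.updating
    }
}

#[derive(Default)]
struct Cache {
    states: Mutex<HashMap<String, BuildActionState>>,
}

impl Cache {
    // hypothetical: mutate the entry for `id`, creating it if absent
    async fn update_entry(&self, id: String, f: impl FnOnce(&mut BuildActionState)) {
        let mut lock = self.states.lock().await;
        f(lock.entry(id).or_default());
    }

    // hypothetical: whether the entry for `id` is mid-operation
    async fn busy(&self, id: &str) -> bool {
        self.states
            .lock()
            .await
            .get(id)
            .map(BuildActionState::busy)
            .unwrap_or(false)
    }
}
```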
@@ -171,19 +163,39 @@ impl State {
mut new_build: Build,
user: &RequestUser,
) -> anyhow::Result<Build> {
let start_ts = monitor_timestamp();
let current_build = self
.get_build_check_permissions(&new_build.id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
if let Some(new_server_id) = &new_build.server_id {
if current_build.server_id.is_none()
|| new_server_id != current_build.server_id.as_ref().unwrap()
{
self.get_server_check_permissions(new_server_id, user, PermissionLevel::Update)
.await
.context("user does not have permission to attach build to this server")?;
}
}
// none of these should be changed through this method
new_build.name = current_build.name.clone();
new_build.permissions = current_build.permissions.clone();
new_build.server_id = current_build.server_id.clone();
new_build.last_built_at = String::new();
new_build.last_built_at = current_build.last_built_at.clone();
new_build.created_at = current_build.created_at.clone();
new_build.updated_at = start_ts.clone();
// filter out any build args that contain empty strings
// these could only happen by accident
new_build.docker_build_args = new_build.docker_build_args.map(|mut args| {
args.build_args = args
.build_args
.into_iter()
.filter(|a| !empty_or_only_spaces(&a.variable) && !empty_or_only_spaces(&a.value))
.collect();
args
});
self.db
.builds
.update_one(&new_build.id, mungos::Update::Regular(new_build.clone()))
@@ -192,71 +204,69 @@ impl State {
let diff = current_build.diff(&new_build);
let mut update = Update {
let update = Update {
operation: Operation::UpdateBuild,
target: UpdateTarget::Build(new_build.id.clone()),
start_ts,
status: UpdateStatus::InProgress,
status: UpdateStatus::Complete,
logs: vec![Log::simple(
"build update",
serde_json::to_string_pretty(&diff).unwrap(),
)],
operator: user.id.clone(),
end_ts: Some(monitor_timestamp()),
success: true,
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
// update.id = self.add_update(update.clone()).await?;
if any_option_diff_is_some(&[&diff.repo, &diff.branch, &diff.github_account])
|| option_diff_is_some(&diff.on_clone)
{
let server = self.db.get_server(&current_build.server_id).await?;
match self.periphery.clone_repo(&server, &new_build).await {
Ok(clone_logs) => {
update.logs.extend(clone_logs);
}
Err(e) => update
.logs
.push(Log::error("cloning repo", format!("{e:#?}"))),
}
}
// if any_option_diff_is_some(&[&diff.repo, &diff.branch, &diff.github_account])
// || option_diff_is_some(&diff.on_clone)
// {
// let server = self.db.get_server(&current_build.server_id).await?;
// match self.periphery.clone_repo(&server, &new_build).await {
// Ok(clone_logs) => {
// update.logs.extend(clone_logs);
// }
// Err(e) => update
// .logs
// .push(Log::error("cloning repo", format!("{e:#?}"))),
// }
// }
update.end_ts = Some(monitor_timestamp());
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
// update.end_ts = Some(monitor_timestamp());
// update.success = all_logs_success(&update.logs);
// update.status = UpdateStatus::Complete;
self.update_update(update).await?;
self.add_update(update).await?;
Ok(new_build)
}
pub async fn build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
if self.build_busy(build_id).await {
if self.build_action_states.busy(build_id).await {
return Err(anyhow!("build busy"));
}
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(build_id.to_string()).or_default();
entry.building = true;
}
self.build_action_states
.update_entry(build_id.to_string(), |entry| {
entry.building = true;
})
.await;
let res = self.build_inner(build_id, user).await;
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(build_id.to_string()).or_default();
entry.building = false;
}
self.build_action_states
.update_entry(build_id.to_string(), |entry| {
entry.building = false;
})
.await;
res
}
async fn build_inner(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
let mut build = self
.get_build_check_permissions(build_id, user, PermissionLevel::Update)
.get_build_check_permissions(build_id, user, PermissionLevel::Execute)
.await?;
let server = self.db.get_server(&build.server_id).await?;
build.version.increment();
let mut update = Update {
target: UpdateTarget::Build(build_id.to_string()),
operation: Operation::BuildBuild,
@@ -267,12 +277,95 @@ impl State {
version: build.version.clone().into(),
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let (server, aws_client) = if let Some(server_id) = &build.server_id {
let server = self.db.get_server(server_id).await;
if let Err(e) = server {
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
update
.logs
.push(Log::error("get build server", format!("{e:#?}")));
self.update_update(update.clone()).await?;
return Err(e);
}
let server = Ec2Instance {
instance_id: String::new(),
server: server.unwrap(),
};
(server, None)
} else if build.aws_config.is_some() {
let start_ts = monitor_timestamp();
let res = self.create_ec2_instance_for_build(&build).await;
if let Err(e) = res {
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
update.logs.push(Log {
stage: "start build server".to_string(),
stderr: format!("{e:#?}"),
success: false,
start_ts,
end_ts: monitor_timestamp(),
..Default::default()
});
self.update_update(update).await?;
return Err(e);
}
let (server, aws_client, logs) = res.unwrap();
update.logs.extend(logs);
self.update_update(update.clone()).await?;
(server, aws_client)
} else {
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
update.logs.push(Log::error(
"start build",
"build has neither server_id nor aws_config attached".to_string(),
));
self.update_update(update).await?;
return Err(anyhow!(
"build has neither server_id or aws_config attached"
));
};
let clone_success = match self.periphery.clone_repo(&server.server, &build).await {
Ok(clone_logs) => {
update.logs.extend(clone_logs);
all_logs_success(&update.logs)
}
Err(e) => {
update
.logs
.push(Log::error("clone repo", format!("{e:#?}")));
false
}
};
if !clone_success {
let _ = self
.periphery
.delete_repo(&server.server, &build.name)
.await;
if let Some(aws_client) = aws_client {
self.terminate_ec2_instance(aws_client, &server, &mut update)
.await;
}
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
self.update_update(update.clone()).await?;
return Ok(update);
}
self.update_update(update.clone()).await?;
let build_logs = match self
.periphery
.build(&server, &build)
.build(&server.server, &build)
.await
.context("failed at call to periphery to build")
{
@@ -282,9 +375,9 @@ impl State {
match build_logs {
Some(logs) => {
let success = all_logs_success(&logs);
update.logs.extend(logs);
update.success = all_logs_success(&update.logs);
if update.success {
if success {
let _ = self
.db
.builds
@@ -305,73 +398,288 @@ impl State {
.push(Log::error("build", "builder busy".to_string()));
}
}
let _ = self
.periphery
.delete_repo(&server.server, &build.name)
.await;
if let Some(aws_client) = aws_client {
self.terminate_ec2_instance(aws_client, &server, &mut update)
.await;
}
self.handle_post_build_redeploy(build_id, &mut update).await;
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
self.update_update(update.clone()).await?;
Ok(update)
}
pub async fn reclone_build(
&self,
build_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.build_busy(build_id).await {
return Err(anyhow!("build busy"));
async fn handle_post_build_redeploy(&self, build_id: &str, update: &mut Update) {
let redeploy_deployments = self
.db
.deployments
.get_some(
doc! { "build_id": build_id, "redeploy_on_build": true },
None,
)
.await;
if let Ok(deployments) = redeploy_deployments {
let futures = deployments.into_iter().map(|d| async move {
let request_user = RequestUser {
id: "auto redeploy".to_string(),
is_admin: true,
..Default::default()
};
let state = self
.get_deployment_with_container_state(&request_user, &d.id)
.await
.map(|r| r.state)
.unwrap_or_default();
if state == DockerContainerState::Running {
Some((
d.id.clone(),
self.deploy_container(
&d.id,
&RequestUser {
id: "auto redeploy".to_string(),
is_admin: true,
..Default::default()
},
None,
None,
)
.await,
))
} else {
None
}
});
let redeploy_results = join_all(futures).await;
let mut redeploys = Vec::<String>::new();
let mut redeploy_failures = Vec::<String>::new();
for res in redeploy_results {
if res.is_none() {
continue;
}
let (id, res) = res.unwrap();
match res {
Ok(_) => redeploys.push(id),
Err(e) => redeploy_failures.push(format!("{id}: {e:#?}")),
}
}
if redeploys.len() > 0 {
update.logs.push(Log::simple(
"redeploy",
format!("redeployed deployments: {}", redeploys.join(", ")),
))
}
if redeploy_failures.len() > 0 {
update.logs.push(Log::simple(
"redeploy failures",
redeploy_failures.join("\n"),
))
}
} else if let Err(e) = redeploy_deployments {
update.logs.push(Log::simple(
"redeploys failed",
format!("failed to get deployments to redeploy: {e:#?}"),
))
}
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(build_id.to_string()).or_default();
entry.recloning = true;
}
let res = self.reclone_build_inner(build_id, user).await;
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(build_id.to_string()).or_default();
entry.recloning = false;
}
res
}
async fn reclone_build_inner(
async fn create_ec2_instance_for_build(
&self,
build_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let build = self
.get_build_check_permissions(build_id, user, PermissionLevel::Update)
.await?;
let server = self.db.get_server(&build.server_id).await?;
let mut update = Update {
target: UpdateTarget::Build(build_id.to_string()),
operation: Operation::RecloneBuild,
start_ts: monitor_timestamp(),
status: UpdateStatus::InProgress,
operator: user.id.clone(),
build: &Build,
) -> anyhow::Result<(Ec2Instance, Option<Client>, Vec<Log>)> {
if build.aws_config.is_none() {
return Err(anyhow!("build has no aws_config attached"));
}
let start_instance_ts = monitor_timestamp();
let aws_config = build.aws_config.as_ref().unwrap();
let region = aws_config
.region
.as_ref()
.unwrap_or(&self.config.aws.default_region)
.to_string();
let aws_client = create_ec2_client(
region,
&self.config.aws.access_key_id,
self.config.aws.secret_access_key.clone(),
)
.await;
let ami_name = aws_config
.ami_name
.as_ref()
.unwrap_or(&self.config.aws.default_ami_name);
let ami_id = &self
.config
.aws
.available_ami_accounts
.get(ami_name)
.ok_or(anyhow!("no ami id associated with ami name {ami_name}"))?
.ami_id;
let instance_type = aws_config
.instance_type
.as_ref()
.unwrap_or(&self.config.aws.default_instance_type);
let subnet_id = aws_config
.subnet_id
.as_ref()
.unwrap_or(&self.config.aws.default_subnet_id);
let security_group_ids = aws_config
.security_group_ids
.as_ref()
.unwrap_or(&self.config.aws.default_security_group_ids)
.to_owned();
let readable_sec_group_ids = security_group_ids.join(", ");
let volume_size_gb = *aws_config
.volume_gb
.as_ref()
.unwrap_or(&self.config.aws.default_volume_gb);
let key_pair_name = aws_config
.key_pair_name
.as_ref()
.unwrap_or(&self.config.aws.default_key_pair_name);
let assign_public_ip = *aws_config
.assign_public_ip
.as_ref()
.unwrap_or(&self.config.aws.default_assign_public_ip);
let instance = create_instance_with_ami(
&aws_client,
&format!("BUILDER-{}-v{}", build.name, build.version.to_string()),
ami_id,
instance_type,
subnet_id,
security_group_ids,
volume_size_gb,
key_pair_name,
assign_public_ip,
)
.await?;
let instance_id = &instance.instance_id;
let start_log = Log {
stage: "start build instance".to_string(),
success: true,
stdout: format!("instance id: {instance_id}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_size_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}"),
start_ts: start_instance_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
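// poll the fresh instance until its periphery agent responds; terminate it and bail if it never connects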
let start_connect_ts = monitor_timestamp();
let mut res = Ok(String::new());
for _ in 0..BUILDER_POLL_MAX_TRIES {
let version = self.periphery.get_version(&instance.server).await;
if let Ok(version) = version {
let connect_log = Log {
stage: "build instance connected".to_string(),
success: true,
stdout: format!("established contact with periphery on builder\nperiphery version: v{version}"),
start_ts: start_connect_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
return Ok((instance, Some(aws_client), vec![start_log, connect_log]));
}
res = version;
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS)).await;
}
let _ = terminate_ec2_instance(&aws_client, &instance.instance_id).await;
Err(anyhow!(
"unable to reach periphery agent on build server\n{res:#?}"
))
}
async fn terminate_ec2_instance(
&self,
aws_client: Client,
server: &Ec2Instance,
update: &mut Update,
) {
let res = terminate_ec2_instance(&aws_client, &server.instance_id).await;
if let Err(e) = res {
update
.logs
.push(Log::error("terminate instance", format!("{e:#?}")))
} else {
update.logs.push(Log::simple(
"terminate instance",
format!("terminate instance id {}", server.instance_id),
))
}
}
// pub async fn reclone_build(
// &self,
// build_id: &str,
// user: &RequestUser,
// ) -> anyhow::Result<Update> {
// if self.build_busy(build_id).await {
// return Err(anyhow!("build busy"));
// }
// {
// let mut lock = self.build_action_states.lock().await;
// let entry = lock.entry(build_id.to_string()).or_default();
// entry.recloning = true;
// }
// let res = self.reclone_build_inner(build_id, user).await;
// {
// let mut lock = self.build_action_states.lock().await;
// let entry = lock.entry(build_id.to_string()).or_default();
// entry.recloning = false;
// }
// res
// }
// async fn reclone_build_inner(
// &self,
// build_id: &str,
// user: &RequestUser,
// ) -> anyhow::Result<Update> {
// let build = self
// .get_build_check_permissions(build_id, user, PermissionLevel::Update)
// .await?;
// let server = self.db.get_server(&build.server_id).await?;
// let mut update = Update {
// target: UpdateTarget::Build(build_id.to_string()),
// operation: Operation::RecloneBuild,
// start_ts: monitor_timestamp(),
// status: UpdateStatus::InProgress,
// operator: user.id.clone(),
// success: true,
// ..Default::default()
// };
// update.id = self.add_update(update.clone()).await?;
// update.success = match self.periphery.clone_repo(&server, &build).await {
// Ok(clone_logs) => {
// update.logs.extend(clone_logs);
// true
// }
// Err(e) => {
// update
// .logs
// .push(Log::error("clone repo", format!("{e:#?}")));
// false
// }
// };
// update.status = UpdateStatus::Complete;
// update.end_ts = Some(monitor_timestamp());
// self.update_update(update.clone()).await?;
// Ok(update)
// }
}

core/src/actions/command.rs (new file)

@@ -0,0 +1,238 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::all_logs_success;
use types::{
monitor_timestamp, traits::Permissioned, Log, Operation, PeripheryCommand,
PeripheryCommandBuilder, PermissionLevel, Update, UpdateStatus, UpdateTarget,
};
use crate::{auth::RequestUser, state::State};
impl State {
pub async fn get_command_check_permissions(
&self,
command_id: &str,
user: &RequestUser,
permission_level: PermissionLevel,
) -> anyhow::Result<PeripheryCommand> {
let command = self.db.get_command(command_id).await?;
let permissions = command.get_user_permissions(&user.id);
if user.is_admin || permissions >= permission_level {
Ok(command)
} else {
Err(anyhow!(
"user does not have required permissions on this command"
))
}
}
pub async fn create_command(
&self,
name: &str,
server_id: String,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let command = PeripheryCommandBuilder::default()
.name(name.to_string())
.server_id(server_id)
.build()
.context("failed to build command")?;
let command_id = self
.db
.commands
.create_one(command)
.await
.context("failed at adding command to db")?;
let command = self.db.get_command(&command_id).await?;
let update = Update {
target: UpdateTarget::Command(command_id),
operation: Operation::CreateCommand,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
success: true,
..Default::default()
};
self.add_update(update).await?;
Ok(command)
}
pub async fn create_full_command(
&self,
mut command: PeripheryCommand,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
command.id = self
.create_command(&command.name, command.server_id.clone(), user)
.await?
.id;
let command = self.update_command(command, user).await?;
Ok(command)
}
pub async fn copy_command(
&self,
target_id: &str,
new_name: String,
new_server_id: String,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
let mut command = self
.get_command_check_permissions(target_id, user, PermissionLevel::Update)
.await?;
command.name = new_name;
command.server_id = new_server_id;
let command = self.create_full_command(command, user).await?;
Ok(command)
}
pub async fn delete_command(
&self,
command_id: &str,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
if self.command_action_states.busy(command_id).await {
return Err(anyhow!("command busy"));
}
let command = self
.get_command_check_permissions(command_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
self.db.commands.delete_one(command_id).await?;
let update = Update {
target: UpdateTarget::Command(command_id.to_string()),
operation: Operation::DeleteCommand,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
logs: vec![Log::simple(
"delete command",
format!("deleted command {}", command.name),
)],
success: true,
..Default::default()
};
self.add_update(update).await?;
Ok(command)
}
pub async fn update_command(
&self,
mut new_command: PeripheryCommand,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
let current_command = self
.get_command_check_permissions(&new_command.id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
// none of these should be changed through this method
new_command.permissions = current_command.permissions.clone();
new_command.server_id = current_command.server_id.clone();
new_command.created_at = current_command.created_at.clone();
new_command.updated_at = start_ts.clone();
self.db
.commands
.update_one(
&new_command.id,
mungos::Update::Regular(new_command.clone()),
)
.await
.context("failed at update one command")?;
let diff = current_command.diff(&new_command);
let update = Update {
operation: Operation::UpdateCommand,
target: UpdateTarget::Command(new_command.id.clone()),
start_ts,
status: UpdateStatus::Complete,
logs: vec![Log::simple(
"command update",
serde_json::to_string_pretty(&diff).unwrap(),
)],
operator: user.id.clone(),
success: true,
..Default::default()
};
self.add_update(update.clone()).await?;
self.update_update(update).await?;
Ok(new_command)
}
pub async fn run_command(
&self,
command_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.command_action_states.busy(command_id).await {
return Err(anyhow!("command busy"));
}
self.command_action_states
.update_entry(command_id.to_string(), |entry| {
entry.running = true;
})
.await;
let res = self.run_command_inner(command_id, user).await;
self.command_action_states
.update_entry(command_id.to_string(), |entry| {
entry.running = false;
})
.await;
res
}
async fn run_command_inner(
&self,
command_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let command = self
.get_command_check_permissions(command_id, user, PermissionLevel::Execute)
.await?;
if command.command.path.is_empty() || command.command.command.is_empty() {
return Err(anyhow!("command or path is empty, aborting"));
}
let server = self.db.get_server(&command.server_id).await?;
let mut update = Update {
target: UpdateTarget::Command(command_id.to_string()),
operation: Operation::RunCommand,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
match self.periphery.run_command(&server, &command.command).await {
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update
.logs
.push(Log::error("run command", format!("{e:#?}")));
}
}
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
self.update_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -1,15 +1,16 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::{all_logs_success, to_monitor_name};
use mungos::mongodb::bson::doc;
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Deployment, Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget,
monitor_timestamp, traits::Permissioned, Deployment, DeploymentBuilder,
DeploymentWithContainerState, DockerContainerState, Log, Operation, PermissionLevel,
ServerStatus, ServerWithStatus, TerminationSignal, Update, UpdateStatus, UpdateTarget,
};
use crate::{
auth::RequestUser,
helpers::{any_option_diff_is_some, option_diff_is_some},
helpers::{any_option_diff_is_some, empty_or_only_spaces, get_image_name, option_diff_is_some},
state::State,
};
@@ -31,13 +32,6 @@ impl State {
}
}
pub async fn deployment_busy(&self, id: &str) -> bool {
match self.deployment_action_states.lock().await.get(id) {
Some(a) => a.busy(),
None => false,
}
}
pub async fn create_deployment(
&self,
name: &str,
@@ -47,16 +41,18 @@ impl State {
self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let deployment = Deployment {
name: to_monitor_name(name),
server_id,
permissions: [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
.collect(),
created_at: start_ts.clone(),
updated_at: start_ts.clone(),
..Default::default()
};
let mut deployment = DeploymentBuilder::default()
.name(to_monitor_name(name))
.server_id(server_id)
.build()
.context("failed to build deployment")?;
deployment.permissions = [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
.collect();
deployment.created_at = start_ts.clone();
deployment.updated_at = start_ts.clone();
let deployment_id = self
.db
.deployments
@@ -110,8 +106,10 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Deployment> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
let deployment = self
@@ -119,10 +117,14 @@ impl State {
.await?;
let start_ts = monitor_timestamp();
let server = self.db.get_server(&deployment.server_id).await?;
let log = self
.periphery
.container_remove(&server, &deployment.name)
.await?;
let log = match self
.periphery
.container_remove(&server, &deployment.name, stop_signal, stop_time)
.await
{
Ok(log) => log,
Err(e) => Log::error("destroy container", format!("{e:#?}")),
};
self.db
.deployments
.delete_one(deployment_id)
@@ -158,21 +160,25 @@ impl State {
new_deployment: Deployment,
user: &RequestUser,
) -> anyhow::Result<Deployment> {
if self.deployment_busy(&new_deployment.id).await {
if self.deployment_action_states.busy(&new_deployment.id).await {
return Err(anyhow!("deployment busy"));
}
let id = new_deployment.id.clone();
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(id.clone()).or_default();
entry.updating = true;
}
self.deployment_action_states
.update_entry(id.clone(), |entry| {
entry.updating = true;
})
.await;
let res = self.update_deployment_inner(new_deployment, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(id).or_default();
entry.updating = false;
}
self.deployment_action_states
.update_entry(id.clone(), |entry| {
entry.updating = false;
})
.await;
res
}
@@ -193,6 +199,33 @@ impl State {
new_deployment.created_at = current_deployment.created_at.clone();
new_deployment.updated_at = start_ts.clone();
// filter out any volumes, ports, env vars, extra args which are or contain empty strings
// these could only happen by accident
new_deployment.docker_run_args.volumes = new_deployment
.docker_run_args
.volumes
.into_iter()
.filter(|v| !empty_or_only_spaces(&v.local) && !empty_or_only_spaces(&v.container))
.collect();
new_deployment.docker_run_args.ports = new_deployment
.docker_run_args
.ports
.into_iter()
.filter(|p| !empty_or_only_spaces(&p.local) && !empty_or_only_spaces(&p.container))
.collect();
new_deployment.docker_run_args.environment = new_deployment
.docker_run_args
.environment
.into_iter()
.filter(|e| !empty_or_only_spaces(&e.variable) && !empty_or_only_spaces(&e.value))
.collect();
new_deployment.docker_run_args.extra_args = new_deployment
.docker_run_args
.extra_args
.into_iter()
.filter(|a| !a.is_empty())
.collect();
self.db
.deployments
.update_one(
@@ -243,25 +276,198 @@ impl State {
Ok(new_deployment)
}
pub async fn rename_deployment(
&self,
deployment_id: &str,
new_name: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.renaming = true;
})
.await;
let res = self
.rename_deployment_inner(deployment_id, new_name, user)
.await;
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.renaming = false;
})
.await;
res
}
async fn rename_deployment_inner(
&self,
deployment_id: &str,
new_name: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let deployment = self
.get_deployment_check_permissions(deployment_id, user, PermissionLevel::Update)
.await?;
let mut update = Update {
target: UpdateTarget::Deployment(deployment_id.to_string()),
operation: Operation::RenameDeployment,
start_ts,
status: UpdateStatus::InProgress,
operator: user.id.to_string(),
success: true,
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let server_with_status = self.get_server(&deployment.server_id, user).await;
if server_with_status.is_err() {
update.logs.push(Log::error(
"get server",
format!(
"failed to get server info: {:?}",
server_with_status.as_ref().err().unwrap()
),
));
update.status = UpdateStatus::Complete;
update.end_ts = monitor_timestamp().into();
update.success = false;
self.update_update(update).await?;
return Err(server_with_status.err().unwrap());
}
let ServerWithStatus { server, status } = server_with_status.unwrap();
if status != ServerStatus::Ok {
update.logs.push(Log::error(
"check server status",
String::from("cannot rename deployment when periphery is disabled or unreachable"),
));
update.status = UpdateStatus::Complete;
update.end_ts = monitor_timestamp().into();
update.success = false;
self.update_update(update).await?;
return Err(anyhow!(
"cannot rename deployment when periphery is disabled or unreachable"
));
}
let deployment_state = self
.get_deployment_with_container_state(user, deployment_id)
.await;
if deployment_state.is_err() {
update.logs.push(Log::error(
"check deployment status",
format!(
"could not get current state of deployment: {:?}",
deployment_state.as_ref().err().unwrap()
),
));
update.status = UpdateStatus::Complete;
update.end_ts = monitor_timestamp().into();
update.success = false;
self.update_update(update).await?;
return Err(deployment_state.err().unwrap());
}
let DeploymentWithContainerState {
deployment, state, ..
} = deployment_state.unwrap();
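// if a container exists for this deployment, rename it on the periphery host before updating the db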
if state != DockerContainerState::NotDeployed {
let log = self
.periphery
.container_rename(&server, &deployment.name, new_name)
.await;
if log.is_err() {
update.logs.push(Log::error(
"rename container",
format!("{:?}", log.as_ref().err().unwrap()),
));
update.status = UpdateStatus::Complete;
update.end_ts = monitor_timestamp().into();
update.success = false;
self.update_update(update).await?;
return Err(log.err().unwrap());
}
let log = log.unwrap();
if !log.success {
update.logs.push(log);
update.status = UpdateStatus::Complete;
update.end_ts = monitor_timestamp().into();
update.success = false;
self.update_update(update).await?;
return Err(anyhow!("rename container on periphery not successful"));
}
update.logs.push(log);
}
let res = self
.db
.deployments
.update_one(
deployment_id,
mungos::Update::<()>::Set(
doc! { "name": to_monitor_name(new_name), "updated_at": monitor_timestamp() },
),
)
.await
.context("failed to update deployment name on mongo");
if let Err(e) = res {
update
.logs
.push(Log::error("mongo update", format!("{e:?}")));
} else {
update.logs.push(Log::simple(
"mongo update",
String::from("updated name on mongo"),
))
}
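// the repo is cloned under the deployment name, so a rename requires a reclone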
if deployment.repo.is_some() {
let res = self.reclone_deployment(deployment_id, user, false).await;
if let Err(e) = res {
update
.logs
.push(Log::error("reclone repo", format!("{e:?}")));
} else {
update.logs.push(Log::simple(
"reclone repo",
"deployment repo cloned with new name".to_string(),
));
}
}
update.end_ts = monitor_timestamp().into();
update.status = UpdateStatus::Complete;
update.success = all_logs_success(&update.logs);
self.update_update(update.clone()).await?;
Ok(update)
}
pub async fn reclone_deployment(
&self,
deployment_id: &str,
user: &RequestUser,
check_deployment_busy: bool,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if check_deployment_busy && self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.recloning = true;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.recloning = true;
})
.await;
let res = self.reclone_deployment_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.recloning = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.recloning = false;
})
.await;
res
}
@@ -285,19 +491,18 @@ impl State {
};
update.id = self.add_update(update.clone()).await?;
update.success = match self.periphery.clone_repo(&server, &deployment).await {
match self.periphery.clone_repo(&server, &deployment).await {
Ok(clone_logs) => {
update.logs.extend(clone_logs);
true
}
Err(e) => {
update
.logs
.push(Log::error("clone repo", format!("{e:#?}")));
false
}
};
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
@@ -310,21 +515,27 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.deploying = true;
}
let res = self.deploy_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.deploying = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.deploying = true;
})
.await;
let res = self
.deploy_container_inner(deployment_id, user, stop_signal, stop_time)
.await;
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.deploying = false;
})
.await;
res
}
@@ -332,6 +543,8 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let mut deployment = self
@@ -339,14 +552,12 @@ impl State {
.await?;
let version = if let Some(build_id) = &deployment.build_id {
let build = self.db.get_build(build_id).await?;
let image = if let Some(docker_account) = &build.docker_account {
if deployment.docker_run_args.docker_account.is_none() {
deployment.docker_run_args.docker_account = Some(docker_account.to_string())
}
format!("{docker_account}/{}", to_monitor_name(&build.name))
} else {
to_monitor_name(&build.name)
};
let image = get_image_name(&build);
if deployment.docker_run_args.docker_account.is_none() {
if let Some(docker_account) = &build.docker_account {
deployment.docker_run_args.docker_account = Some(docker_account.to_string())
}
}
let version = if let Some(version) = &deployment.build_version {
version.clone()
} else {
@@ -371,7 +582,14 @@ impl State {
update.id = self.add_update(update.clone()).await?;
let deploy_log = match self.periphery.deploy(&server, &deployment).await {
let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();
let deploy_log = match self
.periphery
.deploy(&server, &deployment, stop_signal, stop_time)
.await
{
Ok(log) => log,
Err(e) => Log::error("deploy container", format!("{e:#?}")),
};
@@ -391,20 +609,22 @@ impl State {
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.starting = true;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.starting = true;
})
.await;
let res = self.start_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.starting = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.starting = false;
})
.await;
res
}
@@ -460,21 +680,27 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.stopping = true;
}
let res = self.stop_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.stopping = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.stopping = true;
})
.await;
let res = self
.stop_container_inner(deployment_id, user, stop_signal, stop_time)
.await;
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.stopping = false;
})
.await;
res
}
@@ -482,6 +708,8 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let deployment = self
@@ -499,9 +727,12 @@ impl State {
};
update.id = self.add_update(update.clone()).await?;
let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();
let log = self
.periphery
.container_stop(&server, &deployment.name)
.container_stop(&server, &deployment.name, stop_signal, stop_time)
.await;
update.success = match log {
@@ -530,21 +761,27 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.removing = true;
}
let res = self.remove_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.removing = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.removing = true;
})
.await;
let res = self
.remove_container_inner(deployment_id, user, stop_signal, stop_time)
.await;
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.removing = false;
})
.await;
res
}
@@ -552,6 +789,8 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let deployment = self
@@ -569,9 +808,12 @@ impl State {
};
update.id = self.add_update(update.clone()).await?;
let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();
let log = self
.periphery
.container_remove(&server, &deployment.name)
.container_remove(&server, &deployment.name, stop_signal, stop_time)
.await;
update.success = match log {
@@ -601,20 +843,22 @@ impl State {
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.pulling = true;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.pulling = true;
})
.await;
let res = self.pull_deployment_repo_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.pulling = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.pulling = false;
})
.await;
res
}

View File

@@ -11,17 +11,17 @@ use crate::{auth::RequestUser, state::State};
impl State {
pub async fn get_group_check_permissions(
&self,
deployment_id: &str,
group_id: &str,
user: &RequestUser,
permission_level: PermissionLevel,
) -> anyhow::Result<Group> {
let group = self.db.get_group(deployment_id).await?;
let group = self.db.get_group(group_id).await?;
let permissions = group.get_user_permissions(&user.id);
if user.is_admin || permissions >= permission_level {
Ok(group)
} else {
Err(anyhow!(
"user does not have required permissions on this deployment"
"user does not have required permissions on this group"
))
}
}

View File

@@ -4,6 +4,7 @@ use types::Update;
use crate::state::State;
mod build;
mod command;
mod deployment;
mod group;
mod procedure;

View File

@@ -125,7 +125,7 @@ impl State {
} in &new_procedure.stages
{
match operation {
BuildBuild | RecloneBuild => {
BuildBuild => {
self.get_build_check_permissions(&target_id, user, PermissionLevel::Execute)
.await?;
}
@@ -209,7 +209,7 @@ impl State {
}
StopContainer => {
let update = self
.stop_container(&target_id, user)
.stop_container(&target_id, user, Option::None, Option::None)
.await
.context(format!(
"failed at stop container for deployment (id: {target_id})"
@@ -218,7 +218,7 @@ impl State {
}
RemoveContainer => {
let update = self
.remove_container(&target_id, user)
.remove_container(&target_id, user, Option::None, Option::None)
.await
.context(format!(
"failed at remove container for deployment (id: {target_id})"
@@ -227,7 +227,7 @@ impl State {
}
DeployContainer => {
let update = self
.deploy_container(&target_id, user)
.deploy_container(&target_id, user, Option::None, Option::None)
.await
.context(format!(
"failed at deploy container for deployment (id: {target_id})"
@@ -236,14 +236,18 @@ impl State {
}
RecloneDeployment => {
let update = self
.reclone_deployment(&target_id, user)
.reclone_deployment(&target_id, user, true)
.await
.context(format!("failed at reclone deployment (id: {target_id})"))?;
updates.push(update);
}
PullDeployment => {
// implement this one
// let update = self.pull
let update = self
.pull_deployment_repo(&target_id, user)
.await
.context(format!("failed at pull deployment (id: {target_id})"))?;
updates.push(update);
}
// build
BuildBuild => {
@@ -253,13 +257,6 @@ impl State {
.context(format!("failed at build (id: {target_id})"))?;
updates.push(update);
}
RecloneBuild => {
let update = self
.reclone_build(&target_id, user)
.await
.context(format!("failed at reclone build (id: {target_id})"))?;
updates.push(update);
}
// server
PruneImagesServer => {
let update = self.prune_images(&target_id, user).await.context(format!(

View File

@@ -1,10 +1,10 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::to_monitor_name;
use futures_util::future::join_all;
use mungos::mongodb::bson::doc;
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Log, Operation, PermissionLevel, Server, Update, UpdateStatus, UpdateTarget,
monitor_timestamp, traits::Permissioned, Log, Operation, PermissionLevel, Server, Update,
UpdateStatus, UpdateTarget,
};
use crate::{auth::RequestUser, state::State};
@@ -27,13 +27,6 @@ impl State {
}
}
pub async fn server_busy(&self, id: &str) -> bool {
match self.server_action_states.lock().await.get(id) {
Some(a) => a.busy(),
None => false,
}
}
pub async fn create_server(
&self,
name: &str,
@@ -47,7 +40,7 @@ impl State {
}
let start_ts = monitor_timestamp();
let server = Server {
name: to_monitor_name(name),
name: name.to_string(),
address,
permissions: [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
@@ -95,28 +88,66 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Server> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
let server = self
.get_server_check_permissions(server_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
self.db.servers.delete_one(&server_id).await?;
let update = Update {
let mut update = Update {
target: UpdateTarget::Server(server_id.to_string()),
operation: Operation::DeleteServer,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
logs: vec![Log::simple(
"delete server",
format!("deleted server {}", server.name),
)],
success: true,
status: UpdateStatus::InProgress,
..Default::default()
};
self.add_update(update).await?;
update.id = self.add_update(update.clone()).await?;
let res = async {
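// tear down everything referencing this server: its deployments, its builds, and any group membership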
let delete_deployments = self
.db
.deployments
.get_some(doc! { "server_id": server_id }, None)
.await?
.into_iter()
.map(|d| async move { self.delete_deployment(&d.id, user, None, None).await });
let delete_builds = self
.db
.builds
.get_some(doc! { "server_id": server_id }, None)
.await?
.into_iter()
.map(|b| async move { self.delete_build(&b.id, user).await });
let update_groups = self
.db
.groups
.update_many(doc! {}, doc! { "$pull": { "servers": server_id } });
let (dep_res, build_res, group_res) = tokio::join!(
join_all(delete_deployments),
join_all(delete_builds),
update_groups
);
dep_res.into_iter().collect::<anyhow::Result<Vec<_>>>()?;
build_res.into_iter().collect::<anyhow::Result<Vec<_>>>()?;
group_res?;
self.db.servers.delete_one(&server_id).await?;
anyhow::Ok(())
}
.await;
let log = match res {
Ok(_) => Log::simple("delete server", format!("deleted server {}", server.name)),
Err(e) => Log::error("delete server", format!("failed to delete server\n{e:#?}")),
};
update.end_ts = Some(monitor_timestamp());
update.status = UpdateStatus::Complete;
update.success = log.success;
update.logs.push(log);
self.update_update(update).await?;
Ok(server)
}
@@ -125,7 +156,7 @@ impl State {
mut new_server: Server,
user: &RequestUser,
) -> anyhow::Result<Server> {
if self.server_busy(&new_server.id).await {
if self.server_action_states.busy(&new_server.id).await {
return Err(anyhow!("server busy"));
}
let current_server = self
@@ -169,20 +200,22 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_networks = true;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_networks = true;
})
.await;
let res = self.prune_networks_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_networks = false;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_networks = false;
})
.await;
res
}
@@ -230,20 +263,22 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_images = true;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_images = true;
})
.await;
let res = self.prune_images_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_images = false;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_images = false;
})
.await;
res
}
@@ -292,20 +327,22 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_containers = true;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_containers = true;
})
.await;
let res = self.prune_containers_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_containers = false;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_containers = false;
})
.await;
res
}

View File

@@ -1,18 +1,27 @@
use std::{cmp::Ordering, collections::HashMap};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
};
use futures_util::TryStreamExt;
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, FindOptions, Serialize};
use mungos::mongodb::{
bson::{doc, Document},
options::FindOptions,
};
use serde::{Deserialize, Serialize};
use types::{
traits::Permissioned, Build, BuildActionState, BuildVersionsReponse, Operation,
PermissionLevel, UpdateStatus,
monitor_ts_from_unix, traits::Permissioned, unix_from_monitor_ts, AwsBuilderConfig, Build,
BuildActionState, BuildVersionsReponse, Operation, PermissionLevel, UpdateStatus,
};
use typeshare::typeshare;
const NUM_VERSIONS_PER_PAGE: u64 = 10;
const ONE_DAY_MS: i64 = 86400000;
use crate::{
auth::{RequestUser, RequestUserExtension},
@@ -31,14 +40,12 @@ struct BuildId {
#[derive(Serialize, Deserialize)]
struct CreateBuildBody {
name: String,
server_id: String,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
struct CopyBuildBody {
name: String,
server_id: String,
}
#[typeshare]
@@ -51,12 +58,35 @@ pub struct BuildVersionsQuery {
patch: Option<i32>,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildStatsQuery {
#[serde(default)]
page: u32,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildStatsResponse {
pub total_time: f64, // in hours
pub total_count: f64, // number of builds
pub days: Vec<BuildStatsDay>,
}
#[typeshare]
#[derive(Serialize, Deserialize, Default)]
pub struct BuildStatsDay {
pub time: f64,
pub count: f64,
pub ts: f64,
}
pub fn router() -> Router {
Router::new()
.route(
"/:id",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let build = state
@@ -70,7 +100,7 @@ pub fn router() -> Router {
.route(
"/list",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Query(query): Query<Document>| async move {
let builds = state
@@ -84,11 +114,11 @@ pub fn router() -> Router {
.route(
"/create",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<CreateBuildBody>| async move {
let build = state
.create_build(&build.name, build.server_id, &user)
.create_build(&build.name, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(build))
@@ -98,7 +128,7 @@ pub fn router() -> Router {
.route(
"/create_full",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<Build>| async move {
let build = spawn_request_action(async move {
@@ -115,13 +145,13 @@ pub fn router() -> Router {
.route(
"/:id/copy",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }): Path<BuildId>,
Json(build): Json<CopyBuildBody>| async move {
let build = spawn_request_action(async move {
state
.copy_build(&id, build.name, build.server_id, &user)
.copy_build(&id, build.name, &user)
.await
.map_err(handle_anyhow_error)
})
@@ -133,7 +163,7 @@ pub fn router() -> Router {
.route(
"/:id/delete",
delete(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let build = spawn_request_action(async move {
@@ -150,7 +180,7 @@ pub fn router() -> Router {
.route(
"/update",
patch(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<Build>| async move {
let build = spawn_request_action(async move {
@@ -167,7 +197,7 @@ pub fn router() -> Router {
.route(
"/:id/build",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let update = spawn_request_action(async move {
@@ -181,27 +211,10 @@ pub fn router() -> Router {
},
),
)
.route(
"/:id/reclone",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let update = spawn_request_action(async move {
state
.reclone_build(&build_id.id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(update))
},
),
)
.route(
"/:id/action_state",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }): Path<BuildId>| async move {
let action_state = state
@@ -215,7 +228,7 @@ pub fn router() -> Router {
.route(
"/:id/versions",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }),
Query(query): Query<BuildVersionsQuery>| async move {
@@ -227,6 +240,29 @@ pub fn router() -> Router {
},
),
)
.route(
"/aws_builder_defaults",
get(|state: StateExtension| async move {
Json(AwsBuilderConfig {
access_key_id: String::new(),
secret_access_key: String::new(),
..state.config.aws.clone()
})
}),
)
.route(
"/docker_organizations",
get(|state: StateExtension| async move {
Json(state.config.docker_organizations.clone())
}),
)
.route(
"/stats",
get(|state: StateExtension, query: Query<BuildStatsQuery>| async move {
let stats = state.get_build_stats(query.page).await.map_err(handle_anyhow_error)?;
response!(Json(stats))
}),
)
}
impl State {
@@ -261,17 +297,11 @@ impl State {
) -> anyhow::Result<BuildActionState> {
self.get_build_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self
.build_action_states
.lock()
.await
.entry(id)
.or_default()
.clone();
let action_state = self.build_action_states.get_or_default(id).await;
Ok(action_state)
}
pub async fn get_build_versions(
async fn get_build_versions(
&self,
id: &str,
user: &RequestUser,
@@ -320,4 +350,86 @@ impl State {
.collect();
Ok(versions)
}
async fn get_build_stats(&self, page: u32) -> anyhow::Result<BuildStatsResponse> {
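// each stats page covers a 30 day window; page 0 ends at the start of the next utc day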
let curr_ts = unix_timestamp_ms() as i64;
let next_day = curr_ts - curr_ts % ONE_DAY_MS + ONE_DAY_MS;
let close_ts = next_day - page as i64 * 30 * ONE_DAY_MS;
let open_ts = close_ts - 30 * ONE_DAY_MS;
let mut build_updates = self
.db
.updates
.collection
.find(
doc! {
"start_ts": {
"$gte": monitor_ts_from_unix(open_ts)
.context("open_ts out of bounds")?,
"$lt": monitor_ts_from_unix(close_ts)
.context("close_ts out of bounds")?
},
"operation": Operation::BuildBuild.to_string(),
},
None,
)
.await?;
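// seed a bucket for every day in the window so days without builds still report zero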
let mut days = HashMap::<i64, BuildStatsDay>::with_capacity(32);
let mut curr = open_ts;
while curr < close_ts {
let stats = BuildStatsDay {
ts: curr as f64,
..Default::default()
};
days.insert(curr, stats);
curr += ONE_DAY_MS;
}
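// accumulate each completed build update into its start day's bucket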
while let Some(update) = build_updates.try_next().await? {
if let Some(end_ts) = update.end_ts {
let start_ts = unix_from_monitor_ts(&update.start_ts)
.context("failed to parse update start_ts")?;
let end_ts =
unix_from_monitor_ts(&end_ts).context("failed to parse update end_ts")?;
let day = start_ts - start_ts % ONE_DAY_MS;
let entry = days.entry(day).or_default();
entry.count += 1.0;
entry.time += ms_to_hour(end_ts - start_ts);
}
}
Ok(BuildStatsResponse::new(days.into_values().collect()))
}
}
impl BuildStatsResponse {
fn new(mut days: Vec<BuildStatsDay>) -> BuildStatsResponse {
days.sort_by(|a, b| a.ts.partial_cmp(&b.ts).unwrap_or(Ordering::Equal));
let mut total_time = 0.0;
let mut total_count = 0.0;
for day in &days {
total_time += day.time;
total_count += day.count;
}
BuildStatsResponse {
total_time,
total_count,
days,
}
}
}
const MS_TO_HOUR_DIVISOR: f64 = 1000.0 * 60.0 * 60.0;
fn ms_to_hour(duration: i64) -> f64 {
duration as f64 / MS_TO_HOUR_DIVISOR
}

core/src/api/command.rs (new file)

@@ -0,0 +1,220 @@
use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, CommandActionState, PeripheryCommand, PermissionLevel};
use typeshare::typeshare;
use crate::{
api::spawn_request_action,
auth::{RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
};
#[derive(Serialize, Deserialize)]
pub struct CommandId {
id: String,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateCommandBody {
name: String,
server_id: String,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CopyCommandBody {
name: String,
server_id: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/:id",
get(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let command = state
.get_command_check_permissions(&id, &user, PermissionLevel::Read)
.await
.map_err(handle_anyhow_error)?;
response!(Json(command))
},
),
)
.route(
"/list",
get(
|state: StateExtension,
user: RequestUserExtension,
Query(query): Query<Document>| async move {
let commands = state
.list_commands(&user, query)
.await
.map_err(handle_anyhow_error)?;
response!(Json(commands))
},
),
)
.route(
"/create",
post(
|state: StateExtension,
user: RequestUserExtension,
Json(command): Json<CreateCommandBody>| async move {
let command = state
.create_command(&command.name, command.server_id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(command))
},
),
)
.route(
"/create_full",
post(
|state: StateExtension,
user: RequestUserExtension,
Json(command): Json<PeripheryCommand>| async move {
let command = spawn_request_action(async move {
state
.create_full_command(command, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(command))
},
),
)
.route(
"/:id/copy",
post(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id }),
Json(command): Json<CopyCommandBody>| async move {
let command = spawn_request_action(async move {
state
.copy_command(&id, command.name, command.server_id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(command))
},
),
)
.route(
"/:id/delete",
delete(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let build = spawn_request_action(async move {
state
.delete_command(&id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(build))
},
),
)
.route(
"/update",
patch(
|state: StateExtension,
user: RequestUserExtension,
Json(command): Json<PeripheryCommand>| async move {
let command = spawn_request_action(async move {
state
.update_command(command, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(command))
},
),
)
.route(
"/:id/action_state",
get(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let action_state = state
.get_command_action_states(id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(action_state))
},
),
)
.route(
"/:id/run",
post(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let update = spawn_request_action(async move {
state
.run_command(&id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(update))
},
),
)
}
impl State {
async fn list_commands(
&self,
user: &RequestUser,
query: impl Into<Option<Document>>,
) -> anyhow::Result<Vec<PeripheryCommand>> {
let commands: Vec<PeripheryCommand> = self
.db
.commands
.get_some(query, None)
.await
.context("failed at get all commands query")?
.into_iter()
.filter(|s| {
if user.is_admin {
true
} else {
let permissions = s.get_user_permissions(&user.id);
permissions != PermissionLevel::None
}
})
.collect();
Ok(commands)
}
async fn get_command_action_states(
&self,
id: String,
user: &RequestUser,
) -> anyhow::Result<CommandActionState> {
self.get_command_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self.command_action_states.get_or_default(id).await;
Ok(action_state)
}
}

View File

@@ -4,14 +4,19 @@ use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
Json, Router,
};
use futures_util::future::join_all;
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Document, Serialize};
use mungos::mongodb::{
bson::{doc, Document},
options::FindOneOptions,
};
use serde::{Deserialize, Serialize};
use types::{
traits::Permissioned, Deployment, DeploymentActionState, DeploymentWithContainerState,
DockerContainerState, DockerContainerStats, Log, PermissionLevel, Server,
DockerContainerState, DockerContainerStats, Log, Operation, PermissionLevel, Server,
TerminationSignal, UpdateStatus,
};
use typeshare::typeshare;
@@ -42,22 +47,35 @@ pub struct CopyDeploymentBody {
server_id: String,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct RenameDeploymentBody {
new_name: String,
}
#[typeshare]
#[derive(Deserialize)]
pub struct GetContainerLogQuery {
tail: Option<u32>,
}
#[typeshare]
#[derive(Deserialize)]
pub struct StopContainerQuery {
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
}
pub fn router() -> Router {
Router::new()
.route(
"/:id",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let res = state
.get_deployment_with_container_state(&user, &deployment_id.id)
.get_deployment_with_container_state(&user, &id)
.await
.map_err(handle_anyhow_error)?;
response!(Json(res))
@@ -67,8 +85,8 @@ pub fn router() -> Router {
.route(
"/list",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Query(query): Query<Document>| async move {
let deployments = state
.list_deployments_with_container_state(&user, query)
@@ -81,8 +99,8 @@ pub fn router() -> Router {
.route(
"/create",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Json(deployment): Json<CreateDeploymentBody>| async move {
let deployment = state
.create_deployment(&deployment.name, deployment.server_id, &user)
@@ -95,8 +113,8 @@ pub fn router() -> Router {
.route(
"/create_full",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Json(full_deployment): Json<Deployment>| async move {
let deployment = spawn_request_action(async move {
state
@@ -112,9 +130,9 @@ pub fn router() -> Router {
.route(
"/:id/copy",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(DeploymentId { id }): Path<DeploymentId>,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Json(deployment): Json<CopyDeploymentBody>| async move {
let deployment = spawn_request_action(async move {
state
@@ -130,12 +148,13 @@ pub fn router() -> Router {
.route(
"/:id/delete",
delete(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId{ id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let deployment = spawn_request_action(async move {
state
.delete_deployment(&deployment_id.id, &user)
.delete_deployment(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -147,8 +166,8 @@ pub fn router() -> Router {
.route(
"/update",
patch(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Json(deployment): Json<Deployment>| async move {
let deployment = spawn_request_action(async move {
state
@@ -162,14 +181,32 @@ pub fn router() -> Router {
),
)
.route(
"/:id/reclone",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
"/:id/rename",
patch(
|state: StateExtension,
user: RequestUserExtension,
deployment: Path<DeploymentId>,
body: Json<RenameDeploymentBody>| async move {
let update = spawn_request_action(async move {
state
.reclone_deployment(&deployment_id.id, &user)
.rename_deployment(&deployment.id, &body.new_name, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(update))
},
),
)
.route(
"/:id/reclone",
post(
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let update = spawn_request_action(async move {
state
.reclone_deployment(&id, &user, true)
.await
.map_err(handle_anyhow_error)
})
@@ -181,12 +218,13 @@ pub fn router() -> Router {
.route(
"/:id/deploy",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let update = spawn_request_action(async move {
state
.deploy_container(&deployment_id.id, &user)
.deploy_container(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -198,12 +236,12 @@ pub fn router() -> Router {
.route(
"/:id/start_container",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let update = spawn_request_action(async move {
state
.start_container(&deployment_id.id, &user)
.start_container(&id, &user)
.await
.map_err(handle_anyhow_error)
})
@@ -215,12 +253,13 @@ pub fn router() -> Router {
.route(
"/:id/stop_container",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let update = spawn_request_action(async move {
state
.stop_container(&deployment_id.id, &user)
.stop_container(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -232,12 +271,13 @@ pub fn router() -> Router {
.route(
"/:id/remove_container",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let update = spawn_request_action(async move {
state
.remove_container(&deployment_id.id, &user)
.remove_container(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -249,12 +289,12 @@ pub fn router() -> Router {
.route(
"/:id/pull",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let update = spawn_request_action(async move {
state
.pull_deployment_repo(&deployment_id.id, &user)
.pull_deployment_repo(&id, &user)
.await
.map_err(handle_anyhow_error)
})
@@ -266,8 +306,8 @@ pub fn router() -> Router {
.route(
"/:id/action_state",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }): Path<DeploymentId>| async move {
let action_state = state
.get_deployment_action_states(id, &user)
@@ -280,12 +320,12 @@ pub fn router() -> Router {
.route(
"/:id/log",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(query): Query<GetContainerLogQuery>| async move {
let log = state
.get_deployment_container_log(&deployment_id.id, &user, query.tail)
.get_deployment_container_log(&id, &user, query.tail)
.await
.map_err(handle_anyhow_error)?;
response!(Json(log))
@@ -295,21 +335,35 @@ pub fn router() -> Router {
.route(
"/:id/stats",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let stats = state
.get_deployment_container_stats(&deployment_id.id, &user)
.get_deployment_container_stats(&id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(stats))
},
),
)
.route(
"/:id/deployed_version",
get(
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let version = state
.get_deployment_deployed_version(&id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(version)
},
),
)
}
impl State {
async fn get_deployment_with_container_state(
pub async fn get_deployment_with_container_state(
&self,
user: &RequestUser,
id: &str,
@@ -401,13 +455,7 @@ impl State {
) -> anyhow::Result<DeploymentActionState> {
self.get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self
.deployment_action_states
.lock()
.await
.entry(id)
.or_default()
.clone();
let action_state = self.deployment_action_states.get_or_default(id).await;
Ok(action_state)
}
@@ -443,4 +491,53 @@ impl State {
.await?;
Ok(stats)
}
async fn get_deployment_deployed_version(
&self,
id: &str,
user: &RequestUser,
) -> anyhow::Result<String> {
let deployment = self
.get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
if deployment.build_id.is_some() {
let latest_deploy_update = self
.db
.updates
.find_one(
doc! {
"target": {
"type": "Deployment",
"id": id
},
"operation": Operation::DeployContainer.to_string(),
"status": UpdateStatus::Complete.to_string(),
"success": true,
},
FindOneOptions::builder().sort(doc! { "_id": -1 }).build(),
)
.await
.context("failed at query to get latest deploy update from mongo")?;
if let Some(update) = latest_deploy_update {
if let Some(version) = update.version {
Ok(version.to_string())
} else {
Ok("unknown".to_string())
}
} else {
Ok("unknown".to_string())
}
} else {
let split = deployment
.docker_run_args
.image
.split(':')
.collect::<Vec<&str>>();
if let Some(version) = split.get(1) {
Ok(version.to_string())
} else {
Ok("unknown".to_string())
}
}
}
}
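For reference, a minimal standalone sketch of the image-tag fallback used above when a deployment has no attached build (a hypothetical helper, not the handler itself): the deployed version is whatever follows the first `:` in the configured image name.

```rust
// sketch of the fallback in get_deployment_deployed_version:
// take whatever follows the first ':' in the image name.
fn version_from_image(image: &str) -> String {
    match image.split(':').nth(1) {
        Some(version) => version.to_string(),
        None => "unknown".to_string(),
    }
}

fn main() {
    assert_eq!(version_from_image("mongo:4.4"), "4.4");
    assert_eq!(version_from_image("mongo"), "unknown");
}
```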


@@ -4,7 +4,7 @@ use axum_oauth2::random_duration;
use helpers::handle_anyhow_error;
use hex::ToHex;
use hmac::{Hmac, Mac};
use mungos::Deserialize;
use serde::Deserialize;
use sha2::Sha256;
use types::GITHUB_WEBHOOK_USER_ID;
@@ -74,6 +74,7 @@ impl State {
id: String::from(GITHUB_WEBHOOK_USER_ID),
is_admin: true,
create_server_permissions: false,
create_build_permissions: false,
},
)
.await?;
@@ -103,6 +104,7 @@ impl State {
id: String::from(GITHUB_WEBHOOK_USER_ID),
is_admin: true,
create_server_permissions: false,
create_build_permissions: false,
},
)
.await?;
@@ -127,6 +129,7 @@ impl State {
id: String::from(GITHUB_WEBHOOK_USER_ID),
is_admin: true,
create_server_permissions: false,
create_build_permissions: false,
},
)
.await?;


@@ -5,7 +5,8 @@ use axum::{
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Document, Serialize};
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, Group, PermissionLevel};
use typeshare::typeshare;


@@ -1,54 +1,102 @@
use anyhow::Context;
use anyhow::{anyhow, Context};
use axum::{
body::Body,
extract::Path,
http::{Request, StatusCode},
middleware,
routing::get,
routing::{get, post},
Extension, Json, Router,
};
use futures_util::Future;
use helpers::handle_anyhow_error;
use mungos::Deserialize;
use types::User;
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{PermissionLevel, UpdateTarget, User};
use typeshare::typeshare;
use crate::{
auth::{auth_request, JwtExtension, RequestUserExtension},
state::StateExtension,
auth::{auth_request, JwtExtension, RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
ResponseResult,
};
pub mod build;
pub mod deployment;
mod build;
mod command;
mod deployment;
mod github_listener;
pub mod group;
pub mod permissions;
pub mod procedure;
pub mod secret;
pub mod server;
pub mod update;
mod group;
mod permissions;
mod procedure;
mod secret;
mod server;
mod update;
#[typeshare]
#[derive(Deserialize)]
struct UpdateDescriptionBody {
target: UpdateTarget,
description: String,
}
#[derive(Deserialize)]
struct UserId {
id: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/user",
get(|jwt, req| async { get_user(jwt, req).await.map_err(handle_anyhow_error) }),
"/title",
get(|state: StateExtension| async move { state.config.title.clone() }),
)
.route("/user", get(get_request_user))
.nest("/listener", github_listener::router())
.nest(
"/",
Router::new()
.route("/user/:id", get(get_user_at_id))
.route(
"/username/:id",
get(|state, user_id| async {
get_username(state, user_id)
get(|state: StateExtension, Path(UserId { id })| async move {
let user = state
.db
.get_user(&id)
.await
.map_err(handle_anyhow_error)
.context("failed to find user at id")
.map_err(handle_anyhow_error)?;
response!(Json(user.username))
}),
)
.route(
"/github_webhook_base_url",
get(|state: StateExtension| async move {
state
.config
.github_webhook_base_url
.as_ref()
.unwrap_or(&state.config.host)
.to_string()
}),
)
.route(
"/update_description",
post(
|state: StateExtension,
user: RequestUserExtension,
body: Json<UpdateDescriptionBody>| async move {
state
.update_description(&body.target, &body.description, &user)
.await
.map_err(handle_anyhow_error)
},
),
)
.route("/users", get(get_users))
.nest("/build", build::router())
.nest("/deployment", deployment::router())
.nest("/server", server::router())
.nest("/command", command::router())
.nest("/procedure", procedure::router())
.nest("/group", group::router())
.nest("/update", update::router())
@@ -58,8 +106,11 @@ pub fn router() -> Router {
)
}
async fn get_user(Extension(jwt): JwtExtension, req: Request<Body>) -> anyhow::Result<Json<User>> {
let mut user = jwt.authenticate(&req).await?;
async fn get_request_user(
Extension(jwt): JwtExtension,
req: Request<Body>,
) -> ResponseResult<Json<User>> {
let mut user = jwt.authenticate(&req).await.map_err(handle_anyhow_error)?;
user.password = None;
for secret in &mut user.secrets {
secret.hash = String::new();
@@ -67,23 +118,10 @@ async fn get_user(Extension(jwt): JwtExtension, req: Request<Body>) -> anyhow::R
Ok(Json(user))
}
#[derive(Deserialize)]
struct UserId {
id: String,
}
async fn get_username(
state: StateExtension,
Path(UserId { id }): Path<UserId>,
) -> anyhow::Result<String> {
let user = state.db.get_user(&id).await?;
Ok(user.username)
}
async fn get_users(
state: StateExtension,
user: RequestUserExtension,
) -> Result<Json<Vec<User>>, (StatusCode, String)> {
) -> ResponseResult<Json<Vec<User>>> {
if user.is_admin {
let users = state
.db
@@ -105,8 +143,33 @@ async fn get_users(
}
}
async fn get_user_at_id(
state: StateExtension,
Path(UserId { id }): Path<UserId>,
user: RequestUserExtension,
) -> ResponseResult<Json<User>> {
if user.is_admin {
let mut user = state
.db
.users
.find_one_by_id(&id)
.await
.context("failed at query to get user from mongo")
.map_err(handle_anyhow_error)?
.ok_or(anyhow!("did not find user with id {id}"))
.map_err(handle_anyhow_error)?;
user.password = None;
for secret in &mut user.secrets {
secret.hash = String::new();
}
Ok(Json(user))
} else {
Err((StatusCode::UNAUTHORIZED, "user is not admin".to_string()))
}
}
// requested actions must run in a spawned task to prevent them from being dropped mid-action when the user disconnects prematurely
pub async fn spawn_request_action<A>(action: A) -> Result<A::Output, (StatusCode, String)>
pub async fn spawn_request_action<A>(action: A) -> ResponseResult<A::Output>
where
A: Future + Send + 'static,
A::Output: Send + 'static,
@@ -117,3 +180,57 @@ where
.map_err(handle_anyhow_error)?;
Ok(res)
}
impl State {
pub async fn update_description(
&self,
target: &UpdateTarget,
description: &str,
user: &RequestUser,
) -> anyhow::Result<()> {
match target {
UpdateTarget::Build(id) => {
self.get_build_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.builds
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Deployment(id) => {
self.get_deployment_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.deployments
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Server(id) => {
self.get_server_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.servers
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Group(id) => {
self.get_group_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.groups
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Procedure(id) => {
self.get_procedure_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.procedures
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
_ => return Err(anyhow!("invalid target: {target:?}")),
}
Ok(())
}
}
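A hypothetical client-side call to the new `/api/update_description` route; the host, port, JWT, and id below are placeholders, and the `target` JSON shape is inferred from the `UpdateTarget` mongo queries above rather than confirmed:

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // placeholders: adjust host/port, JWT, and the deployment id
    let body = json!({
        "target": { "type": "Deployment", "id": "<deployment_id>" },
        "description": "postgres backing the staging api",
    });
    let status = reqwest::Client::new()
        .post("http://localhost:9000/api/update_description")
        .bearer_auth("<JWT>")
        .json(&body)
        .send()
        .await?
        .status();
    println!("update_description -> {status}");
    Ok(())
}
```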


@@ -1,10 +1,11 @@
use anyhow::{anyhow, Context};
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, Serialize};
use mungos::mongodb::bson::{doc, Document};
use serde::{Deserialize, Serialize};
use types::{
monitor_timestamp, Build, Deployment, Log, Operation, PermissionLevel, PermissionsTarget,
Procedure, Server, Update, UpdateStatus, UpdateTarget,
monitor_timestamp, Build, Deployment, Group, Log, Operation, PermissionLevel,
PermissionsTarget, Procedure, Server, Update, UpdateStatus, UpdateTarget,
};
use typeshare::typeshare;
@@ -33,6 +34,13 @@ struct ModifyUserCreateServerBody {
create_server_permissions: bool,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
struct ModifyUserCreateBuildBody {
user_id: String,
create_build_permissions: bool,
}
pub fn router() -> Router {
Router::new()
.route(
@@ -62,14 +70,23 @@ pub fn router() -> Router {
response!(Json(update))
}),
)
.route(
"/modify_create_build",
post(|state, user, body| async {
let update = modify_user_create_build_permissions(state, user, body)
.await
.map_err(handle_anyhow_error)?;
response!(Json(update))
}),
)
}
async fn update_permissions(
Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Extension(req_user): RequestUserExtension,
Json(permission_update): Json<PermissionsUpdateBody>,
) -> anyhow::Result<Update> {
if !user.is_admin {
if !req_user.is_admin {
return Err(anyhow!(
"user not authorized for this action (is not admin)"
));
@@ -91,7 +108,7 @@ async fn update_permissions(
operation: Operation::ModifyUserPermissions,
start_ts: monitor_timestamp(),
success: true,
operator: user.id.clone(),
operator: req_user.id.clone(),
status: UpdateStatus::Complete,
..Default::default()
};
@@ -183,9 +200,9 @@ async fn update_permissions(
.procedures
.find_one_by_id(&permission_update.target_id)
.await
.context("failed at find build query")?
.context("failed at find procedure query")?
.ok_or(anyhow!(
"failed to find a build with id {}",
"failed to find a procedure with id {}",
permission_update.target_id
))?;
state
@@ -204,6 +221,33 @@ async fn update_permissions(
target_user.username, permission_update.permission, procedure.name
)
}
PermissionsTarget::Group => {
let group = state
.db
.groups
.find_one_by_id(&permission_update.target_id)
.await
.context("failed at find group query")?
.ok_or(anyhow!(
"failed to find a group with id {}",
permission_update.target_id
))?;
state
.db
.groups
.update_one::<Group>(
&permission_update.target_id,
mungos::Update::Set(doc! {
format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
}),
)
.await?;
update.target = UpdateTarget::Group(group.id);
format!(
"user {} given {} permissions on group {}",
target_user.username, permission_update.permission, group.name
)
}
};
update
.logs
@@ -215,10 +259,10 @@ async fn update_permissions(
async fn modify_user_enabled(
Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Extension(req_user): RequestUserExtension,
Json(ModifyUserEnabledBody { user_id, enabled }): Json<ModifyUserEnabledBody>,
) -> anyhow::Result<Update> {
if !user.is_admin {
if !req_user.is_admin {
return Err(anyhow!(
"user does not have permissions for this action (not admin)"
));
@@ -248,7 +292,7 @@ async fn modify_user_enabled(
end_ts: Some(ts),
status: UpdateStatus::Complete,
success: true,
operator: user.id.clone(),
operator: req_user.id.clone(),
..Default::default()
};
update.id = state.add_update(update.clone()).await?;
@@ -257,18 +301,18 @@ async fn modify_user_enabled(
async fn modify_user_create_server_permissions(
Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Extension(req_user): RequestUserExtension,
Json(ModifyUserCreateServerBody {
user_id,
create_server_permissions,
}): Json<ModifyUserCreateServerBody>,
) -> anyhow::Result<Update> {
if !user.is_admin {
if !req_user.is_admin {
return Err(anyhow!(
"user does not have permissions for this action (not admin)"
));
}
let user = state
let target_user = state
.db
.users
.find_one_by_id(&user_id)
@@ -296,14 +340,69 @@ async fn modify_user_create_server_permissions(
"modify user create server permissions",
format!(
"{update_type} create server permissions for {} (id: {})",
user.username, user.id
target_user.username, target_user.id
),
)],
start_ts: ts.clone(),
end_ts: Some(ts),
status: UpdateStatus::Complete,
success: true,
operator: user.id.clone(),
operator: req_user.id.clone(),
..Default::default()
};
update.id = state.add_update(update.clone()).await?;
Ok(update)
}
async fn modify_user_create_build_permissions(
Extension(state): StateExtension,
Extension(req_user): RequestUserExtension,
Json(ModifyUserCreateBuildBody {
user_id,
create_build_permissions,
}): Json<ModifyUserCreateBuildBody>,
) -> anyhow::Result<Update> {
if !req_user.is_admin {
return Err(anyhow!(
"user does not have permissions for this action (not admin)"
));
}
let target_user = state
.db
.users
.find_one_by_id(&user_id)
.await
.context("failed at mongo query to find target user")?
.ok_or(anyhow!("did not find any user with user_id {user_id}"))?;
state
.db
.users
.update_one::<Document>(
&user_id,
mungos::Update::Set(doc! { "create_build_permissions": create_build_permissions }),
)
.await?;
let update_type = if create_build_permissions {
"enabled"
} else {
"disabled"
};
let ts = monitor_timestamp();
let mut update = Update {
target: UpdateTarget::System,
operation: Operation::ModifyUserCreateBuildPermissions,
logs: vec![Log::simple(
"modify user create build permissions",
format!(
"{update_type} create build permissions for {} (id: {})",
target_user.username, target_user.id
),
)],
start_ts: ts.clone(),
end_ts: Some(ts),
status: UpdateStatus::Complete,
success: true,
operator: req_user.id.clone(),
..Default::default()
};
update.id = state.add_update(update.clone()).await?;
Ok(update)
}


@@ -5,7 +5,8 @@ use axum::{
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Document, Serialize};
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, PermissionLevel, Procedure};
use typeshare::typeshare;


@@ -5,7 +5,11 @@ use axum::{
Extension, Json, Router,
};
use helpers::{generate_secret, handle_anyhow_error};
use mungos::{doc, to_bson, Deserialize, Document, Serialize, Update};
use mungos::{
mongodb::bson::{doc, to_bson, Document},
Update,
};
use serde::{Deserialize, Serialize};
use types::{monitor_timestamp, ApiSecret};
use typeshare::typeshare;


@@ -1,5 +1,5 @@
use anyhow::{anyhow, Context};
use async_timing_util::get_timelength_in_ms;
use async_timing_util::{get_timelength_in_ms, unix_timestamp_ms};
use axum::{
extract::{ws::Message as AxumMessage, Path, Query, WebSocketUpgrade},
response::IntoResponse,
@@ -8,7 +8,11 @@ use axum::{
};
use futures_util::{future::join_all, SinkExt, StreamExt};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document};
use mungos::mongodb::{
bson::{doc, Document},
options::FindOptions,
};
use serde::Deserialize;
use tokio::select;
use tokio_tungstenite::tungstenite::Message;
use tokio_util::sync::CancellationToken;
@@ -19,7 +23,7 @@ use types::{
};
use typeshare::typeshare;
const MAX_HISTORICAL_STATS_LIMIT: i64 = 1000;
const MAX_HISTORICAL_STATS_LIMIT: i64 = 500;
use crate::{
auth::{RequestUser, RequestUserExtension},
@@ -339,6 +343,20 @@ pub fn router() -> Router {
},
),
)
.route(
"/:id/secrets",
get(
|state: StateExtension,
user: RequestUserExtension,
Path(ServerId { id })| async move {
let vars = state
.get_available_secrets(&id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(vars))
},
),
)
.route(
"/:id/action_state",
get(
@@ -356,7 +374,11 @@ pub fn router() -> Router {
}
impl State {
async fn get_server(&self, id: &str, user: &RequestUser) -> anyhow::Result<ServerWithStatus> {
pub async fn get_server(
&self,
id: &str,
user: &RequestUser,
) -> anyhow::Result<ServerWithStatus> {
let server = self
.get_server_check_permissions(id, user, PermissionLevel::Read)
.await?;
@@ -465,14 +487,9 @@ impl State {
user: &RequestUser,
query: &HistoricalStatsQuery,
) -> anyhow::Result<Vec<SystemStatsRecord>> {
let limit = if query.limit as i64 > MAX_HISTORICAL_STATS_LIMIT {
MAX_HISTORICAL_STATS_LIMIT
} else {
query.limit as i64
};
self.get_server_check_permissions(server_id, user, PermissionLevel::Read)
.await?;
let ts_mod = get_timelength_in_ms(query.interval.to_string().parse().unwrap()) as i64;
let mut projection = doc! { "processes": 0, "disk.disks": 0 };
if !query.networks {
projection.insert("networks", 0);
@@ -480,14 +497,30 @@ impl State {
if !query.components {
projection.insert("components", 0);
}
let limit = if query.limit as i64 > MAX_HISTORICAL_STATS_LIMIT {
MAX_HISTORICAL_STATS_LIMIT
} else {
query.limit as i64
};
let interval = get_timelength_in_ms(query.interval.to_string().parse().unwrap()) as i64;
let mut ts_vec = Vec::<i64>::new();
let curr_ts = unix_timestamp_ms() as i64;
let mut curr_ts = curr_ts - curr_ts % interval - interval * limit * query.page as i64;
for _ in 0..limit {
ts_vec.push(curr_ts);
curr_ts -= interval;
}
self.db
.stats
.get_most_recent(
"ts",
limit,
query.page as u64 * limit as u64,
doc! { "server_id": server_id, "ts": { "$mod": [ts_mod, 0] } },
projection,
.get_some(
doc! {
"server_id": server_id,
"ts": { "$in": ts_vec }
},
FindOptions::builder()
.sort(doc! { "ts": 1 })
.projection(projection)
.build(),
)
.await
.context("failed at mongo query to get stats")
@@ -628,6 +661,18 @@ impl State {
Ok(docker_accounts)
}
async fn get_available_secrets(
&self,
id: &str,
user: &RequestUser,
) -> anyhow::Result<Vec<String>> {
let server = self
.get_server_check_permissions(id, user, PermissionLevel::Read)
.await?;
let vars = self.periphery.get_available_secrets(&server).await?;
Ok(vars)
}
async fn get_server_action_states(
&self,
id: String,
@@ -635,13 +680,7 @@ impl State {
) -> anyhow::Result<ServerActionState> {
self.get_server_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self
.server_action_states
.lock()
.await
.entry(id)
.or_default()
.clone();
let action_state = self.server_action_states.get_or_default(id).await;
Ok(action_state)
}
}


@@ -1,9 +1,7 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use axum::{extract::Query, routing::get, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, to_bson, ObjectId};
use mungos::mongodb::bson::{doc, to_bson};
use serde_json::Value;
use types::{PermissionLevel, Update, UpdateTarget};
@@ -14,7 +12,7 @@ use crate::{
state::{State, StateExtension},
};
const NUM_UPDATES_PER_PAGE: usize = 10;
const NUM_UPDATES_PER_PAGE: usize = 20;
pub fn router() -> Router {
Router::new().route(
@@ -94,6 +92,10 @@ impl State {
.get_group_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
UpdateTarget::Command(id) => self
.get_command_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
}
}
}
@@ -159,7 +161,7 @@ impl State {
.await
.context("failed at query to get users builds")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let deployment_ids = self
.db
@@ -168,7 +170,7 @@ impl State {
.await
.context("failed at query to get users deployments")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let server_ids = self
.db
@@ -177,7 +179,7 @@ impl State {
.await
.context("failed at query to get users servers")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let procedure_ids = self
.db
@@ -186,7 +188,7 @@ impl State {
.await
.context("failed at query to get users procedures")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let filter = doc! {
"$or": [


@@ -4,7 +4,8 @@ use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::github::{GithubOauthClient, GithubOauthExtension};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{monitor_timestamp, CoreConfig, User};
use crate::{response, state::StateExtension};
@@ -68,10 +69,15 @@ async fn callback(
.context("failed to generate jwt")?,
None => {
let ts = monitor_timestamp();
let no_users_exist = state.db.users.find_one(None, None).await?.is_none();
let user = User {
username: github_user.login,
avatar: github_user.avatar_url.into(),
github_id: github_id.into(),
enabled: no_users_exist,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
created_at: ts.clone(),
updated_at: ts,
..Default::default()


@@ -4,7 +4,8 @@ use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::google::{GoogleOauthClient, GoogleOauthExtension};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{monitor_timestamp, CoreConfig, User};
use crate::{response, state::StateExtension};
@@ -85,6 +86,7 @@ async fn callback(
.context("failed to generate jwt")?,
None => {
let ts = monitor_timestamp();
let no_users_exist = state.db.users.find_one(None, None).await?.is_none();
let user = User {
username: google_user
.email
@@ -95,6 +97,10 @@ async fn callback(
.to_string(),
avatar: google_user.picture.into(),
google_id: google_id.into(),
enabled: no_users_exist,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
created_at: ts.clone(),
updated_at: ts,
..Default::default()


@@ -9,7 +9,7 @@ use axum::{body::Body, http::Request, Extension};
use axum_oauth2::random_string;
use hmac::{Hmac, Mac};
use jwt::{SignWithKey, VerifyWithKey};
use mungos::{Deserialize, Serialize};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use types::{CoreConfig, User};
@@ -20,10 +20,12 @@ pub type RequestUserExtension = Extension<Arc<RequestUser>>;
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
#[derive(Default)]
pub struct RequestUser {
pub id: String,
pub is_admin: bool,
pub create_server_permissions: bool,
pub create_build_permissions: bool,
}
#[derive(Serialize, Deserialize)]
@@ -109,6 +111,7 @@ impl JwtClient {
id: claims.id,
is_admin: user.admin,
create_server_permissions: user.create_server_permissions,
create_build_permissions: user.create_build_permissions,
};
Ok(user)
} else {


@@ -1,7 +1,7 @@
use anyhow::{anyhow, Context};
use axum::{extract::Json, routing::post, Extension, Router};
use helpers::handle_anyhow_error;
use mungos::doc;
use mungos::mongodb::bson::doc;
use types::{monitor_timestamp, User, UserCredentials};
use crate::state::StateExtension;
@@ -47,6 +47,7 @@ async fn create_user_handler(
enabled: no_users_exist,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
created_at: ts.clone(),
updated_at: ts,
..Default::default()


@@ -9,7 +9,7 @@ use axum::{
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Serialize};
use serde::{Deserialize, Serialize};
use types::CoreConfig;
use typeshare::typeshare;


@@ -2,7 +2,11 @@ use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, Update};
use mungos::{
mongodb::bson::{doc, Document},
Update,
};
use serde::Deserialize;
use types::unix_from_monitor_ts;
use crate::state::StateExtension;

core/src/cloud/aws.rs (new file)

@@ -0,0 +1,199 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use aws_sdk_ec2::{
config::Region,
types::{
BlockDeviceMapping, EbsBlockDevice, InstanceNetworkInterfaceSpecification,
InstanceStateChange, InstanceStateName, InstanceStatus, InstanceType, ResourceType, Tag,
TagSpecification,
},
Client,
};
use types::Server;
pub async fn create_ec2_client(
region: String,
access_key_id: &str,
secret_access_key: String,
) -> Client {
// There may be a better way to pass these keys to client
std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
let region = Region::new(region);
let config = aws_config::from_env().region(region).load().await;
let client = Client::new(&config);
client
}
pub struct Ec2Instance {
pub instance_id: String,
pub server: Server,
}
const POLL_RATE_SECS: u64 = 2;
const MAX_POLL_TRIES: usize = 30;
/// this will only resolve after the instance is running
/// should still poll the periphery agent after creation
pub async fn create_instance_with_ami(
client: &Client,
instance_name: &str,
ami_id: &str,
instance_type: &str,
subnet_id: &str,
security_group_ids: Vec<String>,
volume_size_gb: i32,
key_pair_name: &str,
assign_public_ip: bool,
) -> anyhow::Result<Ec2Instance> {
let instance_type = InstanceType::from(instance_type);
if let InstanceType::Unknown(t) = &instance_type {
return Err(anyhow!("unknown instance type {t:?}"));
}
let res = client
.run_instances()
.image_id(ami_id)
.instance_type(instance_type)
.block_device_mappings(
BlockDeviceMapping::builder()
.set_device_name(String::from("/dev/sda1").into())
.set_ebs(
EbsBlockDevice::builder()
.volume_size(volume_size_gb)
.build()
.into(),
)
.build(),
)
.network_interfaces(
InstanceNetworkInterfaceSpecification::builder()
.subnet_id(subnet_id)
.associate_public_ip_address(assign_public_ip)
.set_groups(security_group_ids.into())
.device_index(0)
.build(),
)
.key_name(key_pair_name)
.tag_specifications(
TagSpecification::builder()
.tags(Tag::builder().key("Name").value(instance_name).build())
.resource_type(ResourceType::Instance)
.build(),
)
.min_count(1)
.max_count(1)
.send()
.await
.context("failed to start builder ec2 instance")?;
let instance = res
.instances()
.ok_or(anyhow!("got None for created instances"))?
.get(0)
.ok_or(anyhow!("instances array is empty"))?;
let instance_id = instance
.instance_id()
.ok_or(anyhow!("instance does not have instance_id"))?
.to_string();
for _ in 0..MAX_POLL_TRIES {
let state_name = get_ec2_instance_state_name(&client, &instance_id).await?;
if state_name == Some(InstanceStateName::Running) {
let ip = if assign_public_ip {
get_ec2_instance_public_ip(client, &instance_id).await?
} else {
instance
.private_ip_address()
.ok_or(anyhow!("instance does not have private ip"))?
.to_string()
};
let server = Server {
address: format!("http://{ip}:8000"),
..Default::default()
};
return Ok(Ec2Instance {
instance_id,
server,
});
}
tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
}
Err(anyhow!("instance not running after polling"))
}
pub async fn get_ec2_instance_status(
client: &Client,
instance_id: &str,
) -> anyhow::Result<Option<InstanceStatus>> {
let status = client
.describe_instance_status()
.instance_ids(instance_id)
.send()
.await
.context("failed to get instance status from aws")?
.instance_statuses()
.ok_or(anyhow!("instance statuses is None"))?
.get(0)
.map(|s| s.to_owned());
Ok(status)
}
pub async fn get_ec2_instance_state_name(
client: &Client,
instance_id: &str,
) -> anyhow::Result<Option<InstanceStateName>> {
let status = get_ec2_instance_status(client, instance_id).await?;
if status.is_none() {
return Ok(None);
}
let state = status
.unwrap()
.instance_state()
.ok_or(anyhow!("instance state is None"))?
.name()
.ok_or(anyhow!("instance state name is None"))?
.to_owned();
Ok(Some(state))
}
pub async fn get_ec2_instance_public_ip(
client: &Client,
instance_id: &str,
) -> anyhow::Result<String> {
let ip = client
.describe_instances()
.instance_ids(instance_id)
.send()
.await
.context("failed to get instance status from aws")?
.reservations()
.ok_or(anyhow!("instance reservations is None"))?
.get(0)
.ok_or(anyhow!("instance reservations is empty"))?
.instances()
.ok_or(anyhow!("instances is None"))?
.get(0)
.ok_or(anyhow!("instances is empty"))?
.public_ip_address()
.ok_or(anyhow!("instance has no public ip"))?
.to_string();
Ok(ip)
}
pub async fn terminate_ec2_instance(
client: &Client,
instance_id: &str,
) -> anyhow::Result<InstanceStateChange> {
let res = client
.terminate_instances()
.instance_ids(instance_id)
.send()
.await
.context("failed to terminate instance from aws")?
.terminating_instances()
.ok_or(anyhow!("terminating instances is None"))?
.get(0)
.ok_or(anyhow!("terminating instances is empty"))?
.to_owned();
Ok(res)
}
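A hedged sketch of how these helpers might be wired together for a build server; the function itself is hypothetical, and every region, id, and name below is a placeholder:

```rust
// hypothetical driver for the helpers above; assumes it lives in this module.
async fn launch_and_teardown_builder() -> anyhow::Result<()> {
    let client = create_ec2_client(
        "us-east-1".to_string(),
        "<AWS_ACCESS_KEY_ID>",
        "<AWS_SECRET_ACCESS_KEY>".to_string(),
    )
    .await;
    let instance = create_instance_with_ami(
        &client,
        "monitor-builder",                        // instance_name
        "<ami_id>",
        "t3.medium",                              // instance_type
        "<subnet_id>",
        vec!["<security_group_id>".to_string()],
        20,                                       // volume_size_gb
        "<key_pair_name>",
        true,                                     // assign_public_ip
    )
    .await?;
    println!("builder periphery at {}", instance.server.address);
    // ... run the build, then tear the instance down:
    terminate_ec2_instance(&client, &instance.instance_id).await?;
    Ok(())
}
```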

core/src/cloud/mod.rs (new file)

@@ -0,0 +1 @@
pub mod aws;


@@ -1,9 +1,12 @@
use axum_extra::routing::SpaRouter;
use axum::Router;
use dotenv::dotenv;
use helpers::parse_config_file;
use mungos::Deserialize;
use merge_config_files::parse_config_file;
use serde::Deserialize;
use tower_http::services::{ServeDir, ServeFile};
use types::CoreConfig;
type SpaRouter = Router;
#[derive(Deserialize, Debug)]
struct Env {
#[serde(default = "default_config_path")]
@@ -12,12 +15,17 @@ struct Env {
pub frontend_path: String,
}
pub fn load() -> (CoreConfig, SpaRouter) {
pub fn load() -> (CoreConfig, SpaRouter, ServeFile) {
dotenv().ok();
let env: Env = envy::from_env().expect("failed to parse environment variables");
let config = parse_config_file(&env.config_path).expect("failed to parse config");
let spa_router = SpaRouter::new("/assets", env.frontend_path);
(config, spa_router)
let config = parse_config_file(env.config_path).expect("failed to parse config");
let spa_router = Router::new().nest_service(
"/assets",
ServeDir::new(&env.frontend_path)
.not_found_service(ServeFile::new(format!("{}/index.html", env.frontend_path))),
);
let index_html_service = ServeFile::new(format!("{}/index.html", env.frontend_path));
(config, spa_router, index_html_service)
}
pub fn default_config_path() -> String {


@@ -1,7 +1,10 @@
use std::str::FromStr;
use std::{collections::HashMap, str::FromStr};
use anyhow::anyhow;
use diff::{Diff, OptionDiff};
use helpers::to_monitor_name;
use tokio::sync::RwLock;
use types::{traits::Busy, Build};
#[macro_export]
macro_rules! response {
@@ -41,3 +44,60 @@ pub fn parse_comma_seperated_list<T: FromStr>(comma_sep_list: &str) -> anyhow::R
})
.collect()
}
pub fn get_image_name(build: &Build) -> String {
let name = to_monitor_name(&build.name);
match &build.docker_organization {
Some(org) => format!("{org}/{name}"),
None => match &build.docker_account {
Some(acct) => format!("{acct}/{name}"),
None => name,
},
}
}
pub fn empty_or_only_spaces(word: &str) -> bool {
if word.len() == 0 {
return true;
}
for char in word.chars() {
if char != ' ' {
return false;
}
}
return true;
}
#[derive(Default)]
pub struct Cache<T: Clone + Default> {
cache: RwLock<HashMap<String, T>>,
}
impl<T: Clone + Default> Cache<T> {
pub async fn get(&self, key: &str) -> Option<T> {
self.cache.read().await.get(key).map(|e| e.clone())
}
pub async fn get_or_default(&self, key: String) -> T {
let mut cache = self.cache.write().await;
cache.entry(key).or_default().clone()
}
pub async fn update_entry(&self, key: String, handler: impl Fn(&mut T) -> ()) {
let mut cache = self.cache.write().await;
handler(cache.entry(key).or_default());
}
pub async fn clear(&self) {
self.cache.write().await.clear();
}
}
impl<T: Clone + Default + Busy> Cache<T> {
pub async fn busy(&self, id: &str) -> bool {
match self.get(id).await {
Some(state) => state.busy(),
None => false,
}
}
}
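A minimal usage sketch of the `Cache` type above (assumes the type is in scope and a tokio runtime is available):

```rust
#[tokio::main]
async fn main() {
    let cache: Cache<u64> = Default::default();
    // get_or_default inserts a Default value on first access
    assert_eq!(cache.get_or_default("a".to_string()).await, 0);
    // update_entry mutates the entry in place under the write lock
    cache.update_entry("a".to_string(), |count| *count += 1).await;
    assert_eq!(cache.get("a").await, Some(1));
    assert_eq!(cache.get("missing").await, None);
}
```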


@@ -2,43 +2,62 @@
use ::helpers::get_socket_addr;
use auth::JwtClient;
use axum::Router;
use axum::{http::StatusCode, Router};
use state::State;
use termination_signal::tokio::immediate_term_handle;
use tower_http::cors::{Any, CorsLayer};
mod actions;
mod api;
mod auth;
mod cloud;
mod config;
mod helpers;
mod monitoring;
mod state;
mod ws;
type ResponseResult<T> = Result<T, (StatusCode, String)>;
#[tokio::main]
async fn main() {
let (config, spa_router) = config::load();
async fn main() -> anyhow::Result<()> {
println!("version: v{}", env!("CARGO_PKG_VERSION"));
println!("starting monitor core on port {}...", config.port);
let term_signal = immediate_term_handle()?;
let app = Router::new()
.merge(spa_router)
.nest("/api", api::router())
.nest("/auth", auth::router(&config))
.nest("/ws", ws::router())
.layer(JwtClient::extension(&config))
.layer(State::extension(config.clone()).await)
.layer(
CorsLayer::new()
.allow_origin(Any)
.allow_methods(Any)
.allow_headers(Any),
);
let app = tokio::spawn(async move {
let (config, spa_router, index_html_service) = config::load();
println!("started monitor core on port {}", config.port);
println!("starting monitor core on port {}...", config.port);
axum::Server::bind(&get_socket_addr(config.port))
.serve(app.into_make_service())
.await
.expect("monitor core axum server crashed");
let app = Router::new()
.nest("/api", api::router())
.nest("/auth", auth::router(&config))
.nest("/ws", ws::router())
.layer(JwtClient::extension(&config))
.layer(State::extension(config.clone()).await)
.merge(spa_router)
.fallback_service(index_html_service)
.layer(
CorsLayer::new()
.allow_origin(Any)
.allow_methods(Any)
.allow_headers(Any),
);
println!("started monitor core on port {}", config.port);
axum::Server::bind(&get_socket_addr(config.port))
.serve(app.into_make_service())
.await?;
anyhow::Ok(())
});
tokio::select! {
res = app => return res?,
_ = term_signal => {},
}
Ok(())
}


@@ -4,13 +4,13 @@ use async_timing_util::{
unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS, ONE_HOUR_MS,
};
use futures_util::future::join_all;
use mungos::doc;
use mungos::mongodb::bson::doc;
use slack::types::Block;
use types::{Server, SystemStats, SystemStatsQuery, SystemStatsRecord};
use crate::state::State;
#[derive(Default)]
#[derive(Default, Clone)]
pub struct AlertStatus {
cpu_alert: bool,
mem_alert: bool,
@@ -100,16 +100,16 @@ impl State {
}
async fn check_cpu(&self, server: &Server, stats: &SystemStats) {
let server_alert_status = self.server_alert_status.lock().await;
if self.slack.is_none()
|| server_alert_status
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| s.cpu_alert)
.unwrap_or(false)
{
return;
}
drop(server_alert_status);
if stats.cpu_perc > server.cpu_alert {
let region = if let Some(region) = &server.region {
format!(" ({region})")
@@ -171,24 +171,26 @@ impl State {
server.name, stats.cpu_perc
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.cpu_alert = true;
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.cpu_alert = true;
})
.await;
}
}
}
async fn check_mem(&self, server: &Server, stats: &SystemStats) {
let server_alert_status = self.server_alert_status.lock().await;
if self.slack.is_none()
|| server_alert_status
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| s.mem_alert)
.unwrap_or(false)
{
return;
}
drop(server_alert_status);
let usage_perc = (stats.mem_used_gb / stats.mem_total_gb) * 100.0;
if usage_perc > server.mem_alert {
let region = if let Some(region) = &server.region {
@@ -254,25 +256,27 @@ impl State {
server.name, stats.mem_used_gb, stats.mem_total_gb,
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.mem_alert = true;
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.mem_alert = true;
})
.await;
}
}
}
async fn check_disk(&self, server: &Server, stats: &SystemStats) {
for disk in &stats.disk.disks {
let server_alert_status = self.server_alert_status.lock().await;
if self.slack.is_none()
|| server_alert_status
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| *s.disk_alert.get(&disk.mount).unwrap_or(&false))
.unwrap_or(false)
{
return;
}
drop(server_alert_status);
let usage_perc = (disk.used_gb / disk.total_gb) * 100.0;
if usage_perc > server.disk_alert {
let region = if let Some(region) = &server.region {
@@ -315,25 +319,27 @@ impl State {
server.name, stats.disk.used_gb, stats.disk.total_gb,
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.disk_alert.insert(disk.mount.clone(), true);
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.disk_alert.insert(disk.mount.clone(), true);
})
.await;
}
}
}
}
async fn check_components(&self, server: &Server, stats: &SystemStats) {
let lock = self.server_alert_status.lock().await;
if self.slack.is_none()
|| lock
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| s.component_alert)
.unwrap_or(false)
{
return;
}
drop(lock);
let info = stats
.components
.iter()
@@ -393,9 +399,11 @@ impl State {
info.join(" | "),
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.component_alert = true;
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.component_alert = true;
})
.await;
}
}
}
@@ -412,8 +420,12 @@ impl State {
);
continue;
}
let servers = servers.unwrap();
if servers.is_empty() {
continue;
}
let mut blocks = vec![Block::header("INFO | daily update"), Block::divider()];
for (server, stats) in servers.unwrap() {
for (server, stats) in servers {
let region = if let Some(region) = &server.region {
format!(" | {region}")
} else {
@@ -483,7 +495,7 @@ impl State {
);
}
{
self.server_alert_status.lock().await.clear();
self.server_alert_status.clear().await;
}
}
}


@@ -1,30 +1,31 @@
use std::{collections::HashMap, sync::Arc};
use std::sync::Arc;
use async_timing_util::{unix_timestamp_ms, wait_until_timelength, Timelength, ONE_HOUR_MS};
use axum::Extension;
use db::DbClient;
use futures_util::future::join_all;
use mungos::doc;
use mungos::mongodb::bson::doc;
use periphery::PeripheryClient;
use tokio::sync::Mutex;
use types::{BuildActionState, CoreConfig, DeploymentActionState, ServerActionState};
use types::{
BuildActionState, CommandActionState, CoreConfig, DeploymentActionState, ServerActionState,
};
use crate::{monitoring::AlertStatus, ws::update::UpdateWsChannel};
use crate::{helpers::Cache, monitoring::AlertStatus, ws::update::UpdateWsChannel};
pub type StateExtension = Extension<Arc<State>>;
pub type ActionStateMap<T> = Mutex<HashMap<String, T>>;
// pub type Cache<T> = RwLock<HashMap<String, T>>;
pub struct State {
pub config: CoreConfig,
pub db: DbClient,
pub update: UpdateWsChannel,
pub periphery: PeripheryClient,
pub slack: Option<slack::Client>,
pub build_action_states: ActionStateMap<BuildActionState>,
pub deployment_action_states: ActionStateMap<DeploymentActionState>,
pub server_action_states: ActionStateMap<ServerActionState>,
pub server_alert_status: Mutex<HashMap<String, AlertStatus>>, // (server_id, AlertStatus)
pub build_action_states: Cache<BuildActionState>,
pub deployment_action_states: Cache<DeploymentActionState>,
pub server_action_states: Cache<ServerActionState>,
pub command_action_states: Cache<CommandActionState>,
pub server_alert_status: Cache<AlertStatus>, // (server_id, AlertStatus)
}
impl State {
@@ -32,12 +33,13 @@ impl State {
let state = State {
db: DbClient::new(config.mongo.clone()).await,
slack: config.slack_url.clone().map(|url| slack::Client::new(&url)),
periphery: PeripheryClient::new(config.passkey.clone()),
config,
update: UpdateWsChannel::new(),
periphery: PeripheryClient::default(),
build_action_states: Default::default(),
deployment_action_states: Default::default(),
server_action_states: Default::default(),
command_action_states: Default::default(),
server_alert_status: Default::default(),
};
let state = Arc::new(state);
@@ -78,7 +80,6 @@ impl State {
}
let futures = servers.unwrap().into_iter().map(|server| async move {
let _ = self.periphery.image_prune(&server).await;
let _ = self.periphery.container_prune(&server).await;
});
join_all(futures).await;
}


@@ -2,18 +2,17 @@ use std::sync::Arc;
use axum::{
extract::{ws::Message as AxumMessage, Path, Query, WebSocketUpgrade},
http::StatusCode,
response::IntoResponse,
};
use futures_util::{SinkExt, StreamExt};
use helpers::handle_anyhow_error;
use mungos::Deserialize;
use serde::Deserialize;
use tokio::select;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::tungstenite::Message as TungsteniteMessage;
use tokio_util::sync::CancellationToken;
use types::{traits::Permissioned, PermissionLevel, SystemStatsQuery};
use crate::{auth::JwtExtension, state::StateExtension};
use crate::{auth::JwtExtension, state::StateExtension, ResponseResult};
#[derive(Deserialize)]
pub struct ServerId {
@@ -26,7 +25,7 @@ pub async fn ws_handler(
path: Path<ServerId>,
query: Query<SystemStatsQuery>,
ws: WebSocketUpgrade,
) -> Result<impl IntoResponse, (StatusCode, String)> {
) -> ResponseResult<impl IntoResponse> {
let server = state
.db
.get_server(&path.id)
@@ -68,7 +67,7 @@ pub async fn ws_handler(
},
stats = stats_recv.next() => stats,
};
if let Some(Ok(Message::Text(msg))) = stats {
if let Some(Ok(TungsteniteMessage::Text(msg))) = stats {
let _ = ws_sender.send(AxumMessage::Text(msg)).await;
} else {
let _ = stats_recv.close(None).await;


@@ -140,6 +140,12 @@ async fn user_can_see_update(
.await?;
(permissions, "group")
}
UpdateTarget::Command(command_id) => {
let permissions = db_client
.get_user_permission_on_command(user_id, command_id)
.await?;
(permissions, "command")
}
UpdateTarget::System => {
return Err(anyhow!("user not admin, can't recieve system updates"))
}

docsite/.gitignore (new file)

@@ -0,0 +1,20 @@
# Dependencies
/node_modules
# Production
/build
# Generated files
.docusaurus
.cache-loader
# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*

docsite/README.md (new file)

@@ -0,0 +1,41 @@
# Website
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
### Installation
```
$ yarn
```
### Local Development
```
$ yarn start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
### Build
```
$ yarn build
```
This command generates static content into the `build` directory and can be served using any static contents hosting service.
### Deployment
Using SSH:
```
$ USE_SSH=true yarn deploy
```
Not using SSH:
```
$ GIT_USER=<Your GitHub username> yarn deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.

docsite/babel.config.js (new file)

@@ -0,0 +1,3 @@
module.exports = {
presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};


@@ -0,0 +1,42 @@
import Divider from '@site/src/components/Divider';
# api secrets
these routes are used to manage api secrets.
| name | route |
| ---- | ------ |
| [create api secret](/api/api-secrets#create-api-secret) | `POST /api/secret/create` |
| [delete api secret](/api/api-secrets#delete-api-secret) | `DELETE /api/secret/delete/<secret-name>` |
```mdx-code-block
<Divider />
```
## create api secret
`POST /api/secret/create`
### request body
```json
{
name: string, // name the secret. must be unique among the user's secrets
expires?: rfc3339_timestamp, // optional expiry time. if none, the secret will not expire.
}
```
### response body
```json
string // the body will be the secret hash used to log in.
```
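as a hedged example, creating a secret from rust with `reqwest` (the host and JWT below are placeholders, not monitor defaults):

```rust
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let secret_hash = reqwest::Client::new()
        .post("https://<monitor-host>/api/secret/create")
        .bearer_auth("<JWT>")
        .json(&serde_json::json!({ "name": "ci-secret" }))
        .send()
        .await?
        .text()
        .await?;
    println!("{secret_hash}"); // the secret hash used to log in
    Ok(())
}
```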
```mdx-code-block
<Divider />
```
## delete api secret
`DELETE /api/secret/delete/<secret-name>`
### response
```json
HTTP 200 OK
```


@@ -0,0 +1,8 @@
# authenticating requests
monitor uses the `JSON Web Token (JWT)` standard to authenticate all requests to subroutes under `/api`.
users can acquire a `JWT` using a [login method](/api/login).
to authenticate requests, pass the `JWT` under the `Authorization` header:
`Authorization: Bearer <JWT>`
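for example, a hypothetical authenticated request from rust with `reqwest` (host and token are placeholders):

```rust
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let deployments = reqwest::Client::new()
        .get("https://<monitor-host>/api/deployment/list")
        .header("Authorization", "Bearer <JWT>")
        .send()
        .await?
        .text()
        .await?;
    println!("{deployments}");
    Ok(())
}
```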

docsite/docs/api/build.mdx (new file)

@@ -0,0 +1,224 @@
import Divider from '@site/src/components/Divider';
# build
these routes relate to interacting with monitor `builds`
| name | route |
| ---- | ------ |
| [list builds](/api/build#list-builds) | `GET /api/build/list` |
| [get build](/api/build#get-build) | `GET /api/build/<build_id>` |
| [get build action state](/api/build#get-build-action-state) | `GET /api/build/<build_id>/action_state` |
| [get build versions](/api/build#get-build-versions) | `GET /api/build/<build_id>/versions` |
| [create build](/api/build#create-build) | `POST /api/build/create` |
| [create full build](/api/build#create-full-build) | `POST /api/build/create_full` |
| [copy build](/api/build#copy-build) | `POST /api/build/<build_id>/copy` |
| [delete build](/api/build#delete-build) | `DELETE /api/build/<build_id>/delete` |
| [update build](/api/build#update-build) | `PATCH /api/build/update` |
| [build](/api/build#build-action) | `POST /api/build/<build_id>/build` |
| [get aws builder defaults](/api/build#get-aws-builder-defaults) | `GET /api/build/aws_builder_defaults` |
| [get allowed docker organizations](/api/build#get-allowed-docker-organizations) | `GET /api/build/docker_organizations` |
```mdx-code-block
<Divider />
```
## list builds
`GET /api/build/list`
this method will return an array of builds the requesting user has a minimum of `Read` permissions on.
### response body
Array<[Build](/api/types#build)>
```mdx-code-block
<Divider />
```
## get build
`GET /api/build/<build_id>`
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## get build action state
`GET /api/build/<build_id>/action_state`
this method returns the action state for the build, e.g. whether the build is currently `building`.
### response body
```json
{
building: boolean,
updating: boolean,
}
```
```mdx-code-block
<Divider />
```
## get build versions
`GET /api/build/<build_id>/versions`
paginated route for fetching the most recent available versions of this build.
### query params
```json
page=number // optional, default is 0. pagination starting at page 0.
major=number // optional. filter by major version number
minor=number // optional. filter by minor version number
patch=number // optional. filter by patch version number
```
### response body
```json
[
{
ts: rfc3339_timestamp,
version: {
major: number,
minor: number,
patch: number,
}
},
...
]
```
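for example, a hypothetical request for page 1 of versions filtered to major version 0 (host, build id, and JWT are placeholders):

```rust
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let versions = reqwest::Client::new()
        .get("https://<monitor-host>/api/build/<build_id>/versions")
        .query(&[("page", "1"), ("major", "0")])
        .bearer_auth("<JWT>")
        .send()
        .await?
        .text()
        .await?;
    println!("{versions}");
    Ok(())
}
```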
```mdx-code-block
<Divider />
```
## create build
`POST /api/build/create`
### request body
```json
{
name: string,
}
```
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## create full build
`POST /api/build/create_full`
### request body
[Build](/api/types#build)
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## copy build
`POST /api/build/<build_id>/copy`
this method will create a copy of the build with a new _id and name,
with all the same configuration as the target build.
### request body
```json
{
name: string, // the new name
}
```
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## delete build
`DELETE /api/build/<build_id>/delete`
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## update build
`PATCH /api/build/update`
### request body
[Build](/api/types#build)
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## build (action)
`POST /api/build/<build_id>/build`
### response body
[Update](/api/types#update)
:::note
this update will include the `version` field.
:::
```mdx-code-block
<Divider />
```
## get aws builder defaults
`GET /api/build/aws_builder_defaults`
### response body
```json
{
default_ami_name: string,
default_subnet_id: string,
default_key_pair_name: string,
default_region: string,
default_volume_gb: number,
default_instance_type: string,
default_security_group_ids: string[],
default_assign_public_ip: boolean,
available_ami_accounts: [
{
ami_id: string,
github: string[],
docker: string[],
secrets: string[],
}
],
}
```
```mdx-code-block
<Divider />
```
## get allowed docker organizations
`GET /api/build/docker_organizations`
### response body
```json
string[] // the names of the allowed docker organizations
```

View File

@@ -0,0 +1,344 @@
import Divider from '@site/src/components/Divider';
# deployment
these routes relate to interacting with monitor `deployments`
| name | route |
| ---- | ------ |
| [list deployments](/api/deployment#list-deployments) | `GET /api/deployment/list` |
| [get deployment](/api/deployment#get-deployment) | `GET /api/deployment/<deployment_id>` |
| [get deployment action state](/api/deployment#get-deployment-action-state) | `GET /api/deployment/<deployment_id>/action_state` |
| [get deployment container log](/api/deployment#get-deployment-container-log) | `GET /api/deployment/<deployment_id>/log` |
| [get deployment container stats](/api/deployment#get-deployment-container-stats) | `GET /api/deployment/<deployment_id>/stats` |
| [get deployment deployed version](/api/deployment#get-deployment-deployed-version) | `GET /api/deployment/<deployment_id>/deployed_version` |
| [create deployment](/api/deployment#create-deployment) | `POST /api/deployment/create` |
| [create full deployment](/api/deployment#create-full-deployment) | `POST /api/deployment/create_full` |
| [copy deployment](/api/deployment#copy-deployment) | `POST /api/deployment/<deployment_id>/copy` |
| [delete deployment](/api/deployment#delete-deployment) | `DELETE /api/deployment/<deployment_id>/delete` |
| [update deployment](/api/deployment#update-deployment) | `PATCH /api/deployment/update` |
| [rename deployment](/api/deployment#rename-deployment) | `PATCH /api/deployment/<deployment_id>/rename` |
| [reclone deployment](/api/deployment#reclone-deployment) | `POST /api/deployment/<deployment_id>/reclone` |
| [pull deployment](/api/deployment#pull-deployment) | `POST /api/deployment/<deployment_id>/pull` |
| [deploy container](/api/deployment#deploy-container) | `POST /api/deployment/<deployment_id>/deploy` |
| [start container](/api/deployment#start-container) | `POST /api/deployment/<deployment_id>/start_container` |
| [stop container](/api/deployment#stop-container) | `POST /api/deployment/<deployment_id>/stop_container` |
| [remove container](/api/deployment#remove-container) | `POST /api/deployment/<deployment_id>/remove_container` |
```mdx-code-block
<Divider />
```
## list deployments
`GET /api/deployment/list`
this method will return an array of deployments with container state that the requesting user has a minimum of `Read` permissions on.
### response body
```json
[
{
deployment: Deployment,
state: DockerContainerState,
container?: {
name: string,
id: string,
image: string,
state: DockerContainerState,
status?: string,
}
},
...
]
```
```mdx-code-block
<Divider />
```
## get deployment
`GET /api/deployment/<deployment_id>`
this method will return the deployment with container state that
the requesting user has a minimum of `Read` permissions on.
it will return `500: Internal Server Error` if the user does not have the required permissions.
### response body
```json
{
deployment: Deployment,
state: DockerContainerState,
container?: {
name: string,
id: string,
image: string,
state: DockerContainerState,
status?: string,
}
}
```
```mdx-code-block
<Divider />
```
## get deployment action state
`GET /api/deployment/<deployment_id>/action_state`
this method returns the action state for the deployment, e.g. whether the deployment is currently `deploying`.
### response body
```json
{
deploying: boolean,
stopping: boolean,
starting: boolean,
removing: boolean,
pulling: boolean,
recloning: boolean,
updating: boolean,
renaming: boolean,
}
```
```mdx-code-block
<Divider />
```
## get deployment container log
`GET /api/deployment/<deployment_id>/log`
this method is used to get the log of the container associated with the deployment.
### query params
```json
{
tail: number // number of log lines to fetch. this is passed to the --tail flag of the docker logs command
}
```
### response body
```json
{
stdout: string,
stderr: string,
}
```
```mdx-code-block
<Divider />
```
## get deployment container stats
`GET /api/deployment/<deployment_id>/stats`
this method returns the results of running `docker stats <container_name>`
for the container associated with the deployment.
### response body
```json
{
name: string,
cpu_perc: string,
mem_perc: string,
mem_usage: string,
net_io: string,
block_io: string,
pids: string,
}
```
```mdx-code-block
<Divider />
```
## get deployment deployed version
`GET /api/deployment/<deployment_id>/deployed_version`
this method is used to get the image version of the container associated with the deployment, if it exists.
otherwise, it will return the version specified in the deployment config.
### response body
```json
string // the deployed version like '0.2.4'
```
```mdx-code-block
<Divider />
```
## create deployment
`POST /api/deployment/create`
this method is used to create a new deployment on a particular server.
it will return the created deployment.
:::note
users must be **admin** or have `update` permissions on the server specified by the `server_id`
in the request body in order for this request to succeed.
:::
### request body
```json
{
name: string,
server_id: string,
}
```
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## create full deployment
`POST /api/deployment/create_full`
this method is used to create a new deployment on a particular server, already initialized with config.
it will return the created deployment.
### request body
[Deployment](/api/types#deployment)
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## copy deployment
`POST /api/deployment/<deployment_id>/copy`
this method will create a copy of the deployment with a new _id and name,
with all the same configuration as the target deployment.
it can be used to move the deployment to another server.
### request body
```json
{
name: string,
server_id: string,
}
```
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## delete deployment
`DELETE /api/deployment/<deployment_id>/delete`
this method will delete the deployment. if a container is associated with the deployment, it will be destroyed.
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## update deployment
`PATCH /api/deployment/update`
### request body
[Deployment](/api/types#deployment)
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## rename deployment
`PATCH /api/deployment/<deployment_id>/rename`
### request body
```json
{
new_name: string,
}
```
```mdx-code-block
<Divider />
```
## reclone deployment
`POST /api/deployment/<deployment_id>/reclone`
if the deployment has a repo attached, this will reclone the repo,
including the on-clone and on-pull actions.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## pull deployment
`POST /api/deployment/<deployment_id>/pull`
if the deployment has a repo attached, this will `git pull` in the repo,
including the on-pull action.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## deploy container
`POST /api/deployment/<deployment_id>/deploy`
this will deploy the container corresponding to the deployment's configuration.
if the container already exists, it will destroy it first.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## start container
`POST /api/deployment/<deployment_id>/start_container`
this will run `docker start <container_name>` for the container
corresponding to the deployment.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## stop container
`POST /api/deployment/<deployment_id>/stop_container`
this will run `docker stop <container_name>` for the container
corresponding to the deployment.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## remove container
`POST /api/deployment/<deployment_id>/remove_container`
this will run `docker stop <container_name> && docker container rm <container_name>`
for the container corresponding to the deployment.
### response body
[Update](/api/types#update)


@@ -0,0 +1,11 @@
---
slug: /api
---
this section documents the REST and websocket api
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```

103 docsite/docs/api/login.mdx Normal file

@@ -0,0 +1,103 @@
import Divider from '@site/src/components/Divider';
# login
monitor supports local login (username and password), OAuth2 login (github and google),
and secret login (username and API secret key).
each method must be explicitly enabled in your monitor core config;
otherwise the api won't be available.
:::note
in order to log in to an OAuth2 user's account programmatically,
you must [create an api secret](/api/api-secrets#create-api-secret) and log in using [/auth/secret/login](/api/login#login-using-api-secret)
:::
| name | route |
| ---- | ------ |
| [get login options](/api/login#get-login-options) | `GET /auth/options` |
| [create local user account](/api/login#create-local-user-account) | `POST /auth/local/create_user` |
| [login local user account](/api/login#login-local-user-account) | `POST /auth/local/login` |
| [login using api secret](/api/login#login-using-api-secret) | `POST /auth/secret/login` |
```mdx-code-block
<Divider />
```
## get login options
`GET /auth/options`
this method is used to obtain the login options for monitor core.
### response body
```json
{
local: boolean,
github: boolean,
google: boolean,
}
```
```mdx-code-block
<Divider />
```
## create local user account
`POST /auth/local/create_user`
this method will create a new local auth account with the provided **username** and **password**,
and return a `JWT` for the user to authenticate with.
### request body
```json
{
username: string,
password: string,
}
```
### response body
`<JWT token as string>`
:::caution
a user created with this method is, by default, `disabled`. a monitor admin must enable their account before they can access the API.
:::
```mdx-code-block
<Divider />
```
## login local user account
`POST /auth/local/login`
this method will authenticate a local user's credentials and return a JWT if login is successful.
### request body
```json
{
username: string,
password: string,
}
```
### response body
`<JWT token as string>`
```mdx-code-block
<Divider />
```
## login using api secret
`POST /auth/secret/login`
this method will authenticate a user's account of any kind using an api secret generated via [/api/secret/create](/api/api-secrets#create-api-secret).
### request body
```json
{
username: string,
secret: string,
}
```
### response body
`<JWT token as string>`
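putting the pieces together, a sketch of the programmatic login flow: exchange the username and api secret for a jwt, then use it on core api routes. the core address and the bearer-style header are assumptions; see [authenticating requests](/api/authenticating-requests).
```typescript
// minimal sketch, not the official client.
const base = "http://localhost:9000"; // hypothetical core address

async function secretLogin(username: string, secret: string): Promise<string> {
  const res = await fetch(`${base}/auth/secret/login`, {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify({ username, secret }),
  });
  if (!res.ok) throw new Error(`login failed: ${res.status}`);
  return res.text(); // the jwt as a string
}

// usage: authenticate once, then attach the jwt to core api requests
const jwt = await secretLogin("my-user", "<api secret>");
const servers = await fetch(`${base}/api/server/list`, {
  headers: { authorization: `Bearer ${jwt}` },
}).then((res) => res.json());
```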


@@ -0,0 +1,90 @@
import Divider from '@site/src/components/Divider';
# permissions
these routes relate to updating user permissions
:::note
these routes can only be called by **admin** users
:::
| name | route |
| ---- | ------ |
| [update user permissions on target](/api/permissions#update-user-permissions-on-target) | `POST /api/permissions/update` |
| [modify user enabled](/api/permissions#modify-user-enabled) | `POST /api/permissions/modify_enabled` |
| [modify user create server permissions](/api/permissions#modify-user-create-server-permissions) | `POST /api/permissions/modify_create_server` |
| [modify user create build permissions](/api/permissions#modify-user-create-build-permissions) | `POST /api/permissions/modify_create_build` |
```mdx-code-block
<Divider />
```
## update user permissions on target
`POST /api/permissions/update`
### request body
```json
{
user_id: string, // the target users id
permission: "none" | "read" | "execute" | "update",
target_type: "server" | "deployment" | "build" | "procedure" | "group",
target_id: string, // the target resources id
}
```
### response body
[Update](/api/types#update)
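a sketch of granting a user `execute` on a deployment with this route. the `base` address, the `jwt`, and the bearer header are assumptions; the union types mirror the request body above.
```typescript
// minimal sketch. these unions mirror the documented request body.
type PermissionLevel = "none" | "read" | "execute" | "update";
type TargetType = "server" | "deployment" | "build" | "procedure" | "group";

const base = "http://localhost:9000"; // hypothetical core address
const jwt = "<admin jwt>"; // these routes require an admin user

async function updatePermissions(
  user_id: string,
  permission: PermissionLevel,
  target_type: TargetType,
  target_id: string
) {
  const res = await fetch(`${base}/api/permissions/update`, {
    method: "POST",
    headers: {
      "content-type": "application/json",
      authorization: `Bearer ${jwt}`,
    },
    body: JSON.stringify({ user_id, permission, target_type, target_id }),
  });
  return res.json(); // an Update describing the change
}
```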
```mdx-code-block
<Divider />
```
## modify user enabled
`POST /api/permissions/modify_enabled`
### request body
```json
{
user_id: string, // the target users id
enabled: boolean,
}
```
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## modify user create server permissions
`POST /api/permissions/modify_create_server`
### request body
```json
{
user_id: string, // the target users id
create_server_permissions: boolean,
}
```
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## modify user create build permissions
`POST /api/permissions/modify_create_build`
### request body
```json
{
user_id: string, // the target users id
create_build_permissions: boolean,
}
```
### response body
[Update](/api/types#update)


@@ -0,0 +1,10 @@
import Divider from '@site/src/components/Divider';
# procedure
these routes relate to interacting with monitor `procedures`
```mdx-code-block
<Divider />
```

473 docsite/docs/api/server.mdx Normal file

@@ -0,0 +1,473 @@
import Divider from '@site/src/components/Divider';
# server
these routes relate to interacting with monitor `servers`
| name | route |
| ---- | ------ |
| [list servers](/api/server#list-servers) | `GET /api/server/list` |
| [get server](/api/server#get-server) | `GET /api/server/<server_id>` |
| [get server action state](/api/server#get-server-action-state) | `GET /api/server/<server_id>/action_state` |
| [get server github accounts](/api/server#get-server-github-accounts) | `GET /api/server/<server_id>/github_accounts` |
| [get server docker accounts](/api/server#get-server-docker-accounts) | `GET /api/server/<server_id>/docker_accounts` |
| [get server available secrets](/api/server#get-server-available-secrets) | `GET /api/server/<server_id>/secrets` |
| [create server](/api/server#create-server) | `POST /api/server/create` |
| [create full server](/api/server#create-full-server) | `POST /api/server/create_full` |
| [delete server](/api/server#delete-server) | `DELETE /api/server/<server_id>/delete` |
| [update server](/api/server#update-server) | `PATCH /api/server/update` |
| [get server periphery version](/api/server#get-server-periphery-version) | `GET /api/server/<server_id>/version` |
| [get server system information](/api/server#get-server-system-information) | `GET /api/server/<server_id>/system_information` |
| [get server stats](/api/server#get-server-stats) | `GET /api/server/<server_id>/stats` |
| [get server stats history](/api/server#get-server-stats-history) | `GET /api/server/<server_id>/stats/history` |
| [get server stats at time](/api/server#get-server-stats-at-time) | `GET /api/server/<server_id>/stats/at_ts` |
| [get docker networks](/api/server#get-docker-networks) | `GET /api/server/<server_id>/networks` |
| [prune docker networks](/api/server#prune-docker-networks) | `POST /api/server/<server_id>/networks/prune` |
| [get docker images](/api/server#get-docker-images) | `GET /api/server/<server_id>/images` |
| [prune docker images](/api/server#prune-docker-images) | `POST /api/server/<server_id>/images/prune` |
| [get docker containers](/api/server#get-docker-containers) | `GET /api/server/<server_id>/containers` |
| [prune docker containers](/api/server#prune-docker-containers) | `POST /api/server/<server_id>/containers/prune` |
```mdx-code-block
<Divider />
```
## list servers
`GET /api/server/list`
this method will return an array of the servers that the requesting user
has a minimum of `Read` permissions on, along with each server's status.
### response body
```json
[
{
server: Server,
status: ServerStatus
},
...
]
```
```mdx-code-block
<Divider />
```
## get server
`GET /api/server/<server_id>`
this method will return the server, along with its status, provided that
the requesting user has a minimum of `Read` permissions on it.
it will return `500: Internal Server Error` if the user does not have the required permissions.
### response body
```json
{
server: Server,
status: ServerStatus
}
```
```mdx-code-block
<Divider />
```
## get server action state
`GET /api/server/<server_id>/action_state`
this method returns the action state for the server, eg. whether the server is currently `pruning_images`.
### response body
```json
{
pruning_networks: boolean,
pruning_containers: boolean,
pruning_images: boolean,
}
```
```mdx-code-block
<Divider />
```
## get server github accounts
`GET /api/server/<server_id>/github_accounts`
this method returns a list of all the github account usernames that are available on the server,
as defined in the server's periphery config under [github_accounts].
### response body
```json
["<github_username_1>", "<github_username_2>", ...]
```
```mdx-code-block
<Divider />
```
## get server docker accounts
`GET /api/server/<server_id>/docker_accounts`
this method returns a list of all the docker account usernames that are available on the server,
as defined in the server's periphery config under [docker_accounts].
### response body
```json
["<docker_username_1>", "<docker_username_2>", ...]
```
```mdx-code-block
<Divider />
```
## get server available secrets
`GET /api/server/<server_id>/secrets`
this method returns a list of all the secret keys that are available on the server,
as defined in the server's periphery config under [secrets].
### response body
```json
["<secret_key_1>", "<secret_key_2>", ...]
```
```mdx-code-block
<Divider />
```
## create server
`POST /api/server/create`
### request body
```json
{
name: string,
address: string, // eg. http://12.34.56.78:8000
}
```
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## create full server
`POST /api/server/create_full`
this method is used to create a new server, already initialized with config.
it will return the created server.
### request body
[Server](/api/types#server)
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## delete server
`DELETE /api/server/<server_id>/delete`
this method will delete the server, along with all deployments attached to the server.
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## update server
`PATCH /api/server/update`
this method is used to update a server's configuration.
### request body
[Server](/api/types#server)
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## get server periphery version
`GET /api/server/<server_id>/version`
this method is used to get the version of the periphery binary running on the server.
### response body
```json
string // the periphery version
```
```mdx-code-block
<Divider />
```
## get server system information
`GET /api/server/<server_id>/system_information`
this method gets some information about the host system running the periphery binary.
### response body
```json
{
name?: string, // the name of the system
os?: string, // the os the system is running
kernel?: string, // the version of the kernel
core_count?: number, // number of cores in the cpu
host_name?: string, // host name of the system
cpu_brand: string, // information on the cpu of the system
}
```
```mdx-code-block
<Divider />
```
## get server stats
`GET /api/server/<server_id>/stats`
this method retrieves current system stats of the server.
### query params
```json
cpus=boolean // optional. if true, response will include information about each core individually
disks=boolean // optional. if true, response will include breakdown of disk usage by mount point
networks=boolean // optional. if true, response will include info on network usage
components=boolean // optional. if true, response will include component temperature
processes=boolean // optional. if true, response will include all system processes running on host and their resource usage
```
### response body
```json
{
system_load: number,
cpu_perc: number,
cpu_freq_mhz: number,
mem_used_gb: number,
mem_total_gb: number,
disk: {},
cpus: [],
networks: [],
components: [],
processes: [],
polling_rate: Timelength,
refresh_ts: number,
refresh_list_ts: number,
}
```
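a sketch of assembling the optional query flags for this route. the `base` address, the `jwt`, and the bearer header are assumptions:
```typescript
// minimal sketch, not the official client.
const base = "http://localhost:9000"; // hypothetical core address
const jwt = "<jwt from a login route>"; // see /api/login

// the flags mirror the documented query params
interface StatsFlags {
  cpus?: boolean;
  disks?: boolean;
  networks?: boolean;
  components?: boolean;
  processes?: boolean;
}

async function getServerStats(server_id: string, flags: StatsFlags = {}) {
  const params = new URLSearchParams();
  for (const [key, enabled] of Object.entries(flags)) {
    if (enabled) params.set(key, "true");
  }
  const res = await fetch(
    `${base}/api/server/${server_id}/stats?${params.toString()}`,
    { headers: { authorization: `Bearer ${jwt}` } }
  );
  return res.json();
}

// usage: include per-core cpu info and the per-mount disk breakdown
// const stats = await getServerStats("<server_id>", { cpus: true, disks: true });
```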
```mdx-code-block
<Divider />
```
## get server stats history
`GET /api/server/<server_id>/stats/history`
this method will return historical system stats for the server.
the response is paginated; to get older data, specify a higher page number.
### query params
```json
interval=Timelength // optional, default interval is 1-hr. controls granularity of historical data
limit=number // optional, default is 100, max is 500. specifies the number of data points to fetch
page=number // optional, default is 0. specifies the page of data, going backward in time.
networks=boolean // optional. if true, response will include historical info on network usage
components=boolean // optional. if true, response will include historical component temperatures
```
### response body
```json
[
{
ts: number, // unix timestamp in ms
server_id: string, // specifies the server
system_load: number,
cpu_perc: number,
cpu_freq_mhz: number,
mem_used_gb: number,
mem_total_gb: number,
disk: {},
cpus: [],
networks: [],
components: [],
processes: [],
polling_rate: Timelength,
},
...
]
```
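since older data lives on higher page numbers, collecting a longer window means walking `page` upward until a short page comes back; a sketch, reusing the assumptions from the stats sketch above:
```typescript
// minimal sketch. assumes `base` and `jwt` as in the stats sketch above.
// `limit=500` is the documented maximum page size.
async function getStatsHistory(server_id: string, interval = "1-hr") {
  const points: unknown[] = [];
  for (let page = 0; ; page++) {
    const res = await fetch(
      `${base}/api/server/${server_id}/stats/history` +
        `?interval=${interval}&limit=500&page=${page}`,
      { headers: { authorization: `Bearer ${jwt}` } }
    );
    const batch = (await res.json()) as unknown[];
    points.push(...batch);
    if (batch.length < 500) break; // a short page means the oldest data was reached
  }
  return points;
}
```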
```mdx-code-block
<Divider />
```
## get server stats at time
`GET /api/server/<server_id>/stats/at_ts`
this method retrieves the historical stats for a server at a specific timestamp.
### query params
```json
ts=number // required. the timestamp in ms
```
### response body
```json
{
ts: number, // unix timestamp in ms
server_id: string, // specifies the server
system_load: number,
cpu_perc: number,
cpu_freq_mhz: number,
mem_used_gb: number,
mem_total_gb: number,
disk: {},
cpus: [],
networks: [],
components: [],
processes: [],
polling_rate: Timelength,
}
```
```mdx-code-block
<Divider />
```
## get docker networks
`GET /api/server/<server_id>/networks`
this method retrieves the docker networks on the server.
### response body
```json
[
{
Name?: string,
Id?: string,
Created?: string,
Scope?: string,
Driver?: string,
EnableIPv6?: boolean,
IPAM?: {
Driver?: string,
Config?: [
{
Subnet?: string,
IPRange?: string,
Gateway?: string,
AuxiliaryAddresses?: {}
},
...
],
Options?: {}
},
Internal?: boolean,
Attachable?: boolean,
Ingress?: boolean,
Containers?: {},
Options?: {},
Labels?: {}
},
...
]
```
```mdx-code-block
<Divider />
```
## prune docker networks
`POST /api/server/<server_id>/networks/prune`
this method triggers the `network prune` action, which runs
`docker network prune -f` on the target server.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## get docker images
`GET /api/server/<server_id>/images`
this method will return a list of images available locally on the server.
### response body
```json
[
{
Id: string,
ParentId: string,
RepoTags: [string],
RepoDigests: [string],
Created: number,
Size: number,
SharedSize: number,
VirtualSize: number,
Labels: {},
Containers: number,
}
]
```
```mdx-code-block
<Divider />
```
## prune docker images
`POST /api/server/<server_id>/images/prune`
this method triggers the `image prune` action, which runs
`docker image prune -a -f` on the target server.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## get docker containers
`GET /api/server/<server_id>/containers`
this method is used to retrieve information about all the containers on the target server.
### response body
```json
[
{
name: string,
id: string,
image: string,
state: DockerContainerState,
status?: string,
},
...
]
```
```mdx-code-block
<Divider />
```
## prune docker containers
`POST /api/server/<server_id>/containers/prune`
this method triggers the `container prune` action, which runs
`docker container prune -f` on the target server.
### response body
[Update](/api/types#update)

283 docsite/docs/api/types.mdx Normal file

@@ -0,0 +1,283 @@
import Divider from "@site/src/components/Divider";
# types
these types are used across the monitor api, defined using `typescript`. they are referenced throughout the api docs.
```mdx-code-block
<Divider />
```
## build
```typescript
interface Build {
_id?: {
$oid: string;
};
name: string;
description?: string;
permissions?: {
[user_id: string]: PermissionLevel;
};
skip_secret_interp?: boolean;
server_id?: string;
aws_config?: {
region?: string;
instance_type?: string;
ami_name?: string;
volume_gb?: number;
subnet_id?: string;
security_group_ids?: string[];
key_pair_name?: string;
assign_public_ip?: boolean;
};
version: {
major: number;
minor: number;
patch: number;
};
repo?: string;
branch?: string;
github_account?: string;
pre_build?: {
path?: string;
command?: string;
};
docker_build_args?: {
build_path: string;
dockerfile_path?: string;
build_args?: Array<{
variable: string;
value: string;
}>;
extra_args?: string[];
use_buildx?: boolean;
};
docker_account?: string;
docker_organization?: string;
last_built_at?: string;
created_at?: string;
updated_at?: string;
}
```
```mdx-code-block
<Divider />
```
## deployment
```typescript
interface Deployment {
_id?: {
$oid: string;
};
name: string;
description?: string;
server_id: string;
permissions?: PermissionLevel;
skip_secret_interp?: boolean;
docker_run_args: {
image: string;
ports?: Array<{
local: string;
container: string;
}>;
volumes?: Array<{
local: string;
container: string;
}>;
environment?: Array<{
variable: string;
value: string;
}>;
network?: string;
restart?: "no" | "on-failure" | "always" | "unless-stopped";
post_image?: string;
container_user?: string;
extra_args?: string[];
docker_account?: string;
};
build_id?: string;
build_version?: {
major: number;
minor: number;
patch: number;
};
repo?: string;
branch?: string;
github_account?: string;
on_clone?: {
path?: string;
command?: string;
};
on_pull?: {
path?: string;
command?: string;
};
repo_mount?: {
local: string;
container: string;
};
created_at?: string;
updated_at?: string;
}
```
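as an example of the shape, a minimal deployment literal that could be posted to [create full deployment](/api/deployment#create-full-deployment). all values are hypothetical; only the required fields plus a basic `docker_run_args` are set.
```typescript
// a minimal Deployment literal. `docker_run_args.image` is the only
// required run arg; everything marked `?` in the interface is omitted.
const deployment: Deployment = {
  name: "mongo-db", // hypothetical
  server_id: "<server_id>",
  docker_run_args: {
    image: "mongo:6.0.3",
    ports: [{ local: "27018", container: "27017" }],
    restart: "unless-stopped",
  },
};
```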
```mdx-code-block
<Divider />
```
## server
```typescript
interface Server {
_id?: string;
name: string;
description?: string;
address: string;
permissions?: {
[user_id: string]: PermissionLevel;
};
enabled: boolean;
to_notify?: string[];
auto_prune?: boolean;
cpu_alert?: number;
mem_alert?: number;
disk_alert?: number;
stats_interval?: Timelength;
region?: string;
instance_id?: string;
created_at?: string;
updated_at?: string;
}
```
```mdx-code-block
<Divider />
```
## update
```typescript
interface Update {
_id?: string;
target: {
type: "System" | "Build" | "Deployment" | "Server" | "Procedure" | "Group";
id?: string;
};
operation: Operation;
logs: Array<{
stage: string;
command: string;
stdout: string;
stderr: string;
success: boolean;
start_ts: string;
end_ts: string;
}>;
start_ts: string;
end_ts?: string;
status: "queued" | "in_progress" | "complete";
success: boolean;
operator: string;
version?: {
major: number;
minor: number;
patch: number;
};
}
```
```mdx-code-block
<Divider />
```
## operation
```typescript
enum Operation {
None = "none",
CreateServer = "create_server",
UpdateServer = "update_server",
DeleteServer = "delete_server",
PruneImagesServer = "prune_images_server",
PruneContainersServer = "prune_containers_server",
PruneNetworksServer = "prune_networks_server",
RenameServer = "rename_server",
CreateBuild = "create_build",
UpdateBuild = "update_build",
DeleteBuild = "delete_build",
BuildBuild = "build_build",
CreateDeployment = "create_deployment",
UpdateDeployment = "update_deployment",
DeleteDeployment = "delete_deployment",
DeployContainer = "deploy_container",
StopContainer = "stop_container",
StartContainer = "start_container",
RemoveContainer = "remove_container",
PullDeployment = "pull_deployment",
RecloneDeployment = "reclone_deployment",
RenameDeployment = "rename_deployment",
CreateProcedure = "create_procedure",
UpdateProcedure = "update_procedure",
DeleteProcedure = "delete_procedure",
CreateGroup = "create_group",
UpdateGroup = "update_group",
DeleteGroup = "delete_group",
ModifyUserEnabled = "modify_user_enabled",
ModifyUserCreateServerPermissions = "modify_user_create_server_permissions",
ModifyUserCreateBuildPermissions = "modify_user_create_build_permissions",
ModifyUserPermissions = "modify_user_permissions",
AutoBuild = "auto_build",
AutoPull = "auto_pull",
}
```
```mdx-code-block
<Divider />
```
## permission level
```typescript
enum PermissionLevel {
None = "none",
Read = "read",
Execute = "execute",
Update = "update",
}
```
```mdx-code-block
<Divider />
```
## timelength
```typescript
enum Timelength {
OneSecond = "1-sec",
FiveSeconds = "5-sec",
TenSeconds = "10-sec",
FifteenSeconds = "15-sec",
ThirtySeconds = "30-sec",
OneMinute = "1-min",
TwoMinutes = "2-min",
FiveMinutes = "5-min",
TenMinutes = "10-min",
FifteenMinutes = "15-min",
ThirtyMinutes = "30-min",
OneHour = "1-hr",
TwoHours = "2-hr",
SixHours = "6-hr",
EightHours = "8-hr",
TwelveHours = "12-hr",
OneDay = "1-day",
ThreeDay = "3-day",
OneWeek = "1-wk",
TwoWeeks = "2-wk",
ThirtyDays = "30-day",
}
```


@@ -0,0 +1,9 @@
# select a builder
A builder is a machine running monitor periphery and docker. Any server connected to monitor can be chosen as the builder for a build.
Building on a machine running production software is usually not a great idea, as this process can use a lot of system resources. It is better to start up a temporary cloud machine dedicated to the build, then shut it down when the build is finished. Right now monitor supports AWS EC2 for this task.
### AWS builder
You can choose to build on AWS on the "builder" tab on the build's page. From here you can select preconfigured AMIs to use as a base to build the image. These must be configured in the monitor core configuration along with other information like defaults to use, AWS credentials, etc. This is explained on the [core setup page](https://github.com/mbecker20/monitor/blob/main/docs/setup.md).


@@ -0,0 +1,29 @@
# configuration
monitor just needs a bit of information in order to build your image.
### repo configuration
To specify the github repo to build, just give it the name of the repo and the branch under *repo config*. The name is given like ```mbecker20/monitor```; it includes the username / organization that owns the repo.
Many repos are private; in this case a Github access token is required in the periphery.config.toml of the building server. These are specified in the config like ```username = "access_token"```. An account that has access to the repo and is available on the periphery server can be selected via the *github account* dropdown menu.
### docker build configuration
In order to docker build, monitor just needs to know the build directory and the path of the Dockerfile relative to the repo; you can configure these in the *build config* section.
If the build directory is the root of the repository, you pass the build path as ```.```. If the build directory is some folder of the repo, just pass the name of the folder, without a preceding "/", for example ```build/directory```.
The dockerfile's path is given relative to the build directory. So if your build directory is ```build/directory``` and the dockerfile is in ```build/directory/Dockerfile.example```, you give the dockerfile path simply as ```Dockerfile.example```.
Just as with private repos, you will need to select a docker account to use with ```docker push```.
### adding build args
The Dockerfile may make use of [build args](https://docs.docker.com/engine/reference/builder/#arg). Build args can be passed using the gui by pressing the ```edit``` button. They are passed in the menu just like they would be in a .env file:
```
BUILD_ARG1=some_value
BUILD_ARG2=some_other_value
```
Note that these values are visible in the final image using ```docker history```, so they shouldn't be used to pass build time secrets. Use [secret mounts](https://docs.docker.com/engine/reference/builder/#run---mounttypesecret) for this instead.


@@ -0,0 +1,15 @@
---
slug: /build-images
---
# building images
Monitor builds docker images by cloning the source repository from Github, running ```docker build```, and pushing the resulting image to docker hub. Any repo containing a ```Dockerfile``` is buildable using this method.
Build configuration involves passing file / directory paths; for more details about passing file paths, see the [file paths doc](/file-paths).
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```


@@ -0,0 +1,7 @@
# pre build command
Sometimes a command needs to be run before ```docker build```; you can configure this in the *pre build* section.
There are two fields to pass for *pre build*. The first is *path*, which changes the working directory. To run the command in the root of the repo, just pass ```.```. The second field is *command*; this is the shell command to be executed after the repo is cloned.
For example, say your repo had a folder in it called ```scripts``` with a shell script ```on-clone.sh```. You would give *path* as ```scripts``` and command as ```sh on-clone.sh```. Or you could make *path* just ```.``` and then the command would be ```sh scripts/on-clone.sh```. Either way works fine.


@@ -0,0 +1,3 @@
# versioning
Monitor uses a major.minor.patch versioning scheme. Every build will auto-increment the patch number, and push the image to docker hub with the version tag as well as the ```latest``` tag.


@@ -0,0 +1,7 @@
# adding the server to monitor
The easiest way to add the server is with the GUI. On the home page, click the ```+``` button to the right of the server search bar, then configure the name and address of the server. The address is the full http/s url to the periphery server, eg ```http://12.34.56.78:8000```.
Once it is added, you can use the GUI to modify some config, like the alerting thresholds for cpu, memory and disk usage. A server can also be temporarily disabled; this will prevent alerting if it goes offline.
Since no state is stored on the periphery servers, you can easily redirect all deployments to be hosted on a different server. Just update the address to point to the new server.


@@ -0,0 +1,16 @@
---
slug: /connecting-servers
---
# connecting servers
Integrating a device into the monitor system has 2 steps:
1. Set up and start the periphery agent on the server
2. Add the server to monitor via the core API
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```


@@ -0,0 +1,43 @@
# setup monitor periphery
The easiest way to do this is to follow the [monitor guide](https://github.com/mbecker20/monitor-guide). This is a repo containing directions and scripts that enable command line installation, locally or remotely over ssh.
### manual install steps
1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases).
2. Create and edit your config files, following the [config example](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml). The monitor cli can be used to add the boilerplate: ```monitor periphery gen-config --path /path/to/config.toml```. The files can be anywhere, and can be passed to periphery via the ```--config-path``` argument.
3. Ensure that inbound connectivity is allowed on the port specified in periphery.config.toml (default 8000).
4. Install docker. Make sure whatever user periphery is run as has access to the docker group without sudo.
5. Start the periphery binary with your preferred process manager, like systemd. The config read from the file is printed on startup; ensure that it is as expected.
### example periphery start command
```
periphery \
--config-path /path/to/periphery.config.base.toml \
--config-path /other_path/to/override-periphery-config-directory \
--config-keyword periphery \
--config-keyword config \
--merge-nested-config \
--home_dir /home/username
```
### passing config files
Either file paths or directory paths can be passed to ```--config-path```.
When using directories, the file entries can be filtered by name with the ```--config-keyword``` argument, which can be passed multiple times to add more keywords. If passed, then only config files with file names that contain all keywords will be merged.
When passing multiple config files, later --config-path arguments given in the command will always override previous ones. Directory config files are merged in alphabetical order by name, so ```config_b.toml``` will override ```config_a.toml```.
There are two ways to merge config files. The default behavior is to completely replace any base fields with whatever fields are present in the override config. So if you pass ```allowed_ips = []``` in your override config, the final allowed_ips will be an empty list as well.
```--merge-nested-config``` will merge config fields recursively and extend config array fields.
For example, with ```--merge-nested-config``` you can specify an allowed ip in the base config and another in the override config; they will both be present in the final config.
Similarly, you can specify a base docker / github account pair, and extend them with additional accounts in the override config.


@@ -0,0 +1,40 @@
# core setup
setting up monitor core is fairly simple. there are some requirements to run monitor core:
- a valid configuration file
- an instance of MongoDB to which monitor core can connect
- docker must be installed on the host
## 1. create the configuration file
create a configuration file on the system, for example at `~/.monitor/core.config.toml`, and copy the [example config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml). fill in all the necessary information before continuing.
:::note
to enable OAuth2 login, you must create a client on the respective OAuth provider,
for example [google](https://developers.google.com/identity/protocols/oauth2)
or [github](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps).
monitor uses the `web application` login flow.
the redirect uri is `<base_url>/auth/google/callback` for google and `<base_url>/auth/github/callback` for github.
:::
## 2. start monitor core
monitor core is distributed via dockerhub under the public repo [mbecker2020/monitor_core](https://hub.docker.com/r/mbecker2020/monitor_core).
```sh
docker run -d --name monitor-core \
-v $HOME/.monitor/core.config.toml:/config/config.toml \
-p 9000:9000 \
mbecker2020/monitor_core
```
## first login
monitor core should now be accessible on the specified port, so navigating to `http://<address>:<port>` will display the login page.
the first user to log in will be auto enabled and made admin. any additional users who create accounts will be disabled by default.
## https
monitor core itself only supports http, so a reverse proxy like [caddy](https://caddyserver.com/) should be used for https.


@@ -0,0 +1,83 @@
# configuration
## choose the docker image
There are two options to configure the docker image to deploy.
### attaching a monitor build
If the software you want to deploy is built by monitor, you can attach the build directly to the deployment.
By default, monitor will deploy the latest available version of the build, or you can specify a specific version using the version dropdown.
Also by default, monitor will use the same docker account that is attached to the build in order to pull the image on the periphery server. If that account is not available on the server, you can specify another available account to use instead; this account just needs to have read access to the docker repository.
### using a custom image
You can also manually specify an image name, like ```mongo``` or ```mbecker2020/random_image:0.1.1```.
If the image repository is private, you can select an available docker account to use to pull the image.
## configuring the network
One feature of docker is that it allows for the creation of [virtual networks between containers](https://docs.docker.com/network/). Monitor allows you to specify a docker virtual network to connect the container to, or to use the host system networking to bypass the docker virtual network.
The default selection is ```host```, which bypasses the docker virtual network layer.
If you do select a network other than host, you can specify port bindings with the GUI. For example, if you are running mongo (which defaults to port 27017), you could use the mapping:
```
27018 : 27017
```
In this case, you would access mongo from outside of the container on port ```27018```.
Note that this is not the only effect of using a network other than ```host```. For example, containers running on different networks cannot communicate, and ones on the same network cannot reach other containers on ```localhost``` even when they are running on the same system. This behavior can be a bit confusing if you are not familiar with it, and it can be bypassed entirely by just using the ```host``` network.
## configuring restart behavior
Docker, like systemd, has a couple options for handling when a container exits. See [docker restart policies](https://docs.docker.com/config/containers/start-containers-automatically/). Monitor allows you to select the appropriate restart behavior from these options.
## configuring environment variables
Monitor enables you to easily manage environment variables passed to the container. In the GUI, click the 'edit' button on the 'environment' card; this will bring up the environment menu.
You pass environment variables just as you would with a ```.env``` file:
```
ENV_VAR_1=some_value
ENV_VAR_2=some_other_value
```
## configuring volumes
A docker container's filesystem is segregated from that of the host. However, it is still possible for a container to access system files and directories; this is accomplished by using [bind mounts](https://docs.docker.com/storage/bind-mounts/).
Say your container needs to read a config file located on the system at ```/home/ubuntu/config.toml```. You can specify the bind mount to be:
```
/home/ubuntu/config.toml : /config/config.toml
```
The first path is the one on the system, the second is the path in the container. Your application would then read the file at ```/config/config.toml``` in order to load its contents.
These can be configured easily with the GUI in the 'volumes' card. You can configure as many bind mounts as you need.
## extra args
Not all features of docker are mapped directly by monitor, only the most common. You can still specify any custom flags for monitor to include in the ```docker run``` command by utilizing 'extra args'. For example, you can enable log rotation using these two extra args:
```
--log-opt max-size=10M
```
```
--log-opt max-file=3
```
## post image
Sometimes you need to specify some flags to be passed directly to the application. What is put here is inserted into the docker run command after the image. For example, to pass the ```--quiet``` flag to MongoDB, the docker run command would be:
```
docker run -d --name mongo-db mongo:6.0.3 --quiet
```
In order to achieve this with monitor, just pass ```--quiet``` to 'post image'.


@@ -0,0 +1,11 @@
# deploy containers
Monitor can deploy any docker image that it can access with the configured docker accounts. It works by parsing the deployment configuration into a ```docker run``` command, which is then run on the target system. The configuration is stored in MongoDB, and records of all actions (update config, deploy, stop, etc.) are stored as well.
Deployment configuration involves passing file / directory paths; for more details about passing file paths, see the [file paths doc](/file-paths).
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```


@@ -0,0 +1,15 @@
# container management
the lifetime of a docker container is more like that of a virtual machine: containers can be created, started, stopped, and destroyed. monitor will display the state of the container and provides an API to manage all your containers' lifetimes.
this is achieved internally by running the appropriate docker command for the requested action (docker stop, docker start, etc.).
### stopping a container
sometimes you want to stop a running application but preserve its logs and configuration, either to be restarted later or to view the logs at a later time. It is more like *pausing* the application with its current config, as no configuration (like environment variables, volume mounts, etc.) will be changed when the container is started again.
note that in order to restart an application with updated configuration, it must be *redeployed*. stopping and starting a container again will keep all configuration as it was when the container was initially created.
### container redeploy
redeploying is the action of destroying a container and recreating it. If you update deployment config, these changes will not take effect until the container is redeployed. Just note this will destroy the previous container's logs along with the container itself.


@@ -0,0 +1,31 @@
# file paths
when working with monitor, you might have to configure file or directory paths.
## relative paths
Where possible, it is better to use relative file paths. Using relative file paths removes the connection between the process being run and the particular server it runs on, making it easier to move things between servers.
Where you see relative paths:
- setting the build directory and path of the Dockerfile
- setting a pre build command path
- configuring a frontend mount (used for web apps)
For all of the above, the path can be given relative to the root of the configured repo.
The one exception is the Dockerfile path, which is given relative to the build directory (this is done by Docker itself, and this pattern matches usage of the Docker CLI).
There are 3 kinds of paths to pass:
1. to specify the root of the repo, use ```.``` as the path
2. to specify a folder in the repo, pass it with **no** preceding ```/```. For example, ```example_folder``` or ```folder1/folder2```
3. to specify an absolute path on the server's filesystem, use a preceding slash, eg. ```/home/ubuntu/example```. This should only be used when absolutely necessary, like when passing host paths when configuring docker volumes.
### implementation
relative file paths are joined with the path of the repo on the system using a Rust [PathBuf](https://doc.rust-lang.org/std/path/struct.PathBuf.html#method.push).
## Docker Volume Paths
These are passed directly to the Docker CLI using ```--volume /path/on/system:/path/in/container```. So for these, the same rules apply as when using Docker on the command line. Paths here should be given as absolute; don't use ```~``` or even ```$HOME```.

44 docsite/docs/intro.md Normal file

@@ -0,0 +1,44 @@
---
slug: /intro
---
# what is monitor?
If you have many servers running many applications, it can be a challenge to keep things organized and easily accessible. Without structure, things can become messy quickly, which means operational issues are more likely to arise and they can take longer to resolve. Ultimately these issues hinder productivity and waste valuable time. Monitor is a web app to provide this structure for how applications are built, deployed, and managed across many servers.
## docker
Monitor is opinionated by design, and [docker](https://docs.docker.com/) is the tool of choice. Docker provides the ability to package applications and their runtime dependencies into a standalone bundle, called an *image*. This makes them easy to "ship" to any server and run without the hassle of setting up the runtime environment. Docker uses the image as a sort of template to create *containers*. Containers are kind of like virtual machines but with different performance characteristics, namely that processes contained still run natively on the system kernel. The file system is separate though, and like virtual machines, they can be created, started, stopped, and destroyed.
## monitor
Monitor is a solution for handling the following:
1. Build application source into auto-versioned images.
2. Create, start, stop, and restart Docker containers, and view their status and logs.
3. Keep a record of all the actions that are performed and by whom.
4. View realtime and historical system resource usage.
5. Alerting for server health, like high cpu, memory, disk, etc.
## architecture and components
Monitor is composed of a single core and any number of connected servers running the periphery application.
### monitor core
The core is a web server that hosts the core API and serves the frontend to be accessed in a web browser. All user interaction with the connected servers flows through the core. It is the stateful part of the system, with the application state stored on an instance of MongoDB.
### monitor periphery
The periphery is a stateless web server that exposes an API called by the core. The core calls this API to get system usage and container status / logs, clone git repos, and perform docker actions. It is only intended to be reached by the core, and has an address whitelist to limit the IPs allowed to call this API.
### monitor cli
This is a simple standalone cli that helps perform some actions required to set up monitor core and periphery, like generating config files.
## core API
Monitor exposes powerful functionality over the core's REST API, enabling infrastructure engineers to manage deployments programmatically in addition to using the GUI. There is a [rust crate](https://crates.io/crates/monitor_client) to simplify programmatic interaction with the API, but in general this can be accomplished using any programming language that can make REST requests.
## permissioning
Monitor is a system designed to be used by many users, whether they are developers, operations personnel, or administrators. The ability to affect an application's state is very powerful, so monitor has a granular permissioning system to only provide this functionality to the intended users. The permissioning system is explained in detail in the [permissioning](https://github.com/mbecker20/monitor/blob/main/docs/permissions.md) section.
User sign-on is possible using username / password, or with OAuth (Github and Google). Allowed login methods can be configured from the [core config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml).


@@ -0,0 +1,28 @@
# permissioning resources
All monitor resources (servers, builds, deployments) have independent permission tables, allowing users granular access to these resources. By default, users do not see any resources until they are given at least read permissions.
## permission levels
There are 4 levels of permissions a user can have on a resource:
1. **None**. This is the lowest permission level, and means the user will not have any access to this resource. They will not see it in the GUI, and it will not show up if the user queries the core API directly. All attempts to view or update the resource will be blocked.
2. **Read**. This is the first permission level that grants any access. It will enable the user to see the resource in the GUI, read the configuration, and see any logs. Any attempts to update configuration or trigger any action will be blocked.
3. **Execute**. This level will allow the user to execute actions on the resource, like send a build command or trigger a redeploy. The user will still be blocked from updating configuration on the resource.
4. **Update**. The user has full access to the resource, they can execute any actions, update the configuration, and delete the resource.
## Administration
Users can be given admin privileges by accessing the monitor MongoDB and setting ```admin: true``` on the intended user document. These users have unrestricted access to all monitor resources, like servers, builds, and deployments. Additionally, only these users can update other (non-admin) users' permissions on resources, an action not available to regular users even with **Update** level permissions.
Monitor admins are responsible for managing user accounts as well. When a user logs into monitor for the first time, they will not immediately be granted access. An admin must first **enable** the user, which can be done from the 'manage users' page (found in the user dropdown menu in the topbar). Users can also be **disabled** by an admin at any time, which blocks all their access to the GUI and API.
Users also have some configurable global permissions; these are:
- create server permission
- create build permission
Only users with these permissions (as well as admins) can add additional servers to monitor, and can create additional builds, respectively.


@@ -0,0 +1,99 @@
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion
const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');
/** @type {import('@docusaurus/types').Config} */
const config = {
title: "monitor",
tagline: "distributed build and deployment system",
favicon: "img/favicon.ico",
// Set the production url of your site here
url: "https://mbecker20.github.io",
// Set the /<baseUrl>/ pathname under which your site is served
// For GitHub pages deployment, it is often '/<projectName>/'
baseUrl: "/monitor/",
// baseUrl: "/",
// GitHub pages deployment config.
// If you aren't using GitHub pages, you don't need these.
organizationName: "mbecker20", // Usually your GitHub org/user name.
projectName: "monitor", // Usually your repo name.
trailingSlash: false,
deploymentBranch: "gh-pages-docs",
onBrokenLinks: "throw",
onBrokenMarkdownLinks: "warn",
// Even if you don't use internalization, you can use this field to set useful
// metadata like html lang. For example, if your site is Chinese, you may want
// to replace "en" with "zh-Hans".
i18n: {
defaultLocale: "en",
locales: ["en"],
},
presets: [
[
"classic",
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
routeBasePath: "/",
sidebarPath: require.resolve("./sidebars.js"),
// Please change this to your repo.
// Remove this to remove the "edit this page" links.
editUrl: "https://github.com/mbecker20/monitor/tree/main/docsite",
},
theme: {
customCss: require.resolve("./src/css/custom.css"),
},
}),
],
],
themeConfig:
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
// Replace with your project's social card
image: "img/monitor-lizard.png",
docs: {
sidebar: {
autoCollapseCategories: true,
}
},
navbar: {
title: "monitor",
logo: {
alt: "monitor lizard",
src: "img/monitor-lizard.png",
},
items: [
{
type: "docSidebar",
sidebarId: "docs",
position: "left",
label: "docs",
},
{
href: "https://github.com/mbecker20/monitor",
label: "GitHub",
position: "right",
},
],
},
footer: {
style: "dark",
copyright: `Built with Docusaurus`,
},
prism: {
theme: lightCodeTheme,
darkTheme: darkCodeTheme,
},
}),
};
module.exports = config;

38 docsite/package.json Normal file

@@ -0,0 +1,38 @@
{
"name": "docsite",
"version": "0.0.0",
"private": true,
"scripts": {
"start": "docusaurus start",
"deploy": "GIT_USER=mbecker20 docusaurus deploy"
},
"dependencies": {
"@docusaurus/core": "2.4.0",
"@docusaurus/preset-classic": "2.4.0",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
"react-dom": "^17.0.2"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "2.4.0",
"@tsconfig/docusaurus": "^1.0.5",
"typescript": "^4.7.4"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
},
"engines": {
"node": ">=16.14"
}
}

87 docsite/sidebars.js Normal file

@@ -0,0 +1,87 @@
/**
* Creating a sidebar enables you to:
- create an ordered group of docs
- render a sidebar for each doc of that group
- provide next/previous navigation
The sidebars can be generated from the filesystem, or explicitly defined here.
Create as many sidebars as you want.
*/
// @ts-check
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
// By default, Docusaurus generates a sidebar from the docs folder structure
// docsSidebar: [{type: 'autogenerated', dirName: '.'}],
// But you can create a sidebar manually
docs: [
"intro",
"core-setup",
{
type: "category",
label: "connecting servers",
link: {
type: "doc",
id: "connecting-servers/index",
},
items: [
"connecting-servers/setup-periphery",
"connecting-servers/add-server",
],
},
{
type: "category",
label: "build images",
link: {
type: "doc",
id: "build-images/index",
},
items: [
"build-images/configuration",
"build-images/pre-build",
"build-images/choosing-builder",
"build-images/versioning",
],
},
{
type: "category",
label: "deploy containers",
link: {
type: "doc",
id: "deploy-containers/index",
},
items: [
"deploy-containers/configuration",
"deploy-containers/lifetime-management",
// "deploy-containers/choosing-builder",
// "deploy-containers/versioning",
],
},
"permissioning",
"file-paths",
{
type: "category",
label: "API",
link: {
type: "doc",
id: "api/index",
},
items: [
"api/types",
"api/authenticating-requests",
"api/login",
"api/api-secrets",
"api/build",
"api/deployment",
"api/server",
"api/permissions",
"api/websocket",
],
},
],
};
module.exports = sidebars;


@@ -0,0 +1,15 @@
import React from "react";
export default function Divider() {
return (
<div
style={{
opacity: 0.7,
backgroundColor: "rgb(175, 175, 175)",
height: "3px",
width: "100%",
margin: "75px 0px"
}}
/>
);
}


@@ -0,0 +1,67 @@
import React from 'react';
import clsx from 'clsx';
import styles from './styles.module.css';
type FeatureItem = {
title: string;
// Svg: React.ComponentType<React.ComponentProps<'svg'>>;
description: JSX.Element;
};
const FeatureList: FeatureItem[] = [
{
title: 'automated builds 🛠️',
// Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
description: (
<>
build auto versioned docker images from github repos, trigger builds on git push
</>
),
},
{
title: 'deploy docker containers 🚀',
// Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
description: (
<>
deploy your builds (or any docker image), see uptime and logs across all your servers
</>
),
},
{
title: 'powered by Rust 🦀',
// Svg: require('@site/static/img/undraw_docusaurus_react.svg').default,
description: (
<>
The core API and periphery client are written in Rust
</>
),
},
];
function Feature({ title, description }: FeatureItem) {
return (
<div className={clsx('col col--4')}>
{/* <div className="text--center">
<Svg className={styles.featureSvg} role="img" />
</div> */}
<div className="text--center padding-horiz--md">
<h3>{title}</h3>
<p>{description}</p>
</div>
</div>
);
}
export default function HomepageFeatures(): JSX.Element {
return (
<section className={styles.features}>
<div className="container">
<div className="row">
{FeatureList.map((props, idx) => (
<Feature key={idx} {...props} />
))}
</div>
</div>
</section>
);
}


@@ -0,0 +1,11 @@
.features {
display: flex;
align-items: center;
padding: 4rem 0;
width: 100%;
}
.featureSvg {
height: 200px;
width: 200px;
}


@@ -0,0 +1,11 @@
import React from "react";
export default function MonitorLogo({ width = "4rem" }) {
return (
<img
style={{ width, height: "auto", opacity: 0.7 }}
src="img/monitor-lizard.png"
alt="monitor-lizard"
/>
);
}


@@ -0,0 +1,13 @@
import React from "react";
export default function SummaryImg() {
return (
<div style={{ display: "flex", justifyContent: "center" }}>
<img
style={{ marginBottom: "4rem", width: "1000px" }}
src="img/monitor-summary.png"
alt="monitor-summary"
/>
</div>
);
}


@@ -0,0 +1,30 @@
/**
* Any CSS included here will be global. The classic template
* bundles Infima by default. Infima is a CSS framework designed to
* work well for content-centric websites.
*/
/* You can override the default Infima variables here. */
:root {
--ifm-color-primary: #2e8555;
--ifm-color-primary-dark: #29784c;
--ifm-color-primary-darker: #277148;
--ifm-color-primary-darkest: #205d3b;
--ifm-color-primary-light: #33925d;
--ifm-color-primary-lighter: #359962;
--ifm-color-primary-lightest: #3cad6e;
--ifm-code-font-size: 95%;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
--ifm-color-primary: #25c2a0;
--ifm-color-primary-dark: #21af90;
--ifm-color-primary-darker: #1fa588;
--ifm-color-primary-darkest: #1a8870;
--ifm-color-primary-light: #29d5b0;
--ifm-color-primary-lighter: #32d8b4;
--ifm-color-primary-lightest: #4fddbf;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}


@@ -0,0 +1,24 @@
/**
* CSS files with the .module.css suffix will be treated as CSS modules
* and scoped locally.
*/
.heroBanner {
padding: 4rem 0;
text-align: center;
position: relative;
overflow: hidden;
}
@media screen and (max-width: 996px) {
.heroBanner {
padding: 2rem;
}
}
.buttons {
display: grid;
gap: 1rem;
grid-template-columns: 1fr 1fr;
width: fit-content;
}


@@ -0,0 +1,76 @@
import React from 'react';
import clsx from 'clsx';
import Link from '@docusaurus/Link';
import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
import Layout from '@theme/Layout';
import HomepageFeatures from '@site/src/components/HomepageFeatures';
import styles from './index.module.css';
import SummaryImg from '../components/SummaryImg';
import MonitorLogo from '../components/MonitorLogo';
function HomepageHeader() {
const {siteConfig} = useDocusaurusContext();
return (
<header className={clsx("hero hero--primary", styles.heroBanner)}>
<div className="container">
<div style={{ display: "flex", gap: "1rem", justifyContent: "center" }}>
<div style={{ position: "relative" }}>
<MonitorLogo width="600px" />
<h1
className="hero__title"
style={{
margin: 0,
position: "absolute",
top: "40%",
left: "50%",
transform: "translate(-50%, -50%)",
}}
>
monitor
</h1>
</div>
</div>
<p className="hero__subtitle">{siteConfig.tagline}</p>
<div style={{ display: "flex", justifyContent: "center" }}>
<div className={styles.buttons}>
<Link className="button button--secondary button--lg" to="/intro">
docs
</Link>
<Link
className="button button--secondary button--lg"
to="https://github.com/mbecker20/monitor"
>
github
</Link>
<Link
className="button button--secondary button--lg"
to="https://github.com/mbecker20/monitor#readme"
style={{
width: "100%",
boxSizing: "border-box",
gridColumn: "span 2",
}}
>
screenshots
</Link>
</div>
</div>
</div>
</header>
);
}
export default function Home(): JSX.Element {
const {siteConfig} = useDocusaurusContext();
return (
<Layout title="monitor docs" description={siteConfig.tagline}>
{/* <SummaryImg /> */}
<HomepageHeader />
<main>
<HomepageFeatures />
{/* <SummaryImg /> */}
</main>
</Layout>
);
}

0 docsite/static/.nojekyll Normal file


7 docsite/tsconfig.json Normal file

@@ -0,0 +1,7 @@
{
// This file is not used in compilation. It is here just for a nice editor experience.
"extends": "@tsconfig/docusaurus/tsconfig.json",
"compilerOptions": {
"baseUrl": "."
}
}

7617 docsite/yarn.lock Normal file

File diff suppressed because it is too large


@@ -17,7 +17,7 @@
<body>
<noscript>You need to enable JavaScript to run this app.</noscript>
<div id="root" class="app"></div>
<div id="root" class="app-bounder"></div>
<script src="/src/index.tsx" type="module"></script>
</body>


@@ -11,6 +11,7 @@
},
"license": "GPL v3.0",
"devDependencies": {
"@types/sanitize-html": "^2.9.0",
"sass": "^1.57.1",
"typescript": "^4.9.4",
"vite": "^4.0.3",
@@ -18,10 +19,13 @@
},
"dependencies": {
"@solidjs/router": "^0.6.0",
"@tanstack/solid-query": "^4.26.0",
"ansi-to-html": "^0.7.2",
"axios": "^1.2.1",
"js-file-download": "^0.4.12",
"lightweight-charts": "^3.8.0",
"reconnecting-websocket": "^4.4.0",
"sanitize-html": "^2.10.0",
"solid-js": "^1.6.6"
}
}


@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 18.1.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Rounded_Rectangle_2_1_" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
x="0px" y="0px" viewBox="0 0 20 20" enable-background="new 0 0 20 20" xml:space="preserve">
<g id="Rounded_Rectangle_2">
<g>
<path fill-rule="evenodd" clip-rule="evenodd" fill="#fceade" d="M15,4H1C0.45,4,0,4.45,0,5v14c0,0.55,0.45,1,1,1h14c0.55,0,1-0.45,1-1V5
C16,4.45,15.55,4,15,4z M14,18H2V6h12V18z M19,0H5C4.45,0,4,0.45,4,1v2h2V2h12v12h-1v2h2c0.55,0,1-0.45,1-1V1
C20,0.45,19.55,0,19,0z"/>
</g>
</g>
</svg>



@@ -8,16 +8,19 @@ const Deployment = lazy(() => import("./components/deployment/Deployment"));
 const Server = lazy(() => import("./components/server/Server"));
 const Build = lazy(() => import("./components/build/Build"));
 const Users = lazy(() => import("./components/users/Users"));
+const User = lazy(() => import("./components/users/User"));
 const Stats = lazy(() => import("./components/stats/Stats"));
-const Account = lazy(() => import("./components/Account"));
+const Account = lazy(() => import("./components/account/Account"));
+const Updates = lazy(() => import("./components/Updates"));
 
 const App: Component = () => {
   const { user } = useUser();
   return (
-    <>
+    <div class="app">
       <Topbar />
       <Routes>
         <Route path="/" component={Home} />
+        <Route path="/updates" component={Updates} />
         <Route path="/build/:id" component={Build} />
         <Route path="/deployment/:id" component={Deployment} />
         <Route path="/server/:id" component={Server} />
@@ -25,9 +28,10 @@ const App: Component = () => {
         <Route path="/account" component={Account} />
         <Show when={user().admin}>
           <Route path="/users" component={Users} />
+          <Route path="/user/:id" component={User} />
         </Show>
       </Routes>
-    </>
+    </div>
   );
 };

(file name not shown — new file: the CopyMenu component)

@@ -0,0 +1,115 @@
import { useNavigate } from "@solidjs/router";
import { Component, createSignal, Show } from "solid-js";
import { client, pushNotification } from "..";
import { useAppState } from "../state/StateProvider";
import { Build, Deployment } from "../types";
import { getId } from "../util/helpers";
import { useToggle } from "../util/hooks";
import ConfirmButton from "./shared/ConfirmButton";
import Icon from "./shared/Icon";
import Input from "./shared/Input";
import Flex from "./shared/layout/Flex";
import Grid from "./shared/layout/Grid";
import CenterMenu from "./shared/menu/CenterMenu";
import HoverMenu from "./shared/menu/HoverMenu";
import Selector from "./shared/menu/Selector";
const CopyMenu: Component<{
type: "deployment" | "build";
id: string;
}> = (p) => {
const navigate = useNavigate();
const [show, toggleShow] = useToggle();
const [newName, setNewName] = createSignal("");
const { builds, deployments, servers } = useAppState();
const curr_server = () => {
if (p.type === "build") {
return builds.get(p.id)!.server_id;
} else {
return deployments.get(p.id)!.deployment.server_id;
}
}
const [selectedId, setSelected] = createSignal(curr_server());
const name = () => {
if (p.type === "build") {
return builds.get(p.id)?.name;
} else if (p.type === "deployment") {
return deployments.get(p.id)?.deployment.name;
}
};
const copy = () => {
if (newName().length !== 0) {
let promise: Promise<Build | Deployment>;
if (p.type === "build") {
promise = client.copy_build(p.id, {
name: newName(),
});
} else {
promise = client.copy_deployment(p.id, {
name: newName(),
server_id: selectedId()!,
});
}
toggleShow();
promise.then((val) => {
navigate(`/${p.type}/${getId(val)}`);
});
} else {
pushNotification("bad", "copy name cannot be empty");
}
};
return (
<HoverMenu
target={
<CenterMenu
show={show}
toggleShow={toggleShow}
title={`copy ${p.type} | ${name()}`}
target={<Icon type="duplicate" />}
targetClass="blue"
content={() => (
<Grid placeItems="center">
<Flex class="full-width" alignItems="center">
<Input
placeholder="copy name"
class="card dark full-width"
style={{ padding: "0.5rem" }}
value={newName()}
onEdit={setNewName}
/>
<Show when={p.type === "deployment"}>
<Selector
label="target: "
selected={selectedId()!}
items={servers.ids()!}
onSelect={setSelected}
itemMap={(id) => servers.get(id)!.server.name}
targetClass="blue"
targetStyle={{ display: "flex", gap: "0.5rem" }}
searchStyle={{ width: "100%" }}
menuClass="scroller"
menuStyle={{ "max-height": "40vh" }}
position="bottom right"
useSearch
/>
</Show>
</Flex>
<ConfirmButton
class="green"
style={{ width: "100%" }}
onConfirm={copy}
>
copy {p.type}
</ConfirmButton>
</Grid>
)}
position="center"
/>
}
content={`copy ${p.type}`}
position="bottom center"
/>
);
};
export default CopyMenu;

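For orientation, a hedged sketch of how CopyMenu might be mounted from a deployment page. The host component is an assumption; the /deployment/:id route shape is taken from the App.tsx hunk above:

import { Component } from "solid-js";
import { useParams } from "@solidjs/router";
import CopyMenu from "./CopyMenu";

// Hypothetical host: renders the copy button for the deployment
// whose id comes from the current /deployment/:id route.
const DeploymentHeader: Component = () => {
  const params = useParams<{ id: string }>();
  return <CopyMenu type="deployment" id={params.id} />;
};

export default DeploymentHeader;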
(file name not shown — new file: the Description component)

@@ -0,0 +1,126 @@
import { Component, createSignal, onMount, Show } from "solid-js";
import { client, pushNotification } from "..";
import { useAppState } from "../state/StateProvider";
import { UpdateTarget } from "../types";
import { useToggle } from "../util/hooks";
import Grid from "./shared/layout/Grid";
import Loading from "./shared/loading/Loading";
import CenterMenu from "./shared/menu/CenterMenu";
import TextArea from "./shared/TextArea";
const Description: Component<{
name: string;
target: UpdateTarget;
description?: string;
userCanUpdate: boolean;
}> = (p) => {
const [show, toggleShow] = useToggle();
const description = () => {
if (p.description) {
let [description] = p.description.split("\n");
return description;
} else {
return "add a description";
}
};
const [width, setWidth] = createSignal<number>();
onMount(() => {
setWidth(ref!?.clientWidth);
});
let ref: HTMLDivElement;
return (
<CenterMenu
show={show}
toggleShow={toggleShow}
title={`description | ${p.name}`}
targetClass="card grey"
targetStyle={{ width: "100%", "justify-content": "flex-start" }}
target={
<div
ref={ref! as any}
class="ellipsis"
style={{
opacity: 0.7,
width: width() ? `${width()}px` : "100%",
"box-sizing": "border-box",
"text-align": "left"
}}
>
{width() ? description() : ""}
</div>
}
content={() => (
<DescriptionMenu
target={p.target}
description={p.description}
userCanUpdate={p.userCanUpdate}
toggleShow={toggleShow}
/>
)}
/>
);
};
const DescriptionMenu: Component<{
target: UpdateTarget;
description?: string;
userCanUpdate: boolean;
toggleShow: () => void;
}> = (p) => {
const { builds, servers, deployments } = useAppState();
let ref: HTMLTextAreaElement;
onMount(() => {
ref?.focus();
});
const [desc, setDesc] = createSignal(p.description);
const [loading, setLoading] = createSignal(false);
const update_description = () => {
if (!p.userCanUpdate) return;
setLoading(true);
client
.update_description({ target: p.target, description: desc() || "" })
.then(() => {
if (p.target.type === "Build") {
builds.update({ ...builds.get(p.target.id)!, description: desc() });
} else if (p.target.type === "Deployment") {
const deployment = deployments.get(p.target.id)!;
deployments.update({
...deployment,
deployment: { ...deployment.deployment, description: desc() },
});
} else if (p.target.type === "Server") {
const server = servers.get(p.target.id)!;
servers.update({
...server,
server: { ...server.server, description: desc() },
});
}
p.toggleShow();
})
.catch(() => {
pushNotification("bad", "failed to update description");
p.toggleShow();
});
};
return (
<Grid placeItems="center">
<TextArea
ref={ref! as any}
placeholder="add a description"
value={desc()}
onEdit={setDesc}
style={{ width: "900px", "max-width": "90vw", height: "70vh", padding: "1rem" }}
disabled={!p.userCanUpdate}
/>
<Show when={p.userCanUpdate}>
<Show when={!loading()} fallback={<Loading />}>
<button class="green" onClick={update_description}>
update
</button>
</Show>
</Show>
</Grid>
);
};
export default Description;

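A brief usage sketch. The { type, id } target shape follows the branches DescriptionMenu switches on above; the wrapper component itself is illustrative, not part of the diff:

import { Component } from "solid-js";
import Description from "./Description";

// Hypothetical wrapper: attaches an editable description to a server.
// The "Server" discriminant matches the branch handled in
// DescriptionMenu's update handler.
const ServerDescription: Component<{
  id: string;
  name: string;
  description?: string;
}> = (p) => (
  <Description
    name={p.name}
    target={{ type: "Server", id: p.id }}
    description={p.description}
    userCanUpdate={true}
  />
);

export default ServerDescription;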
(file name not shown — the New group/deployment/build buttons component, judging by the hunks below)

@@ -14,7 +14,11 @@ export const NewGroup: Component<{}> = (p) => {
     <Show
       when={showNew()}
       fallback={
-        <button class="green" onClick={toggleShowNew} style={{ width: "100%" }}>
+        <button
+          class="green"
+          onClick={toggleShowNew}
+          style={{ height: "100%" }}
+        >
           <Icon type="plus" />
         </button>
       }
@@ -33,8 +37,12 @@ export const NewDeployment: Component<{ serverID: string }> = (p) => {
     <Show
       when={showNew()}
       fallback={
-        <button class="green" onClick={toggleShowNew} style={{ width: "100%" }}>
-          <Icon type="plus" />
+        <button
+          class="green"
+          onClick={toggleShowNew}
+          style={{ width: "100%", height: "fit-content" }}
+        >
+          <Icon type="plus" width="1.2rem" />
         </button>
       }
     >
@@ -47,17 +55,21 @@
   );
 };
 
-export const NewBuild: Component<{ serverID: string }> = (p) => {
+export const NewBuild: Component<{}> = (p) => {
   const [showNew, toggleShowNew] = useToggle();
   const create = (name: string) => {
-    client.create_build({ name, server_id: p.serverID });
+    client.create_build({ name });
   };
   return (
     <Show
       when={showNew()}
       fallback={
-        <button class="green" onClick={toggleShowNew} style={{ width: "100%" }}>
-          <Icon type="plus" />
+        <button
+          class="green"
+          onClick={toggleShowNew}
+          style={{ width: "100%", height: "fit-content" }}
+        >
+          <Icon type="plus" width="1.2rem" />
         </button>
       }
     >
@@ -87,25 +99,19 @@ const New: Component<{
     }
   };
   return (
-    <Flex justifyContent="space-between">
+    <Flex justifyContent="space-between" style={{ height: "fit-content", width: "100%" }}>
       <Input
         ref={inputRef}
         placeholder={p.placeholder}
         value={name()}
         onEdit={setName}
         onEnter={create}
-        style={{ width: "20rem" }}
+        style={{ width: "100%", "min-width": "20rem" }}
       />
       <Flex gap="0.4rem">
         <button class="green" onClick={create}>
           create
         </button>
-        {/* <ConfirmButton
-          class="green"
-          onConfirm={create}
-        >
-          create
-        </ConfirmButton> */}
         <button class="red" onClick={p.close}>
           <Icon type="cross" />
         </button>

(file name not shown — the NotFound component, judging by the hunk below)

@@ -1,19 +1,24 @@
-import { Component } from "solid-js";
+import { Component, Show } from "solid-js";
 import Grid from "./shared/layout/Grid";
 import Loading from "./shared/loading/Loading";
 
-const NotFound: Component<{ type: "deployment" | "server" | "build" }> = (p) => {
-  return (
-    <Grid
-      placeItems="center"
-      style={{ height: "100%", width: "100%" }}
-    >
-      <Grid placeItems="center" style={{ width: "fit-content", height: "fit-content" }}>
-        <h2>{p.type} at id not found</h2>
+const NotFound: Component<{
+  type: "deployment" | "server" | "build";
+  loaded: boolean;
+}> = (p) => {
+  return (
+    <Grid placeItems="center" style={{ height: "100%", width: "100%" }}>
+      <Grid
+        placeItems="center"
+        style={{ width: "fit-content", height: "fit-content" }}
+      >
+        <Show when={p.loaded} fallback={<h2>loading {p.type}...</h2>}>
+          <h2>{p.type} at id not found</h2>
+        </Show>
         <Loading type="sonar" />
       </Grid>
     </Grid>
   );
-}
+};
 
-export default NotFound;
+export default NotFound;

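The new loaded prop lets the page distinguish "still fetching" from "genuinely missing". A sketch of a call site; the surrounding page component and its props are assumptions:

import { Component, Show } from "solid-js";
import NotFound from "./NotFound";

// Hypothetical call site: while the deployments collection is still
// loading, NotFound shows "loading deployment..." rather than
// prematurely claiming the id does not exist.
const DeploymentPage: Component<{ found: boolean; loaded: boolean }> = (p) => (
  <Show when={p.found} fallback={<NotFound type="deployment" loaded={p.loaded} />}>
    <div>deployment content</div>
  </Show>
);

export default DeploymentPage;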
(file name not shown — new file: the Updates page component)

@@ -0,0 +1,176 @@
import { A } from "@solidjs/router";
import {
Component,
createEffect,
createMemo,
createSignal,
For,
Show,
} from "solid-js";
import { OPERATIONS } from "..";
import { useAppDimensions } from "../state/DimensionProvider";
import { useAppState } from "../state/StateProvider";
import { Operation, Update as UpdateType, UpdateStatus } from "../types";
import {
getId,
readableMonitorTimestamp,
readableVersion,
} from "../util/helpers";
import Icon from "./shared/Icon";
import Input from "./shared/Input";
import Flex from "./shared/layout/Flex";
import Grid from "./shared/layout/Grid";
import Loading from "./shared/loading/Loading";
import Selector from "./shared/menu/Selector";
import UpdateMenu from "./update/UpdateMenu";
const Updates: Component<{}> = (p) => {
const { isMobile } = useAppDimensions();
const { updates, usernames, name_from_update_target } = useAppState();
const [operation, setOperation] = createSignal<Operation>();
createEffect(() => {
if (operation()) {
updates.load([operation()!]);
} else {
updates.load();
}
});
const [search, setSearch] = createSignal("");
const filtered_updates = createMemo(() => {
return updates.collection()?.filter((u) => {
const name = name_from_update_target(u.target);
if (name.includes(search())) return true;
const username = usernames.get(u.operator);
if (username?.includes(search())) return true;
});
});
const [openMenu, setOpenMenu] = createSignal<string | undefined>(undefined);
return (
<Grid class="full-width card shadow">
<Flex alignItems="center" justifyContent="space-between">
<Flex>
<h1>updates</h1>
<UpdateMenu
update={openMenu() ? updates.get(openMenu()!) : undefined}
closeMenu={() => setOpenMenu(undefined)}
/>
</Flex>
<Flex alignItems="center">
<Input class="lightgrey" placeholder="search" onEdit={setSearch} />
<Selector
label={isMobile() ? undefined : "operation: "}
selected={operation() ? operation()! : "all"}
items={["all", ...OPERATIONS]}
onSelect={(o) =>
o === "all"
? setOperation(undefined)
: setOperation(o.replaceAll(" ", "_") as Operation)
}
targetClass="blue"
position="bottom right"
searchStyle={{ width: "15rem" }}
menuClass="scroller"
menuStyle={{ "max-height": "50vh" }}
useSearch
/>
</Flex>
</Flex>
<Show
when={updates.loaded()}
fallback={
<Flex justifyContent="center">
<Loading type="three-dot" />
</Flex>
}
>
<For each={filtered_updates()}>
{(update) => (
<Update
update={update}
openMenu={() => setOpenMenu(getId(update))}
/>
)}
</For>
<Show when={!updates.noMore()}>
<button
class="grey full-width"
onClick={() =>
operation()
? updates.loadMore([operation()!])
: updates.loadMore()
}
>
load more
</button>
</Show>
</Show>
</Grid>
);
};
export default Updates;
const Update: Component<{ update: UpdateType; openMenu: () => void }> = (p) => {
const { isMobile } = useAppDimensions();
const { usernames, name_from_update_target } = useAppState();
const name = () => name_from_update_target(p.update.target);
const operation = () => {
if (p.update.operation === Operation.BuildBuild) {
return `build ${readableVersion(p.update.version!)}`;
}
return `${p.update.operation.replaceAll("_", " ")}${
p.update.version ? " " + readableVersion(p.update.version) : ""
}`;
};
const link_to = () => {
return p.update.target.type === "System"
? "/"
: `/${p.update.target.type.toLowerCase()}/${p.update.target.id}`;
};
return (
<Flex
class="card light hover shadow wrap pointer"
justifyContent="space-between"
alignItems="center"
onClick={p.openMenu}
>
<Flex
alignItems="center"
justifyContent="space-between"
style={{ width: isMobile() ? "100%" : undefined }}
>
<A style={{ padding: 0 }} href={link_to()}>
<h2 class="text-hover">{name()}</h2>
</A>
<div
style={{
color: !p.update.success ? "rgb(182, 47, 52)" : "inherit",
}}
>
{operation()}
</div>
<Show when={p.update.status === UpdateStatus.InProgress}>
<div style={{ opacity: 0.7 }}>(in progress)</div>
</Show>
</Flex>
<Flex
alignItems="center"
justifyContent="space-between"
style={{ width: isMobile() ? "100%" : undefined }}
>
<Flex gap="0.5rem">
<Icon type="user" />
<div>{usernames.get(p.update.operator)}</div>
</Flex>
<Flex alignItems="center">
<div style={{ "place-self": "center end" }}>
{readableMonitorTimestamp(p.update.start_ts)}
</div>
{/* <button class="blue" onClick={p.openMenu}>
<Icon type="console" />
</button> */}
</Flex>
</Flex>
</Flex>
);
};

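One subtlety in the selector above: Operation values are snake_case strings — the component renders them with replaceAll("_", " ") and casts the selection back with as Operation. A minimal sketch of that round-trip; "stop_container" is an assumed example value, the real Operation enum lives in ../types:

// Display label <-> Operation value round-trip used by the selector.
const toDisplay = (op: string) => op.replaceAll("_", " "); // "stop_container" -> "stop container"
const toOperation = (label: string) => label.replaceAll(" ", "_"); // "stop container" -> "stop_container"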
Some files were not shown because too many files have changed in this diff.