Compare commits

...

421 Commits

Author SHA1 Message Date
mbecker20
1b39aaaa38 implement description 2023-03-01 09:46:50 +00:00
mbecker20
5a2a1a3d98 0.2.4: add description and update description 2023-03-01 08:13:07 +00:00
mbecker20
39eceb745b v0.2.2: configure docker organizations for builds 2023-03-01 07:18:49 +00:00
beckerinj
4c1ec5db33 edit permissions.md 2023-02-28 02:26:57 -05:00
beckerinj
8b68b9481e permissions.md 2023-02-28 02:21:46 -05:00
beckerinj
14843f83c6 add core setup link to table of contents 2023-02-28 01:56:05 -05:00
beckerinj
e67d87e885 even 2023-02-28 01:53:54 -05:00
beckerinj
7d4d865d58 elaborate on networks 2023-02-28 01:53:04 -05:00
beckerinj
1e4aaff23c if to is 2023-02-28 01:37:43 -05:00
beckerinj
df3f4a5f4a improve builds.md 2023-02-28 01:36:35 -05:00
beckerinj
1f8557300d fix type 2023-02-28 01:33:55 -05:00
beckerinj
bf17d705f0 fix typo 2023-02-28 01:33:09 -05:00
beckerinj
0d24b792c6 container lifetime management 2023-02-28 01:30:43 -05:00
mbecker20
fb61e36417 remove download log button, its kind of unsafe if the log is long 2023-02-28 06:18:42 +00:00
beckerinj
c39869d2f8 deployments.md 2023-02-28 01:18:11 -05:00
mbecker20
750e0274da #example 2023-02-28 05:54:43 +00:00
beckerinj
a9d37ab667 add placeholders to show to to pass env 2023-02-28 00:52:09 -05:00
mbecker20
eacb549d5e update core config example with github_webhook_base_url 2023-02-28 05:08:05 +00:00
mbecker20
ce7cb8fe45 improve confirm menu with copy button 2023-02-28 04:58:39 +00:00
mbecker20
f9fe4e32b4 restyle builds and deployments 2023-02-28 04:24:15 +00:00
mbecker20
2c9fc2bad4 always show docker account 2023-02-28 03:57:35 +00:00
mbecker20
94949291c2 fix notifications, add dynamic listener url 2023-02-28 03:41:25 +00:00
beckerinj
2944ba6ef9 cli v0.2.3 2023-02-27 22:18:04 -05:00
beckerinj
997e68a31d dynamic github webhook base url 2023-02-27 22:17:37 -05:00
beckerinj
bfb9d9e34d add periphery version in builder connected logs 2023-02-27 21:46:43 -05:00
mbecker20
3b9219b586 fix updates selector style 2023-02-27 05:55:33 +00:00
mbecker20
7bf2a88ab1 finish build args section 2023-02-27 05:53:56 +00:00
mbecker20
d21ed093dc fix build args gap 2023-02-27 05:52:13 +00:00
mbecker20
6e89671e91 switch cli build and build args build config 2023-02-27 05:47:10 +00:00
beckerinj
ee1128a666 Update builds.md 2023-02-27 00:45:16 -05:00
beckerinj
63b5deecd7 Update servers.md 2023-02-27 00:44:01 -05:00
mbecker20
f4f97ce1a7 finish builds / servers 2023-02-27 05:42:22 +00:00
mbecker20
a666df099f use image on deployment container 2023-02-26 06:55:31 +00:00
mbecker20
21dd0ee072 cli should be 0.2.2 2023-02-26 06:48:38 +00:00
mbecker20
bd2a1d4236 v0.2.1 merge multiple config files 2023-02-26 06:25:26 +00:00
mbecker20
7acdbcfd8f improve updates selector - add class 2023-02-25 22:34:37 +00:00
mbecker20
58514c5c93 fix height of builder config when no builder type chosen 2023-02-25 22:07:05 +00:00
mbecker20
580e800923 fix clap args with - 2023-02-23 23:14:02 +00:00
mbecker20
29f6b19f33 cli 0.2.0. fix starting mongo when no existing container present 2023-02-23 22:46:17 +00:00
mbecker20
e090247723 fix error when user doesn't have access to build on deployment 2023-02-23 08:00:55 +00:00
mbecker20
1374c26cd8 0.2.0 cleanup 2023-02-23 07:28:11 +00:00
mbecker20
5467b40b2e fix to git clone <TOKEN> splice 2023-02-23 07:22:32 +00:00
mbecker20
165b9012da improve users responsiveness 2023-02-23 07:17:18 +00:00
mbecker20
22630f665e update the manage users page 2023-02-23 07:09:33 +00:00
mbecker20
3d867084ba log poll default to false 2023-02-23 06:44:55 +00:00
mbecker20
171dd2d9e0 remove menu animation, change builder type to selector 2023-02-23 06:38:36 +00:00
mbecker20
9709239f88 build version h2 2023-02-22 22:45:05 +00:00
mbecker20
60d457b285 improve deployment in tree and display deployed version in header 2023-02-22 22:27:54 +00:00
mbecker20
8b1d4793a7 0.1.17 support building with ec2 instances 2023-02-22 21:05:03 +00:00
mbecker20
f2166c8435 configure aws config on builds 2023-02-22 20:49:56 +00:00
mbecker20
07d723a748 more prog on frontend, some api etc 2023-02-22 06:39:32 +00:00
mbecker20
b36f485287 put server / aws build on build header 2023-02-21 23:05:49 +00:00
mbecker20
a121ae0828 begin frontend refactor for ephemeral build support 2023-02-21 18:11:43 +00:00
mbecker20
e2b5a02008 building works 2023-02-21 05:22:26 +00:00
mbecker20
575aa62625 update versions to 0.1.16 2023-02-21 04:32:41 +00:00
mbecker20
ac88a2c4ed testing and fixes for aws build 2023-02-21 04:27:30 +00:00
mbecker20
f1dcb71a8a poll periphery on build instance to ensure connectivity before moving on 2023-02-20 22:56:22 +00:00
mbecker20
30d04bc201 support building on epheral ec2 2023-02-20 09:41:15 +00:00
mbecker20
33a00bb1a2 poll when instance running 2023-02-20 04:55:44 +00:00
mbecker20
ccca44ea89 start working on build instance spawn on aws 2023-02-20 01:19:07 +00:00
mbecker20
ae5f36fe51 0.1.15 use passkey for addition core - periphery auth layer 2023-02-17 18:29:13 +00:00
beckerinj
69ce1e4f36 start adding passkey auth to core periphery communication 2023-02-17 12:53:09 -05:00
mbecker20
6e444b9032 temp remove deployment state polling 2023-02-14 18:33:32 +00:00
beckerinj
73eff72da4 work on docs 2023-02-13 11:16:27 -05:00
beckerinj
698e3c214b opinionated 2023-02-12 14:48:28 -05:00
beckerinj
9da77667dc add next page 2023-02-12 04:14:06 -05:00
beckerinj
c30793fb8f to 2023-02-12 04:12:49 -05:00
beckerinj
84fdaab24d close intro better 2023-02-12 04:10:10 -05:00
beckerinj
cbd67bb609 back to table 2023-02-12 04:05:35 -05:00
beckerinj
00f58e9008 architecture 2023-02-12 03:59:26 -05:00
beckerinj
7738fab351 intro 2023-02-12 03:57:55 -05:00
beckerinj
06e8f6589b links 2023-02-12 00:55:14 -05:00
beckerinj
57d9287724 start docs 2023-02-12 00:51:05 -05:00
mbecker20
2cc65595ee log only polls when container is running 2023-02-11 20:29:25 +00:00
mbecker20
3dd2b97873 add polling for deployment state and logs 2023-02-11 20:17:13 +00:00
mbecker20
3c805ebbf7 fix deployment action build A tag styling 2023-02-11 19:36:36 +00:00
beckerinj
a854160018 Update periphery.config.example.toml 2023-02-08 13:40:00 -05:00
mbecker20
a99d9e5969 make update menu wider 2023-02-08 05:56:09 +00:00
mbecker20
813b6c1182 confirm menu 2023-02-08 05:37:27 +00:00
mbecker20
2958f9589b deployment / build delete wont fail if server disabled / unreachable. 2023-02-08 05:37:12 +00:00
beckerinj
69b4e26176 ConfirmMenuButton 2023-02-07 23:49:27 -05:00
beckerinj
78b00f139d delete server side effects 2023-02-07 23:23:46 -05:00
beckerinj
dc1e8de851 widen env 2023-02-07 23:23:18 -05:00
mbecker20
3187b335a3 cli 0.1.20 2023-02-07 20:12:20 +00:00
beckerinj
54b5a2b420 add logrotate to mongo startup 2023-02-07 15:10:36 -05:00
beckerinj
14c6bd00a8 use the scroller class for LogContainer 2023-02-02 16:14:11 -05:00
mbecker20
e9c3646450 0.1.14 fix deployment builder defaults 2023-01-30 17:45:34 +00:00
mbecker20
4f20257479 0.1.13 improve Builder structs with defaults 2023-01-30 17:24:03 +00:00
beckerinj
65749991de 0.1.12 MonitorClient::new_from_env 2023-01-30 11:50:48 -05:00
mbecker20
237a1d802d handle stuff when server disabled / unreachable 2023-01-28 09:03:00 +00:00
mbecker20
e4336f19f3 default copy server to curr server 2023-01-25 23:08:31 +00:00
mbecker20
c895e5e67f fix selector search behavior when using itemMap 2023-01-25 23:01:11 +00:00
mbecker20
4e4e210736 implement build / deployment copy 2023-01-25 22:53:27 +00:00
beckerinj
09dfc8faa3 cli 0.1.19 add Restart=on-failure to service unit file 2023-01-25 14:58:54 -05:00
mbecker20
3c4f77cc78 0.1.11 custom DockerRunArgs Default implementation 2023-01-25 06:29:09 +00:00
mbecker20
c86880ccdb v0.1.10 make default --network host 2023-01-25 04:01:01 +00:00
beckerinj
9db26a3037 sort the servers 2023-01-24 22:33:02 -05:00
mbecker20
711d27e15e change notification formatting 2023-01-25 02:49:24 +00:00
mbecker20
e1d53598e6 historical stats default 500 length 2023-01-25 02:41:31 +00:00
beckerinj
f871bc3e03 auto refresh stats and color red in error 2023-01-24 02:46:39 -05:00
mbecker20
69b359ce4a fix loading more updates when operation selected 2023-01-23 00:24:25 +00:00
mbecker20
0dd914b6e4 wrap summary items 2023-01-22 23:08:33 +00:00
beckerinj
f03d0b8930 add links in updates and headers 2023-01-22 17:50:35 -05:00
mbecker20
a672c1cba3 fix update summary 2023-01-22 09:38:28 +00:00
beckerinj
233d9dab33 filter updates by operation 2023-01-22 04:29:46 -05:00
mbecker20
f5ac23834d fix to summary 2023-01-21 09:09:30 +00:00
mbecker20
03bc43b04e improve monitoring 2023-01-21 09:02:37 +00:00
mbecker20
fdbd2a2181 improve updates home, add server, add group 2023-01-21 06:25:49 +00:00
mbecker20
ae9fe5c424 fix open unexpected 2023-01-21 04:55:28 +00:00
mbecker20
7de10b7277 serverchildren 2023-01-21 04:49:30 +00:00
mbecker20
8c1f4d7786 fix parse show_builds 2023-01-20 05:21:26 +00:00
beckerinj
f3e84b52c6 try to fix updates 2023-01-20 00:12:47 -05:00
beckerinj
d64c46f44d fixes 2023-01-19 23:42:26 -05:00
beckerinj
a6a58a25be server info tab 2023-01-19 23:36:38 -05:00
mbecker20
37046ddbd8 add back home screen summary / updates 2023-01-20 02:48:22 +00:00
beckerinj
6f697d292a show builds on deployment page 2023-01-19 21:05:53 -05:00
beckerinj
4098c6b487 create api secret frontend 2023-01-19 00:10:06 -05:00
mbecker20
d2844b6558 enable scroll again 2023-01-18 09:34:46 +00:00
mbecker20
59eb63bcda 0.1.9 add 15-sec to Timelength options 2023-01-18 09:15:36 +00:00
mbecker20
7842ac5c45 need to add timelength here too 2023-01-18 09:01:25 +00:00
mbecker20
ba7b6db5b0 update async timing util to 0.1.14 2023-01-18 08:55:50 +00:00
mbecker20
3648a6efd4 handle permissions 2023-01-18 06:22:13 +00:00
mbecker20
f96ee3f1e8 fix permissions access for actions 2023-01-18 05:03:38 +00:00
mbecker20
f0a9b4f5a6 update async_timing_util to 0.1.13 2023-01-18 04:51:31 +00:00
beckerinj
f01c159f38 Merge branch 'responsive' 2023-01-17 23:27:18 -05:00
beckerinj
79c587c892 improve frontend responsive behavior 2023-01-17 23:26:58 -05:00
mbecker20
9f4a1625ff attach version to deployment update 2023-01-17 04:39:37 +00:00
mbecker20
3b331e1df7 take timestamp upon recv current stat 2023-01-17 03:53:30 +00:00
mbecker20
fb067c15ff element should be func 2023-01-16 23:29:57 +00:00
mbecker20
f4bba8febc improve sizes of some inputs 2023-01-16 23:02:21 +00:00
mbecker20
bf442f5cdf timeout on health check 1 second 2023-01-16 22:49:33 +00:00
mbecker20
447e41c0af add health check before all requests to peripheries 2023-01-16 22:38:10 +00:00
mbecker20
5901992d80 cli 0.1.16 dont install periphery from crates by default 2023-01-16 21:46:37 +00:00
mbecker20
13e8ad93e2 cli 0.1.15 fix --yes param to skip manual enter 2023-01-16 21:37:32 +00:00
mbecker20
66daa1ac17 cli 0.1.14 gen systemd file pointing to bin in .monitor 2023-01-16 21:31:15 +00:00
beckerinj
3622394751 move deployment permissions to top level tabs like others 2023-01-16 14:27:29 -05:00
mbecker20
53ab799d9b I guess the process filter is kind of usable now 2023-01-16 03:30:56 +00:00
mbecker20
5d0563968b fix build actions height 2023-01-16 02:33:57 +00:00
beckerinj
6e9b692328 fix currentstats mostly 2023-01-15 21:17:55 -05:00
beckerinj
de7ddd2224 improve historical stats and add server 2023-01-15 20:46:03 -05:00
beckerinj
db82a2d44e improve page layouts 2023-01-15 06:06:08 -05:00
beckerinj
ee46c86106 log on daily update 2023-01-14 23:21:59 -05:00
beckerinj
756490bc0c dont sort groups procedures 2023-01-14 23:15:38 -05:00
beckerinj
a483283352 implement groups create delete add remove 2023-01-14 23:12:33 -05:00
beckerinj
399e4bfd40 move stats to http poll 2023-01-14 03:59:17 -05:00
beckerinj
d1df2818b5 move current stats to polling http 2023-01-14 03:58:20 -05:00
mbecker20
5438ca87fb don't log stats object 2023-01-13 05:58:00 +00:00
mbecker20
20a2a660a0 Merge pull request #4 from mbecker20/next
Next
2023-01-13 00:43:46 -05:00
mbecker20
f9d234908b Merge branch 'main' into next 2023-01-13 00:42:14 -05:00
beckerinj
56b21f0d9e fix core historical stats query filter: specify server_id 2023-01-12 17:08:09 -05:00
mbecker20
d604b28d91 filter charts on undefined 2023-01-12 20:20:42 +00:00
mbecker20
d227f2c311 fix login css width to not overflow 2023-01-12 19:58:11 +00:00
mbecker20
a1acac1479 remove width 100vw in app css class 2023-01-12 19:43:28 +00:00
mbecker20
805ebe55ce smaller stats bars 2023-01-12 05:51:32 +00:00
mbecker20
3f478fefdf 0.1.8 add process start time 2023-01-12 05:07:13 +00:00
mbecker20
809be61e9c cli 0.1.13 periphery systemd needed to run 'loginctl enable-linger $USER' 2023-01-12 04:42:29 +00:00
mbecker20
cfa3dd537d cli 0.1.12 specify whether to --add-host to core container 2023-01-12 04:14:25 +00:00
mbecker20
511efdc113 cli 0.1.11 fix docker run. dont fail when there is no existing container 2023-01-12 00:24:41 +00:00
mbecker20
34a6ac3be3 cli 0.1.10 pass --yes or -y to skip requirement for user to press enter 2023-01-11 22:43:26 +00:00
mbecker20
1674f066ea cli 0.1.9 try fix to periphery start systemd 2023-01-11 22:22:57 +00:00
mbecker20
572560d5b2 cli 0.1.8 start periphery with systemd 2023-01-11 22:08:16 +00:00
mbecker20
34822d6d19 periphery 0.1.6 2023-01-11 19:29:21 +00:00
mbecker20
6655cb43b5 cli 0.1.7 pass repo dir to periphery gen_config 2023-01-11 10:45:07 +00:00
mbecker20
7060926854 add cpu freq and avg system load 2023-01-11 10:03:48 +00:00
mbecker20
a19c339857 show sys info and, load, cpu freq on stats 2023-01-11 09:58:23 +00:00
mbecker20
2cc86a8f21 0.1.6 2023-01-11 09:12:53 +00:00
mbecker20
5adbba61ec increase temp threshold to 85% of critical 2023-01-11 08:51:18 +00:00
mbecker20
d0540669b4 core and periphery 0.1.5 2023-01-11 08:44:16 +00:00
mbecker20
4fb3eb12ba 0.1.5 2023-01-11 08:42:09 +00:00
mbecker20
f885398d7c 0.1.4 across all crates 2023-01-11 06:52:02 +00:00
mbecker20
bda5a93ea4 helpers 0.1.3 2023-01-11 06:29:40 +00:00
mbecker20
713d7680c1 helpers 0.1.3 update types 2023-01-11 06:28:48 +00:00
mbecker20
3b574341f2 types 0.1.4 fix extra_args update diff 2023-01-11 06:22:25 +00:00
mbecker20
664a585bdd frontend for extra args 2023-01-11 06:21:44 +00:00
mbecker20
0a0bf55204 periphery 0.1.3 update types and helpers 2023-01-11 05:27:13 +00:00
mbecker20
b3405a8ab9 periphery 0.1.3 update types and helpers version 2023-01-11 05:25:16 +00:00
mbecker20
7693993d18 deployment can have extra args 2023-01-11 05:20:26 +00:00
mbecker20
89e16ef715 types 0.1.3 pass any extra args to docker run command 2023-01-11 05:08:35 +00:00
mbecker20
199a0e0517 remove unused mount filter 2023-01-11 04:25:48 +00:00
mbecker20
ca80fb4d28 periphery 0.1.2 remove disk filter 2023-01-10 17:04:50 +00:00
mbecker20
143d855ad7 cli v0.1.2 start core container with --add-host option 2023-01-10 16:53:34 +00:00
mbecker20
aaea572c4f update tokio version 2023-01-10 09:41:10 +00:00
mbecker20
41d86e1f34 fix monitor client 2023-01-10 09:38:05 +00:00
mbecker20
00bae80410 client 0.1.2 2023-01-10 09:30:17 +00:00
mbecker20
ef0522b200 monitor cli 0.1.1 2023-01-10 09:29:07 +00:00
mbecker20
f61f38be7e periphery v0.1.1 2023-01-10 09:00:48 +00:00
mbecker20
120877bfb5 helpers v0.1.1 2023-01-10 08:59:12 +00:00
mbecker20
3587818775 types v0.1.2 2023-01-10 08:57:45 +00:00
mbecker20
1f2cf5a848 show system procs 2023-01-10 08:54:25 +00:00
mbecker20
d9e4e5b390 system stats views working 2023-01-10 08:32:16 +00:00
mbecker20
4f965cde2d improve charts, historical and current graphs 2023-01-10 06:37:10 +00:00
mbecker20
662df40aaf current stats page 2023-01-10 02:56:22 +00:00
mbecker20
b41486fb20 realtime current stats heatbar 2023-01-09 07:47:00 +00:00
mbecker20
6acbc19c6a more accurate comp temp sort 2023-01-09 06:34:02 +00:00
mbecker20
01dfdd03fe sort the components and processes 2023-01-09 06:30:56 +00:00
mbecker20
5dc335595e core will auto prune images daily on servers with feature enabled 2023-01-09 05:53:20 +00:00
mbecker20
7c5cd1553d show build_build as just build in update menu, and fix build listener url 2023-01-09 05:39:22 +00:00
mbecker20
c78c89da37 pull image before remove container to minimize downtime 2023-01-09 05:24:37 +00:00
mbecker20
f9f3b276c8 fix default config file 2023-01-09 05:16:30 +00:00
mbecker20
e982f5a6f2 periphery systemd service file 2023-01-09 05:13:13 +00:00
mbecker20
ba0f76fd59 periphery as a service 2023-01-09 05:11:30 +00:00
mbecker20
e26769a787 get periphery version route 2023-01-09 03:54:16 +00:00
mbecker20
cffe3b1428 add permissions map to action 2023-01-09 02:12:35 +00:00
mbecker20
82f9076f3e spawn command handler 2023-01-09 01:36:12 +00:00
mbecker20
80438e7a74 run arbitrary command on periphery 2023-01-09 01:34:15 +00:00
mbecker20
8453f9ac5f handle build version 2023-01-09 00:02:35 +00:00
mbecker20
a9834a9997 fix to build last_built_at parsing 2023-01-08 08:31:36 +00:00
mbecker20
772aca95f9 fix to docker build command parsing 2023-01-08 08:25:05 +00:00
mbecker20
e1b1ee3f8d cli point to correct image 2023-01-08 08:13:06 +00:00
mbecker20
91b640cd90 disk io 2023-01-08 08:09:12 +00:00
mbecker20
47ed7d8d5f lazy load stats 2023-01-08 07:52:30 +00:00
mbecker20
af948edbea make standalone stats page 2023-01-08 07:50:47 +00:00
mbecker20
e18cd2eebb select build version for monitor build 2023-01-08 06:11:34 +00:00
mbecker20
2e5f2d11b4 add more info on stats page 2023-01-08 03:08:45 +00:00
mbecker20
2906eaf5f5 server stats page 2023-01-08 02:54:59 +00:00
mbecker20
4bc8fc4b25 fix peirphery client not passing through disks query 2023-01-07 23:46:09 +00:00
mbecker20
487bf515ba spawn threads to complete tasks requested over http 2023-01-07 21:45:57 +00:00
mbecker20
9378954551 improve frontend config design 2023-01-07 09:44:44 +00:00
mbecker20
6b366fb0e2 get stats api 2023-01-07 08:01:44 +00:00
mbecker20
a5af235cef start on github webhook listener 2023-01-05 08:41:58 +00:00
mbecker20
b0b991115b user permission management working 2023-01-05 06:55:41 +00:00
mbecker20
f2db575c70 implement owners management 2023-01-05 06:33:14 +00:00
mbecker20
5628d92b3c add status to update menu 2023-01-05 05:36:23 +00:00
mbecker20
ba994e80f6 make simple tabs to use on home screen 2023-01-05 05:30:55 +00:00
mbecker20
db9f633e89 container log should be GET method 2023-01-05 05:20:33 +00:00
mbecker20
7aaececa87 deployment page loads 2023-01-05 05:14:25 +00:00
mbecker20
1c6f31cfea finish deployment refactor 2023-01-05 05:09:06 +00:00
mbecker20
88f46a04f0 add deployment page 2023-01-05 05:06:01 +00:00
mbecker20
8b0fd2326f add cli publish task 2023-01-05 03:17:09 +00:00
mbecker20
86af789dd3 cli periphery seems to be working 2023-01-05 03:16:37 +00:00
mbecker20
243f54ec11 use cli to start periphery as daemon 2023-01-05 02:32:21 +00:00
mbecker20
4cc1f4458e start up periphery with cli 2023-01-05 02:22:15 +00:00
mbecker20
823a735ef7 prep monitor_periphery for publish 2023-01-05 02:13:22 +00:00
mbecker20
7782412c9b helpers use crates.io monitor_types 2023-01-05 02:09:41 +00:00
mbecker20
ccd7d715b9 add publish helpers task 2023-01-05 02:07:44 +00:00
mbecker20
a192b142d4 update helpers to be on crates.io 2023-01-05 02:07:10 +00:00
mbecker20
277d5fe662 update licenses and descriptions 2023-01-05 02:03:00 +00:00
mbecker20
44b9eb462e fix monitor_types license 2023-01-05 02:01:09 +00:00
mbecker20
680bb55321 start prep monitor_periphery for crates.io 2023-01-05 01:57:55 +00:00
mbecker20
33e0568782 cli to start up periphery as daemon 2023-01-05 01:55:00 +00:00
mbecker20
2b1b88ce3c periphery doesn't log any secret values on startup 2023-01-05 01:20:19 +00:00
mbecker20
bd02b9e281 fix build and deploy 2023-01-05 01:04:10 +00:00
mbecker20
85c5b146d4 fix builds getting stuck. update core dockerfile 2023-01-04 23:26:12 +00:00
mbecker20
ac3d17ce82 fix updates pagination 2023-01-04 22:41:48 +00:00
mbecker20
f6f94303a5 put id on SystemStatsRecord 2023-01-04 08:26:13 +00:00
mbecker20
53cf644008 record system processes with stats 2023-01-04 08:25:17 +00:00
mbecker20
3dfeb65aab prune system stats 2023-01-04 08:22:02 +00:00
mbecker20
c90e91d78b finish stat collection and slack alerts, daily update. async mutex 2023-01-04 08:01:25 +00:00
mbecker20
17a5d624d9 implement some stats recording. clean up Tab comp 2023-01-03 06:50:23 +00:00
mbecker20
ef8f75b3e2 begin recording system stats 2023-01-02 10:44:48 +00:00
mbecker20
da83afc6f4 stats interval is timelength 2023-01-02 10:18:54 +00:00
mbecker20
d77d07cb52 attach id of deleted item to delete update 2023-01-02 09:43:40 +00:00
mbecker20
161ac34afa fix Operation for delete build update 2023-01-02 09:34:42 +00:00
mbecker20
e9685ce71a add build page 2023-01-02 09:09:32 +00:00
mbecker20
bdf4e9d6c6 pass icon dir in frontend env 2023-01-02 08:31:39 +00:00
mbecker20
5e205efd29 add user management features 2023-01-02 08:15:43 +00:00
mbecker20
a491f206c9 create channel to handle stats stream in client 2023-01-02 07:16:55 +00:00
mbecker20
a6d655b42e add usernames cache to state 2023-01-02 06:33:18 +00:00
mbecker20
f5fa676db6 builds and deployments under server 2023-01-02 05:58:22 +00:00
mbecker20
8b16aff3bf implement home, servers, implement container log passthrough 2023-01-02 05:28:38 +00:00
mbecker20
3006aa7fa3 basically get server page 2023-01-02 00:44:20 +00:00
mbecker20
d5861274de add get_username method on client 2023-01-01 21:18:24 +00:00
mbecker20
a927571e29 add get_username to client 2023-01-01 20:51:57 +00:00
mbecker20
9998debb96 get username api 2023-01-01 20:40:01 +00:00
mbecker20
956805603e get home screen working pretty much 2023-01-01 08:00:08 +00:00
mbecker20
4a4bf197a9 home screen update views 2023-01-01 02:15:29 +00:00
mbecker20
0845dc04e1 summary 2022-12-31 21:42:58 +00:00
mbecker20
6104e5aa5a add summary component 2022-12-31 11:17:57 +00:00
mbecker20
1d717dff7a remove core ws stats logs 2022-12-31 10:28:29 +00:00
mbecker20
c2702869c1 test the stats ws connection with monitor client 2022-12-31 10:26:37 +00:00
mbecker20
58a5accd83 link up the stats websocket 2022-12-31 09:42:41 +00:00
mbecker20
de937707e1 Periphery config repo dir parse as PathBuf 2022-12-31 06:23:07 +00:00
mbecker20
0ceecee604 a bunch of stuff 2022-12-31 05:42:44 +00:00
beckerinj
5d0c14201d start on processes in stats 2022-12-30 12:36:13 -05:00
mbecker20
d0d6271d8f frontend topbar search 2022-12-30 07:49:30 +00:00
mbecker20
90884904de kind of get search working 2022-12-30 06:23:09 +00:00
mbecker20
b042494b44 get system component temps etc 2022-12-30 05:56:16 +00:00
mbecker20
80ced82131 implement configurable login options 2022-12-30 05:05:45 +00:00
mbecker20
f74faaecf8 google oauth working 2022-12-29 09:09:04 +00:00
mbecker20
3b6d3af7bb implement google oauth 2022-12-29 07:59:50 +00:00
mbecker20
15662c951d topbar ws indicator and userPermissionsOnEntity 2022-12-28 06:08:09 +00:00
mbecker20
de8c59bb61 clean up the log for unauthorized ip access 2022-12-27 05:51:06 +00:00
mbecker20
9cde83de0e update gen_config in cli to support adding allowed_ips 2022-12-27 05:41:14 +00:00
mbecker20
0a5256cbd7 periphery allowed ips 2022-12-27 05:03:24 +00:00
mbecker20
99ce35ce1d put container state on list deployment return, including NotCreated and Unknown 2022-12-26 23:18:17 +00:00
mbecker20
4d3feb52c7 make sure user has execute permissions on resources in procedure 2022-12-26 22:39:21 +00:00
mbecker20
eb79e8726d fix type import issue 2022-12-26 22:21:08 +00:00
mbecker20
3c055544cc move Timelength to local types for typeshare 2022-12-26 22:14:57 +00:00
mbecker20
0d4118dac8 add crud for groups at all client levels 2022-12-26 19:55:33 +00:00
mbecker20
dac9c05a89 change default periphery stats polling rate to 5-sec 2022-12-26 19:07:32 +00:00
mbecker20
1dbc2909e2 start making crud for groups 2022-12-26 06:32:36 +00:00
mbecker20
6fc4a0627c running as daemon seems to work! 2022-12-26 06:24:38 +00:00
mbecker20
4a99ccc1a9 run periphery as daemon 2022-12-25 22:59:03 +00:00
mbecker20
46dc663385 create the Group type to group all the resources (including groups for nesting 2022-12-25 06:20:22 +00:00
mbecker20
d2a3a6cddf refactor types lib into split files for easier finding of things 2022-12-25 05:46:21 +00:00
mbecker20
0d140b420e periphery runs fine lcoally 2022-12-24 06:20:25 +00:00
mbecker20
6be463475c change update send channel to tokio::sync::broadcast 2022-12-24 06:10:13 +00:00
mbecker20
858cead89d rename stats_interval to stats_polling_rate 2022-12-24 05:28:10 +00:00
mbecker20
ba439d6f5f improve periphery stats with modify interval, modify user updates, frontend state 2022-12-23 05:41:06 +00:00
mbecker20
9c09906bdd implement get updates list 2022-12-22 07:05:18 +00:00
mbecker20
90173a30e7 add topbar to FE and some state hooks 2022-12-22 01:06:42 +00:00
mbecker20
902bf0c981 frontend login guard and page, deployment / build copy route 2022-12-21 06:21:23 +00:00
mbecker20
5ac3239f4f pull deployment in clients 2022-12-21 05:13:25 +00:00
mbecker20
3a0fa36d19 pull deployment repo 2022-12-21 05:10:17 +00:00
mbecker20
3b0f919baa implement correct busy logic 2022-12-21 04:45:27 +00:00
mbecker20
b2226efd59 implement a not good busy guard 2022-12-20 07:45:25 +00:00
mbecker20
a0beb27072 small refactor on the ts client 2022-12-19 07:32:47 +00:00
mbecker20
5cd68191fc implement a lot of run procedure 2022-12-19 00:59:00 +00:00
mbecker20
b9978f531a fill out the ts client 2022-12-18 08:10:14 +00:00
mbecker20
11a4379fb6 add additional typescript type gen from core 2022-12-18 06:00:23 +00:00
mbecker20
090c4a6dc7 generate typescript types in frontend 2022-12-18 05:44:41 +00:00
mbecker20
0b3bd899fe move to string timestamps 2022-12-18 05:42:58 +00:00
mbecker20
0e913ac420 add shared util components 2022-12-18 03:23:50 +00:00
mbecker20
3e5df637fd add frontend public 2022-12-18 02:51:05 +00:00
mbecker20
81e96861d7 only show size of / and paths including "external" 2022-12-18 02:41:36 +00:00
mbecker20
6284a48a3a some consistency fixes 2022-12-18 00:14:46 +00:00
mbecker20
c0854c7923 port 8000 2022-12-17 23:35:40 +00:00
mbecker20
b4c98820d9 cli | replace ~ with env var HOME in path 2022-12-17 23:07:48 +00:00
beckerinj
1cbb7d6bfd fix test 2022-12-17 01:27:38 -05:00
beckerinj
be58cec1c9 add start stop remove container 2022-12-17 01:27:06 -05:00
beckerinj
839af15311 create procedure and update mungos 2022-12-16 02:50:07 -05:00
beckerinj
a985139f9c add procedures to api and add functions in monitor_client 2022-12-15 00:50:07 -05:00
beckerinj
7719c17227 outline functions on state to work with procedures 2022-12-15 00:24:57 -05:00
beckerinj
b92111c7c2 leverage build args to build slim / full periphery 2022-12-14 23:59:17 -05:00
beckerinj
a3ceef26f8 cli specify restart mode 2022-12-14 17:58:35 -05:00
beckerinj
e31515f6fd get cli working again 2022-12-14 12:55:02 -05:00
beckerinj
38be2c53e7 add color and remove cli deps 2022-12-14 03:09:01 -05:00
beckerinj
1d64040129 fill out cli scripts 2022-12-13 02:53:58 -05:00
beckerinj
ae9a1cb89e create full server / deployment / build 2022-12-13 02:18:44 -05:00
beckerinj
9858a7a5ce start mongo cli 2022-12-12 02:48:36 -05:00
beckerinj
e5dec41b49 slim and full periphery. updated stats type 2022-12-12 00:46:27 -05:00
beckerinj
b98f4a6ec1 add better error handing 2022-12-11 13:05:37 -05:00
beckerinj
ba431638b7 gen periphery config with cli 2022-12-11 12:42:53 -05:00
beckerinj
ffeca7658a fill out monitor client permissions and secret calls 2022-12-11 12:29:48 -05:00
beckerinj
daf06a33f8 start on cli - generate core config 2022-12-11 04:15:19 -05:00
beckerinj
e81b6a14de list + prune images networks containers 2022-12-11 02:19:52 -05:00
beckerinj
c79469af30 go back to copy . . 2022-12-10 23:37:10 -05:00
beckerinj
d761e49a64 only copy what each dockerfile needs to improve caching 2022-12-10 23:12:59 -05:00
beckerinj
0f865be593 first user auto enabled and admined 2022-12-10 20:15:23 -05:00
beckerinj
4130bc1726 add created_at and updated_at fields 2022-12-10 19:14:21 -05:00
beckerinj
5559c3de42 handle version in deployment from build 2022-12-10 18:43:18 -05:00
beckerinj
1884693ca6 add basic container stats to core get / list deployment 2022-12-10 18:16:43 -05:00
beckerinj
1eb707a759 get docker compose testing setup working 2022-12-10 15:55:41 -05:00
beckerinj
91246b7078 add routes to update build at id. build stops if pre_build fails. 2022-12-10 00:44:56 -05:00
beckerinj
31dd47aac8 create and test deploy 2022-12-08 00:29:34 -05:00
beckerinj
644a3c795e build_build 2022-12-07 19:55:41 -05:00
beckerinj
521976e57e remove all DbExtension 2022-12-07 15:42:57 -05:00
beckerinj
f1df4f36df move actions to actions folder as fn impl on State 2022-12-07 03:51:58 -05:00
beckerinj
e8e566ea62 add end ts to reclone update 2022-12-06 01:53:33 -05:00
beckerinj
b0a865ddb8 change clone repo update status to complete 2022-12-06 01:50:13 -05:00
beckerinj
937b271686 update things, reclone 2022-12-06 01:31:13 -05:00
beckerinj
c4a533bc4b cleanup 2022-12-05 03:13:59 -05:00
beckerinj
16aa58482f update build 2022-12-05 02:58:04 -05:00
beckerinj
1674825a94 apply Diff to all the updatable structs 2022-12-04 20:41:51 -05:00
beckerinj
a382b4d589 delete repo when deleting build 2022-12-04 04:36:36 -05:00
beckerinj
22a8845c53 clean up old stats 2022-12-04 04:11:27 -05:00
beckerinj
34b60c755b container stats 2022-12-04 01:59:47 -05:00
beckerinj
38d7126959 create test bin for testing system with monitor client 2022-12-03 02:25:34 -05:00
beckerinj
00a89ccb48 implement secret creating and deleting 2022-12-02 02:40:44 -05:00
beckerinj
4e518d90ad made login route for secret usage 2022-12-02 00:22:04 -05:00
beckerinj
0d4bacd892 fix description monitor client 2022-12-01 23:17:03 -05:00
beckerinj
d876634899 add client readme and publish client task 2022-12-01 23:14:07 -05:00
beckerinj
8c8b4ea488 create permissioned delete routes 2022-12-01 22:58:22 -05:00
beckerinj
209f6ebb87 update types package 2022-12-01 01:34:25 -05:00
beckerinj
15faefff93 delete deployment 2022-12-01 01:32:54 -05:00
beckerinj
bf80b34e8e rename monitor_types and update axum version 2022-12-01 00:33:00 -05:00
beckerinj
8c87102116 update target 2022-11-29 01:39:17 -05:00
beckerinj
500e180241 update status enum 2022-11-24 23:39:00 -08:00
beckerinj
20997cf5ac implement get server stats on core 2022-11-24 22:30:03 -08:00
beckerinj
96127b52e6 add docker build tasks 2022-11-24 13:32:50 -08:00
beckerinj
192363bb64 get periphery docker build working 2022-11-24 13:30:08 -08:00
beckerinj
8d7b0625b0 permissions update returns message 2022-11-24 13:05:45 -08:00
beckerinj
47717760b2 move permissions admin check to early return 2022-11-24 13:00:15 -08:00
beckerinj
9c5d2da199 refactor permissions a bit to reduce duplication 2022-11-24 12:59:06 -08:00
beckerinj
556f9d7d96 finish update permissions route for builds and deployments 2022-11-24 12:54:54 -08:00
beckerinj
e5bd6493f5 basic add permissions 2022-11-24 00:54:54 -08:00
beckerinj
7a4cf8be2d delete update "entity_type" 2022-11-23 12:31:17 -08:00
beckerinj
4a7c0008e0 health check? 2022-11-23 13:13:32 -05:00
beckerinj
d15d2ca56b plan to inject arbitrary string secrets defined in periphery config into deployment configuration 2022-11-21 19:35:25 -05:00
beckerinj
a91fd11844 improve permissions code 2022-11-21 00:02:39 -05:00
beckerinj
37d5606eb3 list servers 2022-11-20 23:46:36 -05:00
beckerinj
56269b2fcf add dockerfiles and add version tags to docker builds 2022-11-20 22:24:59 -05:00
beckerinj
abc1ad4356 create for server / deployment 2022-11-20 20:14:16 -05:00
beckerinj
052756140b broadcast updates to clients, and implement create build 2022-11-20 19:52:23 -05:00
beckerinj
6f03bd5827 something 2022-11-16 02:35:43 -05:00
beckerinj
437052b58b start implementing the core websocket 2022-11-15 02:40:57 -05:00
beckerinj
e35d3473c7 add start / end ts, in progress to update 2022-11-14 02:22:13 -05:00
beckerinj
f48703aef7 renome config example folder 2022-11-14 01:05:48 -05:00
beckerinj
393e202102 move config files to dedicated folder 2022-11-14 01:05:26 -05:00
beckerinj
809732e7f3 build route 2022-11-13 23:29:12 -05:00
beckerinj
a7abca2038 implement docker build 2022-11-13 23:19:41 -05:00
beckerinj
475f6774bf implement more periphery routes + periphery client 2022-11-13 02:14:36 -05:00
beckerinj
d4a775025f implement clone build / deployment repo 2022-11-13 00:01:53 -05:00
beckerinj
21c8fac0a4 configs 2022-11-12 19:35:28 -05:00
beckerinj
d0051016fc dont really need the json example files 2022-11-12 01:36:27 -05:00
beckerinj
b495593b59 add accounts route in periphery 2022-11-11 17:40:57 -05:00
beckerinj
aaa8d7e97d get accounts on builder 2022-11-11 17:34:53 -05:00
beckerinj
af126c70a6 add a bunch of secret / config examples. begin builder 2022-11-11 17:15:29 -05:00
beckerinj
1a4e3fa55a pass core config as toml 2022-11-11 16:00:57 -05:00
mbecker20
e740128baf parse toml secrets 2022-11-11 15:08:29 -05:00
beckerinj
fdf5626a93 fmt 2022-11-11 11:51:10 -05:00
beckerinj
40c62234bb write the basic git clone function 2022-11-11 04:11:27 -05:00
beckerinj
b96a6cf7c2 implement container stuff in periphery client 2022-11-09 01:52:21 -05:00
beckerinj
6728507914 retrieve system stats 2022-11-09 01:28:33 -05:00
beckerinj
f679fa2c0c start looking at stats 2022-11-08 02:48:59 -05:00
beckerinj
ced8aa661b begin implement periphery docker functions 2022-11-08 02:37:37 -05:00
beckerinj
b936ff3e36 work on periphery deploy client 2022-11-07 00:26:06 -05:00
beckerinj
841b458f05 start implementing the periphery client 2022-11-06 22:50:01 -05:00
mbecker20
fd13240134 attach userid and is_admin to request 2022-11-06 20:16:17 -05:00
mbecker20
c70917382f implement auth extension 2022-11-06 20:05:51 -05:00
beckerinj
8d2ea8ae87 prog2 2022-11-06 04:57:29 -05:00
beckerinj
d4a91f7240 connect up some auth routing 2022-11-06 04:30:00 -05:00
beckerinj
4ba8c0f380 prog 2022-11-06 04:15:02 -05:00
beckerinj
cb9b482340 start local login implement 2022-11-06 02:57:28 -05:00
beckerinj
797f33e0eb restructure and work on docker client 2022-11-05 22:22:27 -04:00
beckerinj
6a4991354b clients galore 2022-11-05 03:26:58 -04:00
beckerinj
336646c280 pretty much fill out the types 2022-10-10 23:30:53 -04:00
beckerinj
2664ba939e define some types 2022-10-10 20:55:46 -04:00
beckerinj
47c3680d8c add bollard to deps 2022-09-25 17:43:21 -04:00
beckerinj
f281f68a8f scafolding for next 2022-09-25 17:27:06 -04:00
beckerinj
1852ee5216 delete everything for next version 2022-09-25 16:59:59 -04:00
492 changed files with 30002 additions and 22339 deletions

4
.dockerignore Normal file
View File

@@ -0,0 +1,4 @@
/target
/config_example
config.*
.env

19
.gitignore vendored
View File

@@ -1,7 +1,12 @@
.DS_Store
node_modules
build
secrets
bundle.*
monitor_mount
.env*
target
/frontend/build
node_modules
dist
.env
.env.development
repos
config.json
config.toml
secrets.json
secrets.toml

View File

@@ -1,60 +0,0 @@
{
"service": {
"scope": "typescript",
"prefix": "plugin",
"body": [
"import { FastifyInstance } from \"fastify\";",
"import fp from \"fastify-plugin\";",
"",
"const ${1:$TM_FILENAME_BASE} = fp((app: FastifyInstance, _: {}, done: () => void) => {",
"\t${0}",
"\tdone();",
"});",
"",
"export default ${1:$TM_FILENAME_BASE};"
]
},
"schema": {
"scope": "typescript",
"prefix": "schema",
"body": [
"import { FastifyInstance } from \"fastify\";",
"import fp from \"fastify-plugin\";",
"import { Schema } from \"mongoose\";",
"import model from \"../../util/model\";",
"",
"const ${2:$TM_FILENAME_BASE} = fp((app: FastifyInstance, _: {}, done: () => void) => {",
"\tconst schema = new Schema({",
"\t\t${0}",
"\t});",
"\t",
"\tapp.decorate(\"${2:$TM_FILENAME_BASE}\", model(app, \"${1}\", schema));",
"\t",
"\tdone();",
"});",
"",
"export default ${2:$TM_FILENAME_BASE};"
]
},
"get-auth": {
"scope": "typescript",
"prefix": "get-auth",
"body": [
"app.get(\"/${1}\", { onRequest: [app.auth, app.userEnabled] }, async (req, res) => {",
"\t${0}",
"});"
]
},
"post-auth": {
"scope": "typescript",
"prefix": "post-auth",
"body": [
"app.post(\"/${1}\", { onRequest: [app.auth, app.userEnabled] }, async (req, res) => {",
"\t${0}",
"});"
]
}
}

View File

@@ -1,21 +0,0 @@
{
"ink-comp": {
"scope": "typescriptreact,javascriptreact",
"prefix": "ink-comp",
"body": [
"import React from \"react\";",
"import { Box } from \"ink\";",
"",
"const ${1:$TM_FILENAME_BASE} = ({}: {}) => {",
"\treturn (",
"\t\t<Box>",
"\t\t\t${0}",
"\t\t</Box>",
"\t);",
"}",
"",
"export default ${1:$TM_FILENAME_BASE};"
]
}
}

View File

@@ -1,3 +0,0 @@
{
"npm.exclude": "**/monitor/**"
}

View File

@@ -38,7 +38,7 @@
"scope": "typescriptreact,javascriptreact",
"prefix": "provider",
"body": [
"import { Component, createContext, useContext } from \"solid-js\";",
"import { ParentComponent, createContext, useContext } from \"solid-js\";",
"",
"const value = () => {",
"\treturn {};",
@@ -48,7 +48,7 @@
"",
"const context = createContext<Value>();",
"",
"export const Provider: Component<{}> = (p) => {",
"export const Provider: ParentComponent<{}> = (p) => {",
"\treturn (",
"\t\t<context.Provider value={value()}>",
"\t\t\t{p.children}",

172
.vscode/tasks.json vendored Normal file
View File

@@ -0,0 +1,172 @@
{
"version": "2.0.0",
"tasks": [
{
"type": "cargo",
"command": "build",
"group": {
"kind": "build",
"isDefault": true
},
"label": "rust: cargo build"
},
{
"type": "cargo",
"command": "fmt",
"label": "rust: cargo fmt"
},
{
"type": "cargo",
"command": "check",
"label": "rust: cargo check"
},
{
"label": "start dev",
"dependsOn": [
"run core",
"yarn: start frontend"
],
"problemMatcher": []
},
{
"type": "shell",
"command": "yarn start",
"label": "yarn: start frontend",
"options": {
"cwd": "${workspaceFolder}/frontend"
},
"presentation": {
"group": "start"
}
},
{
"type": "cargo",
"command": "run",
"label": "run core",
"options": {
"cwd": "${workspaceFolder}/core"
},
"presentation": {
"group": "start"
}
},
{
"type": "cargo",
"command": "run",
"label": "run periphery",
"options": {
"cwd": "${workspaceFolder}/periphery"
}
},
{
"type": "shell",
"command": "cargo install --path . && if pgrep periphery; then pkill periphery; fi && periphery --daemon --config-path ~/.monitor/local.periphery.config.toml",
"label": "run periphery daemon",
"options": {
"cwd": "${workspaceFolder}/periphery"
},
"problemMatcher": []
},
{
"type": "cargo",
"command": "run",
"label": "run cli",
"options": {
"cwd": "${workspaceFolder}/cli"
}
},
{
"type": "cargo",
"command": "run",
"label": "run tests",
"options": {
"cwd": "${workspaceFolder}/tests"
}
},
{
"type": "cargo",
"command": "publish",
"args": ["--allow-dirty"],
"label": "publish monitor types",
"options": {
"cwd": "${workspaceFolder}/lib/types"
}
},
{
"type": "cargo",
"command": "publish",
"label": "publish monitor client",
"options": {
"cwd": "${workspaceFolder}/lib/monitor_client"
}
},
{
"type": "cargo",
"command": "publish",
"label": "publish monitor cli",
"options": {
"cwd": "${workspaceFolder}/cli"
}
},
{
"type": "shell",
"command": "docker compose up -d",
"label": "docker compose up",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose down",
"label": "docker compose down",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose build",
"label": "docker compose build",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose down && docker compose up -d",
"label": "docker compose restart",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose build && docker compose down && docker compose up -d",
"label": "docker compose build and restart",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "docker compose build periphery",
"label": "docker compose build periphery",
"options": {
"cwd": "${workspaceFolder}/tests"
},
"problemMatcher": []
},
{
"type": "shell",
"command": "typeshare ./lib/types --lang=typescript --output-file=./frontend/src/types.ts && typeshare ./core --lang=typescript --output-file=./frontend/src/util/client_types.ts",
"label": "generate typescript types",
"problemMatcher": []
}
]
}

3751
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

14
Cargo.toml Normal file
View File

@@ -0,0 +1,14 @@
[workspace]
members = [
"cli",
"core",
"periphery",
"tests",
"lib/axum_oauth2",
"lib/db_client",
"lib/helpers",
"lib/periphery_client",
"lib/types",
"lib/monitor_client"
]

23
Dockerfile.core Normal file
View File

@@ -0,0 +1,23 @@
# Multi-stage build for monitor core.
# Stage 1: compile the core binary with the full rust toolchain.
FROM rust:latest as builder
WORKDIR /builder
# Copy core plus only the workspace libs it depends on.
COPY ./core ./core
COPY ./lib/types ./lib/types
COPY ./lib/helpers ./lib/helpers
COPY ./lib/db_client ./lib/db_client
COPY ./lib/periphery_client ./lib/periphery_client
COPY ./lib/axum_oauth2 ./lib/axum_oauth2
RUN cd core && cargo build --release
# Stage 2: minimal distroless runtime with the binary and frontend assets.
# NOTE(review): assumes ./frontend/build exists before `docker build` — confirm build order.
FROM gcr.io/distroless/cc
COPY ./frontend/build /frontend
COPY --from=builder /builder/core/target/release/core /
EXPOSE 9000
CMD ["./core"]

22
Dockerfile.periphery Normal file
View File

@@ -0,0 +1,22 @@
# Multi-stage build for monitor periphery.
# Stage 1: compile the periphery binary with the full rust toolchain.
FROM rust:latest as builder
WORKDIR /builder
# Copy periphery plus only the workspace libs it depends on.
COPY ./periphery ./periphery
COPY ./lib/types ./lib/types
COPY ./lib/helpers ./lib/helpers
RUN cd periphery && cargo build --release
# Stage 2: slim debian runtime; DEPS_INSTALLER selects a script that installs
# the runtime dependencies periphery shells out to.
FROM debian:stable-slim
ARG DEPS_INSTALLER
COPY ./${DEPS_INSTALLER}.sh ./
RUN sh ./${DEPS_INSTALLER}.sh
COPY --from=builder /builder/periphery/target/release/periphery /usr/local/bin/periphery
EXPOSE 8000
# Exec form (consistent with Dockerfile.core): periphery runs as PID 1 and
# receives SIGTERM from `docker stop` directly, instead of being wrapped in
# `/bin/sh -c` (the previous shell-form `CMD "periphery"`), which kept the
# shell as PID 1 and swallowed container stop signals.
CMD ["periphery"]

25
cli/Cargo.toml Normal file
View File

@@ -0,0 +1,25 @@
[package]
name = "monitor_cli"
version = "0.2.4"
edition = "2021"
authors = ["MoghTech"]
description = "monitor cli | tools to setup monitor system"
license = "GPL-3.0-or-later"
[[bin]]
name = "monitor"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = "4.0"
async_timing_util = "0.1.14"
rand = "0.8"
serde = "1.0"
serde_derive = "1.0"
toml = "0.7"
run_command = "0.0.5"
colored = "2"
strum = "0.24"
strum_macros = "0.24"

View File

@@ -1,31 +0,0 @@
{
"name": "@mbecker20/monitor-cli",
"version": "0.0.31",
"description": "A CLI to aid in the setup of monitor",
"author": "mbecker20",
"license": "GPL v3.0",
"bin": "cli.js",
"scripts": {
"start": "yarn build && build/cli.js",
"restart-default": "yarn build && build/cli.js --restart-default",
"build": "vite build && node post-build.mjs && chmod +x build/cli.js",
"update-version": "node update-version.mjs"
},
"dependencies": {
"ink": "^3.2.0",
"ink-link": "^2.0.0",
"ink-spinner": "^4.0.3",
"ink-text-input": "^4.0.3",
"jotai": "^1.6.1",
"meow": "^10.1.2",
"mongoose": "^6.2.7",
"react": "^17.0.2"
},
"devDependencies": {
"@monitor/types": "1.0.0",
"@types/node": "^17.0.21",
"@types/react": "^17.0.40",
"typescript": "^4.6.2",
"vite": "^2.8.6"
}
}

View File

@@ -1,22 +0,0 @@
// Post-build step for the bundled CLI.
// 1) Prepend a node shebang and patch vite's CJS interop shim for `meow`
//    (an ESM-only package) back into a plain dynamic import so the bundle
//    actually runs under node.
import { readFileSync, writeFileSync } from "fs";
const contents = readFileSync("build/cli.js").toString();
writeFileSync(
"build/cli.js",
"#!/usr/bin/env node\n" +
contents.replace(
`const meow = await Promise.resolve().then(function() {
return /* @__PURE__ */ _interopNamespace(require("meow"));
});`,
'const meow = await import("meow");'
)
);
// 2) Emit a trimmed package.json alongside the bundle for publishing:
//    strip devDependencies and scripts, keep runtime dependencies.
const pkgjson = JSON.parse(readFileSync("package.json"));
delete pkgjson.devDependencies;
delete pkgjson.scripts;
// delete pkgjson.dependencies["@monitor/util"];
// pkgjson.bin = "cli.js";
writeFileSync("build/package.json", JSON.stringify(pkgjson, undefined, 2));

View File

@@ -1,32 +0,0 @@
* intro
* is docker installed?
* starting core or periphery?
* core
* mongo -- should it be started?
* yes
* name, port, volume, restart
* no -- is it running locally? add as deployment?
* yes
* name, port, volume, restart
* no
* provide url
* registry -- should it be started?
* yes
* name, port, volume, restart
* no -- is it running locally? add as deployment?
* yes
* name, port, volume, restart
* no
* provide url
* monitor config
* name, secrets volume, use host network, port
* periphery
* install

View File

@@ -1,128 +0,0 @@
import React, { ReactNode, useState } from "react";
import { Newline, render, Text, Box } from "ink";
import init from "./util/init";
import Intro from "./components/Intro";
import Docker from "./components/docker/Docker";
import IsPeriphery from "./components/IsPeriphery";
import Confirm from "./components/Confirm";
import { createUseConfig, createUseSequence } from "./util/state";
import { Config } from "./types";
import Mongo from "./components/deployment-config/Mongo";
import CoreOrPeriphery from "./components/core-or-periphery/CoreOrPeriphery";
import { bound } from "./util/helpers/general";
import Setup from "./components/Setup";
import Restart from "./components/Restart";
// One entry in the wizard: a title for the header plus the view to render.
type Page = {
title: string;
view: ReactNode;
};
// Shared hooks: page index within the main sequence, and the accumulating
// setup config filled in by the individual pages.
export const useMainSequence = createUseSequence();
export const useConfig = createUseConfig<Config>({});
// Bootstrap: init() parses CLI flags and probes for docker, then the ink
// app is rendered with that information.
init().then(({ flags, dockerInstalled }) => {
const App = () => {
const { current } = useMainSequence();
// core/periphery choice; pre-answered by --core / --periphery flags.
const [periphery, setPeriphery] = useState<boolean | undefined>(
flags.core ? false : flags.periphery ? true : undefined
);
// --restart / --restart-default bypass the wizard entirely and render
// only the restart page.
if (flags.restart || flags.restartDefault) {
return (
<Box flexDirection="column">
<Newline />
<Box>
<Text color="blue" bold underline>
Monitor CLI{" "}
</Text>
<Box marginLeft={2}>
<Text color="gray">restart {`(1 of 1)`}</Text>
</Box>
</Box>
<Newline />
<Restart
useDefaults={flags.restartDefault ? true : false}
defaultMongoUrl={flags.mongoUrl}
defaultPullLatest={flags.pullLatest}
/>
</Box>
);
}
const corePages: Page[] = [
{
title: "mongo config",
view: <Mongo />,
},
{
title: "monitor core config",
view: <CoreOrPeriphery type="core" />,
},
];
const peripheryPages: Page[] = [
{
title: "periphery config",
view: <CoreOrPeriphery type="periphery" />,
},
];
// Assemble the page list: falsy entries (skipped pages) are filtered out
// and the nested core/periphery arrays are flattened below.
const pages = [
{
title: "intro",
view: <Intro />,
},
dockerInstalled
? false
: {
title: "docker intro",
view: <Docker />,
},
{
title: "restart",
view: <Restart />,
},
!flags.core && !flags.periphery
? {
title: "core or periphery",
view: <IsPeriphery setPeriphery={setPeriphery} />,
}
: false,
periphery === true && peripheryPages,
periphery === false && corePages,
{
title: "confirm config",
view: <Confirm />,
},
{
title: "setup",
view: <Setup />,
},
]
.filter((val) => (val ? true : false))
.flat();
// Clamp the sequence index so it can never read past the page list.
const { title, view } = pages[bound(current, 0, pages.length - 1)] as Page;
return (
<Box flexDirection="column">
<Newline />
<Box>
<Text color="blue" bold underline>
Monitor CLI{" "}
</Text>
<Box marginLeft={2}>
<Text color="gray">
{title} {`(${current + 1} of ${pages.length})`}
</Text>
</Box>
</Box>
<Newline />
{view}
</Box>
);
};
render(<App />);
});

View File

@@ -1,63 +0,0 @@
import React, { Fragment } from "react";
import { useConfig, useMainSequence } from "../cli";
import EnterToContinue from "./util/EnterToContinue";
import { useEsc } from "../util/hooks";
import { Box, Newline, Text } from "ink";
import View from "./deployment-config/View";
import ViewCoreOrPeriphery from "./core-or-periphery/ViewCoreOrPeriphery";
// Confirmation page: read-only summary of every configured section
// (mongo, core, periphery — the registry section is currently disabled)
// shown immediately before setup runs. Enter advances to the setup page.
const Confirm = () => {
const { config } = useConfig();
const { next, prev } = useMainSequence();
// Esc returns to the previous configuration page.
useEsc(prev);
return (
<Box flexDirection="column">
{config.mongo && (
<Fragment>
<Text color="cyan" bold>
mongo:
</Text>
<View url={config.mongo.url} config={config.mongo.startConfig} />
<Newline />
</Fragment>
)}
{/* {config.registry && (
<Fragment>
<Text color="cyan" bold>
registry:
</Text>
<View
url={config.registry.url!}
config={config.registry.startConfig}
/>
<Newline />
</Fragment>
)} */}
{config.core && (
<Fragment>
<Text color="cyan" bold>
monitor core:
</Text>
<ViewCoreOrPeriphery config={config.core} />
<Newline />
</Fragment>
)}
{config.periphery && (
<Fragment>
<Text color="cyan" bold>
monitor periphery:
</Text>
<ViewCoreOrPeriphery config={config.periphery} />
<Newline />
</Fragment>
)}
<EnterToContinue pressEnterTo="begin setup" onEnter={next} />
</Box>
);
};
export default Confirm;

View File

@@ -1,23 +0,0 @@
import React from "react";
import { Box, Newline, Text } from "ink";
import EnterToContinue from "./util/EnterToContinue";
import { useMainSequence } from "../cli";
// Landing page of the CLI: a one-line description of monitor.
// Enter advances to the next page in the main sequence.
const Intro = () => {
const { next } = useMainSequence();
return (
<Box flexDirection="column">
<Text>
this is a CLI to setup{" "}
<Text color="cyan" bold>
monitor
</Text>
, a tool to manage application deployment.
</Text>
<Newline />
<EnterToContinue onEnter={next} />
</Box>
);
};
export default Intro;

View File

@@ -1,40 +0,0 @@
import React, { useEffect } from "react";
import { Text } from "ink";
import LabelledSelector from "./util/LabelledSelector";
import { useConfig, useMainSequence } from "../cli";
import { useEsc } from "../util/hooks";
// Page asking whether this machine runs monitor core or monitor periphery.
// The answer is lifted to the parent via setPeriphery, which then splices
// the matching config pages into the main sequence.
const IsPeriphery = ({
setPeriphery,
}: {
setPeriphery: (periphery: boolean) => void;
}) => {
const { next, prev } = useMainSequence();
const { setMany } = useConfig();
// Clear any previously-entered sections on (re)entry so changing the
// core/periphery answer cannot leave stale config behind.
useEffect(() => {
setMany(
["core", undefined],
["periphery", undefined],
["mongo", undefined],
);
}, []);
// Esc steps back to the previous page.
useEsc(prev);
return (
<LabelledSelector
label={
<Text>
Are you setting up <Text color="cyan">monitor core</Text> or{" "}
<Text color="red">monitor periphery</Text>?
</Text>
}
items={["core", "periphery"]}
onSelect={(item) => {
setPeriphery(item === "periphery");
next();
}}
vertical
/>
);
};
export default IsPeriphery;

View File

@@ -1,185 +0,0 @@
import React, { Fragment, useEffect } from "react";
import LabelledSelector from "./util/LabelledSelector";
import { useMainSequence } from "../cli";
import { useStore } from "../util/hooks";
import { Box, Newline, Text } from "ink";
import { Input } from "./util/Input";
import EnterToContinue from "./util/EnterToContinue";
import { CommandLogError } from "@monitor/types";
import { restart, RestartError } from "../util/helpers/restart";
import YesNo from "./util/YesNo";
// State machine for the restart page. `stage` drives which prompt renders:
// query -> mongo -> pullLatest -> confirm -> installing -> finished | error.
type State = {
stage:
| "query"
| "mongo"
| "pullLatest"
| "confirm"
| "installing"
| "finished"
| "error";
// whether to pull the latest core image before restarting
pullLatest?: boolean;
// mongo connection string core should use after the restart
mongoUrl?: string;
// command output once the restart succeeds
result?: CommandLogError;
// failure details if the restart errors out
error?: RestartError;
};
// Labels for the initial "what are you trying to do?" selector.
const DEPLOY_CORE_OR_PERIPHERY = "deploy monitor core or periphery";
const RESTART_CORE = "restart monitor core";
// Page that either hands off to the deploy wizard or restarts an existing
// monitor core. CLI flags can pre-fill answers; useDefaults skips every
// prompt and restarts immediately with the default local mongo url.
const Restart = ({
useDefaults,
defaultMongoUrl,
defaultPullLatest,
}: {
useDefaults?: boolean;
defaultMongoUrl?: string;
defaultPullLatest?: boolean;
}) => {
const { next, prev } = useMainSequence();
// The starting stage depends on how much was pre-answered via flags.
const [config, setConfig, setMany] = useStore<State>({
stage:
useDefaults
? "installing"
: defaultMongoUrl
? "pullLatest"
: "query",
mongoUrl: useDefaults
? "mongodb://127.0.0.1:27017/monitor"
: defaultMongoUrl,
pullLatest: useDefaults ? false : defaultPullLatest,
});
const { stage, mongoUrl, pullLatest, result, error } = config;
// Run the restart once we reach "installing"; exit the process after
// either terminal stage. Non-null assertions are safe because both
// values are always set before "installing" is reachable.
useEffect(() => {
if (stage === "installing") {
restart({ mongoUrl: mongoUrl!, pullLatest: pullLatest! }, (err) =>
setMany(["stage", "error"], ["error", err])
).then((success) => {
if (success) {
setMany(["stage", "finished"], ["result", success]);
}
});
} else if (stage === "finished" || stage === "error") {
process.exit();
}
}, [stage]);
// Entry selector: choosing "deploy" returns control to the main sequence.
if (stage === "query") {
return (
<LabelledSelector
label="What are you trying to do?"
items={[DEPLOY_CORE_OR_PERIPHERY, RESTART_CORE]}
onSelect={(option) => {
switch (option) {
case DEPLOY_CORE_OR_PERIPHERY:
next();
break;
case RESTART_CORE:
setConfig("stage", "mongo");
break;
}
}}
onEsc={prev}
vertical
/>
);
} else {
return (
<Box flexDirection="column">
{stage === "mongo" && (
<Text color="green">
mongo url:{" "}
<Text color="white">
<Input
initialValue={mongoUrl || "mongodb://127.0.0.1:27017/monitor"}
onSubmit={(mongoUrl) =>
setMany(["stage", "pullLatest"], ["mongoUrl", mongoUrl])
}
onEsc={() => setConfig("stage", "query")}
/>
</Text>
</Text>
)}
{mongoUrl && stage !== "mongo" && (
<Text color="green">
mongo url: <Text color="white">{mongoUrl}</Text>
</Text>
)}
{stage === "pullLatest" && (
<YesNo
label="pull latest core?"
onSelect={(res) => {
setMany(["stage", "confirm"], ["pullLatest", res === "yes"]);
}}
onEsc={() => setConfig("stage", "mongo")}
/>
)}
{pullLatest !== undefined && stage !== "pullLatest" && (
<Text color="green">
pull latest: <Text color="white">{pullLatest ? "yes" : "no"}</Text>
</Text>
)}
<Newline />
{stage === "confirm" && (
<EnterToContinue
onEnter={() => {
setConfig("stage", "installing");
}}
onEsc={() => setConfig("stage", "pullLatest")}
pressEnterTo="restart monitor"
/>
)}
{(stage === "installing" || stage === "error") && (
<Fragment>
<Text>restarting...</Text>
</Fragment>
)}
{result && (
<Fragment>
<Text color="green">finished restarting</Text>
<Newline />
<Box flexDirection="column" marginLeft={2}>
<Text color="green">
command: <Text color="white">{result.command}</Text>
</Text>
{result.log.stderr ? (
<Text color="red">
stderr: <Text color="white">{result.log.stderr}</Text>
</Text>
) : undefined}
{result.log.stdout ? (
<Text color="blue">
stdout: <Text color="white">{result.log.stdout}</Text>
</Text>
) : undefined}
</Box>
<Newline />
</Fragment>
)}
{error && (
<Fragment>
<Newline />
<Text color="red">error restarting</Text>
<Newline />
<Text>{error.message}</Text>
<Text>{error.error}</Text>
<Newline />
</Fragment>
)}
</Box>
);
}
};
export default Restart;

View File

@@ -1,166 +0,0 @@
import React, { Fragment, useEffect, useState } from "react";
import { Box, Newline, Text } from "ink";
import { useConfig } from "../cli";
import deploy, { Stage, Update } from "../util/helpers/deploy";
import { Config } from "../types";
// Final page: runs the actual deploy and streams per-stage progress updates.
// Each update from deploy() is appended together with a synthesized "next
// stage starting" entry; the process exits once finished or on error.
const Setup = () => {
const { config } = useConfig();
const [updates, setUpdates] = useState<Update[]>([getInitialUpdate(config)]);
const [error, setError] = useState<string>();
const [finished, setFinished] = useState(false);
useEffect(() => {
deploy(config, (update) =>
setUpdates((updates) => {
// Pair the completed update with the announcement of the next
// stage (filter drops the undefined after the last stage).
const newUpdates = [update, getNextUpdate(update)].filter(
(val) => val
) as Update[];
return [...updates, ...newUpdates];
})
)
.then(() => setFinished(true))
.catch((err) => setError(err));
}, []);
// Exit the process on either terminal outcome so ink tears down.
useEffect(() => {
if (finished) process.exit();
}, [finished]);
useEffect(() => {
if (error) process.exit();
}, [error]);
return (
<Box flexDirection="column">
<Text>
setting up{" "}
{config.core ? (
<Text color="cyan">monitor core</Text>
) : (
<Text color="red">monitor periphery</Text>
)}
...
</Text>
<Newline />
{updates.map(({ stage, result, description }, i) => (
<Fragment key={i}>
<Text>
{description}{" "}
<Text color="gray">
({getStageNumber(config, stage)} of {getTotalSteps(config)})
</Text>
</Text>
{result && (
<Box marginLeft={2} flexDirection="column">
<Text color="green">
command: <Text color="white">{result.command}</Text>
</Text>
{result.log.stderr ? (
<Text color="red">
stderr: <Text color="white">{result.log.stderr}</Text>
</Text>
) : undefined}
{result.log.stdout ? (
<Text color="green">
stdout: <Text color="white">{result.log.stdout}</Text>
</Text>
) : undefined}
</Box>
)}
</Fragment>
))}
{finished && (
<Fragment>
<Newline />
<Text>
<Text color={config.core ? "cyan" : "red"} bold>
{config.core ? "monitor core" : "monitor periphery"}
</Text>{" "}
setup <Text color="green">finished</Text>.
</Text>
</Fragment>
)}
{error && (
<Fragment>
<Newline />
<Text>
setup encountered an <Text color="red">error</Text>:
</Text>
<Box marginLeft={2}>
<Text>{error}</Text>
</Box>
<Newline />
<Text>
process{" "}
<Text color="red" bold>
exiting
</Text>
.
</Text>
</Fragment>
)}
<Newline />
</Box>
);
};
// First progress entry to display: a core install begins with mongo (when a
// local mongo start config exists) or with core itself; a periphery install
// begins directly with periphery.
function getInitialUpdate(config: Config): Update {
  if (!config.core) {
    return {
      stage: "periphery",
      description: "starting monitor periphery",
    };
  }
  return config.mongo?.startConfig
    ? { stage: "mongo", description: "starting mongo" }
    : { stage: "core", description: "starting monitor core" };
}
// Successor of a completed stage in the core setup flow
// (mongo -> core -> docs); docs and periphery have no successor.
function getNextUpdate({ stage }: Update): Update | undefined {
  if (stage === "mongo") {
    return { stage: "core", description: "starting monitor core..." };
  }
  if (stage === "core") {
    return { stage: "docs", description: "adding configurations to db..." };
  }
  return undefined;
}
// 1-based display index of `stage` in the progress counter. Starting mongo
// locally inserts an extra step before core/docs; mongo and periphery are
// always step 1. Unknown stages yield undefined, matching the original
// switch with no default.
function getStageNumber(config: Config, stage: Stage) {
  const mongoStep = config.mongo?.startConfig ? 1 : 0;
  if (stage === "mongo" || stage === "periphery") {
    return 1;
  }
  if (stage === "core") {
    return 1 + mongoStep;
  }
  if (stage === "docs") {
    return 2 + mongoStep;
  }
}
// Total step count shown in the progress counter: periphery setup is one
// step; core setup is core + docs, plus one more when mongo starts locally.
function getTotalSteps(config: Config) {
  return config.periphery ? 1 : 2 + (config.mongo?.startConfig ? 1 : 0);
}
export default Setup;

View File

@@ -1,222 +0,0 @@
import React, { Fragment } from "react";
import { Box, Newline, Text } from "ink";
import { join, resolve } from "path";
import { useConfig, useMainSequence } from "../../cli";
import { useEsc, useStore } from "../../util/hooks";
import {
DEFAULT_PERIPHERY_PORT,
DEFAULT_PORT,
RESTART_MODES,
} from "../../config";
import EnterToContinue from "../util/EnterToContinue";
import { ControlledInput } from "../util/Input";
import NumberInput from "../util/NumberInput";
import { CoreOrPeripheryConfig } from "../../types";
import LabelledSelector from "../util/LabelledSelector";
import {
noTrailingSlash,
toDashedName,
trailingSlash,
} from "../../util/helpers/general";
// Stages of the core/periphery config form, in prompt order. The "host"
// stage only appears for core (periphery goes sysroot -> port directly).
type Stage =
| "name"
| "secret"
| "sysroot"
| "host"
| "port"
| "restart"
| "confirm";
// Staged form collecting the shared core/periphery deployment settings:
// container name, secrets folder, system root, (core only) host address,
// port, and restart policy. On confirm the result is written into the
// shared config under `type` and the sequence advances.
const CoreOrPeriphery = ({ type }: { type: "core" | "periphery" }) => {
const { set } = useConfig();
const { next, prev } = useMainSequence();
const isCore = type === "core";
const [config, setConfig, setMany] = useStore<
Partial<CoreOrPeripheryConfig> & { stage: Stage }
>({
stage: "name",
name: isCore ? "monitor-core" : "monitor-periphery",
});
const { stage, name, secretVolume, host, port, restart, sysroot } = config;
// Esc walks one stage backwards (mirrors the forward order above);
// leaving "confirm" also clears the restart answer so it can be re-picked.
useEsc(() => {
switch (stage) {
case "name":
prev();
break;
case "secret":
setConfig("stage", "name");
break;
case "sysroot":
setConfig("stage", "secret");
break;
case "host":
setConfig("stage", "sysroot");
break;
case "port":
setMany(["stage", isCore ? "host" : "sysroot"]);
break;
case "restart":
setMany(["stage", "port"]);
break;
case "confirm":
setMany(["stage", "restart"], ["restart", undefined]);
break;
}
});
return (
<Box flexDirection="column">
<Text color="green">
name:{" "}
<Text color="white">
{stage === "name" ? (
<ControlledInput
value={name!}
onChange={(name) => setConfig("name", name)}
onSubmit={(name) => {
setMany(["stage", "secret"], ["name", name]);
}}
/>
) : (
name
)}
</Text>
</Text>
{stage === "secret" && (
<Text color="green">
secrets folder:{" "}
<Text color="white">
<ControlledInput
value={secretVolume || join(resolve("."), "/secrets")}
onChange={(volume) => setConfig("secretVolume", volume)}
onSubmit={(volume) => {
setMany(["stage", "sysroot"], ["secretVolume", volume]);
}}
/>
</Text>
</Text>
)}
{(secretVolume || undefined) && stage !== "secret" && (
<Text color="green">
secrets folder: <Text color="white">{secretVolume}</Text>
</Text>
)}
{stage === "sysroot" && (
<Text color="green">
system root folder:{" "}
<Text color="white">
<ControlledInput
value={sysroot || join(resolve("."), "/monitor")}
onChange={(sysroot) => setConfig("sysroot", sysroot)}
onSubmit={(sysroot) => {
setMany(["stage", isCore ? "host" : "port"], ["sysroot", trailingSlash(sysroot)]);
}}
/>
</Text>
</Text>
)}
{sysroot && stage !== "sysroot" && (
<Text color="green">
system root: <Text color="white">{sysroot}</Text>
</Text>
)}
{stage === "host" && (
<Text color="green">
host address:{" "}
<Text color="white">
<ControlledInput
value={host || "http://localhost:9000"}
onChange={(host) => setConfig("host", host)}
onSubmit={(host) => {
setMany(["stage", "port"], ["host", noTrailingSlash(host)]);
}}
/>
</Text>
</Text>
)}
{host && stage !== "host" && (
<Text color="green">
host address: <Text color="white">{host}</Text>
</Text>
)}
{stage === "port" && (
<Text color="green">
port:{" "}
<Text color="white">
<NumberInput
initialValue={
port ||
(type === "core" ? DEFAULT_PORT : DEFAULT_PERIPHERY_PORT)
}
onSubmit={(port) => {
setMany(["stage", "restart"], ["port", port]);
}}
/>
</Text>
</Text>
)}
{port && stage !== "port" && (
<Text color="green">
port: <Text color="white">{port}</Text>
</Text>
)}
{stage === "restart" && (
<LabelledSelector
label="restart: "
items={RESTART_MODES}
onSelect={(restart) => {
setMany(
["stage", "confirm"],
[
"restart",
restart === "don't restart" ? "no" : toDashedName(restart),
]
);
}}
/>
)}
{restart && (
<Text color="green">
restart: <Text color="white">{restart}</Text>
</Text>
)}
{stage === "confirm" && (
<Fragment>
<Newline />
<EnterToContinue
onEnter={() => {
set(type, {
name: name!,
secretVolume: secretVolume!,
port: Number(port),
restart: restart!,
sysroot: sysroot!,
host,
});
next();
}}
/>
</Fragment>
)}
</Box>
);
};
export default CoreOrPeriphery;

View File

@@ -1,31 +0,0 @@
import React from "react";
import { Box, Text } from "ink";
import { CoreOrPeripheryConfig } from "../../types";
// Read-only summary of a core / periphery config, rendered on the
// confirmation page (one labelled line per field).
const ViewCoreOrPeriphery = ({
config: { name, secretVolume, port, restart, sysroot },
}: {
config: CoreOrPeripheryConfig;
}) => {
return (
<Box flexDirection="column" marginLeft={2}>
<Text color="green">
name: <Text color="white">{name}</Text>
</Text>
<Text color="green">
secrets folder: <Text color="white">{secretVolume}</Text>
</Text>
<Text color="green">
system root: <Text color="white">{sysroot}</Text>
</Text>
<Text color="green">
port: <Text color="white">{port}</Text>
</Text>
<Text color="green">
restart: <Text color="white">{restart}</Text>
</Text>
</Box>
);
};
export default ViewCoreOrPeriphery;

View File

@@ -1,173 +0,0 @@
import React, { Fragment } from "react";
import { Box, Newline, Text } from "ink";
import { useEsc, useStore } from "../../util/hooks";
import EnterToContinue from "../util/EnterToContinue";
import LabelledSelector from "../util/LabelledSelector";
import YesNo from "../util/YesNo";
import { toDashedName } from "../../util/helpers/general";
import { Input } from "../util/Input";
import NumberInput from "../util/NumberInput";
import { RESTART_MODES } from "../../config";
// Form state for configuring a helper deployment (mongo or registry):
// `stage` is the current prompt; volume === false means "no local mount",
// undefined means "not asked yet".
type DeploymentConfig = {
stage: "name" | "port" | "volume" | "restart" | "confirm";
name: string;
port?: number;
volume?: string | false;
restart?: string;
};
// Staged form for a supporting container: name -> port -> optional volume
// mount -> restart policy -> confirm. The completed config is handed to
// the parent via onFinish; Esc from the first stage calls back().
const DeploymentConfig = ({
deployment,
onFinish,
back,
}: {
deployment: "mongo-db" | "registry";
onFinish: (config: DeploymentConfig) => void;
back: () => void;
}) => {
const [config, setConfig, setMany] = useStore<DeploymentConfig>({
stage: "name",
name: deployment,
});
const { stage, name, port, volume, restart } = config;
// Esc walks one stage backwards; the volume stage needs two presses when
// a mount path is being edited (first clears the path, then goes back).
useEsc(() => {
switch (stage) {
case "name":
back();
break;
case "port":
setConfig("stage", "name");
break;
case "volume":
if (volume) {
setConfig("volume", undefined);
} else {
setMany(["stage", "port"], ["volume", undefined]);
}
break;
case "restart":
setMany(
["stage", "volume"],
volume === false ? ["volume", undefined] : ["volume", volume]
);
break;
case "confirm":
setMany(["stage", "restart"], ["restart", undefined]);
break;
}
});
return (
<Box flexDirection="column">
<Text color="green">
name:{" "}
<Text color="white">
{stage === "name" ? (
<Input
initialValue={name}
onSubmit={(name) => {
setMany(["stage", "port"], ["name", name]);
}}
/>
) : (
name
)}
</Text>
</Text>
{stage === "port" && (
<Text color="green">
port:{" "}
<Text color="white">
<NumberInput
initialValue={port || (deployment === "mongo-db" ? 27017 : 5000)}
onSubmit={(port) => {
setMany(["stage", "volume"], ["port", port]);
}}
/>
</Text>
</Text>
)}
{port && stage !== "port" && (
<Text color="green">
port: <Text color="white">{port}</Text>
</Text>
)}
{stage === "volume" && volume === undefined && (
<YesNo
label={
<Text>
mount data on local filesystem? this is used to{" "}
<Text color="green">persist data</Text> between{" "}
<Text color="green">container restarts</Text>.
</Text>
}
onSelect={(use) => {
if (use === "yes") {
setConfig("volume", `~/${name}`);
} else {
setMany(["stage", "restart"], ["volume", false]);
}
}}
vertical
noYes
/>
)}
{volume !== undefined && (
<Text color="green">
mount folder:{" "}
<Text color="white">
{stage === "volume" ? (
<Input
initialValue={volume as string}
onSubmit={(volume) => {
setMany(["stage", "restart"], ["volume", volume]);
}}
/>
) : (
volume || "don't use"
)}
</Text>
</Text>
)}
{stage === "restart" && (
<LabelledSelector
label="restart: "
items={RESTART_MODES}
onSelect={(restart) => {
setMany(
["stage", "confirm"],
[
"restart",
restart === "don't restart" ? "no" : toDashedName(restart),
]
);
}}
/>
)}
{restart && (
<Text color="green">
restart: <Text color="white">{restart}</Text>
</Text>
)}
{stage === "confirm" && (
<Fragment>
<Newline />
<EnterToContinue onEnter={() => onFinish(config)} />
</Fragment>
)}
</Box>
);
};
export default DeploymentConfig;

View File

@@ -1,111 +0,0 @@
import React from "react";
import { Box, Newline, Text } from "ink";
import { useConfig, useMainSequence } from "../../cli";
import YesNo from "../util/YesNo";
import DeploymentConfig from "./DeploymentConfig";
import EnterToContinue from "../util/EnterToContinue";
import { DEFAULT_MONGO_URL } from "../../config";
import { useEsc, useStore } from "../../util/hooks";
import { Input } from "../util/Input";
import { toDashedName } from "../../util/helpers/general";
// Local state for the mongo page: setup === true -> start mongo locally
// via DeploymentConfig; false -> just collect a connection url;
// undefined -> still asking.
type State = {
setup?: boolean;
mongoUrl: string;
confirm: boolean;
}
// Page that configures how core reaches mongo: either spin up a local
// mongo container (name/port/volume/restart) or point at an existing
// instance by url. The result is stored under config.mongo.
const Mongo = () => {
const { set } = useConfig();
const { next, prev } = useMainSequence();
const [state, setState, setMany] = useStore<State>({
mongoUrl: DEFAULT_MONGO_URL,
confirm: false,
})
const { setup, mongoUrl, confirm } = state;
// Esc: from the url/confirm branch step back one sub-stage; from the
// initial question go back to the previous page.
useEsc(() => {
if (setup === false) {
if (confirm) {
setState("confirm", false);
} else {
setState("setup", undefined);
}
} else if (setup === undefined) {
prev();
}
})
if (setup === undefined) {
return (
<YesNo
label={
<Text>
do you need to set up{" "}
<Text color="cyan" bold>
mongo db
</Text>{" "}
locally?{" "}
</Text>
}
onSelect={(res) => setState("setup", res === "yes")}
vertical
/>
);
}
if (setup) {
return (
<DeploymentConfig
deployment="mongo-db"
back={() => setState("setup", undefined)}
onFinish={({ name, port, volume, restart }) => {
set("mongo", {
url: `mongodb://${toDashedName(name)}:${port}/monitor`,
startConfig: {
name,
port: port as number,
volume: volume as string | false,
restart: restart as string,
},
});
next();
}}
/>
);
} else {
if (confirm) {
return (
<Box flexDirection="column">
<Text color="green">
mongo url: <Text color="white">{mongoUrl}</Text>
</Text>
<Newline />
<EnterToContinue
onEnter={() => {
set("mongo", { url: mongoUrl });
next();
}}
/>
</Box>
);
} else {
return (
<Text color="green">
mongo url:{" "}
<Text color="white">
<Input
initialValue={mongoUrl}
onSubmit={(mongoUrl) => {
setMany(["mongoUrl", mongoUrl], ["confirm", true]);
}}
/>
</Text>
</Text>
);
}
}
};
export default Mongo;

View File

@@ -1,108 +0,0 @@
// import React from "react";
// import { Box, Newline, Text } from "ink";
// import { useConfig, useMainSequence } from "../../cli";
// import YesNo from "../util/YesNo";
// import DeploymentConfig from "./DeploymentConfig";
// import EnterToContinue from "../util/EnterToContinue";
// import { DEFAULT_REGISTRY_URL } from "../../config";
// import { useEsc, useStore } from "../../util/hooks";
// import { Input } from "../util/Input";
// import { toDashedName } from "../../util/helpers/general";
// type State = {
// setup?: boolean;
// regUrl: string;
// confirm: boolean;
// };
// const Registry = () => {
// const { set } = useConfig();
// const { next, prev } = useMainSequence();
// const [state, setState, setMany] = useStore<State>({
// regUrl: DEFAULT_REGISTRY_URL,
// confirm: false,
// });
// const { setup, regUrl, confirm } = state;
// useEsc(() => {
// if (setup === false) {
// if (confirm) {
// setState("confirm", false);
// } else {
// setState("setup", undefined);
// }
// } else if (setup === undefined) {
// prev();
// }
// });
// if (setup === undefined) {
// return (
// <YesNo
// label={
// <Text>
// do you need to set up a{" "}
// <Text color="cyan" bold>
// docker registry
// </Text>{" "}
// locally?{" "}
// </Text>
// }
// onSelect={(res) => setState("setup", res === "yes")}
// vertical
// />
// );
// }
// if (setup) {
// return (
// <DeploymentConfig
// deployment="registry"
// back={() => setState("setup", undefined)}
// onFinish={({ name, port, volume, restart }) => {
// set("registry", {
// url: `http://${toDashedName(name)}:${port}/`,
// startConfig: {
// name,
// port: Number(port),
// volume: volume as string | false,
// restart: restart as string,
// },
// });
// next();
// }}
// />
// );
// } else {
// if (confirm) {
// return (
// <Box flexDirection="column">
// <Text color="green">
// registry url: <Text color="white">{regUrl}</Text>
// </Text>
// <Newline />
// <EnterToContinue
// onEnter={() => {
// set("registry", { url: regUrl });
// next();
// }}
// />
// </Box>
// );
// } else {
// return (
// <Text color="green">
// registry url:{" "}
// <Text color="white">
// <Input
// initialValue={regUrl}
// onSubmit={(regUrl) => setMany(["regUrl", regUrl], ["confirm", true])}
// />
// </Text>
// </Text>
// );
// }
// }
// };
// export default Registry;

View File

@@ -1,32 +0,0 @@
import React, { Fragment } from "react";
import { Box, Text } from "ink";
import { StartConfig } from "../../types";
const View = ({ url, config }: { url: string, config?: StartConfig }) => {
const { name, port, volume, restart } = config || { name: "", port: "", volume: "", restart: "" };
return (
<Box flexDirection="column" marginLeft={2}>
<Text color="green">
url: <Text color="white">{url}</Text>
</Text>
{config && (
<Fragment>
<Text color="green">
name: <Text color="white">{name}</Text>
</Text>
<Text color="green">
port: <Text color="white">{port}</Text>
</Text>
<Text color="green">
mount folder: <Text color="white">{volume || "don't use"}</Text>
</Text>
<Text color="green">
restart: <Text color="white">{restart}</Text>
</Text>
</Fragment>
)}
</Box>
);
}
export default View;

View File

@@ -1,66 +0,0 @@
import React, { Fragment, useEffect } from "react";
import { Newline, Text } from "ink";
import Link from "ink-link";
/**
 * Terminal message shown when docker cannot be reached from the CLI.
 * Renders install links, then exits the process (the effect runs after
 * the first render, so the message is printed before exit).
 */
const Docker = () => {
  useEffect(() => {
    // this screen is informational only - bail out of the CLI once rendered
    process.exit();
  }, []);
  // if (installDocker === undefined) {
  //   return (
  //     <YesNo
  //       label={
  //         <Text>
  //           Docker does not appear to be accessible. Would you like to{" "}
  //           <Text color="green">install docker</Text>? This will begin the{" "}
  //           <Text color="cyan" bold>
  //             Docker Install Helper
  //           </Text>
  //           . Docker is necessary to proceed.
  //         </Text>
  //       }
  //       onSelect={(res) => {
  //         setInstallDocker(res === "yes");
  //       }}
  //       vertical
  //     />
  //   );
  // } else if (installDocker) {
  //   return <InstallDocker next={next} />;
  // } else {
  //   return (
  //     <Fragment>
  //       <Text>
  //         install docker and restart the CLI to proceed. make sure that docker
  //         is accessible on the command line{" "}
  //         <Text color="green">without using sudo</Text>.
  //       </Text>
  //       <Newline />
  //     </Fragment>
  //   );
  // }
  return (
    <Fragment>
      <Text>
        {/* FIX: message previously read "docker appears appears to be inaccessable." */}
        docker appears to be inaccessible.{" "}
        <Link url="https://docs.docker.com/engine/install/">
          <Text color="blue" bold>
            install docker
          </Text>
        </Link>{" "}
        and restart the CLI to proceed. make sure that docker is accessible on
        the command line{" "}
        <Link url="https://docs.docker.com/engine/install/linux-postinstall/">
          <Text color="green" bold>
            without using sudo
          </Text>
        </Link>
        .
      </Text>
      <Newline />
    </Fragment>
  );
};
export default Docker;

View File

@@ -1,127 +0,0 @@
import React, { Fragment, useState } from "react";
import { Box, Newline, Text } from "ink";
import YesNo from "../util/YesNo";
import { installDockerUbuntu, InstallLog } from "../../util/helpers/docker";
import { useEnter, useKey } from "../../util/hooks";
import Spinner from "ink-spinner";
const InstallDocker = ({ next }: { next: () => void }) => {
const [stage, setStage] = useState<
"sysCtlEnable" | "confirm" | "install" | "installing" | "finish" | "error"
>("sysCtlEnable");
const [sysCtlEnable, setSysCtlEnable] = useState<"yes" | "no">();
const [logs, setLogs] = useState<InstallLog[]>([]);
useEnter(async () => {
switch (stage) {
case "confirm":
setStage("installing");
const error = await installDockerUbuntu(
(log) => setLogs((logs) => [...logs, log]),
sysCtlEnable === "yes"
);
if (error) {
// there was some error
setStage("error");
} else {
setStage("finish");
}
break;
case "finish":
next();
break;
case "error":
setSysCtlEnable(undefined);
setStage("sysCtlEnable");
break;
}
});
useKey("leftArrow", () => {
switch (stage) {
case "confirm":
setSysCtlEnable(undefined);
setStage("sysCtlEnable");
break;
}
});
return (
<Box flexDirection="column">
<Text color="cyan" bold>
Docker Install Helper
</Text>
<Newline />
{stage === "sysCtlEnable" && sysCtlEnable === undefined && (
<YesNo
label="start docker on system start (boot)?"
labelColor="white"
onSelect={(res) => {
setSysCtlEnable(res);
setStage("confirm");
}}
vertical
/>
)}
{sysCtlEnable !== undefined && (
<Text color="green">
start on boot: <Text color="white">{sysCtlEnable}</Text>
</Text>
)}
<Newline />
{stage === "confirm" && (
<Text>
press <Text color="green">enter</Text> to install docker. you may have
to provide your password.
</Text>
)}
{(stage === "installing" || stage === "finish") && (
<Fragment>
{stage === "installing" && (
<Text>
<Text color="green">
<Spinner type="dots" />
</Text>{" "}
installing...
</Text>
)}
<Newline />
{logs.map(({ stage, log }) => {
<Fragment>
<Text color="cyan" bold>
{stage}
</Text>
<Text color="green">
command: <Text color="white">{log.command}</Text>
</Text>
{log.log.stdout ? (
<Text color="green">
stdout: <Text color="white">{log.log.stdout}</Text>
</Text>
) : undefined}
{log.log.stderr ? (
<Text color="red">
stderr: <Text color="white">{log.log.stderr}</Text>
</Text>
) : undefined}
<Newline />
</Fragment>;
})}
</Fragment>
)}
{stage === "finish" && (
<Text>
docker has finished installing. press <Text color="green">enter</Text>{" "}
to continue.
</Text>
)}
{stage === "error" && (
<Text>
there was an error during install. press{" "}
<Text color="green">enter</Text> to try again.
</Text>
)}
</Box>
);
};
export default InstallDocker;

View File

@@ -1,19 +0,0 @@
import React from "react";
import { Text } from "ink";
import { useEnter, useEsc } from "../../util/hooks";
/** Prompt line that fires onEnter when the user presses enter (esc optional). */
const EnterToContinue = ({ onEnter, pressEnterTo, onEsc }: { onEnter: () => void; pressEnterTo?: string; onEsc?: () => void; }) => {
  useEnter(onEnter);
  useEsc(() => {
    if (onEsc) onEsc();
  });
  const action = pressEnterTo || "continue";
  return (
    <Text>
      press{" "}
      <Text color="green" bold>
        enter
      </Text>{" "}
      to {action}.
    </Text>
  );
};
export default EnterToContinue;

View File

@@ -1,42 +0,0 @@
import React from "react";
import TextInput, { UncontrolledTextInput } from "ink-text-input";
import { useBlinker, useEsc } from "../../util/hooks";
export const Input = ({
initialValue,
onSubmit,
onEsc,
}: {
initialValue?: string;
onSubmit?: (val: string) => void;
onEsc?: () => void;
}) => {
useEsc(onEsc ? onEsc : () => {});
return (
<UncontrolledTextInput
initialValue={initialValue}
onSubmit={onSubmit}
/>
);
};
export const ControlledInput = ({
value,
onChange,
onSubmit,
onEsc,
}: {
value: string;
onChange: (val: string) => void;
onSubmit?: (val: string) => void;
onEsc?: () => void;
}) => {
useEsc(onEsc ? onEsc : () => {});
return (
<TextInput
value={value}
onChange={onChange}
onSubmit={onSubmit}
/>
);
};

View File

@@ -1,33 +0,0 @@
import { Box, Newline, Text } from "ink";
import React, { ReactNode } from "react";
import Selector from "./Selector";
/** A Selector with a text or custom-node label rendered beside (or above) it. */
const LabelledSelector = ({
  label,
  items,
  onSelect,
  onEsc,
  vertical,
  labelColor = "white",
}: {
  label: ReactNode;
  labelColor?: "green" | "white";
  items: string[];
  onSelect?: (item: string, index: number) => void;
  vertical?: boolean;
  onEsc?: () => void;
}) => {
  // string labels get wrapped in a colored Text; nodes render as given
  const labelNode =
    typeof label === "string" ? <Text color={labelColor}>{label} </Text> : label;
  return (
    <Box flexDirection={vertical ? "column" : "row"}>
      {labelNode}
      {vertical && <Newline />}
      <Selector items={items} onSelect={onSelect} onEsc={onEsc} />
    </Box>
  );
};
export default LabelledSelector;

View File

@@ -1,41 +0,0 @@
import React from "react";
import { Text } from "ink";
import TextInput from "ink-text-input";
import { useState } from "react";
import { useBlinker, useEsc } from "../../util/hooks";
/**
 * Text input that only accepts numeric values; shows an inline error hint
 * while the current text is not a number, and only submits valid numbers.
 */
const NumberInput = ({
  initialValue,
  onSubmit,
  onEsc,
}: {
  initialValue: number;
  onSubmit?: (val: number) => void;
  onEsc?: () => void;
}) => {
  const [value, setValue] = useState(initialValue?.toString() || "");
  const [error, setError] = useState(isNaN(Number(value)));
  useEsc(onEsc ? onEsc : () => {});
  return (
    <Text>
      <TextInput
        value={value}
        onChange={(val: string) => {
          setError(isNaN(Number(val)));
          setValue(val);
        }}
        onSubmit={(val: string) => {
          const parsed = Number(val);
          // FIX: previously only checked that the string was non-empty,
          // which let non-numeric input (e.g. "abc") submit NaN
          if (val && !isNaN(parsed)) {
            onSubmit && onSubmit(parsed);
          } else {
            setError(true);
          }
        }}
      />
      {error && <Text color="gray"> (please enter a number)</Text>}
    </Text>
  );
};
export default NumberInput;

View File

@@ -1,34 +0,0 @@
import React, { useState } from "react";
import { Box, Text, useInput } from "ink";
/** Vertical list selector: arrows move the highlight, enter selects, esc cancels. */
const Selector = (p: {
  items: string[];
  onSelect?: (item: string, i: number) => void;
  onEsc?: () => void;
}) => {
  const [highlighted, setHighlighted] = useState(0);
  useInput((_, key) => {
    const last = p.items.length - 1;
    if (key.upArrow) {
      setHighlighted(highlighted > 0 ? highlighted - 1 : 0);
    } else if (key.downArrow) {
      setHighlighted(highlighted < last ? highlighted + 1 : last);
    } else if (key.return) {
      p.onSelect && p.onSelect(p.items[highlighted]!, highlighted);
    } else if (key.escape) {
      p.onEsc && p.onEsc();
    }
  });
  return (
    <Box flexDirection="column">
      {p.items.map((item, i) => (
        <Text key={i} color={i === highlighted ? "green" : "white"}>
          {i === highlighted ? ">" : " "} {item}
        </Text>
      ))}
    </Box>
  );
};
export default Selector;

View File

@@ -1,42 +0,0 @@
import React, { ReactNode } from "react";
import LabelledSelector from "./LabelledSelector";
/** LabelledSelector preconfigured with yes / no options. */
const YesNo = ({
  label,
  onYes,
  onNo,
  onSelect,
  vertical,
  labelColor,
  noYes,
  onEsc
}: {
  label: ReactNode;
  onYes?: () => void;
  onNo?: () => void;
  onSelect?: (res: "yes" | "no") => void;
  vertical?: boolean;
  labelColor?: "green" | "white";
  noYes?: boolean;
  onEsc?: () => void;
}) => {
  // fan out to the specific callback, then the generic one
  const handleSelect = (item: string) => {
    const res = item as "yes" | "no";
    if (res === "yes") {
      onYes && onYes();
    } else {
      onNo && onNo();
    }
    onSelect && onSelect(res);
  };
  return (
    <LabelledSelector
      label={label}
      items={noYes ? ["no", "yes"] : ["yes", "no"]}
      onSelect={handleSelect}
      vertical={vertical}
      labelColor={labelColor}
      onEsc={onEsc}
    />
  );
};
export default YesNo;

View File

@@ -1,13 +0,0 @@
// default port monitor core listens on
export const DEFAULT_PORT = 9000;
// default port the periphery agent listens on
export const DEFAULT_PERIPHERY_PORT = 8000;
// default mongo connection url used by the setup flow
export const DEFAULT_MONGO_URL = "mongodb://mongo-db:27017/monitor";
// default docker registry url used by the setup flow
export const DEFAULT_REGISTRY_URL = "registry:5000/";
// docker hub image for monitor core
export const CORE_IMAGE = "mbecker2020/monitor-core";
// docker hub image for monitor periphery
export const PERIPHERY_IMAGE = "mbecker2020/monitor-periphery";
// name of the docker network the containers are attached to
export const DOCKER_NETWORK = "monitor-network";
// restart-policy options presented to the user
export const RESTART_MODES = [
  "don't restart",
  "unless stopped",
  "on failure",
  "always",
];

620
cli/src/helpers.rs Normal file
View File

@@ -0,0 +1,620 @@
use std::{
env,
fs::{self, File},
io::{Read, Write},
net::IpAddr,
path::PathBuf,
str::FromStr,
};
use async_timing_util::Timelength;
use clap::ArgMatches;
use colored::Colorize;
use rand::{distributions::Alphanumeric, Rng};
use run_command::run_command_pipe_to_terminal;
use serde::Serialize;
use crate::types::{CoreConfig, MongoConfig, PeripheryConfig, RestartMode};
// docker hub image for the monitor core server
const CORE_IMAGE_NAME: &str = "mbecker2020/monitor_core";
// docker hub image for the periphery agent
const PERIPHERY_IMAGE_NAME: &str = "mbecker2020/monitor_periphery";
// crates.io crate installed by the periphery systemd / daemon helpers
const PERIPHERY_CRATE: &str = "monitor_periphery";
/// Generate a monitor core config file from cli flags and write it to disk
/// as toml, printing the destination path on success.
pub fn gen_core_config(sub_matches: &ArgMatches) {
    // fetch a string flag or fall back to its documented default
    let str_arg = |name: &str, default: &str| -> String {
        sub_matches
            .get_one::<String>(name)
            .map(|p| p.as_str())
            .unwrap_or(default)
            .to_string()
    };
    let host = str_arg("host", "http://localhost:9000");
    let path = str_arg("path", "~/.monitor/core.config.toml");
    let port = str_arg("port", "9000")
        .parse::<u16>()
        .expect("invalid port");
    let mongo_uri = str_arg("mongo-uri", "mongodb://monitor-mongo");
    let mongo_db_name = str_arg("mongo-db-name", "monitor");
    let jwt_valid_for = str_arg("jwt-valid-for", "1-wk")
        .parse()
        .expect("invalid jwt-valid-for");
    let slack_url = sub_matches
        .get_one::<String>("slack-url")
        .map(|p| p.to_owned());
    let config = CoreConfig {
        host,
        port,
        jwt_valid_for,
        monitoring_interval: Timelength::OneMinute,
        daily_offset_hours: 0,
        keep_stats_for_days: 120,
        slack_url,
        local_auth: true,
        github_oauth: Default::default(),
        google_oauth: Default::default(),
        aws: Default::default(),
        docker_organizations: Default::default(),
        mongo: MongoConfig {
            uri: mongo_uri,
            db_name: mongo_db_name,
            app_name: "monitor".to_string(),
        },
        // secrets are freshly randomized on every generation
        jwt_secret: generate_secret(40),
        github_webhook_secret: generate_secret(30),
        github_webhook_base_url: None,
        passkey: generate_secret(30),
    };
    write_to_toml(&path, &config);
    println!(
        "\n{} has been generated at {path}\n",
        "core config".bold()
    );
}
pub fn start_mongo(sub_matches: &ArgMatches) {
let username = sub_matches.get_one::<String>("username");
let password = sub_matches.get_one::<String>("password");
if (username.is_some() && password.is_none()) {
println!(
"\n❌ must provide {} if username is provided ❌\n",
"--password".bold()
);
return;
}
if (username.is_none() && password.is_some()) {
println!(
"\n❌ must provide {} if password is provided ❌\n",
"--username".bold()
);
return;
}
let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);
let name = sub_matches
.get_one::<String>("name")
.map(|p| p.as_str())
.unwrap_or("monitor-mongo");
let port = sub_matches
.get_one::<String>("port")
.map(|p| p.as_str())
.unwrap_or("27017")
.parse::<u16>()
.expect("invalid port");
let network = sub_matches
.get_one::<String>("network")
.map(|p| p.as_str())
.unwrap_or("bridge");
let mount = sub_matches
.get_one::<String>("mount")
.map(|p| p.as_str())
.unwrap_or("~/.monitor/db");
let restart = sub_matches
.get_one::<String>("restart")
.map(|p| p.as_str())
.unwrap_or("unless-stopped")
.parse::<RestartMode>()
.expect("invalid restart mode");
let env = if let (Some(username), Some(password)) = (username, password) {
format!(" --env MONGO_INITDB_ROOT_USERNAME={username} --env MONGO_INITDB_ROOT_PASSWORD={password}")
} else {
String::new()
};
println!(
"\n====================\n {} \n====================\n",
"mongo config".bold()
);
if let Some(username) = username {
println!("{}: {username}", "mongo username".dimmed());
}
println!("{}: {name}", "container name".dimmed());
println!("{}: {port}", "port".dimmed());
println!("{}: {mount}", "mount".dimmed());
println!("{}: {network}", "network".dimmed());
println!("{}: {restart}", "restart".dimmed());
if !skip_enter {
println!(
"\npress {} to start {}. {}",
"ENTER".green().bold(),
"MongoDB".bold(),
"(ctrl-c to cancel)".dimmed()
);
let buffer = &mut [0u8];
let res = std::io::stdin().read_exact(buffer);
if res.is_err() {
println!("pressed another button, exiting");
}
}
let stop =
run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));
let command = format!("docker run -d --name {name} -p {port}:27017 --network {network} -v {mount}:/data/db{env} --restart {restart} --log-opt max-size=15m --log-opt max-file=3 mongo --quiet");
let output = run_command_pipe_to_terminal(&command);
if output.success() {
println!("\n{} has been started up ✅\n", "monitor mongo".bold())
} else {
eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
}
}
/// Start monitor core in a docker container, pulling the image and
/// replacing any existing container with the same name.
pub fn start_core(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);
    let config_path = sub_matches
        .get_one::<String>("config-path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/core.config.toml")
        .to_string();
    let name = sub_matches
        .get_one::<String>("name")
        .map(|p| p.as_str())
        .unwrap_or("monitor-core");
    let port = sub_matches
        .get_one::<String>("port")
        .map(|p| p.as_str())
        .unwrap_or("9000")
        .parse::<u16>()
        .expect("invalid port");
    let network = sub_matches
        .get_one::<String>("network")
        .map(|p| p.as_str())
        .unwrap_or("bridge");
    let restart = sub_matches
        .get_one::<String>("restart")
        .map(|p| p.as_str())
        .unwrap_or("unless-stopped")
        .parse::<RestartMode>()
        .expect("invalid restart mode");
    // FIX: idiomatic Option::copied instead of .map(|p| *p)
    let add_host = sub_matches
        .get_one::<bool>("add-internal-host")
        .copied()
        .unwrap_or(true);
    println!(
        "\n===================\n {} \n===================\n",
        "core config".bold()
    );
    println!("{}: {name}", "container name".dimmed());
    println!("{}: {config_path}", "config path".dimmed());
    println!("{}: {port}", "port".dimmed());
    println!("{}: {network}", "network".dimmed());
    println!("{}: {restart}", "restart".dimmed());
    println!("{}: {add_host}", "add internal host".dimmed());
    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor core".bold(),
            "(ctrl-c to cancel)".dimmed()
        );
        let buffer = &mut [0u8];
        let res = std::io::stdin().read_exact(buffer);
        if res.is_err() {
            println!("pressed another button, exiting");
        }
    }
    println!("\nstarting monitor core container...\n");
    let _ = run_command_pipe_to_terminal(&format!("docker pull {CORE_IMAGE_NAME}"));
    let _ =
        run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));
    // optionally map host.docker.internal so core can reach services on the host
    let add_host = if add_host {
        " --add-host host.docker.internal:host-gateway"
    } else {
        ""
    };
    let command = format!("docker run -d --name {name} -p {port}:9000 --network {network} -v {config_path}:/config/config.toml --restart {restart}{add_host} {CORE_IMAGE_NAME}");
    let output = run_command_pipe_to_terminal(&command);
    if output.success() {
        println!("\n{} has been started up ✅\n", "monitor core".bold())
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}
/// Generate a periphery config file from cli flags and write it as toml.
pub fn gen_periphery_config(sub_matches: &ArgMatches) {
    let path = sub_matches
        .get_one::<String>("path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.config.toml")
        .to_string();
    let port = sub_matches
        .get_one::<String>("port")
        .map(|p| p.as_str())
        .unwrap_or("8000")
        .parse::<u16>()
        .expect("invalid port");
    let stats_polling_rate = sub_matches
        .get_one::<String>("stats-polling-rate")
        .map(|p| p.as_str())
        .unwrap_or("5-sec")
        .parse::<Timelength>()
        .expect("invalid timelength");
    // comma separated list; empty entries are skipped so "" yields no ips.
    // FIX: char pattern for split and !is_empty() instead of len() > 0 (clippy idioms)
    let allowed_ips = sub_matches
        .get_one::<String>("allowed-ips")
        .map(|p| p.as_str())
        .unwrap_or("")
        .split(',')
        .filter(|ip| !ip.is_empty())
        .map(|ip| {
            ip.parse()
                .expect("given allowed ip address is not valid ip")
        })
        .collect::<Vec<IpAddr>>();
    // NOTE(review): replace("~", ...) substitutes every '~', not just a
    // leading one - fine for typical paths, but worth confirming
    let repo_dir = sub_matches
        .get_one::<String>("repo-dir")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/repos")
        .to_string()
        .replace("~", env::var("HOME").unwrap().as_str());
    let config = PeripheryConfig {
        port,
        repo_dir,
        stats_polling_rate,
        allowed_ips,
        passkeys: vec![],
        secrets: Default::default(),
        github_accounts: Default::default(),
        docker_accounts: Default::default(),
    };
    write_to_toml(&path, &config);
    println!(
        "\n{} generated at {path}\n",
        "periphery config".bold()
    );
}
/// Interactive startup of periphery managed by a user-level systemd unit;
/// optionally installs the binary from crates.io first.
pub fn start_periphery_systemd(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);
    let install = *sub_matches.get_one::<bool>("install").unwrap_or(&false);
    let config_path = sub_matches
        .get_one::<String>("config-path")
        .map(|p| p.as_str())
        .unwrap_or("~/.monitor/periphery.config.toml")
        .to_string();
    println!(
        "\n========================\n {} \n========================\n",
        "periphery config".bold()
    );
    println!("{}: systemd", "run with".dimmed());
    println!("{}: {config_path}", "config path".dimmed());
    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor periphery".bold(),
            "(ctrl-c to cancel)".dimmed()
        );
        let mut byte = [0u8];
        if std::io::stdin().read_exact(&mut byte).is_err() {
            println!("pressed another button, exiting");
        }
    }
    if install {
        install_periphery_from_crates_io();
    }
    gen_periphery_service_file(&config_path);
    let user = env::var("USER").expect("failed to find $USER env var");
    // linger keeps the user unit running after logout
    let command =
        format!("systemctl --user daemon-reload && systemctl --user enable --now periphery && loginctl enable-linger {user}");
    if run_command_pipe_to_terminal(&command).success() {
        println!(
            "\n{} has been started up ✅\n",
            "monitor periphery".bold()
        )
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}
/// Interactive startup of periphery as a background daemon process,
/// killing any previously running instance first.
pub fn start_periphery_daemon(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);
    let install = *sub_matches.get_one::<bool>("install").unwrap_or(&false);
    // fetch a string flag or fall back to its documented default
    let str_arg = |name: &str, default: &str| -> String {
        sub_matches
            .get_one::<String>(name)
            .map(|p| p.as_str())
            .unwrap_or(default)
            .to_string()
    };
    let config_path = str_arg("config-path", "~/.monitor/periphery.config.toml");
    let stdout = str_arg("stdout", "~/.monitor/periphery.log.out");
    let stderr = str_arg("stderr", "~/.monitor/periphery.log.err");
    println!(
        "\n========================\n {} \n========================\n",
        "periphery config".bold()
    );
    println!("{}: daemon", "run as".dimmed());
    println!("{}: {config_path}", "config path".dimmed());
    println!("{}: {stdout}", "stdout".dimmed());
    println!("{}: {stderr}", "stderr".dimmed());
    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor periphery".bold(),
            "(ctrl-c to cancel)".dimmed()
        );
        let mut byte = [0u8];
        if std::io::stdin().read_exact(&mut byte).is_err() {
            println!("pressed another button, exiting");
        }
    }
    if install {
        install_periphery_from_crates_io();
    }
    // replace any running periphery process, then relaunch as daemon
    let command = format!("if pgrep periphery; then pkill periphery; fi && periphery --daemon --config-path {config_path} --stdout {stdout} --stderr {stderr}");
    if run_command_pipe_to_terminal(&command).success() {
        println!(
            "\n{} has been started up ✅\n",
            "monitor periphery".bold()
        )
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}
/// Interactive startup of periphery inside a docker container, pulling the
/// image and replacing any existing container with the same name.
pub fn start_periphery_container(sub_matches: &ArgMatches) {
    let skip_enter = *sub_matches.get_one::<bool>("yes").unwrap_or(&false);
    // fetch a string flag or fall back to its documented default
    let str_arg = |name: &str, default: &str| -> String {
        sub_matches
            .get_one::<String>(name)
            .map(|p| p.as_str())
            .unwrap_or(default)
            .to_string()
    };
    let config_path = str_arg("config-path", "~/.monitor/periphery.config.toml");
    let repo_dir = str_arg("repo-dir", "~/.monitor/repos");
    let name = str_arg("name", "monitor-periphery");
    let port = str_arg("port", "8000")
        .parse::<u16>()
        .expect("invalid port");
    let network = str_arg("network", "bridge");
    let restart = str_arg("restart", "unless-stopped")
        .parse::<RestartMode>()
        .expect("invalid restart mode");
    println!(
        "\n========================\n {} \n========================\n",
        "periphery config".bold()
    );
    println!("{}: container", "run as".dimmed());
    println!("{}: {name}", "container name".dimmed());
    println!("{}: {config_path}", "config path".dimmed());
    println!("{}: {repo_dir}", "repo folder".dimmed());
    println!("{}: {port}", "port".dimmed());
    println!("{}: {network}", "network".dimmed());
    println!("{}: {restart}", "restart".dimmed());
    if !skip_enter {
        println!(
            "\npress {} to start {}. {}",
            "ENTER".green().bold(),
            "monitor periphery".bold(),
            "(ctrl-c to cancel)".dimmed()
        );
        let mut byte = [0u8];
        if std::io::stdin().read_exact(&mut byte).is_err() {
            println!("pressed another button, exiting");
        }
    }
    println!("\nstarting monitor periphery container...\n");
    let _ = run_command_pipe_to_terminal(&format!("docker pull {PERIPHERY_IMAGE_NAME}"));
    let _ =
        run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));
    // mounts the docker socket so periphery can manage host containers
    let command = format!("docker run -d --name {name} -p {port}:8000 --network {network} -v {config_path}:/config/config.toml -v {repo_dir}:/repos -v /var/run/docker.sock:/var/run/docker.sock --restart {restart} {PERIPHERY_IMAGE_NAME}");
    if run_command_pipe_to_terminal(&command).success() {
        println!(
            "\n{} has been started up ✅\n",
            "monitor periphery".bold()
        )
    } else {
        eprintln!("\n❌ there was some {} on startup ❌\n", "error".red())
    }
}
/// Write the periphery systemd user unit file under ~/.config/systemd/user.
pub fn gen_periphery_service_file(config_path: &str) {
    let home = env::var("HOME").expect("failed to find $HOME env var");
    let unit_dir = format!("{home}/.config/systemd/user");
    // best effort: the directory may already exist
    let _ = std::fs::create_dir_all(&unit_dir);
    File::create(format!("{unit_dir}/periphery.service"))
        .expect("failed to create user systemd unit file")
        .write_all(periphery_unit_file(config_path).as_bytes())
        .expect("failed to write config file");
}
/// Serialize `toml` and write it to `path`, expanding `~` to $HOME and
/// creating parent directories as needed. Panics on any failure.
fn write_to_toml(path: &str, toml: impl Serialize) {
    let expanded = path.replace("~", &std::env::var("HOME").unwrap());
    let path = PathBuf::from_str(&expanded).expect("not a valid path");
    // best effort: parents may already exist
    let _ = fs::create_dir_all(pop_path(&path));
    let contents = toml::to_string(&toml).expect("failed to parse config into toml");
    fs::write(path, contents).expect("❌ failed to write toml to file ❌");
}
/// Return a copy of `path` with its final component removed
/// (unchanged if there is nothing to pop).
fn pop_path(path: &PathBuf) -> PathBuf {
    let mut parent = path.to_owned();
    parent.pop();
    parent
}
/// Produce a random alphanumeric string of `length` characters, used for
/// generated secrets (jwt secret, passkeys, webhook secret).
fn generate_secret(length: usize) -> String {
    let chars = rand::thread_rng().sample_iter(&Alphanumeric).take(length);
    chars.map(char::from).collect()
}
/// Render the systemd user unit file contents for the periphery agent.
/// `config_path` is passed through to the periphery binary verbatim.
fn periphery_unit_file(config_path: &str) -> String {
    let home = env::var("HOME").expect("failed to find $HOME env var");
    // FIX: the previous version also looked up $USER, but the unit file
    // never referenced it, so the lookup (and its potential panic) is removed
    format!(
        "[Unit]
Description=agent to connect with monitor core
[Service]
ExecStart={home}/.monitor/bin/periphery --config-path {config_path} --home-dir {home}
Restart=on-failure
TimeoutStartSec=0
[Install]
WantedBy=default.target"
    )
}
/// Install the periphery binary from crates.io via cargo; panics if the
/// install command fails.
fn install_periphery_from_crates_io() {
    println!("\ninstalling periphery binary...\n");
    let output = run_command_pipe_to_terminal(&format!("cargo install {PERIPHERY_CRATE}"));
    if !output.success() {
        panic!(
            "\n❌ there was some {} during periphery installation ❌\n",
            "error".red()
        )
    }
    println!("\ninstallation finished, starting monitor periphery daemon\n");
}

253
cli/src/main.rs Normal file
View File

@@ -0,0 +1,253 @@
#![allow(unused)]
use clap::{arg, Arg, Command};
mod helpers;
mod types;
use helpers::*;
/// Build the clap command tree for the monitor cli:
/// `core <gen-config | start-mongo | start>` and
/// `periphery <gen-config | start <systemd | daemon>>`.
fn cli() -> Command {
    Command::new("monitor")
        .about("\na cli to set up monitor components, like the periphery client")
        .version(env!("CARGO_PKG_VERSION"))
        .subcommand_required(true)
        .arg_required_else_help(true)
        .allow_external_subcommands(true)
        // --- monitor core setup commands ---
        .subcommand(
            Command::new("core")
                .about("tools to set up monitor core")
                .subcommand_required(true)
                .arg_required_else_help(true)
                .allow_external_subcommands(true)
                // generate the core.config.toml (handled by gen_core_config)
                .subcommand(
                    Command::new("gen-config")
                        .about("generate a core config file")
                        .arg(
                            arg!(--host <HOST> "the host to use with oauth redirect url, whatever host the user hits to access monitor. eg 'https://monitor.mogh.tech'")
                                .required(true)
                        )
                        .arg(
                            arg!(--path <PATH> "sets path of generated config file. default is '~/.monitor/core.config.toml'")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port core will run on. default is 9000. if running in docker, keep this port as is, set the external port when running core start command")
                                .required(false)
                        )
                        .arg(
                            arg!(--"mongo-uri" <URI> "sets the mongo uri to use. default is 'mongodb://monitor-mongo'")
                                .required(false)
                        )
                        .arg(
                            arg!(--"mongo-db-name" <NAME> "sets the db name to use. default is 'monitor'")
                                .required(false)
                        )
                        .arg(
                            arg!(--"jwt-valid-for" <TIMELENGTH> "sets the length of time jwt stays valid for. default is 1-wk (one week)")
                                .required(false)
                        )
                        .arg(
                            arg!(--"slack-url" <URL> "sets the slack url to use for slack notifications")
                                .required(false)
                        ),
                )
                // start a local mongo container (handled by start_mongo)
                .subcommand(
                    Command::new("start-mongo")
                        .about("start up a local mongo container for monitor core")
                        .arg(
                            arg!(--yes "used in scripts to skip 'enter to continue' step")
                        )
                        .arg(
                            arg!(--name <NAME> "specify the name of the mongo container. default is monitor-mongo")
                                .required(false)
                        )
                        .arg(
                            arg!(--username <USERNAME> "specify the admin username for mongo. default is mongo with no auth")
                                .required(false)
                        )
                        .arg(
                            arg!(--password <PASSWORD> "specify the admin password for mongo. default is mongo with no auth")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port mongo will run on. default is 27017")
                                .required(false)
                        )
                        .arg(
                            arg!(--mount <PATH> "sets the path the mongo data is mounted into. default is ~/.monitor/db")
                                .required(false)
                        )
                        .arg(
                            arg!(--network <NETWORK> "sets docker network of mongo container. default is bridge")
                                .required(false)
                        )
                        .arg(
                            arg!(--restart <RESTART> "sets docker restart mode of mongo container. default is unless-stopped")
                        )
                )
                // start the core container (handled by start_core)
                .subcommand(
                    Command::new("start")
                        .about("start up monitor core in container")
                        .arg(
                            arg!(--yes "used in scripts to skip 'enter to continue' step")
                        )
                        .arg(
                            arg!(--name <NAME> "specify the name of the monitor core container. default is monitor-core")
                        )
                        .arg(
                            arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/core.config.toml")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port monitor core will run on. default is 9000")
                                .required(false)
                        )
                        .arg(
                            arg!(--network <NETWORK> "sets docker network of monitor core container. default is bridge")
                                .required(false)
                        )
                        .arg(
                            arg!(--restart <RESTART> "sets docker restart mode of monitor core container. default is unless-stopped")
                        )
                        .arg(
                            arg!(--"add-internal-host" "adds the docker flag '--add-host=host.docker.internal:host-gateway'. default is true")
                        )
                ),
        )
        // --- monitor periphery setup commands ---
        .subcommand(
            Command::new("periphery")
                .about("tools to set up monitor periphery")
                .subcommand_required(true)
                .arg_required_else_help(true)
                .allow_external_subcommands(true)
                // generate the periphery.config.toml (handled by gen_periphery_config)
                .subcommand(
                    Command::new("gen-config")
                        .about("generate a periphery config file")
                        .arg(
                            arg!(--path <PATH> "sets path of generated config file. default is '~/.monitor/periphery.config.toml'")
                                .required(false)
                        )
                        .arg(
                            arg!(--port <PORT> "sets port periphery will run on. default is 8000. if running in docker, keep this port as is, set the external port when running periphery start command")
                                .required(false)
                        )
                        .arg(
                            arg!(--"stats-polling-rate" <INTERVAL> "sets stats polling rate to control granularity of system stats returned. default is 5-sec. options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min")
                                .required(false)
                        )
                        .arg(
                            arg!(--"allowed-ips" <IPS> "used to only accept requests from known ips. give ips as comma seperated list, like '--allowed_ips 127.0.0.1,10.20.30.43'. default is empty, which will not block any ip.")
                                .required(false)
                        )
                        .arg(
                            arg!(--"repo-dir" <PATH> "if running in container, this should be '/repos'. default is ~/.monitor/repos").required(false)
                        )
                )
                // start periphery via systemd or as a daemon
                .subcommand(
                    Command::new("start")
                        .about("tools to start periphery as daemon or container")
                        .subcommand(
                            Command::new("systemd")
                                .about("manage periphery with systemd running under current user")
                                .arg(
                                    arg!(--yes "used in scripts to skip 'enter to continue' step")
                                )
                                .arg(
                                    arg!(--install "specify this to install periphery from crates.io")
                                )
                                .arg(
                                    arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
                                        .required(false)
                                )
                        )
                        .subcommand(
                            Command::new("daemon")
                                .about("start up monitor periphery daemon")
                                .arg(
                                    arg!(--yes "used in scripts to skip 'enter to continue' step")
                                )
                                .arg(
                                    arg!(--install "specify this to install periphery from crates.io")
                                )
                                .arg(
                                    arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
                                        .required(false)
                                )
                                .arg(
                                    arg!(--stdout <PATH> "specify the file path for periphery to log stdout to. default is ~/.monitor/periphery.log.out")
                                        .required(false)
                                )
                                .arg(
                                    arg!(--stderr <PATH> "specify the file path for periphery to log stderr to. default is ~/.monitor/periphery.log.err")
                                        .required(false)
                                )
                        )
                        // container mode is currently disabled
                        // .subcommand(
                        //     Command::new("container")
                        //         .about("start up monitor periphery in docker container")
                        //         .arg(
                        //             arg!(--yes "used in scripts to skip 'enter to continue' step")
                        //         )
                        //         .arg(
                        //             arg!(--name <NAME> "specify the name of the monitor periphery container. default is monitor-periphery")
                        //         )
                        //         .arg(
                        //             arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
                        //                 .required(false)
                        //         )
                        //         .arg(arg!(--"repo-dir" <PATH> "specify the folder on host to clone repos into. default is ~/.monitor/repos").required(false))
                        //         .arg(
                        //             arg!(--port <PORT> "sets port monitor periphery will run on. default is 8000")
                        //                 .required(false)
                        //         )
                        //         .arg(
                        //             arg!(--network <NETWORK> "sets docker network of monitor periphery container. default is bridge")
                        //                 .required(false)
                        //         )
                        //         .arg(
                        //             arg!(--restart <RESTART> "sets docker restart mode of monitor periphery container. default is unless-stopped")
                        //         )
                        // )
                ),
        )
}
/// CLI entrypoint: parse args via clap (`cli()`) and dispatch to the matching
/// handler. A missing subcommand panics with a usage hint via `expect`;
/// unrecognized subcommands fall through to a printed usage hint instead.
fn main() {
    let matches = cli().get_matches();
    match matches.subcommand() {
        // 'monitor core <gen-config | start-mongo | start>'
        Some(("core", sub_matches)) => {
            let core_command = sub_matches.subcommand().expect("\n❌ invalid call, should be 'monitor core <gen-config, start-mongo, start> <flags>' ❌\n");
            match core_command {
                ("gen-config", sub_matches) => gen_core_config(sub_matches),
                ("start-mongo", sub_matches) => start_mongo(sub_matches),
                ("start", sub_matches) => start_core(sub_matches),
                _ => {
                    println!("\n❌ invalid call, should be 'monitor core <gen-config, start-mongo, start> <flags>' ❌\n")
                }
            }
        }
        // 'monitor periphery <gen-config | start>'
        Some(("periphery", sub_matches)) => {
            let periphery_command = sub_matches.subcommand().expect(
                "\n❌ invalid call, should be 'monitor periphery <gen-config, start> <flags>' ❌\n",
            );
            match periphery_command {
                ("gen-config", sub_matches) => gen_periphery_config(sub_matches),
                // 'start' has its own nested subcommands (systemd / daemon)
                ("start", sub_matches) => {
                    let periphery_start_command = sub_matches.subcommand().expect("\n❌ invalid call, should be 'monitor periphery start <daemon, container> <flags>' ❌\n");
                    match periphery_start_command {
                        ("systemd", sub_matches) => start_periphery_systemd(sub_matches),
                        ("daemon", sub_matches) => start_periphery_daemon(sub_matches),
                        // ("container", sub_matches) => start_periphery_container(sub_matches),
                        _ => println!("\n❌ invalid call, should be 'monitor periphery start <daemon, container> <flags>' ❌\n")
                    }
                }
                _ => {
                    println!("\n❌ invalid call, should be 'monitor periphery <gen-config, start>...' ❌\n")
                }
            }
        }
        _ => println!("\n❌ invalid call, should be 'monitor <core, periphery> ...' ❌\n"),
    }
}

34
cli/src/types.d.ts vendored
View File

@@ -1,34 +0,0 @@
// Top-level cli configuration. `core` and `periphery` are alternatives
// (deploy() branches on which one is present); `mongo` accompanies `core`.
export type Config = {
  core?: CoreOrPeripheryConfig;
  periphery?: CoreOrPeripheryConfig;
  mongo?: {
    url: string;
    startConfig?: StartConfig; // present when the cli should start mongo itself
  };
  // registry?: {
  //   url: string;
  //   startConfig?: StartConfig;
  // };
};

// Container settings shared by the core and periphery deployments.
export type CoreOrPeripheryConfig = {
  name: string;
  secretVolume: string; // to mount secrets.json into the container
  sysroot: string; // host directory mounted as /monitor-root
  port: number; // host port bound to the container's default port
  restart: string; // docker restart policy
  host?: string;
};

export type StartConfig = {
  // if this is attached, the cli will start container with this config and add
  name: string;
  port: number;
  volume: string | false; // host volume for /data/db, or false for no volume
  restart: string;
};

// Setter used by config forms: assign `val` to `field` on the Config.
export type SetConfig = (
  field: keyof Config,
  val: Config[keyof Config]
) => void;

207
cli/src/types.rs Normal file
View File

@@ -0,0 +1,207 @@
use std::{collections::HashMap, net::IpAddr};
use async_timing_util::Timelength;
use serde_derive::{Deserialize, Serialize};
use strum_macros::{Display, EnumString};
/// Top-level configuration for the monitor core server,
/// deserialized from the core config toml.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CoreConfig {
    /// the host to use with oauth redirect url, whatever host the user hits to access monitor. eg 'https://monitor.mogh.tech'
    pub host: String,
    /// port the core web server runs on (default 9000)
    #[serde(default = "default_core_port")]
    pub port: u16,
    /// daily utc offset in hours to run daily update. eg 8:00 eastern time is 13:00 UTC, so offset should be 13. default of 0 runs at UTC midnight.
    #[serde(default)]
    pub daily_offset_hours: u8,
    /// number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
    #[serde(default)]
    pub keep_stats_for_days: u64, // 0 means never prune
    /// secret used for jwt auth tokens
    pub jwt_secret: String,
    /// how long an issued jwt stays valid (default one week)
    #[serde(default = "default_jwt_valid_for")]
    pub jwt_valid_for: Timelength,
    /// interval at which to collect server stats and alert for out of bounds
    pub monitoring_interval: Timelength,
    /// used to verify validity from github webhooks
    pub github_webhook_secret: String,
    /// used to form the frontend listener url, if None will use 'host'.
    pub github_webhook_base_url: Option<String>,
    /// sent in auth header with req to periphery
    pub passkey: String,
    /// integration with slack app
    pub slack_url: Option<String>,
    /// enable login with local auth
    pub local_auth: bool,
    /// allowed docker orgs used with monitor. first in this list will be default for build
    #[serde(default)]
    pub docker_organizations: Vec<String>,
    /// mongo connection configuration
    pub mongo: MongoConfig,
    /// github oauth credentials (all-default value means disabled)
    #[serde(default)]
    pub github_oauth: OauthCredentials,
    /// google oauth credentials (all-default value means disabled)
    #[serde(default)]
    pub google_oauth: OauthCredentials,
    /// aws builder defaults
    #[serde(default)]
    pub aws: AwsBuilderConfig,
}

/// serde default for CoreConfig::port.
fn default_core_port() -> u16 {
    9000
}

/// serde default for CoreConfig::jwt_valid_for.
fn default_jwt_valid_for() -> Timelength {
    Timelength::OneWeek
}
/// Oauth client credentials for a single provider (github / google).
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct OauthCredentials {
    /// whether login with this provider is enabled
    #[serde(default)]
    pub enabled: bool,
    /// oauth client id
    #[serde(default)]
    pub id: String,
    /// oauth client secret
    #[serde(default)]
    pub secret: String,
}

/// Mongo connection configuration used by core.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct MongoConfig {
    /// mongo connection string
    pub uri: String,
    /// app name reported to mongo (default "monitor_core")
    #[serde(default = "default_core_mongo_app_name")]
    pub app_name: String,
    /// database name (default "monitor")
    #[serde(default = "default_core_mongo_db_name")]
    pub db_name: String,
}

/// serde default for MongoConfig::app_name.
fn default_core_mongo_app_name() -> String {
    "monitor_core".to_string()
}

/// serde default for MongoConfig::db_name.
fn default_core_mongo_db_name() -> String {
    "monitor".to_string()
}
/// Defaults used when launching aws instances as builders.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct AwsBuilderConfig {
    /// aws credential — never serialized back out
    #[serde(skip_serializing)]
    pub access_key_id: String,
    /// aws credential — never serialized back out
    #[serde(skip_serializing)]
    pub secret_access_key: String,
    pub default_ami_id: String,
    pub default_subnet_id: String,
    pub default_key_pair_name: String,
    /// ami id -> accounts available on instances launched from that ami
    #[serde(default)]
    pub available_ami_accounts: AvailableAmiAccounts,
    /// default "us-east-1"
    #[serde(default = "default_aws_region")]
    pub default_region: String,
    /// default 8
    #[serde(default = "default_volume_gb")]
    pub default_volume_gb: i32,
    /// default "m5.2xlarge"
    #[serde(default = "default_instance_type")]
    pub default_instance_type: String,
    #[serde(default)]
    pub default_security_group_ids: Vec<String>,
    #[serde(default)]
    pub default_assign_public_ip: bool,
}

/// serde default for AwsBuilderConfig::default_region.
fn default_aws_region() -> String {
    String::from("us-east-1")
}

/// serde default for AwsBuilderConfig::default_volume_gb.
fn default_volume_gb() -> i32 {
    8
}

/// serde default for AwsBuilderConfig::default_instance_type.
fn default_instance_type() -> String {
    String::from("m5.2xlarge")
}

pub type AvailableAmiAccounts = HashMap<String, AmiAccounts>; // (ami_id, AmiAccounts)

/// Accounts associated with a specific ami.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct AmiAccounts {
    /// display name for the ami
    pub name: String,
    /// github usernames available with this ami
    #[serde(default)]
    pub github: Vec<String>,
    /// docker usernames available with this ami
    #[serde(default)]
    pub docker: Vec<String>,
}

// username -> token maps used for github / docker accounts
pub type GithubUsername = String;
pub type GithubToken = String;
pub type GithubAccounts = HashMap<GithubUsername, GithubToken>;
pub type DockerUsername = String;
pub type DockerToken = String;
pub type DockerAccounts = HashMap<DockerUsername, DockerToken>;
// secret name -> secret value, injectable into deployment configuration
pub type SecretsMap = HashMap<String, String>;
/// Configuration for a periphery agent, deserialized from its config toml.
#[derive(Serialize, Deserialize, Debug)]
pub struct PeripheryConfig {
    /// port the periphery server runs on (default 8000)
    #[serde(default = "default_periphery_port")]
    pub port: u16,
    /// directory repos are cloned into (default "/repos")
    #[serde(default = "default_repo_dir")]
    pub repo_dir: String,
    /// granularity of collected system stats (default 5-sec)
    #[serde(default = "default_stats_refresh_interval")]
    pub stats_polling_rate: Timelength,
    /// if non-empty, only requests from these ips are accepted
    #[serde(default)]
    pub allowed_ips: Vec<IpAddr>,
    /// if non-empty, requests must present one of these passkeys
    #[serde(default)]
    pub passkeys: Vec<String>,
    /// secret values injectable into deployment configuration
    #[serde(default)]
    pub secrets: SecretsMap,
    /// github username -> token
    #[serde(default)]
    pub github_accounts: GithubAccounts,
    /// docker username -> token
    #[serde(default)]
    pub docker_accounts: DockerAccounts,
}

/// serde default for PeripheryConfig::port.
fn default_periphery_port() -> u16 {
    8000
}

/// serde default for PeripheryConfig::repo_dir.
fn default_repo_dir() -> String {
    "/repos".to_string()
}

/// serde default for PeripheryConfig::stats_polling_rate.
fn default_stats_refresh_interval() -> Timelength {
    Timelength::FiveSeconds
}

/// Docker container restart policy, (de)serialized with docker's cli names.
#[derive(Serialize, Deserialize, Debug, Display, EnumString, PartialEq, Hash, Eq, Clone, Copy)]
pub enum RestartMode {
    #[serde(rename = "no")]
    #[strum(serialize = "no")]
    NoRestart,
    #[serde(rename = "on-failure")]
    #[strum(serialize = "on-failure")]
    OnFailure,
    #[serde(rename = "always")]
    #[strum(serialize = "always")]
    Always,
    #[serde(rename = "unless-stopped")]
    #[strum(serialize = "unless-stopped")]
    UnlessStopped,
}

View File

@@ -1,47 +0,0 @@
/**
 * Parse cli flags with meow (imported lazily, since meow is ESM-only).
 *
 * Fix: meow/minimist aliases are bare letters. The previous values
 * ("-c", "-p", "-r") registered the dash as part of the alias name,
 * so the short flags never matched what users typed.
 *
 * @returns the parsed flags object
 */
const getFlags = async () => {
  const meow = await import("meow");
  const cli = meow.default(
    `
  Usage
    $ npx @mbecker20/monitor-cli

  Options
    --core, -c  setup monitor core
    --periphery, -p  setup monitor periphery
    --restart, -r  restart monitor
    --mongo-url  the url of mongo used with monitor (used with restart)
    --pull-latest  whether to pull latest monitor core image (used with restart)
    --restart-default  restart monitor with defaults

  Examples
    $ npx @mbecker20/monitor-cli --core
`,
    {
      importMeta: import.meta,
      flags: {
        core: {
          type: "boolean",
          alias: "c",
        },
        periphery: {
          type: "boolean",
          alias: "p",
        },
        restart: {
          type: "boolean",
          alias: "r",
        },
        mongoUrl: {
          type: "string",
        },
        pullLatest: {
          type: "boolean",
        },
        restartDefault: {
          type: "boolean",
        },
      },
    }
  );
  return cli.flags;
};

export default getFlags;

View File

@@ -1,118 +0,0 @@
import { CommandLogError } from "@monitor/types";
import {
CORE_IMAGE,
DEFAULT_PERIPHERY_PORT,
DEFAULT_PORT,
DOCKER_NETWORK,
PERIPHERY_IMAGE,
} from "../../config";
import { Config, StartConfig } from "../../types";
import { addInitialDocs } from "../mongoose/addInitialDocs";
import { deleteContainer } from "./docker";
import { execute } from "./execute";
import { noTrailingSlash, toDashedName, trailingSlash } from "./general";
// Stages reported back to the caller as the deploy progresses.
export type Stage = "mongo" | "registry" | "core" | "periphery" | "docs";

export type Update = {
  stage: Stage;
  result?: CommandLogError; // command output — absent for stages with no command (eg "docs")
  description: string;
};

/**
 * Deploy the stack described by `config`: either core (network + optional
 * mongo container + core container + seed docs) or a standalone periphery
 * container. Each completed stage is reported through `onComplete`.
 */
export default async function deploy(
  config: Config,
  onComplete: (update: Update) => void
) {
  const { core, periphery, mongo } = config;
  if (core) {
    // NOTE(review): the core path is a no-op when `mongo` is unset — confirm intended
    if (mongo) {
      await createNetwork();
      // only start a mongo container when a startConfig was provided
      if (mongo.startConfig) {
        const result = await deployMongo(mongo.startConfig);
        onComplete({
          stage: "mongo",
          result,
          description: "mongo started",
        });
      }
      // if (registry.startConfig) {
      //   const result = await deployRegistry(registry.startConfig);
      //   onComplete({
      //     stage: "registry",
      //     result,
      //     description: "registry started",
      //   });
      // }
      const result = await deployCore(config);
      onComplete({
        stage: "core",
        result,
        description: "monitor core started",
      });
      // seed the database with the initial server / deployment documents
      await addInitialDocs(config);
      onComplete({
        stage: "docs",
        description: "configurations added to db",
      });
    }
  } else if (periphery) {
    // replace any existing periphery container with the same name
    await deleteContainer(toDashedName(periphery.name));
    const result = await deployPeriphery(config);
    onComplete({
      stage: "periphery",
      result,
      description: "monitor periphery started",
    });
  }
}
/** Pull and run the monitor core container described by the config. */
async function deployCore({ core, mongo }: Config) {
  // NOTE(review): pulls a hard-coded image ref but runs CORE_IMAGE — confirm the two always match
  await execute("docker pull mbecker2020/monitor-core:latest");
  const { name, secretVolume, port, restart, sysroot, host } = core!;
  const nameConfig = `--name ${toDashedName(name)}`;
  // secrets mount, docker socket access, and the monitor root dir
  const volumes = `-v ${secretVolume}:/secrets -v /var/run/docker.sock:/var/run/docker.sock -v ${sysroot}:/monitor-root`;
  const network = `-p ${port}:${DEFAULT_PORT} --network ${DOCKER_NETWORK}`;
  // SYSROOT must end with a slash, HOST must not
  const env = `-e MONGO_URL=${mongo?.url} -e SYSROOT=${trailingSlash(
    core?.sysroot!
  )} -e HOST=${noTrailingSlash(host!)}`;
  const restartArg = `--restart ${restart}`;
  const command = `docker run -d ${nameConfig} ${volumes} ${network} ${env} ${restartArg} ${CORE_IMAGE}`;
  return await execute(command);
}
/** Pull and run the monitor periphery container described by the config. */
async function deployPeriphery({ periphery }: Config) {
  // NOTE(review): pulls a hard-coded image ref but runs PERIPHERY_IMAGE — confirm the two always match
  await execute("docker pull mbecker2020/monitor-periphery:latest");
  const { name, port, secretVolume, restart, sysroot } = periphery!;
  const nameConfig = `--name ${toDashedName(name)}`;
  // secrets mount, docker socket access, and the monitor root dir
  const volume = `-v ${secretVolume}:/secrets -v /var/run/docker.sock:/var/run/docker.sock -v ${sysroot}:/monitor-root`;
  const network = `-p ${port}:${DEFAULT_PERIPHERY_PORT}`;
  const env = `-e SYSROOT=${trailingSlash(periphery?.sysroot!)}`;
  const restartArg = `--restart ${restart}`;
  // lets the container reach services on the docker host
  const hostCommunication = "--add-host=host.docker.internal:host-gateway"
  const command = `docker run -d ${nameConfig} ${volume} ${network} ${env} ${restartArg} ${hostCommunication} ${PERIPHERY_IMAGE}`;
  return await execute(command);
}
/** Run the mongo:latest container described by the given StartConfig. */
async function deployMongo({ name, port, volume, restart }: StartConfig) {
  const volumeArg = volume ? ` -v ${volume}:/data/db` : "";
  const command = `docker run -d --name ${name} -p ${port}:27017${volumeArg} --network ${DOCKER_NETWORK} --restart ${restart} mongo:latest`;
  return await execute(command);
}
// async function deployRegistry({ name, port, volume, restart }: StartConfig) {
// const command = `docker run -d --name ${name} -p ${port}:5000${
// volume ? ` -v ${volume}:/var/lib/registry` : ""
// } --network ${DOCKER_NETWORK} --restart ${restart} registry:2`;
// return await execute(command);
// }
/** Create the shared docker network the monitor containers attach to. */
async function createNetwork() {
  return await execute(`docker network create ${DOCKER_NETWORK}`);
}

View File

@@ -1,184 +0,0 @@
import { CommandLogError, Conversion, DockerRunArgs, EnvironmentVar } from "@monitor/types";
import { execute } from "./execute";
// One progress entry emitted per install step.
export type InstallLog = {
  stage: string; // human-readable step label, eg "updated system (1 of 6)"
  log: CommandLogError; // full output of the step's command
};
/**
 * Install docker on an Ubuntu host step by step, reporting every step's
 * outcome through `onCommandEnd`.
 *
 * Fixes:
 *  - step 5 command had a typo ("apt-get udpate") that made it fail every run
 *  - a step-4 failure previously returned `undefined` (falsy), so callers
 *    checking the return value saw it as success; it now returns `true`
 *    like every other failure path
 *
 * @param onCommandEnd called after each step with a stage label and its log
 * @param systemCtlEnable when true, also enable docker/containerd on boot
 * @returns `true` as soon as a step fails (remaining steps are skipped),
 *          `undefined` when every step succeeds
 */
export async function installDockerUbuntu(
  onCommandEnd: (log: InstallLog) => void,
  systemCtlEnable?: boolean
) {
  const total = 6 + (systemCtlEnable ? 1 : 0);
  const update = await execute("sudo apt-get update");
  console.log(update);
  onCommandEnd({
    stage: `${update.isError ? "error updating" : "updated"} system (1 of ${total})`,
    log: update,
  });
  if (update.isError) return true;
  // NOTE(review): no -y flag here, so apt may prompt interactively — confirm intended
  const installDeps = await execute(`sudo apt-get install \
ca-certificates \
curl \
gnupg \
lsb-release`);
  console.log(installDeps);
  onCommandEnd({
    stage: `${installDeps.isError ? "error installing" : "installed"} dependencies (2 of ${total})`,
    log: installDeps,
  });
  if (installDeps.isError) return true;
  const addKey = await execute(
    "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg"
  );
  console.log(addKey);
  onCommandEnd({
    stage: `${addKey.isError ? "error adding" : "added"} docker key (3 of ${total})`,
    log: addKey,
  });
  if (addKey.isError) return true;
  const setStableRepository = await execute(`echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null`);
  console.log(setStableRepository);
  onCommandEnd({
    stage: `${setStableRepository.isError ? "error setting" : "set"} docker stable repository (4 of ${total})`,
    log: setStableRepository,
  });
  // fix: was `return;` — callers treated a step-4 failure as success
  if (setStableRepository.isError) return true;
  // fix: "udpate" -> "update"
  const installDocker = await execute(
    "sudo apt-get update && sudo apt-get install docker-ce docker-ce-cli containerd.io -y"
  );
  console.log(installDocker);
  onCommandEnd({
    stage: `${installDocker.isError ? "error installing" : "installed"} docker (5 of ${total})`,
    log: installDocker,
  });
  if (installDocker.isError) return true;
  const addUser = await execute(
    "sudo groupadd docker && sudo usermod -aG docker $USER && newgrp docker"
  );
  console.log(addUser);
  onCommandEnd({
    stage: `${addUser.isError ? "error adding" : "added"} user to docker user group (6 of ${total})`,
    log: addUser,
  });
  if (addUser.isError) return true;
  if (systemCtlEnable) {
    const startOnBoot = await execute(
      "sudo systemctl enable docker.service && sudo systemctl enable containerd.service"
    );
    console.log(startOnBoot);
    onCommandEnd({
      stage: `${startOnBoot.isError ? "error configuring" : "configured"} to start on boot (7 of ${total})`,
      log: startOnBoot,
    });
    if (startOnBoot.isError) return true;
  }
}
/** True when the docker daemon responds to `docker ps`. */
export async function isDockerInstalled() {
  const { isError } = await execute("docker ps");
  return !isError;
}
/** Stop the named container, then remove it. */
export async function deleteContainer(containerName: string) {
  const command = `docker stop ${containerName} && docker container rm ${containerName}`;
  return await execute(command);
}
/* Docker Run for Deployments */

/**
 * Assemble and execute a `docker run -d ...` command from the deployment
 * args. Each segment helper below returns either "" or a fragment with its
 * own leading space, so the concatenation order here fixes the cli
 * argument order.
 */
export async function dockerRun(
  {
    image,
    ports,
    environment,
    network,
    volumes,
    restart,
    postImage,
    containerName,
    containerUser,
  }: DockerRunArgs
) {
  const command =
    `docker run -d` +
    name(containerName) +
    containerUserString(containerUser) +
    portsString(ports) +
    volsString(volumes) +
    envString(environment) +
    restartString(restart) +
    networkString(network) +
    ` ${image}${postImage ? " " + postImage : ""}`;
  return await execute(command);
}
/** ` --name <containerName>` segment, or "" when no name is given. */
function name(containerName?: string) {
  if (!containerName) return "";
  return ` --name ${containerName}`;
}
/** ` -p local:container` segment for every port mapping; "" when none. */
function portsString(ports?: Conversion[]) {
  if (!ports?.length) return "";
  return ports
    .map(({ local, container }) => ` -p ${local}:${container}`)
    .join("");
}
/** ` -v local:container` segment for every volume mapping; "" when none. */
function volsString(volumes?: Conversion[]) {
  if (!volumes?.length) return "";
  const segments = volumes.map(
    ({ local, container }) => ` -v ${local}:${container}`
  );
  return segments.join("");
}
/** ` --restart=<mode>` segment; on-failure is capped at 10 retries. */
function restartString(restart?: string) {
  if (!restart) return "";
  const retrySuffix = restart === "on-failure" ? ":10" : "";
  return ` --restart=${restart}${retrySuffix}`;
}
/** ` -e "VAR=value"` segment for every environment variable; "" when none. */
function envString(environment?: EnvironmentVar[]) {
  if (!environment?.length) return "";
  return environment
    .map(({ variable, value }) => ` -e "${variable}=${value}"`)
    .join("");
}
/** ` --network <network>` segment, or "" when unset. */
function networkString(network?: string) {
  if (!network) return "";
  return ` --network ${network}`;
}
/** ` -u <containerUser>` segment, or "" when unset or empty. */
function containerUserString(containerUser?: string) {
  return containerUser?.length ? ` -u ${containerUser}` : "";
}

View File

@@ -1,25 +0,0 @@
import { exec } from "child_process";
import { promisify } from "util";
import { CommandLogError } from "@monitor/types";
import { prettyStringify } from "./general";
// promisified child_process.exec — resolves with { stdout, stderr }
export const pExec = promisify(exec);

/**
 * Run a shell command without throwing: failures are captured and returned
 * as a CommandLogError with `isError: true` and the error serialized into
 * the log's stderr.
 *
 * @param command the shell command to run
 * @param commandForLog optional substitute recorded on failure (eg to hide secrets)
 */
export async function execute(
  command: string,
  commandForLog?: string
): Promise<CommandLogError> {
  try {
    return {
      command,
      log: await pExec(command),
      isError: false,
    };
  } catch (err) {
    return {
      command: commandForLog || command,
      log: { stderr: prettyStringify(err) },
      isError: true,
    };
  }
}

View File

@@ -1,23 +0,0 @@
/** Lower-case a display name and replace every space with a dash. */
export function toDashedName(name: string) {
  return name.split(" ").join("-").toLowerCase();
}
/** Clamp `num` into [min, max]. */
export function bound(num: number, min: number, max: number) {
  const atLeastMin = Math.max(min, num);
  return Math.min(max, atLeastMin);
}
/** JSON-stringify with 2-space indentation for readable logs. */
export function prettyStringify(json: any): string {
  return JSON.stringify(json, null, 2);
}
/** Current unix time in whole seconds. */
export function timestamp() {
  const ms = Date.now();
  return Math.floor(ms / 1000);
}
/** Ensure the string ends with exactly one added "/" when missing. */
export function trailingSlash(str: string) {
  if (str.endsWith("/")) return str;
  return str + "/";
}
/** Strip a single trailing "/" when present. */
export function noTrailingSlash(str: string) {
  return str.endsWith("/") ? str.slice(0, -1) : str;
}

View File

@@ -1,49 +0,0 @@
import { getCoreDeployment } from "../mongoose/deployment";
import { deleteContainer, dockerRun } from "./docker";
import { execute } from "./execute";
import { prettyStringify } from "./general";
export type RestartError = {
  message: string; // human-readable description of what failed
  error: string; // serialized underlying error, may be empty
}

/**
 * Restart the monitor core container: optionally pull the latest image,
 * look up the core deployment in mongo, then delete and re-run its
 * container. Every failure mode is reported through `onError` rather
 * than thrown.
 */
export async function restart(
  args: { mongoUrl: string, pullLatest: boolean },
  onError: (err: RestartError) => void
) {
  try {
    if (args.pullLatest) {
      try {
        await execute("docker pull mbecker2020/monitor-core");
      } catch (error) {
        // pull failure is reported but does not abort the restart
        onError({
          message: "failed to pull latest image",
          error: prettyStringify(error),
        });
      }
    }
    const deployment = await getCoreDeployment(args);
    if (deployment) {
      try {
        await deleteContainer(deployment.containerName!);
        return await dockerRun(deployment);
      } catch (error) {
        onError({
          message: "failed to restart container",
          error: prettyStringify(error),
        });
      }
    } else {
      onError({
        message: "could not find deployment at name",
        error: "",
      });
    }
  } catch (error) {
    // getCoreDeployment connects to mongo; a throw here means the connection failed
    onError({
      message: "failed to connect to mongo at url",
      error: prettyStringify(error),
    });
  }
}

View File

@@ -1,55 +0,0 @@
import { useInput, Key } from "ink";
import { useCallback, useEffect, useState } from "react";
/**
 * Returns a boolean that toggles every `interval` milliseconds
 * (eg for a blinking cursor).
 *
 * Fix: `interval` was missing from the effect's dependency array, so a
 * changed interval prop never restarted the timer.
 *
 * @param interval toggle period in milliseconds (default 750)
 */
export function useBlinker(interval = 750) {
  const [on, setOn] = useState(false);
  useEffect(() => {
    const int = setInterval(() => {
      setOn((on) => !on);
    }, interval);
    return () => clearInterval(int);
  }, [interval]);
  return on;
}
/** Run `callback` whenever the given special key (from ink's Key map) is pressed. */
export function useKey(key: keyof Key, callback: () => void) {
  useInput((_, k) => {
    if (k[key]) callback();
  });
}

/** Run `onEnter` when the return key is pressed. */
export function useEnter(onEnter: () => void) {
  useKey("return", onEnter);
}

/** Run `onEsc` when the escape key is pressed. */
export function useEsc(onEsc: () => void) {
  useKey("escape", onEsc);
}
/**
 * Keyed local state: returns the store plus a single-field setter and a
 * batch setter. Both setters have stable identities (empty dep arrays) —
 * they only use the functional form of setState.
 */
export function useStore<T>(
  init: T
): [
  T,
  (field: keyof T, val: T[keyof T]) => void,
  (...updates: Array<[field: keyof T, val: T[keyof T]]>) => void
] {
  const [store, setStore] = useState(init);
  // set a single field
  const set = useCallback((field: keyof T, val: T[keyof T]) => {
    setStore((store) => ({ ...store, [field]: val }));
  }, []);
  // apply several [field, value] pairs in one state transition
  const setMany = useCallback(
    (...updates: Array<[field: keyof T, val: T[keyof T]]>) => {
      setStore((store) =>
        Object.assign(
          {},
          store,
          ...updates.map(([field, val]) => ({ [field]: val }))
        )
      );
    },
    []
  );
  return [store, set, setMany];
}

View File

@@ -1,15 +0,0 @@
import getFlags from "./flags";
import { isDockerInstalled } from "./helpers/docker";
/**
 * Load async prerequisites before the CLI renders.
 *
 * Improvement: flag parsing and the docker check are independent, so they
 * now run concurrently via Promise.all instead of sequentially.
 *
 * @returns the parsed cli flags and whether the docker daemon is reachable
 */
async function init() {
  const [flags, dockerInstalled] = await Promise.all([
    getFlags(),
    isDockerInstalled(),
  ]);
  return {
    flags,
    dockerInstalled
  }
}

export default init;

View File

@@ -1,109 +0,0 @@
import { Deployment, Update } from "@monitor/types";
import mongoose from "mongoose";
import { DEFAULT_PORT, DOCKER_NETWORK } from "../../config";
import { Config } from "../../types";
import { timestamp, toDashedName } from "../helpers/general";
import deploymentModel from "./deployment";
import serverModel from "./server";
import updateModel from "./update";
// import userModel from "./user";
/**
 * Seed a fresh monitor database with the documents that describe the
 * monitor stack itself: the core server record, the core deployment, the
 * mongo deployment (when the cli started mongo), and a "Startup" update.
 */
export async function addInitialDocs({ core, mongo }: Config) {
  // when the cli started mongo itself, the url contains the mongo container
  // name, which is not resolvable from the host running this script —
  // connect via 127.0.0.1 instead
  await mongoose.connect(
    mongo?.startConfig
      ? mongo!.url.replaceAll(toDashedName(mongo!.startConfig!.name), "127.0.0.1")
      : mongo!.url
  );
  const servers = serverModel();
  const deployments = deploymentModel();
  const updates = updateModel();
  // const users = userModel();
  const coreServer = {
    name: "core server",
    address: "monitor core",
    enabled: true,
    isCore: true,
  };
  const coreServerID = (await servers.create(coreServer)).toObject()._id;
  // mirrors the docker run arguments used by deployCore, so the GUI can
  // manage the core container like any other deployment
  const coreDeployment: Deployment = {
    name: core!.name,
    isCore: true,
    containerName: toDashedName(core!.name),
    image: "mbecker2020/monitor-core",
    restart: core?.restart,
    volumes: [
      { local: core?.secretVolume!, container: "/secrets" },
      { local: "/var/run/docker.sock", container: "/var/run/docker.sock" },
      { local: core?.sysroot!, container: "/monitor-root" }
    ],
    ports: [
      { local: core?.port.toString()!, container: DEFAULT_PORT.toString() },
    ],
    environment: [
      { variable: "MONGO_URL", value: mongo!.url },
      { variable: "SYSROOT", value: core!.sysroot },
      { variable: "HOST", value: core!.host! }
    ],
    network: DOCKER_NETWORK,
    serverID: coreServerID,
    owners: ["admin"],
  };
  await deployments.create(coreDeployment);
  // record the cli-started mongo container as a deployment as well
  if (mongo?.startConfig) {
    const mongoDeployment: Deployment = {
      name: mongo.startConfig.name,
      containerName: toDashedName(mongo.startConfig.name),
      ports: [{ local: mongo.startConfig.port.toString(), container: "27017" }],
      volumes: mongo.startConfig.volume
        ? [{ local: mongo.startConfig.volume, container: "/data/db" }]
        : undefined,
      restart: mongo.startConfig.restart,
      image: "mongo",
      network: DOCKER_NETWORK,
      owners: ["admin"],
      serverID: coreServerID,
    };
    await deployments.create(mongoDeployment);
  }
  // if (registry?.startConfig) {
  //   const registryDeployment: Deployment = {
  //     name: registry.startConfig.name,
  //     containerName: toDashedName(registry.startConfig.name),
  //     ports: [
  //       { local: registry.startConfig.port.toString(), container: "5000" },
  //     ],
  //     volumes: registry.startConfig.volume
  //       ? [
  //           {
  //             local: registry.startConfig.volume,
  //             container: "/var/lib/registry",
  //           },
  //         ]
  //       : undefined,
  //     restart: registry.startConfig.restart,
  //     image: "registry:2",
  //     network: DOCKER_NETWORK,
  //     serverID: coreServerID,
  //     owners: ["admin"],
  //   };
  //   await deployments.create(registryDeployment);
  // }
  // record that the stack was started, so it shows in the updates feed
  const startupUpdate: Update = {
    operation: "Startup",
    command: "Start monitor",
    log: {
      stdout: "monitor started successfully",
    },
    timestamp: timestamp(),
    note: "",
    operator: "admin"
  }
  await updates.create(startupUpdate);
}

View File

@@ -1,53 +0,0 @@
import { Deployment } from "@monitor/types";
import mongoose from "mongoose";
import { model, Schema } from "mongoose";
/** Construct (and register) the mongoose model for deployments. */
export default function deploymentModel() {
  // { local, container } pair — used for both port and volume mappings
  const Conversion = new Schema({
    local: String,
    container: String,
  });
  const EnvironmentVar = new Schema({
    variable: String,
    value: String,
  });
  const schema = new Schema<Deployment>({
    name: { type: String, unique: true, index: true },
    containerName: { type: String, unique: true, index: true }, // for auto pull of frontend repo as well
    isCore: Boolean,
    owners: { type: [String], default: [] },
    serverID: { type: String, index: true },
    buildID: { type: String, index: true }, // if deploying a monitor build
    /* to create docker run command */
    image: String, // used if deploying an external image (from docker hub)
    ports: [Conversion],
    volumes: [Conversion],
    environment: [EnvironmentVar],
    network: String,
    restart: String,
    postImage: String, // interpolated into run command after the image String
    containerUser: String, // after -u in the run command
    dockerAccount: String,
    /* to manage repo for static frontend, mounted as a volume */
    repo: String,
    branch: String,
    subfolder: String,
    githubAccount: String,
    containerMount: String, // the file path to mount repo on inside the container
    repoMount: String,
  });
  return model("Deployment", schema)
}

/** Connect to mongo and fetch the deployment flagged as core, if any. */
export async function getCoreDeployment({ mongoUrl }: { mongoUrl: string }) {
  await mongoose.connect(mongoUrl);
  const deployments = deploymentModel();
  return (await deployments.findOne({ isCore: true }).lean().exec()) as
    | Deployment
    | undefined;
}

View File

@@ -1,14 +0,0 @@
import { Server } from "@monitor/types";
import { model, Schema } from "mongoose";
/** Construct (and register) the mongoose model for servers. */
export default function serverModel() {
  const schema = new Schema<Server>({
    name: { type: String, unique: true },
    address: String,
    enabled: { type: Boolean, default: true },
    isCore: Boolean, // true only for the record representing monitor core itself
    owners: { type: [String], default: [] }
  });
  return model("Server", schema);
}

View File

@@ -1,24 +0,0 @@
import { Update } from "@monitor/types";
import { model, Schema } from "mongoose";
/** Construct (and register) the mongoose model for update log entries. */
export default function updateModel() {
  // captured stdout / stderr of the command this update records
  const Log = new Schema({
    stdout: String,
    stderr: String,
  });
  const schema = new Schema<Update>({
    buildID: { type: String, index: true },
    deploymentID: { type: String, index: true },
    serverID: { type: String, index: true },
    operation: { type: String, index: true },
    command: String,
    log: Log,
    timestamp: Number,
    note: String,
    isError: Boolean,
    operator: { type: String, index: true }, // the userID or username
  });
  return model("Update", schema);
}

View File

@@ -1,14 +0,0 @@
import { User } from "@monitor/types";
import { Schema, model } from "mongoose";
/** Construct (and register) the mongoose model for users. */
export default function userModel() {
  const schema = new Schema<User>({
    username: { type: String, index: true, required: true },
    permissions: { type: Number, default: 0 },
    password: String,
    avatar: String,
    githubID: { type: Number, index: true },
  });
  return model("User", schema);
}

View File

@@ -1,48 +0,0 @@
import { atom, useAtom } from "jotai";
import { useCallback } from "react";
/**
 * Create a shared-config hook backed by a jotai atom initialized to `init`.
 * Every component calling the returned hook reads and writes the same
 * config value.
 */
export function createUseConfig<T>(init: T) {
  const configAtom = atom<T>(init);
  return () => {
    const [config, setConfig] = useAtom(configAtom);
    // set a single field
    const set = useCallback((field: keyof T, val: T[keyof T]) => {
      setConfig((config) => ({ ...config, [field]: val }));
    }, []);
    // apply several [field, value] pairs in one transition
    const setMany = useCallback(
      (...updates: Array<[field: keyof T, val: T[keyof T]]>) => {
        setConfig((config) =>
          Object.assign(
            {},
            config,
            ...updates.map(([field, val]) => ({ [field]: val }))
          )
        );
      },
      []
    );
    return {
      config: config as T,
      set,
      setMany
    };
  }
}
/**
 * Create a shared step-sequence hook (current step index plus next / prev)
 * backed by a jotai atom, shared by all components using the hook.
 */
export function createUseSequence() {
  const currentAtom = atom(0);
  return () => {
    const [current, set] = useAtom(currentAtom);
    const next = useCallback(() => {
      set((current) => current + 1);
    }, []);
    // prev never goes below step 0
    const prev = useCallback(() => {
      set((current) => Math.max(current - 1, 0));
    }, []);
    return {
      current,
      next,
      prev,
    };
  };
}

View File

@@ -1,20 +0,0 @@
{
"$schema": "https://json.schemastore.org/tsconfig",
"compilerOptions": {
"lib": [
"ESNext"
],
"module": "ESNext",
"jsx": "react",
"moduleResolution": "node",
"target": "ESNext",
"strict": true,
"esModuleInterop": true,
"allowJs": true,
"noImplicitAny": false,
"noEmit": true
},
"include": [
"src"
]
}

View File

@@ -1,16 +0,0 @@
import { readFileSync, writeFileSync } from "fs";

// Bump the patch component of package.json's version in place.
// Fix: read as utf-8 so JSON.parse receives a string rather than a Buffer.
const pkgjson = JSON.parse(readFileSync("package.json", "utf-8"));
const [major, minor, increment] = pkgjson.version
  .split(".")
  .map((item) => Number(item));
pkgjson.version = `${major}.${minor}.${increment + 1}`
writeFileSync(
  "package.json",
  JSON.stringify(pkgjson, undefined, 2)
);
console.log("version updated to", pkgjson.version);

View File

@@ -1,12 +0,0 @@
import { defineConfig } from "vite";

// Bundle the cli for node rather than the browser.
export default defineConfig({
  build: {
    outDir: "build",
    target: "node12", // oldest node runtime the bundle should support
    ssr: true, // build in SSR (node) mode
    rollupOptions: {
      input: "./src/cli.tsx", // cli entrypoint
    },
  },
});

View File

@@ -0,0 +1,68 @@
# this should be the url used to access monitor in browser, potentially behind DNS, eg https://monitor.mogh.tech or http://12.34.56.78:9000
host = "https://monitor.mogh.tech"
# the port the core system will run on. if running core in a docker container, leave this port as 9000 and use a port bind, eg. -p 9001:9000
port = 9000
# daily utc offset in hours to send daily update. eg 8:00 eastern time is 13:00 UTC, so offset should be 13. default of 0 runs at UTC midnight.
daily_offset_hours = 13
# number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
keep_stats_for_days = 120
# secret used to generate the jwt. should be some randomly generated hash.
jwt_secret = "your_jwt_secret"
# can be 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day
jwt_valid_for = "1-wk"
# webhook url given by slack app
slack_url = "your_slack_app_webhook_url"
# token that has to be given to github during webhook config as the secret
github_webhook_secret = "your_random_webhook_secret"
# optional. an alternate base url that is used to receive github webhook requests. if not provided, will use 'host' address as base
github_webhook_base_url = "https://monitor-github-webhook.mogh.tech"
# token used to authenticate core requests to periphery
passkey = "your_random_passkey"
# can be 30-sec, 1-min, 2-min, 5-min
monitoring_interval = "1-min"
# allow or deny user login with username / password
local_auth = true
# these will be given in the GUI to attach to builds. New build docker orgs will default to first org (or none if empty).
docker_organizations = ["your_docker_org1", "your_docker_org_2"]
[aws]
access_key_id = "your_aws_key_id"
secret_access_key = "your_aws_secret_key"
default_region = "us-east-1"
default_ami_id = "your_periphery_ami"
default_key_pair_name = "your_default_key_pair_name"
default_instance_type = "m5.2xlarge"
default_volume_gb = 8
default_subnet_id = "your_default_subnet_id"
default_security_group_ids = ["sg_id_1", "sg_id_2"]
default_assign_public_ip = false
[aws.available_ami_accounts]
your_periphery_ami = { name = "default ami", github = ["github_username"], docker = ["docker_username"] }
[github_oauth]
enabled = true
id = "your_github_client_id"
secret = "your_github_client_secret"
[google_oauth]
enabled = true
id = "your_google_client_id"
secret = "your_google_client_secret"
[mongo]
uri = "your_mongo_uri"
app_name = "monitor_core"
db_name = "monitor"

View File

@@ -0,0 +1 @@
CONFIG_PATH=../config/core.config.example.toml # optional, default is /config/config.toml. this is usually bind mounted into the container

View File

@@ -0,0 +1,16 @@
port = 8000 # optional. 8000 is default
repo_dir = "/repos" # optional. /repos is default. no reason to change if running the docker container, just mount your desired repo dir to /repos in the container
stats_polling_rate = "5-sec" # optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded
allowed_ips = ["127.0.0.1"] # optional. default is empty, which will not block any request by ip.
passkeys = ["abcdefghijk"] # optional. default is empty, which will not require any passkey to be passed by core.
[secrets] # optional. can inject these values into your deployments configuration.
secret_variable = "secret_value"
[github_accounts] # optional
github_username1 = "github_token1"
github_username2 = "github_token2"
[docker_accounts] # optional
docker_username1 = "docker_token1"
docker_username2 = "docker_token2"

View File

@@ -0,0 +1 @@
CONFIG_PATH=../config/periphery.config.example.toml # optional, default is /config/config.toml. this is usually bind mounted into the container

1
core/.gitignore vendored
View File

@@ -1 +0,0 @@
frontend

39
core/Cargo.toml Normal file
View File

@@ -0,0 +1,39 @@
[package]
name = "core"
version = "0.2.4"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
helpers = { package = "monitor_helpers", path = "../lib/helpers" }
types = { package = "monitor_types", path = "../lib/types" }
db = { package = "db_client", path = "../lib/db_client" }
periphery = { package = "periphery_client", path = "../lib/periphery_client" }
axum_oauth2 = { path = "../lib/axum_oauth2" }
tokio = { version = "1.25", features = ["full"] }
tokio-tungstenite = { version = "0.18", features=["native-tls"] }
tokio-util = "0.7"
axum = { version = "0.6", features = ["ws", "json"] }
axum-extra = { version = "0.5.0", features = ["spa"] }
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.4.0", features = ["cors"] }
slack = { package = "slack_client_rs", version = "0.0.8" }
mungos = "0.3.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
dotenv = "0.15"
envy = "0.4"
anyhow = "1.0"
bcrypt = "0.14"
jwt = "0.16"
hmac = "0.12"
sha2 = "0.10"
async_timing_util = "0.1.14"
futures-util = "0.3"
diff-struct = "0.5"
typeshare = "1.0.0"
hex = "0.4"
aws-config = "0.54"
aws-sdk-ec2 = "0.24"

View File

@@ -1,18 +0,0 @@
# Image for monitor core: node runtime plus the docker cli so core can
# run builds and deployments against the host docker daemon.
FROM node:alpine
# docker + git for build/clone operations, openrc to manage the docker
# service, yarn/python3 for installing node deps that have native addons
RUN apk update && apk add docker git openrc yarn python3 --no-cache
# let root use the docker socket, and start docker at boot
RUN addgroup root docker && rc-update add docker boot
WORKDIR /monitor
# install production deps from the trimmed manifest (see post-build.mjs,
# which writes ./build/package.json with dev-only fields removed)
COPY ./build/package.json ./
# the .gyp virtual group provides the toolchain for native modules and is
# removed again after install to keep the image small
RUN apk add --no-cache --virtual .gyp python3 make g++ \
&& npm i \
&& apk del .gyp
COPY ./build/main.js ./
# pre-built frontend assets served by the node server
COPY ./frontend /frontend
CMD node main.js

View File

@@ -1,43 +0,0 @@
{
"name": "monitor-core",
"version": "1.0.0",
"main": "index.js",
"author": "mbecker20",
"license": "GPL v3.0",
"scripts": {
"start": "tsc && node build/main.js",
"build-copy-frontend": "cd ../frontend && yarn build && cd ../core && rm -r frontend && mkdir frontend && cp -a ../frontend/build/. ./frontend/",
"build": "vite build && node post-build.mjs && yarn build-copy-frontend && docker build -t mbecker2020/monitor-core .",
"push": "yarn build && docker push mbecker2020/monitor-core"
},
"devDependencies": {
"@monitor/types": "1.0.0",
"@swc/core": "^1.2.156",
"@types/bcrypt": "^5.0.0",
"@types/dockerode": "^3.3.3",
"@types/fs-extra": "^9.0.13",
"@types/ws": "^8.5.3",
"typescript": "^4.6.2",
"vite": "^2.8.6"
},
"dependencies": {
"@monitor/util": "1.0.0",
"@monitor/util-node": "1.0.0",
"@slack/web-api": "^6.7.1",
"axios": "^0.26.1",
"bcrypt": "^5.0.1",
"dockerode": "^3.3.1",
"fast-jwt": "^1.5.1",
"fastify": "^3.27.4",
"fastify-cors": "^6.0.3",
"fastify-helmet": "^7.0.1",
"fastify-jwt": "^4.1.3",
"fastify-oauth2": "^4.5.0",
"fastify-plugin": "^3.0.1",
"fastify-static": "^4.5.0",
"fastify-websocket": "^4.2.0",
"fs-extra": "^10.0.1",
"mongoose": "^6.2.6",
"node-os-utils": "^1.3.6"
}
}

View File

@@ -1,11 +0,0 @@
// Strips dev-only fields from package.json and writes the trimmed manifest
// into ./build, where the Dockerfile copies it for the production image.
import { readFileSync, writeFileSync } from "fs";

// read with an explicit encoding so JSON.parse receives a string, not a Buffer
const raw = readFileSync("package.json", "utf8");
const json = JSON.parse(raw);

// scripts and devDependencies are not needed at runtime
delete json.scripts;
delete json.devDependencies;
// workspace-local packages are bundled directly, not installed from npm
delete json.dependencies["@monitor/util"];
delete json.dependencies["@monitor/util-node"];

writeFileSync("./build/package.json", JSON.stringify(json, undefined, 2));

588
core/src/actions/build.rs Normal file
View File

@@ -0,0 +1,588 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::{all_logs_success, to_monitor_name};
use mungos::{doc, to_bson};
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Build, Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget, Version,
};
use crate::{
auth::RequestUser,
cloud::aws::{
self, create_ec2_client, create_instance_with_ami, terminate_ec2_instance, Ec2Instance,
},
state::State,
};
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
impl State {
/// Fetches a build, ensuring `user` holds at least `permission_level` on it.
/// Admins bypass the check. Errors if the build is missing or access is denied.
pub async fn get_build_check_permissions(
    &self,
    build_id: &str,
    user: &RequestUser,
    permission_level: PermissionLevel,
) -> anyhow::Result<Build> {
    let build = self.db.get_build(build_id).await?;
    // non-admins must hold a sufficient permission level on this build
    if !user.is_admin && build.get_user_permissions(&user.id) < permission_level {
        return Err(anyhow!(
            "user does not have required permissions on this build"
        ));
    }
    Ok(build)
}
/// Reports whether any action is currently running against the build
/// with the given id. Unknown ids are not busy.
pub async fn build_busy(&self, id: &str) -> bool {
    let states = self.build_action_states.lock().await;
    states.get(id).map(|state| state.busy()).unwrap_or(false)
}
/// Creates a new build owned by `user` and records a `CreateBuild` update.
///
/// The caller must be an admin or hold the create-build permission. The new
/// build defaults its docker organization to the first configured org (or
/// none when the list is empty) and grants the creator `Update` permission.
pub async fn create_build(&self, name: &str, user: &RequestUser) -> anyhow::Result<Build> {
    if !user.is_admin && !user.create_build_permissions {
        return Err(anyhow!("user does not have permission to create builds"));
    }
    let start_ts = monitor_timestamp();
    let build = Build {
        name: to_monitor_name(name),
        // idiomatic `.first().cloned()` instead of `.get(0).map(to_string)`
        docker_organization: self.config.docker_organizations.first().cloned(),
        permissions: [(user.id.clone(), PermissionLevel::Update)]
            .into_iter()
            .collect(),
        last_built_at: "never".to_string(),
        created_at: start_ts.clone(),
        updated_at: start_ts.clone(),
        ..Default::default()
    };
    let build_id = self
        .db
        .builds
        .create_one(build)
        .await
        .context("failed at adding build to db")?;
    // re-read so the returned document reflects db defaults / generated id
    let build = self.db.get_build(&build_id).await?;
    let update = Update {
        target: UpdateTarget::Build(build_id),
        operation: Operation::CreateBuild,
        start_ts,
        end_ts: Some(monitor_timestamp()),
        operator: user.id.clone(),
        success: true,
        ..Default::default()
    };
    self.add_update(update).await?;
    Ok(build)
}
/// Creates a build then immediately applies the full configuration carried
/// in `build` to it, returning the resulting document.
pub async fn create_full_build(
    &self,
    mut build: Build,
    user: &RequestUser,
) -> anyhow::Result<Build> {
    let created = self.create_build(&build.name, user).await?;
    build.id = created.id;
    self.update_build(build, user).await
}
/// Duplicates an existing build under a new name, resetting its version.
/// Requires `Update` permission on the source build.
pub async fn copy_build(
    &self,
    target_id: &str,
    new_name: String,
    user: &RequestUser,
) -> anyhow::Result<Build> {
    let mut source = self
        .get_build_check_permissions(target_id, user, PermissionLevel::Update)
        .await?;
    source.name = new_name;
    source.version = Version::default();
    self.create_full_build(source, user).await
}
/// Deletes a build (requires `Update` permission) and records a
/// `DeleteBuild` update. Refuses if an action is already running on it.
pub async fn delete_build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Build> {
    if self.build_busy(build_id).await {
        return Err(anyhow!("build busy"));
    }
    let build = self
        .get_build_check_permissions(build_id, user, PermissionLevel::Update)
        .await?;
    let start_ts = monitor_timestamp();
    self.db.builds.delete_one(build_id).await?;
    let log = Log::simple(
        "delete build",
        format!("deleted build {}", build.name),
    );
    self.add_update(Update {
        target: UpdateTarget::Build(build_id.to_string()),
        operation: Operation::DeleteBuild,
        start_ts,
        end_ts: Some(monitor_timestamp()),
        operator: user.id.clone(),
        logs: vec![log],
        success: true,
        ..Default::default()
    })
    .await?;
    Ok(build)
}
/// Applies a config change to a build, flagging it as `updating` for the
/// duration of the inner call so no other action can race it.
pub async fn update_build(
    &self,
    new_build: Build,
    user: &RequestUser,
) -> anyhow::Result<Build> {
    if self.build_busy(&new_build.id).await {
        return Err(anyhow!("build busy"));
    }
    let id = new_build.id.clone();
    // the temporary lock guard drops at the end of each statement
    self.build_action_states
        .lock()
        .await
        .entry(id.clone())
        .or_default()
        .updating = true;
    let res = self.update_build_inner(new_build, user).await;
    self.build_action_states
        .lock()
        .await
        .entry(id)
        .or_default()
        .updating = false;
    res
}
/// Persists a build config change after permission checks, recording an
/// `UpdateBuild` update containing the serialized diff. Immutable fields
/// (name, permissions, timestamps) are copied back from the current doc.
async fn update_build_inner(
&self,
mut new_build: Build,
user: &RequestUser,
) -> anyhow::Result<Build> {
let start_ts = monitor_timestamp();
let current_build = self
.get_build_check_permissions(&new_build.id, user, PermissionLevel::Update)
.await?;
// attaching the build to a different server additionally requires
// Update permission on that server
if let Some(new_server_id) = &new_build.server_id {
if current_build.server_id.is_none()
|| new_server_id != current_build.server_id.as_ref().unwrap()
{
self.get_server_check_permissions(new_server_id, user, PermissionLevel::Update)
.await
.context("user does not have permission to attach build to this server")?;
}
}
// none of these should be changed through this method
new_build.name = current_build.name.clone();
new_build.permissions = current_build.permissions.clone();
new_build.last_built_at = current_build.last_built_at.clone();
new_build.created_at = current_build.created_at.clone();
new_build.updated_at = start_ts.clone();
self.db
.builds
.update_one(&new_build.id, mungos::Update::Regular(new_build.clone()))
.await
.context("failed at update one build")?;
// diff is only used for the human-readable update log
let diff = current_build.diff(&new_build);
let update = Update {
operation: Operation::UpdateBuild,
target: UpdateTarget::Build(new_build.id.clone()),
start_ts,
status: UpdateStatus::Complete,
logs: vec![Log::simple(
"build update",
serde_json::to_string_pretty(&diff).unwrap(),
)],
operator: user.id.clone(),
end_ts: Some(monitor_timestamp()),
success: true,
..Default::default()
};
// NOTE(review): the block below (re-cloning the repo when repo config
// changed) is intentionally disabled; kept for reference.
// update.id = self.add_update(update.clone()).await?;
// if any_option_diff_is_some(&[&diff.repo, &diff.branch, &diff.github_account])
// || option_diff_is_some(&diff.on_clone)
// {
// let server = self.db.get_server(&current_build.server_id).await?;
// match self.periphery.clone_repo(&server, &new_build).await {
// Ok(clone_logs) => {
// update.logs.extend(clone_logs);
// }
// Err(e) => update
// .logs
// .push(Log::error("cloning repo", format!("{e:#?}"))),
// }
// }
// update.end_ts = Some(monitor_timestamp());
// update.success = all_logs_success(&update.logs);
// update.status = UpdateStatus::Complete;
self.add_update(update).await?;
Ok(new_build)
}
/// Runs a build after guarding against concurrent actions, flagging the
/// build as `building` while the inner routine executes.
pub async fn build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
    if self.build_busy(build_id).await {
        return Err(anyhow!("build busy"));
    }
    // the temporary lock guard drops at the end of each statement
    self.build_action_states
        .lock()
        .await
        .entry(build_id.to_string())
        .or_default()
        .building = true;
    let res = self.build_inner(build_id, user).await;
    self.build_action_states
        .lock()
        .await
        .entry(build_id.to_string())
        .or_default()
        .building = false;
    res
}
/// Runs the full build pipeline: resolve a builder (attached server or a
/// fresh ec2 instance), clone the repo, run the build on periphery, record
/// the new version on success, then clean up the repo and any ec2 instance.
/// Progress is streamed into an `Update` document throughout.
async fn build_inner(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
let mut build = self
.get_build_check_permissions(build_id, user, PermissionLevel::Update)
.await?;
// bump version up front; it is only persisted if the build succeeds
build.version.increment();
let mut update = Update {
target: UpdateTarget::Build(build_id.to_string()),
operation: Operation::BuildBuild,
start_ts: monitor_timestamp(),
status: UpdateStatus::InProgress,
operator: user.id.clone(),
success: true,
version: build.version.clone().into(),
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
// resolve the builder: a permanently attached server (no aws client),
// or an ec2 instance created on demand from the build's aws_config
let (server, aws_client) = if let Some(server_id) = &build.server_id {
let server = self.db.get_server(server_id).await;
if let Err(e) = server {
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
update
.logs
.push(Log::error("get build server", format!("{e:#?}")));
self.update_update(update.clone()).await?;
return Err(e);
}
// instance_id is empty for attached servers -> nothing to terminate
let server = Ec2Instance {
instance_id: String::new(),
server: server.unwrap(),
};
(server, None)
} else if build.aws_config.is_some() {
let start_ts = monitor_timestamp();
let res = self.create_ec2_instance_for_build(&build).await;
if let Err(e) = res {
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
update.logs.push(Log {
stage: "start build server".to_string(),
stderr: format!("{e:#?}"),
success: false,
start_ts,
end_ts: monitor_timestamp(),
..Default::default()
});
self.update_update(update).await?;
return Err(e);
}
let (server, aws_client, logs) = res.unwrap();
update.logs.extend(logs);
self.update_update(update.clone()).await?;
(server, aws_client)
} else {
// neither builder source configured: fail fast
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
update.logs.push(Log::error(
"start build",
"build has neither server_id nor aws_config attached".to_string(),
));
self.update_update(update).await?;
return Err(anyhow!(
"build has neither server_id or aws_config attached"
));
};
let clone_success = match self.periphery.clone_repo(&server.server, &build).await {
Ok(clone_logs) => {
update.logs.extend(clone_logs);
true
}
Err(e) => {
update
.logs
.push(Log::error("clone repo", format!("{e:#?}")));
false
}
};
if !clone_success {
// best-effort cleanup: drop the repo and any on-demand instance
let _ = self
.periphery
.delete_repo(&server.server, &build.name)
.await;
if let Some(aws_client) = aws_client {
self.terminate_ec2_instance(aws_client, &server, &mut update)
.await;
}
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.success = false;
self.update_update(update.clone()).await?;
return Ok(update);
}
self.update_update(update.clone()).await?;
// None from periphery means the builder refused the job (busy)
let build_logs = match self
.periphery
.build(&server.server, &build)
.await
.context("failed at call to periphery to build")
{
Ok(logs) => logs,
Err(e) => Some(vec![Log::error("build", format!("{e:#?}"))]),
};
match build_logs {
Some(logs) => {
let success = all_logs_success(&logs);
update.logs.extend(logs);
if success {
// persist the bumped version + last_built_at only on success
let _ = self
.db
.builds
.update_one::<Build>(
build_id,
mungos::Update::Set(doc! {
"version": to_bson(&build.version)
.context("failed at converting version to bson")?,
"last_built_at": monitor_timestamp(),
}),
)
.await;
}
}
None => {
update
.logs
.push(Log::error("build", "builder busy".to_string()));
}
}
// cleanup runs regardless of build outcome
let _ = self
.periphery
.delete_repo(&server.server, &build.name)
.await;
if let Some(aws_client) = aws_client {
self.terminate_ec2_instance(aws_client, &server, &mut update)
.await;
}
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
self.update_update(update.clone()).await?;
Ok(update)
}
/// Launches an ec2 instance for a build, falling back to the global aws
/// defaults for any per-build setting left unset, then polls the periphery
/// agent on it until it responds (or gives up and terminates the instance).
/// Returns the instance, the aws client, and the startup/connect logs.
async fn create_ec2_instance_for_build(
&self,
build: &Build,
) -> anyhow::Result<(Ec2Instance, Option<aws::Client>, Vec<Log>)> {
if build.aws_config.is_none() {
return Err(anyhow!("build has no aws_config attached"));
}
let start_instance_ts = monitor_timestamp();
// safe: is_none() checked just above
let aws_config = build.aws_config.as_ref().unwrap();
let region = aws_config
.region
.as_ref()
.unwrap_or(&self.config.aws.default_region)
.to_string();
let aws_client = create_ec2_client(
region,
&self.config.aws.access_key_id,
self.config.aws.secret_access_key.clone(),
)
.await;
// each setting below: per-build override, else configured default
let ami_id = aws_config
.ami_id
.as_ref()
.unwrap_or(&self.config.aws.default_ami_id);
let instance_type = aws_config
.instance_type
.as_ref()
.unwrap_or(&self.config.aws.default_instance_type);
let subnet_id = aws_config
.subnet_id
.as_ref()
.unwrap_or(&self.config.aws.default_subnet_id);
let security_group_ids = aws_config
.security_group_ids
.as_ref()
.unwrap_or(&self.config.aws.default_security_group_ids)
.to_owned();
let readable_sec_group_ids = security_group_ids.join(", ");
let volume_size_gb = *aws_config
.volume_gb
.as_ref()
.unwrap_or(&self.config.aws.default_volume_gb);
let key_pair_name = aws_config
.key_pair_name
.as_ref()
.unwrap_or(&self.config.aws.default_key_pair_name);
let assign_public_ip = *aws_config
.assign_public_ip
.as_ref()
.unwrap_or(&self.config.aws.default_assign_public_ip);
let instance = create_instance_with_ami(
&aws_client,
&format!("BUILDER-{}-v{}", build.name, build.version.to_string()),
ami_id,
instance_type,
subnet_id,
security_group_ids,
volume_size_gb,
key_pair_name,
assign_public_ip,
)
.await?;
let instance_id = &instance.instance_id;
let start_log = Log {
stage: "start build instance".to_string(),
success: true,
stdout: format!("instance id: {instance_id}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_size_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}"),
start_ts: start_instance_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
let start_connect_ts = monitor_timestamp();
// poll until the periphery agent on the fresh instance answers;
// `res` keeps the last error for the failure message below
let mut res = Ok(String::new());
for _ in 0..BUILDER_POLL_MAX_TRIES {
let version = self.periphery.get_version(&instance.server).await;
if let Ok(version) = version {
let connect_log = Log {
stage: "build instance connected".to_string(),
success: true,
stdout: format!("established contact with periphery on builder\nperiphery version: v{version}"),
start_ts: start_connect_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
return Ok((instance, Some(aws_client), vec![start_log, connect_log]));
}
res = version;
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS)).await;
}
// never reachable: terminate the instance so it does not leak
let _ = terminate_ec2_instance(&aws_client, &instance.instance_id).await;
Err(anyhow!(
"unable to reach periphery agent on build server\n{res:#?}"
))
}
async fn terminate_ec2_instance(
&self,
aws_client: aws::Client,
server: &Ec2Instance,
update: &mut Update,
) {
let res = terminate_ec2_instance(&aws_client, &server.instance_id).await;
if let Err(e) = res {
update
.logs
.push(Log::error("terminate instance", format!("{e:#?}")))
} else {
update.logs.push(Log::simple(
"terminate instance",
format!("terminate instance id {}", server.instance_id),
))
}
}
// pub async fn reclone_build(
// &self,
// build_id: &str,
// user: &RequestUser,
// ) -> anyhow::Result<Update> {
// if self.build_busy(build_id).await {
// return Err(anyhow!("build busy"));
// }
// {
// let mut lock = self.build_action_states.lock().await;
// let entry = lock.entry(build_id.to_string()).or_default();
// entry.recloning = true;
// }
// let res = self.reclone_build_inner(build_id, user).await;
// {
// let mut lock = self.build_action_states.lock().await;
// let entry = lock.entry(build_id.to_string()).or_default();
// entry.recloning = false;
// }
// res
// }
// async fn reclone_build_inner(
// &self,
// build_id: &str,
// user: &RequestUser,
// ) -> anyhow::Result<Update> {
// let build = self
// .get_build_check_permissions(build_id, user, PermissionLevel::Update)
// .await?;
// let server = self.db.get_server(&build.server_id).await?;
// let mut update = Update {
// target: UpdateTarget::Build(build_id.to_string()),
// operation: Operation::RecloneBuild,
// start_ts: monitor_timestamp(),
// status: UpdateStatus::InProgress,
// operator: user.id.clone(),
// success: true,
// ..Default::default()
// };
// update.id = self.add_update(update.clone()).await?;
// update.success = match self.periphery.clone_repo(&server, &build).await {
// Ok(clone_logs) => {
// update.logs.extend(clone_logs);
// true
// }
// Err(e) => {
// update
// .logs
// .push(Log::error("clone repo", format!("{e:#?}")));
// false
// }
// };
// update.status = UpdateStatus::Complete;
// update.end_ts = Some(monitor_timestamp());
// self.update_update(update.clone()).await?;
// Ok(update)
// }
}

View File

@@ -0,0 +1,664 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::{all_logs_success, to_monitor_name};
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Deployment, Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget,
};
use crate::{
auth::RequestUser,
helpers::{any_option_diff_is_some, get_image_name, option_diff_is_some},
state::State,
};
impl State {
/// Fetches a deployment, ensuring `user` holds at least `permission_level`
/// on it. Admins bypass the check.
pub async fn get_deployment_check_permissions(
    &self,
    deployment_id: &str,
    user: &RequestUser,
    permission_level: PermissionLevel,
) -> anyhow::Result<Deployment> {
    let deployment = self.db.get_deployment(deployment_id).await?;
    // non-admins must hold a sufficient permission level on this deployment
    if !user.is_admin && deployment.get_user_permissions(&user.id) < permission_level {
        return Err(anyhow!(
            "user does not have required permissions on this deployment"
        ));
    }
    Ok(deployment)
}
/// Reports whether any action is currently running against the deployment
/// with the given id. Unknown ids are not busy.
pub async fn deployment_busy(&self, id: &str) -> bool {
    let states = self.deployment_action_states.lock().await;
    states.get(id).map(|state| state.busy()).unwrap_or(false)
}
/// Creates a deployment on `server_id` owned by `user`, recording a
/// `CreateDeployment` update. Requires `Update` permission on the server.
pub async fn create_deployment(
    &self,
    name: &str,
    server_id: String,
    user: &RequestUser,
) -> anyhow::Result<Deployment> {
    // must be able to update the target server to attach deployments to it
    self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
        .await?;
    let ts = monitor_timestamp();
    let deployment = Deployment {
        name: to_monitor_name(name),
        server_id,
        permissions: [(user.id.clone(), PermissionLevel::Update)]
            .into_iter()
            .collect(),
        created_at: ts.clone(),
        updated_at: ts.clone(),
        ..Default::default()
    };
    let deployment_id = self
        .db
        .deployments
        .create_one(deployment)
        .await
        .context("failed to add deployment to db")?;
    // re-read so the returned document reflects db defaults / generated id
    let deployment = self.db.get_deployment(&deployment_id).await?;
    self.add_update(Update {
        target: UpdateTarget::Deployment(deployment_id),
        operation: Operation::CreateDeployment,
        start_ts: ts,
        end_ts: Some(monitor_timestamp()),
        operator: user.id.clone(),
        success: true,
        ..Default::default()
    })
    .await?;
    Ok(deployment)
}
/// Creates a deployment then immediately applies the full configuration
/// carried in `deployment` to it.
pub async fn create_full_deployment(
    &self,
    mut deployment: Deployment,
    user: &RequestUser,
) -> anyhow::Result<Deployment> {
    let created = self
        .create_deployment(&deployment.name, deployment.server_id.clone(), user)
        .await?;
    deployment.id = created.id;
    self.update_deployment(deployment, user).await
}
/// Duplicates an existing deployment under a new name onto (possibly
/// another) server. Requires `Update` permission on the source deployment.
pub async fn copy_deployment(
    &self,
    target_id: &str,
    new_name: String,
    new_server_id: String,
    user: &RequestUser,
) -> anyhow::Result<Deployment> {
    let mut source = self
        .get_deployment_check_permissions(target_id, user, PermissionLevel::Update)
        .await?;
    source.name = new_name;
    source.server_id = new_server_id;
    self.create_full_deployment(source, user).await
}
/// Deletes a deployment: first removes its container on the host server
/// (best-effort; failure is only logged), then deletes the db document and
/// records a `DeleteDeployment` update. Requires `Update` permission.
pub async fn delete_deployment(
&self,
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Deployment> {
if self.deployment_busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
let deployment = self
.get_deployment_check_permissions(deployment_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let server = self.db.get_server(&deployment.server_id).await?;
// container removal happens before db deletion so the record still
// exists if the periphery call is what fails
let log = match self
.periphery
.container_remove(&server, &deployment.name)
.await
{
Ok(log) => log,
Err(e) => Log::error("destroy container", format!("{e:#?}")),
};
self.db
.deployments
.delete_one(deployment_id)
.await
.context(format!(
"failed at deleting deployment at {deployment_id} from mongo"
))?;
let update = Update {
target: UpdateTarget::Deployment(deployment_id.to_string()),
operation: Operation::DeleteDeployment,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
logs: vec![
log,
Log::simple(
"delete deployment",
format!(
"deleted deployment {} on server {}",
deployment.name, server.name
),
),
],
success: true,
..Default::default()
};
self.add_update(update).await?;
Ok(deployment)
}
/// Applies a config change to a deployment, flagging it as `updating`
/// while the inner update runs so no other action can race it.
pub async fn update_deployment(
    &self,
    new_deployment: Deployment,
    user: &RequestUser,
) -> anyhow::Result<Deployment> {
    if self.deployment_busy(&new_deployment.id).await {
        return Err(anyhow!("deployment busy"));
    }
    let id = new_deployment.id.clone();
    // the temporary lock guard drops at the end of each statement
    self.deployment_action_states
        .lock()
        .await
        .entry(id.clone())
        .or_default()
        .updating = true;
    let res = self.update_deployment_inner(new_deployment, user).await;
    self.deployment_action_states
        .lock()
        .await
        .entry(id)
        .or_default()
        .updating = false;
    res
}
/// Persists a deployment config change after permission checks, recording
/// an `UpdateDeployment` update with the serialized diff. If repo-related
/// fields changed, the repo is re-cloned on the host server.
async fn update_deployment_inner(
&self,
mut new_deployment: Deployment,
user: &RequestUser,
) -> anyhow::Result<Deployment> {
let current_deployment = self
.get_deployment_check_permissions(&new_deployment.id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
// none of these should be changed through this method
new_deployment.name = current_deployment.name.clone();
new_deployment.permissions = current_deployment.permissions.clone();
new_deployment.server_id = current_deployment.server_id.clone();
new_deployment.created_at = current_deployment.created_at.clone();
new_deployment.updated_at = start_ts.clone();
self.db
.deployments
.update_one(
&new_deployment.id,
mungos::Update::Regular(new_deployment.clone()),
)
.await
.context("failed at update one deployment")?;
let diff = current_deployment.diff(&new_deployment);
let mut update = Update {
operation: Operation::UpdateDeployment,
target: UpdateTarget::Deployment(new_deployment.id.clone()),
start_ts,
status: UpdateStatus::InProgress,
logs: vec![Log::simple(
"deployment update",
serde_json::to_string_pretty(&diff).unwrap(),
)],
operator: user.id.clone(),
success: true,
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
// repo/branch/account/on_clone changes require a fresh clone on the host
if any_option_diff_is_some(&[&diff.repo, &diff.branch, &diff.github_account])
|| option_diff_is_some(&diff.on_clone)
{
let server = self.db.get_server(&current_deployment.server_id).await?;
match self.periphery.clone_repo(&server, &new_deployment).await {
Ok(clone_logs) => {
update.logs.extend(clone_logs);
}
Err(e) => update
.logs
.push(Log::error("cloning repo", format!("{e:#?}"))),
}
}
update.end_ts = Some(monitor_timestamp());
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
self.update_update(update).await?;
Ok(new_deployment)
}
/// Re-clones a deployment's repo, flagging the deployment as `recloning`
/// for the duration of the inner call.
pub async fn reclone_deployment(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    if self.deployment_busy(deployment_id).await {
        return Err(anyhow!("deployment busy"));
    }
    // the temporary lock guard drops at the end of each statement
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .recloning = true;
    let res = self.reclone_deployment_inner(deployment_id, user).await;
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .recloning = false;
    res
}
/// Does the actual reclone: records an in-progress update, asks periphery
/// on the deployment's server to re-clone the repo, then finalizes the
/// update with the clone logs and overall success.
async fn reclone_deployment_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    let deployment = self
        .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
        .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
        target: UpdateTarget::Deployment(deployment_id.to_string()),
        operation: Operation::RecloneDeployment,
        start_ts: monitor_timestamp(),
        status: UpdateStatus::InProgress,
        operator: user.id.clone(),
        success: true,
        ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;
    match self.periphery.clone_repo(&server, &deployment).await {
        Ok(clone_logs) => {
            update.logs.extend(clone_logs);
            update.success = true;
        }
        Err(e) => {
            update
                .logs
                .push(Log::error("clone repo", format!("{e:#?}")));
            update.success = false;
        }
    }
    update.status = UpdateStatus::Complete;
    update.end_ts = Some(monitor_timestamp());
    self.update_update(update.clone()).await?;
    Ok(update)
}
/// Deploys the deployment's container, flagging it as `deploying` while
/// the inner routine runs so no other action can race it.
pub async fn deploy_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    if self.deployment_busy(deployment_id).await {
        return Err(anyhow!("deployment busy"));
    }
    // the temporary lock guard drops at the end of each statement
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .deploying = true;
    let res = self.deploy_container_inner(deployment_id, user).await;
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .deploying = false;
    res
}
/// Deploys the deployment's container via periphery. When the deployment is
/// backed by a build, resolves the image name and version from that build
/// (deployment-pinned version takes precedence over the build's latest) and
/// inherits the build's docker account if the deployment has none set.
async fn deploy_container_inner(
&self,
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let mut deployment = self
.get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
.await?;
let version = if let Some(build_id) = &deployment.build_id {
let build = self.db.get_build(build_id).await?;
let image = get_image_name(&build);
// fall back to the build's docker account when none is set here
if deployment.docker_run_args.docker_account.is_none() {
if let Some(docker_account) = &build.docker_account {
deployment.docker_run_args.docker_account = Some(docker_account.to_string())
};
}
// pinned build_version wins over the build's current version
let version = if let Some(version) = &deployment.build_version {
version.clone()
} else {
build.version.clone()
};
deployment.docker_run_args.image = format!("{image}:{}", version.to_string());
Some(version)
} else {
None
};
let server = self.db.get_server(&deployment.server_id).await?;
let mut update = Update {
target: UpdateTarget::Deployment(deployment_id.to_string()),
operation: Operation::DeployContainer,
start_ts,
status: UpdateStatus::InProgress,
operator: user.id.clone(),
success: true,
version,
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let deploy_log = match self.periphery.deploy(&server, &deployment).await {
Ok(log) => log,
Err(e) => Log::error("deploy container", format!("{e:#?}")),
};
update.success = deploy_log.success;
update.logs.push(deploy_log);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
self.update_update(update.clone()).await?;
Ok(update)
}
/// Starts the deployment's container, flagging it as `starting` while the
/// inner routine runs so no other action can race it.
pub async fn start_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    if self.deployment_busy(deployment_id).await {
        return Err(anyhow!("deployment busy"));
    }
    // the temporary lock guard drops at the end of each statement
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .starting = true;
    let res = self.start_container_inner(deployment_id, user).await;
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .starting = false;
    res
}
/// Starts the deployment's container via periphery, recording the result
/// in a `StartContainer` update. Requires `Execute` permission.
async fn start_container_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let deployment = self
        .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
        .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
        target: UpdateTarget::Deployment(deployment_id.to_string()),
        operation: Operation::StartContainer,
        start_ts,
        status: UpdateStatus::InProgress,
        success: true,
        operator: user.id.clone(),
        ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;
    match self
        .periphery
        .container_start(&server, &deployment.name)
        .await
    {
        Ok(log) => {
            update.success = log.success;
            update.logs.push(log);
        }
        Err(e) => {
            update.success = false;
            update
                .logs
                .push(Log::error("start container", format!("{e:#?}")));
        }
    }
    update.end_ts = Some(monitor_timestamp());
    update.status = UpdateStatus::Complete;
    self.update_update(update.clone()).await?;
    Ok(update)
}
/// Stops the deployment's container, flagging it as `stopping` while the
/// inner routine runs so no other action can race it.
pub async fn stop_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    if self.deployment_busy(deployment_id).await {
        return Err(anyhow!("deployment busy"));
    }
    // the temporary lock guard drops at the end of each statement
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .stopping = true;
    let res = self.stop_container_inner(deployment_id, user).await;
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .stopping = false;
    res
}
/// Stops the deployment's container via periphery, recording the result
/// in a `StopContainer` update. Requires `Execute` permission.
async fn stop_container_inner(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    let start_ts = monitor_timestamp();
    let deployment = self
        .get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
        .await?;
    let server = self.db.get_server(&deployment.server_id).await?;
    let mut update = Update {
        target: UpdateTarget::Deployment(deployment_id.to_string()),
        operation: Operation::StopContainer,
        start_ts,
        status: UpdateStatus::InProgress,
        success: true,
        operator: user.id.clone(),
        ..Default::default()
    };
    update.id = self.add_update(update.clone()).await?;
    match self
        .periphery
        .container_stop(&server, &deployment.name)
        .await
    {
        Ok(log) => {
            update.success = log.success;
            update.logs.push(log);
        }
        Err(e) => {
            update.success = false;
            update
                .logs
                .push(Log::error("stop container", format!("{e:#?}")));
        }
    }
    update.end_ts = Some(monitor_timestamp());
    update.status = UpdateStatus::Complete;
    self.update_update(update.clone()).await?;
    Ok(update)
}
/// Remove (delete) the container for a deployment, guarding against
/// concurrent actions on the same deployment.
pub async fn remove_container(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    // Only one action may run against a deployment at a time.
    if self.deployment_busy(deployment_id).await {
        return Err(anyhow!("deployment busy"));
    }
    // Flag the deployment as removing while the inner call runs.
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .removing = true;
    let result = self.remove_container_inner(deployment_id, user).await;
    // Always clear the flag, success or failure.
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .removing = false;
    result
}
/// Inner body for `remove_container`: records an in-progress `Update`,
/// asks the periphery agent on the deployment's server to remove the
/// container, then completes the `Update` with the resulting log.
async fn remove_container_inner(
&self,
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
// Caller must hold at least Execute permission on the deployment.
let deployment = self
.get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
.await?;
let server = self.db.get_server(&deployment.server_id).await?;
let mut update = Update {
target: UpdateTarget::Deployment(deployment_id.to_string()),
operation: Operation::RemoveContainer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
// Persist the update first so the action is visible while in progress.
update.id = self.add_update(update.clone()).await?;
let log = self
.periphery
.container_remove(&server, &deployment.name)
.await;
// Success reflects either the periphery log's own success flag, or
// false when the periphery call itself failed.
update.success = match log {
Ok(log) => {
let success = log.success;
update.logs.push(log);
success
}
Err(e) => {
update
.logs
.push(Log::error("remove container", format!("{e:#?}")));
false
}
};
update.end_ts = Some(monitor_timestamp());
update.status = UpdateStatus::Complete;
self.update_update(update.clone()).await?;
Ok(update)
}
/// Pull the deployment's configured repo on its server, guarding
/// against concurrent actions on the same deployment.
pub async fn pull_deployment_repo(
    &self,
    deployment_id: &str,
    user: &RequestUser,
) -> anyhow::Result<Update> {
    // Only one action may run against a deployment at a time.
    if self.deployment_busy(deployment_id).await {
        return Err(anyhow!("deployment busy"));
    }
    // Flag the deployment as pulling while the inner call runs.
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .pulling = true;
    let result = self.pull_deployment_repo_inner(deployment_id, user).await;
    // Always clear the flag, success or failure.
    self.deployment_action_states
        .lock()
        .await
        .entry(deployment_id.to_string())
        .or_default()
        .pulling = false;
    result
}
async fn pull_deployment_repo_inner(
&self,
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let deployment = self
.get_deployment_check_permissions(deployment_id, user, PermissionLevel::Execute)
.await?;
let server = self.db.get_server(&deployment.server_id).await?;
let mut update = Update {
target: UpdateTarget::Deployment(deployment_id.to_string()),
operation: Operation::PullDeployment,
start_ts,
status: UpdateStatus::InProgress,
operator: user.id.clone(),
success: true,
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let logs = self
.periphery
.pull_repo(
&server,
&deployment.name,
&deployment.branch,
&deployment.on_pull,
)
.await?;
update.success = all_logs_success(&logs);
update.logs.extend(logs);
update.end_ts = Some(monitor_timestamp());
update.status = UpdateStatus::Complete;
self.update_update(update.clone()).await?;
Ok(update)
}
}

141
core/src/actions/group.rs Normal file
View File

@@ -0,0 +1,141 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::to_monitor_name;
use types::{
monitor_timestamp, traits::Permissioned, Group, Log, Operation, PermissionLevel, Update,
UpdateStatus, UpdateTarget,
};
use crate::{auth::RequestUser, state::State};
impl State {
    /// Fetch a group and verify `user` holds at least `permission_level`
    /// on it (admins always pass).
    ///
    /// Fixes: the parameter was misnamed `deployment_id` and the error
    /// message said "deployment" — both copy-paste leftovers; this is
    /// the group permission check.
    pub async fn get_group_check_permissions(
        &self,
        group_id: &str,
        user: &RequestUser,
        permission_level: PermissionLevel,
    ) -> anyhow::Result<Group> {
        let group = self.db.get_group(group_id).await?;
        let permissions = group.get_user_permissions(&user.id);
        if user.is_admin || permissions >= permission_level {
            Ok(group)
        } else {
            Err(anyhow!(
                "user does not have required permissions on this group"
            ))
        }
    }

    /// Create a new, empty group owned by `user` (who gets Update
    /// permission on it) and record a CreateGroup update.
    pub async fn create_group(&self, name: &str, user: &RequestUser) -> anyhow::Result<Group> {
        let start_ts = monitor_timestamp();
        let group = Group {
            name: to_monitor_name(name),
            permissions: [(user.id.clone(), PermissionLevel::Update)]
                .into_iter()
                .collect(),
            created_at: start_ts.clone(),
            updated_at: start_ts.clone(),
            ..Default::default()
        };
        let group_id = self
            .db
            .groups
            .create_one(group)
            .await
            .context("failed to add group to db")?;
        // Re-read so the returned group reflects what was stored.
        let group = self.db.get_group(&group_id).await?;
        let update = Update {
            target: UpdateTarget::Group(group_id),
            operation: Operation::CreateGroup,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(group)
    }

    /// Create a group from a full specification: create the named group,
    /// then apply the remaining fields through `update_group`.
    pub async fn create_full_group(
        &self,
        mut full_group: Group,
        user: &RequestUser,
    ) -> anyhow::Result<Group> {
        let group = self.create_group(&full_group.name, user).await?;
        full_group.id = group.id;
        let group = self.update_group(full_group, user).await?;
        Ok(group)
    }

    /// Delete a group (requires Update permission) and record a
    /// DeleteGroup update. Returns the deleted group.
    pub async fn delete_group(&self, id: &str, user: &RequestUser) -> anyhow::Result<Group> {
        let group = self
            .get_group_check_permissions(id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();
        self.db
            .groups
            .delete_one(id)
            .await
            .context(format!("failed at deleting group at {id} from mongo"))?;
        let update = Update {
            target: UpdateTarget::Group(id.to_string()),
            operation: Operation::DeleteGroup,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            logs: vec![Log::simple(
                "delete group",
                format!("deleted group {}", group.name),
            )],
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(group)
    }

    /// Overwrite a group's mutable fields (requires Update permission),
    /// recording the diff as an UpdateGroup update.
    pub async fn update_group(
        &self,
        mut new_group: Group,
        user: &RequestUser,
    ) -> anyhow::Result<Group> {
        let current_group = self
            .get_group_check_permissions(&new_group.id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();
        // none of these should be changed through this method
        new_group.name = current_group.name.clone();
        new_group.permissions = current_group.permissions.clone();
        new_group.created_at = current_group.created_at.clone();
        new_group.updated_at = start_ts.clone();
        self.db
            .groups
            .update_one(&new_group.id, mungos::Update::Regular(new_group.clone()))
            .await
            .context("failed at update one group")?;
        let diff = current_group.diff(&new_group);
        let update = Update {
            operation: Operation::UpdateGroup,
            target: UpdateTarget::Group(new_group.id.clone()),
            end_ts: Some(start_ts.clone()),
            start_ts,
            status: UpdateStatus::Complete,
            logs: vec![Log::simple(
                "group update",
                serde_json::to_string_pretty(&diff).unwrap(),
            )],
            operator: user.id.clone(),
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(new_group)
    }
}

43
core/src/actions/mod.rs Normal file
View File

@@ -0,0 +1,43 @@
use anyhow::Context;
use types::Update;
use crate::state::State;
mod build;
mod deployment;
mod group;
mod procedure;
mod server;
impl State {
    /// Broadcast an update over the shared update channel.
    pub async fn send_update(&self, update: Update) -> anyhow::Result<()> {
        self.update.sender.lock().await.send(update)?;
        Ok(())
    }

    /// Insert a new update into the db, then broadcast it (best effort).
    /// Returns the id assigned by the db.
    pub async fn add_update(&self, mut update: Update) -> anyhow::Result<String> {
        let assigned_id = self
            .db
            .updates
            .create_one(update.clone())
            .await
            .context("failed to insert update into db")?
            .to_string();
        update.id = assigned_id.clone();
        // Broadcast failures are intentionally ignored.
        let _ = self.send_update(update).await;
        Ok(assigned_id)
    }

    /// Overwrite an existing update in the db, then broadcast it (best
    /// effort). The id is taken out of the document before the write so
    /// the stored record does not duplicate its own key, then restored.
    pub async fn update_update(&self, mut update: Update) -> anyhow::Result<()> {
        let update_id = std::mem::take(&mut update.id);
        self.db
            .updates
            .update_one(&update_id, mungos::Update::Regular(update.clone()))
            .await
            .context("failed to update the update on db. the update build process was deleted")?;
        update.id = update_id;
        // Broadcast failures are intentionally ignored.
        let _ = self.send_update(update).await;
        Ok(())
    }
}

View File

@@ -0,0 +1,295 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::to_monitor_name;
use types::{
monitor_timestamp, traits::Permissioned, Log, Operation, PermissionLevel, Procedure,
ProcedureOperation::*, ProcedureStage, Update, UpdateStatus, UpdateTarget,
};
use crate::{auth::RequestUser, state::State};
impl State {
    /// Fetch a procedure and verify `user` holds at least
    /// `permission_level` on it (admins always pass).
    pub async fn get_procedure_check_permissions(
        &self,
        procedure_id: &str,
        user: &RequestUser,
        permission_level: PermissionLevel,
    ) -> anyhow::Result<Procedure> {
        let procedure = self.db.get_procedure(procedure_id).await?;
        let permissions = procedure.get_user_permissions(&user.id);
        if user.is_admin || permissions >= permission_level {
            Ok(procedure)
        } else {
            Err(anyhow!(
                "user does not have required permissions on this procedure"
            ))
        }
    }

    /// Create a new, empty procedure owned by `user` and record a
    /// CreateProcedure update.
    pub async fn create_procedure(
        &self,
        name: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Procedure> {
        let start_ts = monitor_timestamp();
        let procedure = Procedure {
            name: to_monitor_name(name),
            permissions: [(user.id.clone(), PermissionLevel::Update)]
                .into_iter()
                .collect(),
            created_at: start_ts.clone(),
            updated_at: start_ts.clone(),
            ..Default::default()
        };
        let procedure_id = self
            .db
            .procedures
            .create_one(procedure)
            .await
            .context("failed to add procedure to db")?;
        // Re-read so the returned procedure reflects what was stored.
        let procedure = self.db.get_procedure(&procedure_id).await?;
        let update = Update {
            target: UpdateTarget::Procedure(procedure_id),
            operation: Operation::CreateProcedure,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(procedure)
    }

    /// Create a procedure from a full specification: create the named
    /// procedure, then apply the remaining fields via `update_procedure`.
    pub async fn create_full_procedure(
        &self,
        mut full_procedure: Procedure,
        user: &RequestUser,
    ) -> anyhow::Result<Procedure> {
        let procedure = self.create_procedure(&full_procedure.name, user).await?;
        full_procedure.id = procedure.id;
        let procedure = self.update_procedure(full_procedure, user).await?;
        Ok(procedure)
    }

    /// Delete a procedure (requires Update permission) and record a
    /// DeleteProcedure update. Returns the deleted procedure.
    pub async fn delete_procedure(
        &self,
        id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Procedure> {
        let procedure = self
            .get_procedure_check_permissions(id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();
        self.db
            .procedures
            .delete_one(id)
            .await
            .context(format!("failed at deleting procedure at {id} from mongo"))?;
        let update = Update {
            target: UpdateTarget::Procedure(id.to_string()),
            operation: Operation::DeleteProcedure,
            start_ts,
            end_ts: Some(monitor_timestamp()),
            operator: user.id.clone(),
            logs: vec![Log::simple(
                "delete procedure",
                format!("deleted procedure {}", procedure.name),
            )],
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(procedure)
    }

    /// Overwrite a procedure's mutable fields (requires Update
    /// permission). Verifies the user holds Execute permission on every
    /// stage target before persisting, and records the diff.
    pub async fn update_procedure(
        &self,
        mut new_procedure: Procedure,
        user: &RequestUser,
    ) -> anyhow::Result<Procedure> {
        let current_procedure = self
            .get_procedure_check_permissions(&new_procedure.id, user, PermissionLevel::Update)
            .await?;
        let start_ts = monitor_timestamp();
        // none of these should be changed through this method
        new_procedure.name = current_procedure.name.clone();
        new_procedure.permissions = current_procedure.permissions.clone();
        new_procedure.created_at = current_procedure.created_at.clone();
        new_procedure.updated_at = start_ts.clone();
        // Refuse the update if the user could not execute any stage.
        for ProcedureStage {
            operation,
            target_id,
        } in &new_procedure.stages
        {
            match operation {
                BuildBuild => {
                    self.get_build_check_permissions(&target_id, user, PermissionLevel::Execute)
                        .await?;
                }
                DeployContainer | StartContainer | StopContainer | RemoveContainer
                | PullDeployment | RecloneDeployment => {
                    self.get_deployment_check_permissions(
                        target_id,
                        user,
                        PermissionLevel::Execute,
                    )
                    .await?;
                }
                PruneImagesServer | PruneContainersServer | PruneNetworksServer => {
                    self.get_server_check_permissions(target_id, user, PermissionLevel::Execute)
                        .await?;
                }
                RunProcedure => {
                    self.get_procedure_check_permissions(target_id, user, PermissionLevel::Execute)
                        .await?;
                }
                None => {}
            }
        }
        self.db
            .procedures
            .update_one(
                &new_procedure.id,
                mungos::Update::Regular(new_procedure.clone()),
            )
            .await
            .context("failed at update one procedure")?;
        let diff = current_procedure.diff(&new_procedure);
        let update = Update {
            operation: Operation::UpdateProcedure,
            target: UpdateTarget::Procedure(new_procedure.id.clone()),
            end_ts: Some(start_ts.clone()),
            start_ts,
            status: UpdateStatus::Complete,
            logs: vec![Log::simple(
                "procedure update",
                serde_json::to_string_pretty(&diff).unwrap(),
            )],
            operator: user.id.clone(),
            success: true,
            ..Default::default()
        };
        self.add_update(update).await?;
        Ok(new_procedure)
    }

    /// Run each stage of a procedure in order, collecting the updates.
    /// A stage failure aborts the run with context naming the stage.
    ///
    /// Fix: the `PullDeployment` stage was an empty stub; it now calls
    /// `pull_deployment_repo` like the other deployment stages. The
    /// PruneNetworksServer context typo ("on servers") is also fixed.
    pub async fn run_procedure(
        &self,
        procedure_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Vec<Update>> {
        let procedure = self
            .get_procedure_check_permissions(procedure_id, user, PermissionLevel::Execute)
            .await?;
        let mut updates = Vec::new();
        for ProcedureStage {
            operation,
            target_id,
        } in procedure.stages
        {
            match operation {
                None => {}
                // deployment
                StartContainer => {
                    let update = self
                        .start_container(&target_id, user)
                        .await
                        .context(format!(
                            "failed at start container for deployment (id: {target_id})"
                        ))?;
                    updates.push(update);
                }
                StopContainer => {
                    let update = self
                        .stop_container(&target_id, user)
                        .await
                        .context(format!(
                            "failed at stop container for deployment (id: {target_id})"
                        ))?;
                    updates.push(update);
                }
                RemoveContainer => {
                    let update = self
                        .remove_container(&target_id, user)
                        .await
                        .context(format!(
                            "failed at remove container for deployment (id: {target_id})"
                        ))?;
                    updates.push(update);
                }
                DeployContainer => {
                    let update = self
                        .deploy_container(&target_id, user)
                        .await
                        .context(format!(
                            "failed at deploy container for deployment (id: {target_id})"
                        ))?;
                    updates.push(update);
                }
                RecloneDeployment => {
                    let update = self
                        .reclone_deployment(&target_id, user)
                        .await
                        .context(format!("failed at reclone deployment (id: {target_id})"))?;
                    updates.push(update);
                }
                PullDeployment => {
                    let update = self
                        .pull_deployment_repo(&target_id, user)
                        .await
                        .context(format!(
                            "failed at pull repo for deployment (id: {target_id})"
                        ))?;
                    updates.push(update);
                }
                // build
                BuildBuild => {
                    let update = self
                        .build(&target_id, user)
                        .await
                        .context(format!("failed at build (id: {target_id})"))?;
                    updates.push(update);
                }
                // server
                PruneImagesServer => {
                    let update = self.prune_images(&target_id, user).await.context(format!(
                        "failed at prune images on server (id: {target_id})"
                    ))?;
                    updates.push(update);
                }
                PruneContainersServer => {
                    let update = self
                        .prune_containers(&target_id, user)
                        .await
                        .context(format!(
                            "failed at prune containers on server (id: {target_id})"
                        ))?;
                    updates.push(update);
                }
                PruneNetworksServer => {
                    let update = self
                        .prune_networks(&target_id, user)
                        .await
                        .context(format!(
                            "failed at prune networks on server (id: {target_id})"
                        ))?;
                    updates.push(update);
                }
                // procedure
                RunProcedure => {
                    // need to figure out async recursion
                    // need to guard against infinite procedure loops when they are updated
                    // let proc_updates = self
                    //     .run_procedure(&target_id, user)
                    //     .await
                    //     .context(format!("failed to run nested procedure (id: {target_id})"))?;
                    // updates.extend(proc_updates);
                }
            }
        }
        Ok(updates)
    }
}

394
core/src/actions/server.rs Normal file
View File

@@ -0,0 +1,394 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use futures_util::future::join_all;
use helpers::to_monitor_name;
use mungos::doc;
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Log, Operation, PermissionLevel, Server, Update, UpdateStatus, UpdateTarget,
};
use crate::{auth::RequestUser, state::State};
impl State {
pub async fn get_server_check_permissions(
&self,
server_id: &str,
user: &RequestUser,
permission_level: PermissionLevel,
) -> anyhow::Result<Server> {
let server = self.db.get_server(server_id).await?;
let permissions = server.get_user_permissions(&user.id);
if user.is_admin || permissions >= permission_level {
Ok(server)
} else {
Err(anyhow!(
"user does not have required permissions on this server"
))
}
}
pub async fn server_busy(&self, id: &str) -> bool {
match self.server_action_states.lock().await.get(id) {
Some(a) => a.busy(),
None => false,
}
}
pub async fn create_server(
&self,
name: &str,
address: String,
user: &RequestUser,
) -> anyhow::Result<Server> {
if !user.is_admin && !user.create_server_permissions {
return Err(anyhow!(
"user does not have permissions to add server (not admin)"
));
}
let start_ts = monitor_timestamp();
let server = Server {
name: to_monitor_name(name),
address,
permissions: [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
.collect(),
created_at: start_ts.clone(),
updated_at: start_ts.clone(),
..Default::default()
};
let server_id = self
.db
.servers
.create_one(server)
.await
.context("failed to add server to db")?;
let server = self.db.get_server(&server_id).await?;
let update = Update {
target: UpdateTarget::Server(server_id),
operation: Operation::CreateServer,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
success: true,
..Default::default()
};
self.add_update(update).await?;
Ok(server)
}
pub async fn create_full_server(
&self,
mut server: Server,
user: &RequestUser,
) -> anyhow::Result<Server> {
server.id = self
.create_server(&server.name, server.address.clone(), user)
.await?
.id;
let server = self.update_server(server, user).await?;
Ok(server)
}
pub async fn delete_server(
&self,
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Server> {
if self.server_busy(server_id).await {
return Err(anyhow!("server busy"));
}
let server = self
.get_server_check_permissions(server_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: UpdateTarget::Server(server_id.to_string()),
operation: Operation::DeleteServer,
start_ts,
operator: user.id.clone(),
success: true,
status: UpdateStatus::InProgress,
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let res = {
let delete_deployments = self
.db
.deployments
.get_some(doc! { "server_id": server_id }, None)
.await?
.into_iter()
.map(|d| async move { self.delete_deployment(&d.id, user).await });
let delete_builds = self
.db
.builds
.get_some(doc! { "server_id": server_id }, None)
.await?
.into_iter()
.map(|d| async move { self.delete_deployment(&d.id, user).await });
let update_groups = self
.db
.groups
.update_many(doc! {}, doc! { "$pull": { "servers": server_id } });
let (dep_res, build_res, group_res) = tokio::join!(
join_all(delete_deployments),
join_all(delete_builds),
update_groups
);
dep_res.into_iter().collect::<anyhow::Result<Vec<_>>>()?;
build_res.into_iter().collect::<anyhow::Result<Vec<_>>>()?;
group_res?;
self.db.servers.delete_one(&server_id).await?;
anyhow::Ok(())
};
let log = match res {
Ok(_) => Log::simple("delete server", format!("deleted server {}", server.name)),
Err(e) => Log::error("delete server", format!("failed to delete server\n{e:#?}")),
};
update.end_ts = Some(monitor_timestamp());
update.status = UpdateStatus::Complete;
update.success = log.success;
update.logs.push(log);
self.update_update(update).await?;
Ok(server)
}
pub async fn update_server(
&self,
mut new_server: Server,
user: &RequestUser,
) -> anyhow::Result<Server> {
if self.server_busy(&new_server.id).await {
return Err(anyhow!("server busy"));
}
let current_server = self
.get_server_check_permissions(&new_server.id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
new_server.permissions = current_server.permissions.clone();
new_server.created_at = current_server.created_at.clone();
new_server.updated_at = start_ts.clone();
let diff = current_server.diff(&new_server);
self.db
.servers
.update_one(&new_server.id, mungos::Update::Regular(new_server.clone()))
.await
.context("failed at update one server")?;
let update = Update {
operation: Operation::UpdateServer,
target: UpdateTarget::Server(new_server.id.clone()),
start_ts,
end_ts: Some(monitor_timestamp()),
status: UpdateStatus::Complete,
logs: vec![Log::simple(
"server update",
serde_json::to_string_pretty(&diff).unwrap(),
)],
operator: user.id.clone(),
success: true,
..Default::default()
};
self.add_update(update).await?;
Ok(new_server)
}
pub async fn prune_networks(
&self,
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_networks = true;
}
let res = self.prune_networks_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_networks = false;
}
res
}
async fn prune_networks_inner(
&self,
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let server = self
.get_server_check_permissions(server_id, user, PermissionLevel::Execute)
.await?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: UpdateTarget::Server(server_id.to_owned()),
operation: Operation::PruneNetworksServer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let log = match self.periphery.network_prune(&server).await.context(format!(
"failed to prune networks on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error("prune networks", format!("{e:#?}")),
};
update.success = log.success;
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.logs.push(log);
self.update_update(update.clone()).await?;
Ok(update)
}
pub async fn prune_images(
&self,
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_images = true;
}
let res = self.prune_images_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_images = false;
}
res
}
async fn prune_images_inner(
&self,
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let server = self
.get_server_check_permissions(server_id, user, PermissionLevel::Execute)
.await?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: UpdateTarget::Server(server_id.to_owned()),
operation: Operation::PruneImagesServer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let log = match self
.periphery
.image_prune(&server)
.await
.context(format!("failed to prune images on server {}", server.name))
{
Ok(log) => log,
Err(e) => Log::error("prune images", format!("{e:#?}")),
};
update.success = log.success;
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.logs.push(log);
self.update_update(update.clone()).await?;
Ok(update)
}
pub async fn prune_containers(
&self,
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_containers = true;
}
let res = self.prune_containers_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_containers = false;
}
res
}
async fn prune_containers_inner(
&self,
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let server = self
.get_server_check_permissions(server_id, user, PermissionLevel::Execute)
.await?;
let start_ts = monitor_timestamp();
let mut update = Update {
target: UpdateTarget::Server(server_id.to_owned()),
operation: Operation::PruneContainersServer,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
let log = match self
.periphery
.container_prune(&server)
.await
.context(format!(
"failed to prune containers on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error("prune containers", format!("{e:#?}")),
};
update.success = log.success;
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.logs.push(log);
self.update_update(update.clone()).await?;
Ok(update)
}
}

320
core/src/api/build.rs Normal file
View File

@@ -0,0 +1,320 @@
use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, FindOptions, Serialize};
use types::{
traits::Permissioned, AwsBuilderConfig, Build, BuildActionState, BuildVersionsReponse,
Operation, PermissionLevel, UpdateStatus,
};
use typeshare::typeshare;
// Page size used by the /:id/versions endpoint (see get_build_versions).
const NUM_VERSIONS_PER_PAGE: u64 = 10;
use crate::{
auth::{RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
};
use super::spawn_request_action;
// Path parameter wrapper for routes taking a build id (/:id/...).
#[derive(Serialize, Deserialize)]
struct BuildId {
id: String,
}
// Request body for POST /create.
#[typeshare]
#[derive(Serialize, Deserialize)]
struct CreateBuildBody {
name: String,
}
// Request body for POST /:id/copy — name of the new build.
#[typeshare]
#[derive(Serialize, Deserialize)]
struct CopyBuildBody {
name: String,
}
// Query parameters for GET /:id/versions. `page` is 0-based; the
// optional major/minor/patch fields narrow the version filter.
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildVersionsQuery {
#[serde(default)]
page: u32,
major: Option<i32>,
minor: Option<i32>,
patch: Option<i32>,
}
pub fn router() -> Router {
Router::new()
.route(
"/:id",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let build = state
.get_build_check_permissions(&build_id.id, &user, PermissionLevel::Read)
.await
.map_err(handle_anyhow_error)?;
response!(Json(build))
},
),
)
.route(
"/list",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Query(query): Query<Document>| async move {
let builds = state
.list_builds(&user, query)
.await
.map_err(handle_anyhow_error)?;
response!(Json(builds))
},
),
)
.route(
"/create",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<CreateBuildBody>| async move {
let build = state
.create_build(&build.name, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(build))
},
),
)
.route(
"/create_full",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<Build>| async move {
let build = spawn_request_action(async move {
state
.create_full_build(build, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(build))
},
),
)
.route(
"/:id/copy",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }): Path<BuildId>,
Json(build): Json<CopyBuildBody>| async move {
let build = spawn_request_action(async move {
state
.copy_build(&id, build.name, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(build))
},
),
)
.route(
"/:id/delete",
delete(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let build = spawn_request_action(async move {
state
.delete_build(&build_id.id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(build))
},
),
)
.route(
"/update",
patch(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<Build>| async move {
let build = spawn_request_action(async move {
state
.update_build(build, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(build))
},
),
)
.route(
"/:id/build",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let update = spawn_request_action(async move {
state
.build(&build_id.id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(update))
},
),
)
.route(
"/:id/action_state",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }): Path<BuildId>| async move {
let action_state = state
.get_build_action_states(id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(action_state))
},
),
)
.route(
"/:id/versions",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }),
Query(query): Query<BuildVersionsQuery>| async move {
let versions = state
.get_build_versions(&id, &user, query)
.await
.map_err(handle_anyhow_error)?;
response!(Json(versions))
},
),
)
.route(
"/aws_builder_defaults",
get(|Extension(state): StateExtension| async move {
Json(AwsBuilderConfig {
access_key_id: String::new(),
secret_access_key: String::new(),
..state.config.aws.clone()
})
}),
)
.route(
"/docker_organizations",
get(|Extension(state): StateExtension| async move {
Json(state.config.docker_organizations.clone())
}),
)
}
impl State {
    /// List the builds matching `query` that `user` is allowed to see
    /// (admins see everything; others need any permission level).
    async fn list_builds(
        &self,
        user: &RequestUser,
        query: impl Into<Option<Document>>,
    ) -> anyhow::Result<Vec<Build>> {
        let visible = self
            .db
            .builds
            .get_some(query, None)
            .await
            .context("failed at get all builds query")?
            .into_iter()
            .filter(|build| {
                user.is_admin || build.get_user_permissions(&user.id) != PermissionLevel::None
            })
            .collect::<Vec<Build>>();
        Ok(visible)
    }

    /// Return the current action state (busy flags) for a build.
    /// Read permission is sufficient.
    async fn get_build_action_states(
        &self,
        id: String,
        user: &RequestUser,
    ) -> anyhow::Result<BuildActionState> {
        self.get_build_check_permissions(&id, user, PermissionLevel::Read)
            .await?;
        let snapshot = {
            let mut states = self.build_action_states.lock().await;
            states.entry(id).or_default().clone()
        };
        Ok(snapshot)
    }

    /// Page through the successful BuildBuild updates for a build and
    /// return their versions (newest first), optionally filtered by
    /// major/minor/patch.
    pub async fn get_build_versions(
        &self,
        id: &str,
        user: &RequestUser,
        query: BuildVersionsQuery,
    ) -> anyhow::Result<Vec<BuildVersionsReponse>> {
        self.get_build_check_permissions(id, user, PermissionLevel::Read)
            .await?;
        // Only completed, successful builds carry a usable version.
        let mut filter = doc! {
            "target": {
                "type": "Build",
                "id": id
            },
            "operation": Operation::BuildBuild.to_string(),
            "status": UpdateStatus::Complete.to_string(),
            "success": true
        };
        if let Some(major) = query.major {
            filter.insert("version.major", major);
        }
        if let Some(minor) = query.minor {
            filter.insert("version.minor", minor);
        }
        if let Some(patch) = query.patch {
            filter.insert("version.patch", patch);
        }
        let versions = self
            .db
            .updates
            .get_some(
                filter,
                FindOptions::builder()
                    .sort(doc! { "_id": -1 })
                    .limit(NUM_VERSIONS_PER_PAGE as i64)
                    .skip(query.page as u64 * NUM_VERSIONS_PER_PAGE)
                    .build(),
            )
            .await
            .context("failed to pull versions from mongo")?
            .into_iter()
            // Drop updates without a version; keep (version, start_ts).
            .filter_map(|update| {
                update.version.map(|version| BuildVersionsReponse {
                    version,
                    ts: update.start_ts,
                })
            })
            .collect();
        Ok(versions)
    }
}

510
core/src/api/deployment.rs Normal file
View File

@@ -0,0 +1,510 @@
use std::collections::HashMap;
use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
};
use futures_util::future::join_all;
use helpers::handle_anyhow_error;
use mungos::{doc, options::FindOneOptions, Deserialize, Document, Serialize};
use types::{
traits::Permissioned, Deployment, DeploymentActionState, DeploymentWithContainerState,
DockerContainerState, DockerContainerStats, Log, Operation, PermissionLevel, Server,
UpdateStatus,
};
use typeshare::typeshare;
use crate::{
auth::{RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
};
use super::spawn_request_action;
// Path parameter wrapper for routes taking a deployment id (/:id/...).
#[derive(Serialize, Deserialize)]
pub struct DeploymentId {
id: String,
}
// Request body for POST /create.
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateDeploymentBody {
name: String,
server_id: String,
}
// Request body for POST /:id/copy — name and target server of the copy.
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CopyDeploymentBody {
name: String,
server_id: String,
}
// Query parameters for the container-log endpoint; `tail` limits the
// number of log lines returned when present.
#[typeshare]
#[derive(Deserialize)]
pub struct GetContainerLogQuery {
tail: Option<u32>,
}
/// Builds the `/deployment` sub-router: CRUD, container lifecycle actions,
/// logs/stats, and deployed-version lookup. Mutating actions are wrapped in
/// `spawn_request_action` so a premature client disconnect cannot cancel
/// them mid-run.
pub fn router() -> Router {
    Router::new()
        .route(
            "/:id",
            get(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let res = state
                        .get_deployment_with_container_state(&user, &deployment_id.id)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(res))
                },
            ),
        )
        .route(
            "/list",
            get(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Query(query): Query<Document>| async move {
                    let deployments = state
                        .list_deployments_with_container_state(&user, query)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(deployments))
                },
            ),
        )
        .route(
            "/create",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(deployment): Json<CreateDeploymentBody>| async move {
                    let deployment = state
                        .create_deployment(&deployment.name, deployment.server_id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(deployment))
                },
            ),
        )
        .route(
            "/create_full",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(full_deployment): Json<Deployment>| async move {
                    let deployment = spawn_request_action(async move {
                        state
                            .create_full_deployment(full_deployment, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(deployment))
                },
            ),
        )
        .route(
            "/:id/copy",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(DeploymentId { id }): Path<DeploymentId>,
                 Json(deployment): Json<CopyDeploymentBody>| async move {
                    let deployment = spawn_request_action(async move {
                        state
                            .copy_deployment(&id, deployment.name, deployment.server_id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(deployment))
                },
            ),
        )
        .route(
            "/:id/delete",
            delete(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let deployment = spawn_request_action(async move {
                        state
                            .delete_deployment(&deployment_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(deployment))
                },
            ),
        )
        .route(
            "/update",
            patch(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(deployment): Json<Deployment>| async move {
                    let deployment = spawn_request_action(async move {
                        state
                            .update_deployment(deployment, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(deployment))
                },
            ),
        )
        .route(
            "/:id/reclone",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let update = spawn_request_action(async move {
                        state
                            .reclone_deployment(&deployment_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(update))
                },
            ),
        )
        .route(
            "/:id/deploy",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let update = spawn_request_action(async move {
                        state
                            .deploy_container(&deployment_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(update))
                },
            ),
        )
        .route(
            "/:id/start_container",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let update = spawn_request_action(async move {
                        state
                            .start_container(&deployment_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(update))
                },
            ),
        )
        .route(
            "/:id/stop_container",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let update = spawn_request_action(async move {
                        state
                            .stop_container(&deployment_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(update))
                },
            ),
        )
        .route(
            "/:id/remove_container",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let update = spawn_request_action(async move {
                        state
                            .remove_container(&deployment_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(update))
                },
            ),
        )
        .route(
            "/:id/pull",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>| async move {
                    let update = spawn_request_action(async move {
                        state
                            .pull_deployment_repo(&deployment_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(update))
                },
            ),
        )
        .route(
            "/:id/action_state",
            get(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(DeploymentId { id }): Path<DeploymentId>| async move {
                    let action_state = state
                        .get_deployment_action_states(id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(action_state))
                },
            ),
        )
        .route(
            "/:id/log",
            get(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(deployment_id): Path<DeploymentId>,
                 Query(query): Query<GetContainerLogQuery>| async move {
                    let log = state
                        .get_deployment_container_log(&deployment_id.id, &user, query.tail)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(log))
                },
            ),
        )
        .route(
            "/:id/stats",
            get(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 // fix: extractor type annotation was missing; axum handler
                 // closures cannot infer extractor types from the pattern alone
                 Path(DeploymentId { id }): Path<DeploymentId>| async move {
                    let stats = state
                        .get_deployment_container_stats(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/deployed_version",
            get(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 // fix: extractor type annotation was missing (see /:id/stats)
                 Path(DeploymentId { id }): Path<DeploymentId>| async move {
                    let version = state
                        .get_deployment_deployed_version(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(version)
                },
            ),
        )
}
impl State {
    /// Fetches one deployment (requires Read permission) together with the
    /// live state of its docker container on the host server.
    async fn get_deployment_with_container_state(
        &self,
        user: &RequestUser,
        id: &str,
    ) -> anyhow::Result<DeploymentWithContainerState> {
        let deployment = self
            .get_deployment_check_permissions(id, user, PermissionLevel::Read)
            .await?;
        let server = self.db.get_server(&deployment.server_id).await?;
        // containers are matched to deployments by name; if the periphery
        // agent is unreachable the state is reported as Unknown
        let (state, container) = match self.periphery.container_list(&server).await {
            Ok(containers) => match containers.into_iter().find(|c| c.name == deployment.name) {
                Some(container) => (container.state, Some(container)),
                None => (DockerContainerState::NotDeployed, None),
            },
            Err(_) => (DockerContainerState::Unknown, None),
        };
        Ok(DeploymentWithContainerState {
            deployment,
            state,
            container,
        })
    }
    /// Lists deployments matching `query` that `user` can read, each joined
    /// with its container state. Container lists are fetched once per
    /// distinct server, in parallel.
    async fn list_deployments_with_container_state(
        &self,
        user: &RequestUser,
        query: impl Into<Option<Document>>,
    ) -> anyhow::Result<Vec<DeploymentWithContainerState>> {
        let deployments: Vec<Deployment> = self
            .db
            .deployments
            .get_some(query, None)
            .await
            .context("failed at get all deployments query")?
            .into_iter()
            .filter(|s| {
                // admins see everything; others need any explicit permission
                if user.is_admin {
                    true
                } else {
                    let permissions = s.get_user_permissions(&user.id);
                    permissions != PermissionLevel::None
                }
            })
            .collect();
        // collect the distinct set of servers referenced by the deployments
        let mut servers: Vec<Server> = Vec::new();
        for d in &deployments {
            if servers.iter().find(|s| s.id == d.server_id).is_none() {
                servers.push(self.db.get_server(&d.server_id).await?)
            }
        }
        // query each server's container list concurrently; a failed query
        // maps to None (state Unknown below) rather than failing the request
        let containers_futures = servers
            .into_iter()
            .map(|server| async { (self.periphery.container_list(&server).await, server.id) });
        let containers = join_all(containers_futures)
            .await
            .into_iter()
            .map(|(container, server_id)| (server_id, container.ok()))
            .collect::<HashMap<_, _>>();
        let deployments_with_containers = deployments
            .into_iter()
            .map(|deployment| {
                // unwrap is safe: every deployment's server_id was inserted above
                let (state, container) = match containers.get(&deployment.server_id).unwrap() {
                    Some(container) => {
                        match container
                            .iter()
                            .find(|c| c.name == deployment.name)
                            .map(|c| c.to_owned())
                        {
                            Some(container) => (container.state, Some(container)),
                            None => (DockerContainerState::NotDeployed, None),
                        }
                    }
                    None => (DockerContainerState::Unknown, None),
                };
                DeploymentWithContainerState {
                    container,
                    deployment,
                    state,
                }
            })
            .collect::<Vec<DeploymentWithContainerState>>();
        Ok(deployments_with_containers)
    }
    /// Returns the in-memory action-state flags (deploying, starting, ...)
    /// for a deployment, creating a default entry if none exists yet.
    async fn get_deployment_action_states(
        &self,
        id: String,
        user: &RequestUser,
    ) -> anyhow::Result<DeploymentActionState> {
        self.get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
            .await?;
        let action_state = self
            .deployment_action_states
            .lock()
            .await
            .entry(id)
            .or_default()
            .clone();
        Ok(action_state)
    }
    /// Fetches the container log from the deployment's host server.
    /// `tail` limits output to the last N lines when provided.
    async fn get_deployment_container_log(
        &self,
        id: &str,
        user: &RequestUser,
        tail: Option<u32>,
    ) -> anyhow::Result<Log> {
        let deployment = self
            .get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
            .await?;
        let server = self.db.get_server(&deployment.server_id).await?;
        let log = self
            .periphery
            .container_log(&server, &deployment.name, tail)
            .await?;
        Ok(log)
    }
    /// Fetches live docker stats for the deployment's container.
    async fn get_deployment_container_stats(
        &self,
        id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<DockerContainerStats> {
        let deployment = self
            .get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
            .await?;
        let server = self.db.get_server(&deployment.server_id).await?;
        let stats = self
            .periphery
            .container_stats(&server, &deployment.name)
            .await?;
        Ok(stats)
    }
    /// Resolves the currently deployed version string. For build-backed
    /// deployments this is the version recorded on the most recent
    /// successful DeployContainer update; otherwise it is the tag portion
    /// of the configured image. Falls back to "latest" in both paths.
    async fn get_deployment_deployed_version(
        &self,
        id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<String> {
        let deployment = self
            .get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
            .await?;
        if deployment.build_id.is_some() {
            // newest matching update wins (sort by _id descending)
            let latest_deploy_update = self
                .db
                .updates
                .find_one(
                    doc! {
                        "target": {
                            "type": "Deployment",
                            "id": id
                        },
                        "operation": Operation::DeployContainer.to_string(),
                        "status": UpdateStatus::Complete.to_string(),
                        "success": true,
                    },
                    FindOneOptions::builder().sort(doc! { "_id": -1 }).build(),
                )
                .await
                .context("failed at query to get latest deploy update from mongo")?;
            if let Some(update) = latest_deploy_update {
                if let Some(version) = update.version {
                    Ok(version.to_string())
                } else {
                    Ok("latest".to_string())
                }
            } else {
                Ok("latest".to_string())
            }
        } else {
            // non-build deployment: take the tag after ':' in the image name
            let split = deployment
                .docker_run_args
                .image
                .split(':')
                .collect::<Vec<&str>>();
            if let Some(version) = split.get(1) {
                Ok(version.to_string())
            } else {
                Ok("latest".to_string())
            }
        }
    }
}

View File

@@ -0,0 +1,176 @@
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use axum_oauth2::random_duration;
use helpers::handle_anyhow_error;
use hex::ToHex;
use hmac::{Hmac, Mac};
use mungos::Deserialize;
use sha2::Sha256;
use types::GITHUB_WEBHOOK_USER_ID;
use crate::{
auth::RequestUser,
state::{State, StateExtension},
};
use super::spawn_request_action;
// HMAC-SHA256, used to check github webhook payload signatures.
type HmacSha256 = Hmac<Sha256>;
/// Path params: the id of the build / deployment / procedure targeted
/// by the incoming webhook.
#[derive(Deserialize)]
struct Id {
    id: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/build/:id",
post(|state: StateExtension, Path(Id { id }), headers: HeaderMap, body: String| async move {
spawn_request_action(async move {
state.handle_build_webhook(&id, headers, body).await.map_err(handle_anyhow_error)
}).await?
}),
)
.route(
"/deployment/:id",
post(|state: StateExtension, Path(Id { id }), headers: HeaderMap, body: String| async move {
spawn_request_action(async move {
state.handle_deployment_webhook(&id, headers, body).await.map_err(handle_anyhow_error)
}).await?
}),
)
.route(
"/procedure/:id",
post(|state: StateExtension, Path(Id { id }), headers: HeaderMap, body: String| async move {
spawn_request_action(async move {
state.handle_procedure_webhook(&id, headers, body).await.map_err(handle_anyhow_error)
}).await?
}),
)
}
impl State {
async fn handle_build_webhook(
&self,
id: &str,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
self.verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let expected_branch = self
.db
.get_build(id)
.await?
.branch
.ok_or(anyhow!("build has no branch attached"))?;
if request_branch != expected_branch {
return Err(anyhow!("request branch does not match expected"));
}
self.build(
id,
&RequestUser {
id: String::from(GITHUB_WEBHOOK_USER_ID),
is_admin: true,
create_server_permissions: false,
create_build_permissions: false,
},
)
.await?;
Ok(())
}
async fn handle_deployment_webhook(
&self,
id: &str,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
self.verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let expected_branch = self
.db
.get_deployment(id)
.await?
.branch
.ok_or(anyhow!("deployment has no branch attached"))?;
if request_branch != expected_branch {
return Err(anyhow!("request branch does not match expected"));
}
self.pull_deployment_repo(
id,
&RequestUser {
id: String::from(GITHUB_WEBHOOK_USER_ID),
is_admin: true,
create_server_permissions: false,
create_build_permissions: false,
},
)
.await?;
Ok(())
}
async fn handle_procedure_webhook(
&self,
id: &str,
headers: HeaderMap,
body: String,
) -> anyhow::Result<()> {
self.verify_gh_signature(headers, &body).await?;
let request_branch = extract_branch(&body)?;
let expected_branches = self.db.get_procedure(id).await?.webhook_branches;
if !expected_branches.contains(&request_branch) {
return Err(anyhow!("request branch does not match expected"));
}
self.run_procedure(
id,
&RequestUser {
id: String::from(GITHUB_WEBHOOK_USER_ID),
is_admin: true,
create_server_permissions: false,
create_build_permissions: false,
},
)
.await?;
Ok(())
}
async fn verify_gh_signature(&self, headers: HeaderMap, body: &str) -> anyhow::Result<()> {
// wait random amount of time
tokio::time::sleep(random_duration(0, 500)).await;
let signature = headers.get("x-hub-signature-256");
if signature.is_none() {
return Err(anyhow!("no signature in headers"));
}
let signature = signature.unwrap().to_str();
if signature.is_err() {
return Err(anyhow!("failed to unwrap signature"));
}
let signature = signature.unwrap().replace("sha256=", "");
let mut mac = HmacSha256::new_from_slice(self.config.github_webhook_secret.as_bytes())
.expect("github webhook | failed to create hmac sha256");
mac.update(body.as_bytes());
let expected = mac.finalize().into_bytes().encode_hex::<String>();
if signature == expected {
Ok(())
} else {
Err(anyhow!("signature does not equal expected"))
}
}
}
/// Minimal view of the github push payload: only the `ref` field
/// (e.g. "refs/heads/main") is needed.
#[derive(Deserialize)]
struct GithubWebhookBody {
    #[serde(rename = "ref")]
    branch: String,
}
/// Parses a github push payload and returns the branch name with the
/// "refs/heads/" prefix stripped from the `ref` field.
fn extract_branch(body: &str) -> anyhow::Result<String> {
    let parsed: GithubWebhookBody =
        serde_json::from_str(body).context("failed to parse github request body")?;
    Ok(parsed.branch.replace("refs/heads/", ""))
}

142
core/src/api/group.rs Normal file
View File

@@ -0,0 +1,142 @@
use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Document, Serialize};
use types::{traits::Permissioned, Group, PermissionLevel};
use typeshare::typeshare;
use crate::{
auth::{RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
};
/// Path params for routes addressing a single group (`/:id/...`).
#[derive(Serialize, Deserialize)]
pub struct GroupId {
    id: String,
}
/// Body for POST `/create`: the new group's name.
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateGroupBody {
    name: String,
}
/// Builds the `/group` sub-router: get, list, create (by name or full
/// object), delete, and update.
pub fn router() -> Router {
    Router::new()
        .route(
            "/:id",
            get(
                // read a single group; requires Read permission
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(group_id): Path<GroupId>| async move {
                    let group = state
                        .get_group_check_permissions(&group_id.id, &user, PermissionLevel::Read)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(group))
                },
            ),
        )
        .route(
            "/list",
            get(
                // list groups matching an arbitrary mongo query document,
                // filtered down to what the user may see
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Query(query): Query<Document>| async move {
                    let groups = state
                        .list_groups(&user, query)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(groups))
                },
            ),
        )
        .route(
            "/create",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(group): Json<CreateGroupBody>| async move {
                    let group = state
                        .create_group(&group.name, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(group))
                },
            ),
        )
        .route(
            "/create_full",
            post(
                // create a group from a fully-specified Group object
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(group): Json<Group>| async move {
                    let group = state
                        .create_full_group(group, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(group))
                },
            ),
        )
        .route(
            "/:id/delete",
            delete(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(group_id): Path<GroupId>| async move {
                    let group = state
                        .delete_group(&group_id.id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(group))
                },
            ),
        )
        .route(
            "/update",
            patch(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(group): Json<Group>| async move {
                    let group = state
                        .update_group(group, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(group))
                },
            ),
        )
}
impl State {
    /// Returns every group matching `query` that `user` is allowed to see:
    /// admins see all, other users only groups where they hold a permission
    /// level above `None`. Results come back in db order (no name sort).
    async fn list_groups(
        &self,
        user: &RequestUser,
        query: impl Into<Option<Document>>,
    ) -> anyhow::Result<Vec<Group>> {
        let all_groups = self
            .db
            .groups
            .get_some(query, None)
            .await
            .context("failed at get all groups query")?;
        let visible = all_groups
            .into_iter()
            .filter(|group| {
                user.is_admin || group.get_user_permissions(&user.id) != PermissionLevel::None
            })
            .collect::<Vec<Group>>();
        Ok(visible)
    }
}

205
core/src/api/mod.rs Normal file
View File

@@ -0,0 +1,205 @@
use anyhow::{anyhow, Context};
use axum::{
body::Body,
extract::Path,
http::{Request, StatusCode},
middleware,
routing::{get, post},
Extension, Json, Router,
};
use futures_util::Future;
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use types::{PermissionLevel, UpdateTarget, User};
use typeshare::typeshare;
use crate::{
auth::{auth_request, JwtExtension, RequestUser, RequestUserExtension},
state::{State, StateExtension},
};
pub mod build;
pub mod deployment;
mod github_listener;
pub mod group;
pub mod permissions;
pub mod procedure;
pub mod secret;
pub mod server;
pub mod update;
/// Body for POST `/update_description`: which entity to annotate and the
/// new description text.
#[typeshare]
#[derive(Deserialize)]
struct UpdateDescriptionBody {
    target: UpdateTarget,
    description: String,
}
/// Builds the top-level API router. `/user` and `/listener` are reachable
/// without the auth middleware (the listener verifies HMAC signatures
/// itself); everything nested under "/" passes through `auth_request`.
pub fn router() -> Router {
    Router::new()
        .route(
            "/user",
            get(|jwt, req| async { get_user(jwt, req).await.map_err(handle_anyhow_error) }),
        )
        // github webhooks authenticate via signature, not JWT
        .nest("/listener", github_listener::router())
        .nest(
            "/",
            Router::new()
                .route(
                    "/username/:id",
                    get(|state, user_id| async {
                        get_username(state, user_id)
                            .await
                            .map_err(handle_anyhow_error)
                    }),
                )
                .route(
                    "/github_webhook_base_url",
                    // falls back to the configured host when no explicit
                    // webhook base url is set
                    get(|state: StateExtension| async move {
                        state
                            .config
                            .github_webhook_base_url
                            .as_ref()
                            .unwrap_or(&state.config.host)
                            .to_string()
                    }),
                )
                .route(
                    "/update_description",
                    post(
                        |state: StateExtension,
                         user: RequestUserExtension,
                         body: Json<UpdateDescriptionBody>| async move {
                            state
                                .update_description(&body.target, &body.description, &user)
                                .await
                                .map_err(handle_anyhow_error)
                        },
                    ),
                )
                .route("/users", get(get_users))
                .nest("/build", build::router())
                .nest("/deployment", deployment::router())
                .nest("/server", server::router())
                .nest("/procedure", procedure::router())
                .nest("/group", group::router())
                .nest("/update", update::router())
                .nest("/permissions", permissions::router())
                .nest("/secret", secret::router())
                // everything above in this nest requires an authenticated user
                .layer(middleware::from_fn(auth_request)),
        )
}
/// Authenticates the request's JWT and returns the matching user with all
/// credential material (password hash, secret hashes) blanked out.
async fn get_user(Extension(jwt): JwtExtension, req: Request<Body>) -> anyhow::Result<Json<User>> {
    let mut user = jwt.authenticate(&req).await?;
    user.password = None;
    user.secrets
        .iter_mut()
        .for_each(|secret| secret.hash = String::new());
    Ok(Json(user))
}
/// Path params for `/username/:id`.
#[derive(Deserialize)]
struct UserId {
    id: String,
}
/// Looks up a user by id and returns only their public username.
async fn get_username(
    state: StateExtension,
    Path(UserId { id }): Path<UserId>,
) -> anyhow::Result<String> {
    Ok(state.db.get_user(&id).await?.username)
}
/// Admin-only: lists all users with passwords and secrets scrubbed.
/// Non-admins receive 401 UNAUTHORIZED.
async fn get_users(
    state: StateExtension,
    user: RequestUserExtension,
) -> Result<Json<Vec<User>>, (StatusCode, String)> {
    // guard clause: only admins may enumerate users
    if !user.is_admin {
        return Err((StatusCode::UNAUTHORIZED, "user is not admin".to_string()));
    }
    let mut users = state
        .db
        .users
        .get_some(None, None)
        .await
        .context("failed to get users from db")
        .map_err(handle_anyhow_error)?;
    // scrub credential material before returning
    for u in users.iter_mut() {
        u.password = None;
        u.secrets = Vec::new();
    }
    Ok(Json(users))
}
/// Runs `action` on a detached tokio task and awaits its completion.
/// Spawning decouples the action from the request's lifetime, so a client
/// disconnecting mid-request cannot drop the action partway through.
pub async fn spawn_request_action<A>(action: A) -> Result<A::Output, (StatusCode, String)>
where
    A: Future + Send + 'static,
    A::Output: Send + 'static,
{
    tokio::spawn(action)
        .await
        .context("failure at action thread spawn")
        .map_err(handle_anyhow_error)
}
impl State {
    /// Sets the free-text description on the targeted build / deployment /
    /// server / group / procedure. Requires Update-level permission on the
    /// target; other target variants are rejected.
    pub async fn update_description(
        &self,
        target: &UpdateTarget,
        description: &str,
        user: &RequestUser,
    ) -> anyhow::Result<()> {
        // each arm: permission check, then a single-field $set on the
        // matching typed collection
        match target {
            UpdateTarget::Build(id) => {
                self.get_build_check_permissions(id, user, PermissionLevel::Update)
                    .await?;
                self.db
                    .builds
                    .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
                    .await?;
            }
            UpdateTarget::Deployment(id) => {
                self.get_deployment_check_permissions(id, user, PermissionLevel::Update)
                    .await?;
                self.db
                    .deployments
                    .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
                    .await?;
            }
            UpdateTarget::Server(id) => {
                self.get_server_check_permissions(id, user, PermissionLevel::Update)
                    .await?;
                self.db
                    .servers
                    .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
                    .await?;
            }
            UpdateTarget::Group(id) => {
                self.get_group_check_permissions(id, user, PermissionLevel::Update)
                    .await?;
                self.db
                    .groups
                    .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
                    .await?;
            }
            UpdateTarget::Procedure(id) => {
                self.get_procedure_check_permissions(id, user, PermissionLevel::Update)
                    .await?;
                self.db
                    .procedures
                    .update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
                    .await?;
            }
            // e.g. UpdateTarget::System has no description field
            _ => return Err(anyhow!("invalid target: {target:?}")),
        }
        Ok(())
    }
}

382
core/src/api/permissions.rs Normal file
View File

@@ -0,0 +1,382 @@
use anyhow::{anyhow, Context};
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, Serialize};
use types::{
monitor_timestamp, Build, Deployment, Log, Operation, PermissionLevel, PermissionsTarget,
Procedure, Server, Update, UpdateStatus, UpdateTarget,
};
use typeshare::typeshare;
use crate::{auth::RequestUserExtension, response, state::StateExtension};
/// Body for POST `/update`: grant `permission` to `user_id` on the entity
/// identified by `target_type` + `target_id`.
#[typeshare]
#[derive(Serialize, Deserialize)]
struct PermissionsUpdateBody {
    user_id: String,
    permission: PermissionLevel,
    target_type: PermissionsTarget,
    target_id: String,
}
/// Body for POST `/modify_enabled`: enable or disable a user account.
#[typeshare]
#[derive(Serialize, Deserialize)]
struct ModifyUserEnabledBody {
    user_id: String,
    enabled: bool,
}
/// Body for POST `/modify_create_server`: toggle a user's ability to
/// create servers.
#[typeshare]
#[derive(Serialize, Deserialize)]
struct ModifyUserCreateServerBody {
    user_id: String,
    create_server_permissions: bool,
}
/// Body for POST `/modify_create_build`: toggle a user's ability to
/// create builds.
#[typeshare]
#[derive(Serialize, Deserialize)]
struct ModifyUserCreateBuildBody {
    user_id: String,
    create_build_permissions: bool,
}
/// Builds the `/permissions` sub-router. All four endpoints are admin-only
/// (enforced inside the handlers) and each returns the Update record
/// created for the change.
pub fn router() -> Router {
    Router::new()
        .route(
            "/update",
            post(|state, user, update| async {
                let update = update_permissions(state, user, update)
                    .await
                    .map_err(handle_anyhow_error)?;
                response!(Json(update))
            }),
        )
        .route(
            "/modify_enabled",
            post(|state, user, body| async {
                let update = modify_user_enabled(state, user, body)
                    .await
                    .map_err(handle_anyhow_error)?;
                response!(Json(update))
            }),
        )
        .route(
            "/modify_create_server",
            post(|state, user, body| async {
                let update = modify_user_create_server_permissions(state, user, body)
                    .await
                    .map_err(handle_anyhow_error)?;
                response!(Json(update))
            }),
        )
        .route(
            "/modify_create_build",
            post(|state, user, body| async {
                let update = modify_user_create_build_permissions(state, user, body)
                    .await
                    .map_err(handle_anyhow_error)?;
                response!(Json(update))
            }),
        )
}
/// Admin-only. Grants `permission_update.permission` to the target user on
/// the targeted server / deployment / build / procedure by writing to the
/// entity's `permissions.<user_id>` field, then records and returns an
/// `Update` describing the change.
async fn update_permissions(
    Extension(state): StateExtension,
    Extension(user): RequestUserExtension,
    Json(permission_update): Json<PermissionsUpdateBody>,
) -> anyhow::Result<Update> {
    if !user.is_admin {
        return Err(anyhow!(
            "user not authorized for this action (is not admin)"
        ));
    }
    let target_user = state
        .db
        .users
        .find_one_by_id(&permission_update.user_id)
        .await
        .context("failed at find target user query")?
        .ok_or(anyhow!(
            "failed to find a user with id {}",
            permission_update.user_id
        ))?;
    // disabled accounts cannot be granted permissions
    if !target_user.enabled {
        return Err(anyhow!("target user not enabled"));
    }
    // operator is the requesting admin, not the target user
    let mut update = Update {
        operation: Operation::ModifyUserPermissions,
        start_ts: monitor_timestamp(),
        success: true,
        operator: user.id.clone(),
        status: UpdateStatus::Complete,
        ..Default::default()
    };
    let log_text = match permission_update.target_type {
        PermissionsTarget::Server => {
            let server = state
                .db
                .servers
                .find_one_by_id(&permission_update.target_id)
                .await
                .context("failed at find server query")?
                .ok_or(anyhow!(
                    "failed to find a server with id {}",
                    permission_update.target_id
                ))?;
            state
                .db
                .servers
                .update_one::<Server>(
                    &permission_update.target_id,
                    mungos::Update::Set(doc! {
                        format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
                    }),
                )
                .await?;
            update.target = UpdateTarget::Server(server.id);
            format!(
                "user {} given {} permissions on server {}",
                target_user.username, permission_update.permission, server.name
            )
        }
        PermissionsTarget::Deployment => {
            let deployment = state
                .db
                .deployments
                .find_one_by_id(&permission_update.target_id)
                .await
                .context("failed at find deployment query")?
                .ok_or(anyhow!(
                    "failed to find a deployment with id {}",
                    permission_update.target_id
                ))?;
            state
                .db
                .deployments
                .update_one::<Deployment>(
                    &permission_update.target_id,
                    mungos::Update::Set(doc! {
                        format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
                    }),
                )
                .await?;
            update.target = UpdateTarget::Deployment(deployment.id);
            format!(
                "user {} (id: {}) given {} permissions on deployment {}",
                target_user.username, target_user.id, permission_update.permission, deployment.name
            )
        }
        PermissionsTarget::Build => {
            let build = state
                .db
                .builds
                .find_one_by_id(&permission_update.target_id)
                .await
                .context("failed at find build query")?
                .ok_or(anyhow!(
                    "failed to find a build with id {}",
                    permission_update.target_id
                ))?;
            state
                .db
                .builds
                .update_one::<Build>(
                    &permission_update.target_id,
                    mungos::Update::Set(doc! {
                        format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
                    }),
                )
                .await?;
            update.target = UpdateTarget::Build(build.id);
            format!(
                "user {} given {} permissions on build {}",
                target_user.username, permission_update.permission, build.name
            )
        }
        PermissionsTarget::Procedure => {
            let procedure = state
                .db
                .procedures
                .find_one_by_id(&permission_update.target_id)
                .await
                // fix: error contexts said "build" (copy-paste from the Build arm)
                .context("failed at find procedure query")?
                .ok_or(anyhow!(
                    "failed to find a procedure with id {}",
                    permission_update.target_id
                ))?;
            state
                .db
                .procedures
                .update_one::<Procedure>(
                    &permission_update.target_id,
                    mungos::Update::Set(doc! {
                        format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
                    }),
                )
                .await?;
            update.target = UpdateTarget::Procedure(procedure.id);
            format!(
                "user {} given {} permissions on procedure {}",
                target_user.username, permission_update.permission, procedure.name
            )
        }
    };
    update
        .logs
        .push(Log::simple("modify permissions", log_text));
    update.end_ts = Some(monitor_timestamp());
    update.id = state.add_update(update.clone()).await?;
    Ok(update)
}
/// Admin-only. Enables or disables the target user's account and records
/// the change as an Update.
async fn modify_user_enabled(
    Extension(state): StateExtension,
    Extension(user): RequestUserExtension,
    Json(ModifyUserEnabledBody { user_id, enabled }): Json<ModifyUserEnabledBody>,
) -> anyhow::Result<Update> {
    if !user.is_admin {
        return Err(anyhow!(
            "user does not have permissions for this action (not admin)"
        ));
    }
    // fix: bind the target under a distinct name. The original shadowed the
    // requesting admin (`user`), so `operator` below recorded the TARGET
    // user's id instead of the admin who made the change.
    let target_user = state
        .db
        .users
        .find_one_by_id(&user_id)
        .await
        .context("failed at mongo query to find target user")?
        .ok_or(anyhow!("did not find any user with user_id {user_id}"))?;
    state
        .db
        .users
        .update_one::<Document>(&user_id, mungos::Update::Set(doc! { "enabled": enabled }))
        .await?;
    let update_type = if enabled { "enabled" } else { "disabled" };
    let ts = monitor_timestamp();
    let mut update = Update {
        target: UpdateTarget::System,
        operation: Operation::ModifyUserEnabled,
        logs: vec![Log::simple(
            "modify user enabled",
            format!(
                "{update_type} {} (id: {})",
                target_user.username, target_user.id
            ),
        )],
        start_ts: ts.clone(),
        end_ts: Some(ts),
        status: UpdateStatus::Complete,
        success: true,
        // the admin performing the action, matching update_permissions
        operator: user.id.clone(),
        ..Default::default()
    };
    update.id = state.add_update(update.clone()).await?;
    Ok(update)
}
/// Admin-only. Toggles whether the target user may create servers and
/// records the change as an Update.
async fn modify_user_create_server_permissions(
    Extension(state): StateExtension,
    Extension(user): RequestUserExtension,
    Json(ModifyUserCreateServerBody {
        user_id,
        create_server_permissions,
    }): Json<ModifyUserCreateServerBody>,
) -> anyhow::Result<Update> {
    if !user.is_admin {
        return Err(anyhow!(
            "user does not have permissions for this action (not admin)"
        ));
    }
    // fix: distinct name for the target — the original shadowed the admin
    // `user`, so `operator` below recorded the target's id, not the admin's
    let target_user = state
        .db
        .users
        .find_one_by_id(&user_id)
        .await
        .context("failed at mongo query to find target user")?
        .ok_or(anyhow!("did not find any user with user_id {user_id}"))?;
    state
        .db
        .users
        .update_one::<Document>(
            &user_id,
            mungos::Update::Set(doc! { "create_server_permissions": create_server_permissions }),
        )
        .await?;
    let update_type = if create_server_permissions {
        "enabled"
    } else {
        "disabled"
    };
    let ts = monitor_timestamp();
    let mut update = Update {
        target: UpdateTarget::System,
        operation: Operation::ModifyUserCreateServerPermissions,
        logs: vec![Log::simple(
            "modify user create server permissions",
            format!(
                "{update_type} create server permissions for {} (id: {})",
                target_user.username, target_user.id
            ),
        )],
        start_ts: ts.clone(),
        end_ts: Some(ts),
        status: UpdateStatus::Complete,
        success: true,
        // the admin performing the action, matching update_permissions
        operator: user.id.clone(),
        ..Default::default()
    };
    update.id = state.add_update(update.clone()).await?;
    Ok(update)
}
/// Admin-only. Toggles whether the target user may create builds and
/// records the change as an Update.
async fn modify_user_create_build_permissions(
    Extension(state): StateExtension,
    Extension(user): RequestUserExtension,
    Json(ModifyUserCreateBuildBody {
        user_id,
        create_build_permissions,
    }): Json<ModifyUserCreateBuildBody>,
) -> anyhow::Result<Update> {
    if !user.is_admin {
        return Err(anyhow!(
            "user does not have permissions for this action (not admin)"
        ));
    }
    // fix: distinct name for the target — the original shadowed the admin
    // `user`, so `operator` below recorded the target's id, not the admin's
    let target_user = state
        .db
        .users
        .find_one_by_id(&user_id)
        .await
        .context("failed at mongo query to find target user")?
        .ok_or(anyhow!("did not find any user with user_id {user_id}"))?;
    state
        .db
        .users
        .update_one::<Document>(
            &user_id,
            mungos::Update::Set(doc! { "create_build_permissions": create_build_permissions }),
        )
        .await?;
    let update_type = if create_build_permissions {
        "enabled"
    } else {
        "disabled"
    };
    let ts = monitor_timestamp();
    let mut update = Update {
        target: UpdateTarget::System,
        operation: Operation::ModifyUserCreateBuildPermissions,
        logs: vec![Log::simple(
            "modify user create build permissions",
            format!(
                "{update_type} create build permissions for {} (id: {})",
                target_user.username, target_user.id
            ),
        )],
        start_ts: ts.clone(),
        end_ts: Some(ts),
        status: UpdateStatus::Complete,
        success: true,
        // the admin performing the action, matching update_permissions
        operator: user.id.clone(),
        ..Default::default()
    };
    update.id = state.add_update(update.clone()).await?;
    Ok(update)
}

165
core/src/api/procedure.rs Normal file
View File

@@ -0,0 +1,165 @@
use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Document, Serialize};
use types::{traits::Permissioned, PermissionLevel, Procedure};
use typeshare::typeshare;
use crate::{
auth::{RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
};
use super::spawn_request_action;
/// Path params for routes addressing a single procedure (`/:id/...`).
#[derive(Serialize, Deserialize)]
pub struct ProcedureId {
    id: String,
}
/// Body for POST `/create`: the new procedure's name.
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateProcedureBody {
    name: String,
}
/// Builds the `/procedure` sub-router: get, list, create (by name or full
/// object), delete, update, and run. `/run` is spawned so a client
/// disconnect cannot cancel a procedure mid-execution.
pub fn router() -> Router {
    Router::new()
        .route(
            "/:id",
            get(
                // read a single procedure; requires Read permission
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(procedure_id): Path<ProcedureId>| async move {
                    let procedure = state
                        .get_procedure_check_permissions(
                            &procedure_id.id,
                            &user,
                            PermissionLevel::Read,
                        )
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(procedure))
                },
            ),
        )
        .route(
            "/list",
            get(
                // list procedures matching an arbitrary mongo query document,
                // filtered down to what the user may see
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Query(query): Query<Document>| async move {
                    let procedures = state
                        .list_procedures(&user, query)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(procedures))
                },
            ),
        )
        .route(
            "/create",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(procedure): Json<CreateProcedureBody>| async move {
                    let procedure = state
                        .create_procedure(&procedure.name, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(procedure))
                },
            ),
        )
        .route(
            "/create_full",
            post(
                // create a procedure from a fully-specified Procedure object
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(procedure): Json<Procedure>| async move {
                    let procedure = state
                        .create_full_procedure(procedure, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(procedure))
                },
            ),
        )
        .route(
            "/:id/delete",
            delete(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(procedure_id): Path<ProcedureId>| async move {
                    let procedure = state
                        .delete_procedure(&procedure_id.id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(procedure))
                },
            ),
        )
        .route(
            "/update",
            patch(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Json(procedure): Json<Procedure>| async move {
                    let procedure = state
                        .update_procedure(procedure, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(procedure))
                },
            ),
        )
        .route(
            "/:id/run",
            post(
                |Extension(state): StateExtension,
                 Extension(user): RequestUserExtension,
                 Path(procedure_id): Path<ProcedureId>| async move {
                    let update = spawn_request_action(async move {
                        state
                            .run_procedure(&procedure_id.id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(update))
                },
            ),
        )
}
impl State {
    /// List the procedures visible to `user`, optionally narrowed by a mongo
    /// filter document. Admins see everything; other users only see procedures
    /// on which they hold some permission level above `None`.
    async fn list_procedures(
        &self,
        user: &RequestUser,
        query: impl Into<Option<Document>>,
    ) -> anyhow::Result<Vec<Procedure>> {
        let all_procedures = self
            .db
            .procedures
            .get_some(query, None)
            .await
            .context("failed at get all procedures query")?;
        let visible = all_procedures
            .into_iter()
            .filter(|procedure| {
                user.is_admin
                    || procedure.get_user_permissions(&user.id) != PermissionLevel::None
            })
            .collect();
        // procedures.sort_by(|a, b| a.name.to_lowercase().cmp(&b.name.to_lowercase()));
        Ok(visible)
    }
}

112
core/src/api/secret.rs Normal file
View File

@@ -0,0 +1,112 @@
use anyhow::{anyhow, Context};
use axum::{
extract::Path,
routing::{delete, post},
Extension, Json, Router,
};
use helpers::{generate_secret, handle_anyhow_error};
use mungos::{doc, to_bson, Deserialize, Document, Serialize, Update};
use types::{monitor_timestamp, ApiSecret};
use typeshare::typeshare;
use crate::{auth::RequestUserExtension, state::StateExtension};
// length of the generated plaintext api secret
const SECRET_LENGTH: usize = 40;
// bcrypt work factor used when hashing secrets
const BCRYPT_COST: u32 = 10;
/// JSON body for `POST /create`. `expires` is an optional timestamp string.
#[typeshare]
#[derive(Serialize, Deserialize)]
struct CreateSecretBody {
    name: String,
    expires: Option<String>,
}
/// Path params for `DELETE /delete/:name` — secrets are addressed by name.
#[derive(Serialize, Deserialize)]
struct DeleteSecretPath {
    name: String,
}
/// Router for the `/secret` api: create a new api secret for the requesting
/// user, or delete one of their secrets by name.
pub fn router() -> Router {
    Router::new()
        .route(
            "/create",
            post(|state, user, secret| async {
                create(state, user, secret)
                    .await
                    .map_err(handle_anyhow_error)
            }),
        )
        .route(
            "/delete/:name",
            delete(|state, user, secret_id| async {
                delete_one(state, user, secret_id)
                    .await
                    .map_err(handle_anyhow_error)
            }),
        )
}
/// Conversion from the create request body into a fresh `ApiSecret`.
///
/// Implemented as `From` rather than `Into`, per Rust convention
/// (clippy::from_over_into); the blanket impl still provides
/// `Into<ApiSecret>`, so the existing `secret.into()` call sites keep
/// working. `hash` is left at its default and filled in by `create`.
impl From<CreateSecretBody> for ApiSecret {
    fn from(body: CreateSecretBody) -> ApiSecret {
        ApiSecret {
            name: body.name,
            expires: body.expires,
            created_at: monitor_timestamp(),
            ..Default::default()
        }
    }
}
/// Create a new api secret for the requesting user.
///
/// The plaintext secret is returned exactly once; only its bcrypt hash is
/// stored, pushed onto the user's `secrets` array in mongo. Fails if the
/// user already has a secret with the same name.
async fn create(
    Extension(state): StateExtension,
    Extension(req_user): RequestUserExtension,
    Json(secret): Json<CreateSecretBody>,
) -> anyhow::Result<String> {
    let user = state.db.get_user(&req_user.id).await?;
    // reject duplicate names up front so the caller gets a clear error
    if user.secrets.iter().any(|s| s.name == secret.name) {
        return Err(anyhow!("secret with name {} already exists", secret.name));
    }
    let mut secret: ApiSecret = secret.into();
    let secret_str = generate_secret(SECRET_LENGTH);
    secret.hash =
        bcrypt::hash(&secret_str, BCRYPT_COST).context("failed at hashing secret string")?;
    state
        .db
        .users
        .update_one::<Document>(
            &req_user.id,
            Update::Custom(doc! {
                "$push": {
                    "secrets": to_bson(&secret).context("failed at converting secret to bson")?
                }
            }),
        )
        .await
        .context("failed at mongo update query")?;
    // hand back the plaintext; it cannot be recovered later
    Ok(secret_str)
}
/// Delete one of the requesting user's api secrets by name.
/// `$pull` is a no-op if no secret with that name exists, so this
/// succeeds (idempotently) either way.
async fn delete_one(
    Extension(state): StateExtension,
    Extension(user): RequestUserExtension,
    Path(DeleteSecretPath { name }): Path<DeleteSecretPath>,
) -> anyhow::Result<()> {
    state
        .db
        .users
        .update_one::<Document>(
            &user.id,
            Update::Custom(doc! {
                "$pull": {
                    "secrets": {
                        "name": name
                    }
                }
            }),
        )
        .await
        .context("failed at mongo update query")?;
    Ok(())
}

647
core/src/api/server.rs Normal file
View File

@@ -0,0 +1,647 @@
use anyhow::{anyhow, Context};
use async_timing_util::get_timelength_in_ms;
use axum::{
extract::{ws::Message as AxumMessage, Path, Query, WebSocketUpgrade},
response::IntoResponse,
routing::{delete, get, patch, post},
Json, Router,
};
use futures_util::{future::join_all, SinkExt, StreamExt};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document};
use tokio::select;
use tokio_tungstenite::tungstenite::Message;
use tokio_util::sync::CancellationToken;
use types::{
traits::Permissioned, BasicContainerInfo, HistoricalStatsQuery, ImageSummary, Network,
PermissionLevel, Server, ServerActionState, ServerStatus, ServerWithStatus, SystemInformation,
SystemStats, SystemStatsQuery, SystemStatsRecord,
};
use typeshare::typeshare;
const MAX_HISTORICAL_STATS_LIMIT: i64 = 1000;
use crate::{
auth::{RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
};
use super::spawn_request_action;
/// Path params for routes addressing a single server (`/:id/...`).
#[derive(Deserialize)]
struct ServerId {
    id: String,
}
/// Query params for `/stats/at_ts` — a unix timestamp (ms).
#[derive(Deserialize)]
struct Ts {
    ts: i64,
}
/// JSON body for `POST /create`.
#[typeshare]
#[derive(Deserialize)]
pub struct CreateServerBody {
    name: String,
    address: String,
}
/// Router for the `/server` api.
///
/// Route groups:
/// - crud: `GET /:id`, `GET /list`, `POST /create`, `POST /create_full`,
///   `DELETE /:id/delete`, `PATCH /update`
/// - info: `GET /:id/version`, `GET /:id/system_information`
/// - stats: `GET /:id/stats`, `/:id/stats/history`, `/:id/stats/at_ts`,
///   `/:id/stats/ws` (live websocket)
/// - docker: `GET /:id/networks|images|containers` plus `POST .../prune`
/// - accounts: `GET /:id/github_accounts`, `GET /:id/docker_accounts`
/// - `GET /:id/action_state`
///
/// All handlers delegate to the `impl State` methods below, which perform
/// the permission checks.
pub fn router() -> Router {
    Router::new()
        .route(
            "/:id",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(server_id): Path<ServerId>| async move {
                    let server = state
                        .get_server(&server_id.id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(server))
                },
            ),
        )
        .route(
            "/list",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Query(query): Query<Document>| async move {
                    // raw query params are forwarded as a mongo filter document
                    let servers = state
                        .list_servers(&user, query)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(servers))
                },
            ),
        )
        .route(
            "/create",
            post(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Json(server): Json<CreateServerBody>| async move {
                    let server = state
                        .create_server(&server.name, server.address, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(server))
                },
            ),
        )
        .route(
            "/create_full",
            post(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Json(server): Json<Server>| async move {
                    let server = state
                        .create_full_server(server, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(server))
                },
            ),
        )
        .route(
            "/:id/delete",
            delete(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(server): Path<ServerId>| async move {
                    let server = state
                        .delete_server(&server.id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(server))
                },
            ),
        )
        .route(
            "/update",
            patch(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Json(server): Json<Server>| async move {
                    let server = state
                        .update_server(server, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(server))
                },
            ),
        )
        .route(
            "/:id/version",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let stats = state
                        .get_server_version(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/system_information",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let stats = state
                        .get_server_system_info(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/stats",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id }),
                 query: Query<SystemStatsQuery>| async move {
                    let stats = state
                        .get_server_stats(&id, &user, &query)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/stats/history",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id }),
                 query: Query<HistoricalStatsQuery>| async move {
                    let stats = state
                        .get_historical_stats(&id, &user, &query)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/stats/at_ts",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id }),
                 Query(Ts { ts })| async move {
                    let stats = state
                        .get_stats_at_ts(&id, &user, ts)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/stats/ws",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id }),
                 Query(query): Query<SystemStatsQuery>,
                 ws: WebSocketUpgrade| async move {
                    // upgrades to a websocket relaying periphery stats
                    let connection = state
                        .subscribe_to_stats_ws(&id, &user, &query, ws)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(connection)
                },
            ),
        )
        .route(
            "/:id/networks",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let stats = state
                        .get_networks(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/networks/prune",
            post(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    // spawned so the prune survives client disconnect;
                    // `??` unwraps join result then action result
                    let stats = spawn_request_action(async move {
                        state
                            .prune_networks(&id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/images",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let stats = state
                        .get_images(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/images/prune",
            post(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let stats = spawn_request_action(async move {
                        state
                            .prune_images(&id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/containers",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let stats = state
                        .get_containers(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/containers/prune",
            post(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let stats = spawn_request_action(async move {
                        state
                            .prune_containers(&id, &user)
                            .await
                            .map_err(handle_anyhow_error)
                    })
                    .await??;
                    response!(Json(stats))
                },
            ),
        )
        .route(
            "/:id/github_accounts",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let github_accounts = state
                        .get_github_accounts(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(github_accounts))
                },
            ),
        )
        .route(
            "/:id/docker_accounts",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let docker_accounts = state
                        .get_docker_accounts(&id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(docker_accounts))
                },
            ),
        )
        .route(
            "/:id/action_state",
            get(
                |state: StateExtension,
                 user: RequestUserExtension,
                 Path(ServerId { id })| async move {
                    let action_state = state
                        .get_server_action_states(id, &user)
                        .await
                        .map_err(handle_anyhow_error)?;
                    response!(Json(action_state))
                },
            ),
        )
}
impl State {
    /// Fetch one server (Read permission required) together with a live
    /// status: a periphery health check when the server is enabled,
    /// `Disabled` otherwise.
    async fn get_server(&self, id: &str, user: &RequestUser) -> anyhow::Result<ServerWithStatus> {
        let server = self
            .get_server_check_permissions(id, user, PermissionLevel::Read)
            .await?;
        let status = if server.enabled {
            let res = self.periphery.health_check(&server).await;
            match res {
                Ok(_) => ServerStatus::Ok,
                Err(_) => ServerStatus::NotOk,
            }
        } else {
            ServerStatus::Disabled
        };
        Ok(ServerWithStatus { server, status })
    }
    /// List servers visible to `user` (admins see all; others need any
    /// permission level above `None`), health-checking all of them
    /// concurrently via `join_all`.
    async fn list_servers(
        &self,
        user: &RequestUser,
        query: impl Into<Option<Document>>,
    ) -> anyhow::Result<Vec<ServerWithStatus>> {
        let futures = self
            .db
            .servers
            .get_some(query, None)
            .await
            .context("failed at get all servers query")?
            .into_iter()
            .filter(|s| {
                if user.is_admin {
                    true
                } else {
                    let permissions = s.get_user_permissions(&user.id);
                    permissions != PermissionLevel::None
                }
            })
            .map(|server| async {
                // same status logic as get_server, evaluated per server
                let status = if server.enabled {
                    let res = self.periphery.health_check(&server).await;
                    match res {
                        Ok(_) => ServerStatus::Ok,
                        Err(_) => ServerStatus::NotOk,
                    }
                } else {
                    ServerStatus::Disabled
                };
                ServerWithStatus { server, status }
            });
        Ok(join_all(futures).await)
    }
    /// Ask the periphery agent on the server for its version string.
    async fn get_server_version(
        &self,
        server_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<String> {
        let server = self
            .get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let version = self.periphery.get_version(&server).await.context(format!(
            "failed to get system information from server {}",
            server.name
        ))?;
        Ok(version)
    }
    /// Fetch static system information (os, cpu, …) from the periphery agent.
    async fn get_server_system_info(
        &self,
        server_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<SystemInformation> {
        let server = self
            .get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let stats = self
            .periphery
            .get_system_information(&server)
            .await
            .context(format!(
                "failed to get system information from server {}",
                server.name
            ))?;
        Ok(stats)
    }
    /// Fetch a current stats snapshot from the periphery agent; `query`
    /// selects which stat sections to include.
    async fn get_server_stats(
        &self,
        server_id: &str,
        user: &RequestUser,
        query: &SystemStatsQuery,
    ) -> anyhow::Result<SystemStats> {
        let server = self
            .get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let stats = self
            .periphery
            .get_system_stats(&server, query)
            .await
            .context(format!("failed to get stats from server {}", server.name))?;
        Ok(stats)
    }
    /// Page through recorded historical stats for a server.
    /// The limit is clamped to `MAX_HISTORICAL_STATS_LIMIT`; `interval`
    /// downsamples by matching timestamps on a modulus, and heavy fields
    /// (processes, per-disk, optionally networks/components) are projected out.
    async fn get_historical_stats(
        &self,
        server_id: &str,
        user: &RequestUser,
        query: &HistoricalStatsQuery,
    ) -> anyhow::Result<Vec<SystemStatsRecord>> {
        let limit = if query.limit as i64 > MAX_HISTORICAL_STATS_LIMIT {
            MAX_HISTORICAL_STATS_LIMIT
        } else {
            query.limit as i64
        };
        self.get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let ts_mod = get_timelength_in_ms(query.interval.to_string().parse().unwrap()) as i64;
        let mut projection = doc! { "processes": 0, "disk.disks": 0 };
        if !query.networks {
            projection.insert("networks", 0);
        }
        if !query.components {
            projection.insert("components", 0);
        }
        self.db
            .stats
            .get_most_recent(
                "ts",
                limit,
                query.page as u64 * limit as u64,
                doc! { "server_id": server_id, "ts": { "$mod": [ts_mod, 0] } },
                projection,
            )
            .await
            .context("failed at mongo query to get stats")
    }
    /// Fetch the full recorded stats entry at an exact timestamp.
    async fn get_stats_at_ts(
        &self,
        server_id: &str,
        user: &RequestUser,
        ts: i64,
    ) -> anyhow::Result<SystemStatsRecord> {
        self.get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        self.db
            .stats
            .find_one(doc! { "server_id": server_id, "ts": ts }, None)
            .await
            .context("failed at mongo query to get full stat entry")?
            .ok_or(anyhow!("did not find entry for server at time"))
    }
    /// Upgrade the request to a websocket and relay live stats messages
    /// from the periphery agent to the client. A cancellation token ties
    /// the relay task's lifetime to the client connection: when the client
    /// closes or errors, the relay loop is cancelled.
    async fn subscribe_to_stats_ws(
        &self,
        server_id: &str,
        user: &RequestUser,
        query: &SystemStatsQuery,
        ws: WebSocketUpgrade,
    ) -> anyhow::Result<impl IntoResponse> {
        let server = self
            .get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let mut stats_reciever = self.periphery.subscribe_to_stats_ws(&server, query).await?;
        let upgrade = ws.on_upgrade(|socket| async move {
            let (mut ws_sender, mut ws_recv) = socket.split();
            let cancel = CancellationToken::new();
            let cancel_clone = cancel.clone();
            // forward periphery stats -> client until cancelled
            tokio::spawn(async move {
                loop {
                    let stats = select! {
                        _ = cancel_clone.cancelled() => break,
                        stats = stats_reciever.next() => stats
                    };
                    if let Some(Ok(Message::Text(msg))) = stats {
                        let _ = ws_sender.send(AxumMessage::Text(msg)).await;
                    }
                }
            });
            // watch the client side; close/error cancels the forwarder
            while let Some(msg) = ws_recv.next().await {
                match msg {
                    Ok(msg) => match msg {
                        AxumMessage::Close(_) => {
                            cancel.cancel();
                            return;
                        }
                        _ => {}
                    },
                    Err(_) => {
                        cancel.cancel();
                        return;
                    }
                }
            }
        });
        Ok(upgrade)
    }
    /// List docker networks on the server via the periphery agent.
    async fn get_networks(
        &self,
        server_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Vec<Network>> {
        let server = self
            .get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let stats = self.periphery.network_list(&server).await.context(format!(
            "failed to get networks from server {}",
            server.name
        ))?;
        Ok(stats)
    }
    /// List docker images on the server via the periphery agent.
    async fn get_images(
        &self,
        server_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Vec<ImageSummary>> {
        let server = self
            .get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let images = self
            .periphery
            .image_list(&server)
            .await
            .context(format!("failed to get images from server {}", server.name))?;
        Ok(images)
    }
    /// List docker containers on the server via the periphery agent.
    async fn get_containers(
        &self,
        server_id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Vec<BasicContainerInfo>> {
        let server = self
            .get_server_check_permissions(server_id, user, PermissionLevel::Read)
            .await?;
        let containers = self
            .periphery
            .container_list(&server)
            .await
            .context(format!(
                "failed to get containers from server {}",
                server.name
            ))?;
        Ok(containers)
    }
    /// List github account names configured on the server's periphery agent.
    async fn get_github_accounts(
        &self,
        id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Vec<String>> {
        let server = self
            .get_server_check_permissions(id, user, PermissionLevel::Read)
            .await?;
        let github_accounts = self.periphery.get_github_accounts(&server).await?;
        Ok(github_accounts)
    }
    /// List docker account names configured on the server's periphery agent.
    async fn get_docker_accounts(
        &self,
        id: &str,
        user: &RequestUser,
    ) -> anyhow::Result<Vec<String>> {
        let server = self
            .get_server_check_permissions(id, user, PermissionLevel::Read)
            .await?;
        let docker_accounts = self.periphery.get_docker_accounts(&server).await?;
        Ok(docker_accounts)
    }
    /// Snapshot the in-memory action state for the server (which long
    /// running actions are currently in flight), inserting a default
    /// entry if none exists yet.
    async fn get_server_action_states(
        &self,
        id: String,
        user: &RequestUser,
    ) -> anyhow::Result<ServerActionState> {
        self.get_server_check_permissions(&id, &user, PermissionLevel::Read)
            .await?;
        let action_state = self
            .server_action_states
            .lock()
            .await
            .entry(id)
            .or_default()
            .clone();
        Ok(action_state)
    }
}

221
core/src/api/update.rs Normal file
View File

@@ -0,0 +1,221 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use axum::{extract::Query, routing::get, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, to_bson, ObjectId};
use serde_json::Value;
use types::{PermissionLevel, Update, UpdateTarget};
use crate::{
auth::{RequestUser, RequestUserExtension},
helpers::parse_comma_seperated_list,
response,
state::{State, StateExtension},
};
const NUM_UPDATES_PER_PAGE: usize = 10;
/// Router for the `/update` api: a single `GET /list` endpoint that pages
/// through updates, optionally filtered by target, operation names, and
/// (for deployments) whether to include the attached build's updates.
pub fn router() -> Router {
    Router::new().route(
        "/list",
        get(
            |Extension(state): StateExtension,
             Extension(user): RequestUserExtension,
             Query(value): Query<Value>| async move {
                // page offset; missing or non-numeric input falls back to 0
                let offset = value
                    .get("offset")
                    .map(|v| v.as_str().unwrap_or("0").parse().unwrap_or(0))
                    .unwrap_or(0);
                // the query params double as an UpdateTarget when they match its shape
                let target = serde_json::from_str::<UpdateTarget>(&value.to_string()).ok();
                let show_builds = value
                    .get("show_builds")
                    .map(|b| {
                        b.as_str()
                            .unwrap_or("false")
                            .parse::<bool>()
                            .unwrap_or_default()
                    })
                    .unwrap_or_default();
                // comma separated operation names; empty string means "no filter"
                let operations = value.get("operations").and_then(|o| {
                    let o = o.as_str().unwrap_or_default();
                    if o.is_empty() {
                        return None;
                    }
                    parse_comma_seperated_list::<String>(o).ok()
                });
                let updates = state
                    .list_updates(target, offset, show_builds, operations, &user)
                    .await
                    .map_err(handle_anyhow_error)?;
                response!(Json(updates))
            },
        ),
    )
}
impl State {
async fn permission_on_update_target(
&self,
update_target: &UpdateTarget,
user: &RequestUser,
) -> anyhow::Result<()> {
if user.is_admin {
Ok(())
} else {
match update_target {
UpdateTarget::System => {
if user.is_admin {
Ok(())
} else {
Err(anyhow!("user must be admin to see system updates"))
}
}
UpdateTarget::Build(id) => self
.get_build_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
UpdateTarget::Deployment(id) => self
.get_deployment_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
UpdateTarget::Server(id) => self
.get_server_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
UpdateTarget::Procedure(id) => self
.get_procedure_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
UpdateTarget::Group(id) => self
.get_group_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
}
}
}
pub async fn list_updates(
&self,
target: Option<UpdateTarget>,
offset: u64,
show_builds: bool,
operations: Option<Vec<String>>,
user: &RequestUser,
) -> anyhow::Result<Vec<Update>> {
let mut filter = match target {
Some(target) => {
if let (UpdateTarget::Deployment(id), true) = (&target, show_builds) {
let deployment = self
.get_deployment_check_permissions(id, user, PermissionLevel::Read)
.await?;
if let Some(build_id) = &deployment.build_id {
let build = self
.get_build_check_permissions(build_id, user, PermissionLevel::Read)
.await;
if let Ok(_) = build {
doc! {
"$or": [
{"target": to_bson(&target).unwrap()},
{"target": { "type": "Build", "id": build_id }, "operation": "build_build"}
],
}
} else {
doc! {
"target": to_bson(&target).unwrap()
}
}
} else {
doc! {
"target": to_bson(&target).unwrap()
}
}
} else {
self.permission_on_update_target(&target, user).await?;
doc! {
"target": to_bson(&target).unwrap()
}
}
}
None => {
if user.is_admin {
doc! {}
} else {
let permissions_field = format!("permissions.{}", user.id);
let target_filter = doc! {
"$or": [
{ &permissions_field: "update" },
{ &permissions_field: "execute" },
{ &permissions_field: "read" },
]
};
let build_ids = self
.db
.builds
.get_some(target_filter.clone(), None)
.await
.context("failed at query to get users builds")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.collect::<Vec<_>>();
let deployment_ids = self
.db
.deployments
.get_some(target_filter.clone(), None)
.await
.context("failed at query to get users deployments")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.collect::<Vec<_>>();
let server_ids = self
.db
.servers
.get_some(target_filter.clone(), None)
.await
.context("failed at query to get users servers")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.collect::<Vec<_>>();
let procedure_ids = self
.db
.procedures
.get_some(target_filter, None)
.await
.context("failed at query to get users procedures")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.collect::<Vec<_>>();
let filter = doc! {
"$or": [
{ "target.type": "Build", "target.id": { "$in": &build_ids } },
{ "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
{ "target.type": "Procedure", "target.id": { "$in": &procedure_ids } }
]
};
filter
}
}
};
if let Some(operations) = operations {
filter.insert("operation", doc! { "$in": operations });
}
let mut updates = self
.db
.updates
.get_most_recent(
"start_ts",
NUM_UPDATES_PER_PAGE as i64,
offset,
filter,
None,
)
.await
.context("mongo get most recent updates query failed")?;
updates.reverse();
Ok(updates)
}
}

95
core/src/auth/github.rs Normal file
View File

@@ -0,0 +1,95 @@
use std::sync::Arc;
use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::github::{GithubOauthClient, GithubOauthExtension};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use types::{monitor_timestamp, CoreConfig, User};
use crate::{response, state::StateExtension};
use super::JwtExtension;
/// Router for github oauth: `/login` redirects to github's consent page,
/// `/callback` finishes the flow. The oauth client (built from the core
/// config's app id/secret and host-derived redirect url) is shared via
/// an axum Extension layer.
pub fn router(config: &CoreConfig) -> Router {
    let client = GithubOauthClient::new(
        config.github_oauth.id.clone(),
        config.github_oauth.secret.clone(),
        format!("{}/auth/github/callback", config.host),
        &[],
        "monitor".to_string(),
    );
    Router::new()
        .route(
            "/login",
            get(|Extension(client): GithubOauthExtension| async move {
                Redirect::to(&client.get_login_redirect_url())
            }),
        )
        .route(
            "/callback",
            get(|client, jwt, state, query| async {
                let redirect = callback(client, jwt, state, query)
                    .await
                    .map_err(handle_anyhow_error)?;
                response!(redirect)
            }),
        )
        .layer(Extension(Arc::new(client)))
}
/// Query params github attaches to the callback redirect:
/// the anti-csrf `state` we issued and the authorization `code`.
#[derive(Deserialize)]
struct CallbackQuery {
    state: String,
    code: String,
}
/// Handle the github oauth callback: verify the anti-csrf state, exchange
/// the code for an access token, then log the matching monitor user in
/// (creating the account on first login) and redirect back to the frontend
/// with a short-lived exchange token instead of the jwt itself.
async fn callback(
    Extension(client): GithubOauthExtension,
    Extension(jwt_client): JwtExtension,
    Extension(state): StateExtension,
    Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
    if !client.check_state(&query.state) {
        return Err(anyhow!("state mismatch"));
    }
    let token = client.get_access_token(&query.code).await?;
    let github_user = client.get_github_user(&token.access_token).await?;
    let github_id = github_user.id.to_string();
    let user = state
        .db
        .users
        .find_one(doc! { "github_id": &github_id }, None)
        .await
        .context("failed at find user query from mongo")?;
    let jwt = match user {
        Some(user) => jwt_client
            .generate(user.id)
            .context("failed to generate jwt")?,
        None => {
            // first login with this github account: create the user.
            // NOTE(review): new oauth users are not enabled/admin by
            // default — presumably an admin enables them later; confirm.
            let ts = monitor_timestamp();
            let user = User {
                username: github_user.login,
                avatar: github_user.avatar_url.into(),
                github_id: github_id.into(),
                created_at: ts.clone(),
                updated_at: ts,
                ..Default::default()
            };
            let user_id = state
                .db
                .users
                .create_one(user)
                .await
                .context("failed to create user on mongo")?;
            jwt_client
                .generate(user_id)
                .context("failed to generate jwt")?
        }
    };
    // one-minute exchange token keeps the jwt out of the redirect url
    let exchange_token = jwt_client.create_exchange_token(jwt);
    Ok(Redirect::to(&format!(
        "{}?token={exchange_token}",
        state.config.host
    )))
}

118
core/src/auth/google.rs Normal file
View File

@@ -0,0 +1,118 @@
use std::sync::Arc;
use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::google::{GoogleOauthClient, GoogleOauthExtension};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use types::{monitor_timestamp, CoreConfig, User};
use crate::{response, state::StateExtension};
use super::JwtExtension;
/// Router for google oauth: `/login` redirects to google's consent page
/// (requesting profile + email scopes), `/callback` finishes the flow.
/// The oauth client is shared via an axum Extension layer.
pub fn router(config: &CoreConfig) -> Router {
    let client = GoogleOauthClient::new(
        config.google_oauth.id.clone(),
        config.google_oauth.secret.clone(),
        format!("{}/auth/google/callback", config.host),
        &[
            "https://www.googleapis.com/auth/userinfo.profile",
            "https://www.googleapis.com/auth/userinfo.email",
        ],
        "monitor".to_string(),
    );
    Router::new()
        .route(
            "/login",
            get(|Extension(client): GoogleOauthExtension| async move {
                Redirect::to(&client.get_login_redirect_url())
            }),
        )
        .route(
            "/callback",
            get(|client, jwt, state, query| async {
                let redirect = callback(client, jwt, state, query)
                    .await
                    .map_err(handle_anyhow_error)?;
                response!(redirect)
            }),
        )
        .layer(Extension(Arc::new(client)))
}
/// Query params google attaches to the callback redirect. All optional:
/// on success `state` + `code` are set; on failure `error` is set instead.
#[derive(Deserialize)]
struct CallbackQuery {
    state: Option<String>,
    code: Option<String>,
    error: Option<String>,
}
/// Handle the google oauth callback: surface any error from google, verify
/// the anti-csrf state, exchange the code for tokens, then log the matching
/// monitor user in (creating the account on first login) and redirect back
/// to the frontend with a short-lived exchange token instead of the jwt.
async fn callback(
    Extension(client): GoogleOauthExtension,
    Extension(jwt_client): JwtExtension,
    Extension(state): StateExtension,
    Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
    if let Some(error) = query.error {
        return Err(anyhow!("auth error from google: {error}"));
    }
    if !client.check_state(
        &query
            .state
            .ok_or(anyhow!("callback query does not contain state"))?,
    ) {
        return Err(anyhow!("state mismatch"));
    }
    let token = client
        .get_access_token(
            &query
                .code
                .ok_or(anyhow!("callback query does not contain code"))?,
        )
        .await?;
    let google_user = client.get_google_user(&token.id_token)?;
    let google_id = google_user.id.to_string();
    let user = state
        .db
        .users
        .find_one(doc! { "google_id": &google_id }, None)
        .await
        .context("failed at find user query from mongo")?;
    let jwt = match user {
        Some(user) => jwt_client
            .generate(user.id)
            .context("failed to generate jwt")?,
        None => {
            // first login with this google account: create the user
            let ts = monitor_timestamp();
            let user = User {
                // default username is the local part of the google email.
                // split always yields at least one item, so next() is Some;
                // unwrap_or_default only guards the impossible case.
                username: google_user
                    .email
                    .split('@')
                    .next()
                    .unwrap_or_default()
                    .to_string(),
                avatar: google_user.picture.into(),
                google_id: google_id.into(),
                created_at: ts.clone(),
                updated_at: ts,
                ..Default::default()
            };
            let user_id = state
                .db
                .users
                .create_one(user)
                .await
                .context("failed to create user on mongo")?;
            jwt_client
                .generate(user_id)
                .context("failed to generate jwt")?
        }
    };
    // one-minute exchange token keeps the jwt out of the redirect url
    let exchange_token = jwt_client.create_exchange_token(jwt);
    Ok(Redirect::to(&format!(
        "{}?token={exchange_token}",
        state.config.host
    )))
}

187
core/src/auth/jwt.rs Normal file
View File

@@ -0,0 +1,187 @@
use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use anyhow::{anyhow, Context};
use async_timing_util::{get_timelength_in_ms, unix_timestamp_ms, Timelength};
use axum::{body::Body, http::Request, Extension};
use axum_oauth2::random_string;
use hmac::{Hmac, Mac};
use jwt::{SignWithKey, VerifyWithKey};
use mungos::{Deserialize, Serialize};
use sha2::Sha256;
use types::{CoreConfig, User};
use crate::state::State;
/// Axum extension handle for the shared jwt client.
pub type JwtExtension = Extension<Arc<JwtClient>>;
/// Axum extension carrying the authenticated requester.
pub type RequestUserExtension = Extension<Arc<RequestUser>>;
// exchange token -> (jwt, expiry as unix ms)
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
/// Minimal identity attached to each authenticated request,
/// derived from the stored user record.
pub struct RequestUser {
    pub id: String,
    pub is_admin: bool,
    pub create_server_permissions: bool,
    pub create_build_permissions: bool,
}
/// Claims carried in a monitor jwt; `iat` / `exp` are unix timestamps in ms.
#[derive(Serialize, Deserialize)]
pub struct JwtClaims {
    pub id: String,
    pub iat: u128,
    pub exp: u128,
}
/// Signs and verifies jwts, and manages one-shot oauth exchange tokens.
pub struct JwtClient {
    key: Hmac<Sha256>,
    valid_for_ms: u128,
    exchange_tokens: ExchangeTokenMap,
}
impl JwtClient {
    /// Build the axum extension holding the jwt client, keyed off the
    /// configured `jwt_secret`. Panics on an invalid secret, since the
    /// server cannot run without one.
    pub fn extension(config: &CoreConfig) -> JwtExtension {
        let key = Hmac::new_from_slice(config.jwt_secret.as_bytes())
            .expect("failed at taking HmacSha256 of jwt secret");
        let client = JwtClient {
            key,
            valid_for_ms: get_timelength_in_ms(config.jwt_valid_for.to_string().parse().unwrap()),
            exchange_tokens: Default::default(),
        };
        Extension(Arc::new(client))
    }

    /// Sign a jwt for `user_id`, valid for the configured window.
    pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
        let iat = unix_timestamp_ms();
        let exp = iat + self.valid_for_ms;
        let claims = JwtClaims {
            id: user_id,
            iat,
            exp,
        };
        let jwt = claims
            .sign_with_key(&self.key)
            .context("failed at signing claim")?;
        Ok(jwt)
    }

    /// Pull the bearer jwt out of the `authorization` header.
    /// Shared by both `authenticate*` entry points (was duplicated).
    fn extract_bearer_jwt(req: &Request<Body>) -> anyhow::Result<String> {
        let jwt = req
            .headers()
            .get("authorization")
            .ok_or(anyhow!(
                "no authorization header provided. must be Bearer <jwt_token>"
            ))?
            .to_str()?
            .replace("Bearer ", "")
            .replace("bearer ", "");
        Ok(jwt)
    }

    /// Grab the shared `State` handle stashed in the request extensions.
    fn state_from_req(req: &Request<Body>) -> anyhow::Result<&Arc<State>> {
        req.extensions()
            .get::<Arc<State>>()
            .ok_or(anyhow!("failed at getting state handle"))
    }

    /// Authenticate a request and require the user to be enabled.
    /// Returns the lightweight `RequestUser` used for permission checks.
    pub async fn authenticate_check_enabled(
        &self,
        req: &Request<Body>,
    ) -> anyhow::Result<Arc<RequestUser>> {
        let jwt = Self::extract_bearer_jwt(req)?;
        let state = Self::state_from_req(req)?;
        let user = self
            .auth_jwt_check_enabled(&jwt, state)
            .await
            .context("failed to authenticate jwt")?;
        Ok(Arc::new(user))
    }

    /// Verify `jwt`, load its user, and require that the user is enabled.
    /// Delegates verification + lookup to `auth_jwt` (was duplicated).
    pub async fn auth_jwt_check_enabled(
        &self,
        jwt: &str,
        state: &State,
    ) -> anyhow::Result<RequestUser> {
        let user = self.auth_jwt(jwt, state).await?;
        if user.enabled {
            Ok(RequestUser {
                id: user.id,
                is_admin: user.admin,
                create_server_permissions: user.create_server_permissions,
                create_build_permissions: user.create_build_permissions,
            })
        } else {
            Err(anyhow!("user not enabled"))
        }
    }

    /// Authenticate a request and return the full stored user record,
    /// without checking the enabled flag.
    pub async fn authenticate(&self, req: &Request<Body>) -> anyhow::Result<User> {
        let jwt = Self::extract_bearer_jwt(req)?;
        let state = Self::state_from_req(req)?;
        let user = self
            .auth_jwt(&jwt, state)
            .await
            .context("failed to authenticate jwt")?;
        Ok(user)
    }

    /// Verify `jwt` (signature + expiry) and load the user it names.
    pub async fn auth_jwt(&self, jwt: &str, state: &State) -> anyhow::Result<User> {
        let claims: JwtClaims = jwt
            .verify_with_key(&self.key)
            .context("failed to verify claims")?;
        if claims.exp > unix_timestamp_ms() {
            let user = state
                .db
                .users
                .find_one_by_id(&claims.id)
                .await?
                .ok_or(anyhow!("did not find user with id {}", claims.id))?;
            Ok(user)
        } else {
            Err(anyhow!("token has expired"))
        }
    }

    /// Store `jwt` behind a random one-minute exchange token, so the jwt
    /// itself never appears in an oauth redirect url.
    pub fn create_exchange_token(&self, jwt: String) -> String {
        let exchange_token = random_string(40);
        self.exchange_tokens.lock().unwrap().insert(
            exchange_token.clone(),
            (
                jwt,
                unix_timestamp_ms() + get_timelength_in_ms(Timelength::OneMinute),
            ),
        );
        exchange_token
    }

    /// One-shot redemption: the token is removed from the map whether or
    /// not it is still inside its validity window.
    pub fn redeem_exchange_token(&self, exchange_token: &str) -> anyhow::Result<String> {
        let (jwt, valid_until) = self
            .exchange_tokens
            .lock()
            .unwrap()
            .remove(exchange_token)
            .ok_or(anyhow!("invalid exchange token: unrecognized"))?;
        if unix_timestamp_ms() < valid_until {
            Ok(jwt)
        } else {
            Err(anyhow!("invalid exchange token: expired"))
        }
    }
}

97
core/src/auth/local.rs Normal file
View File

@@ -0,0 +1,97 @@
use anyhow::{anyhow, Context};
use axum::{extract::Json, routing::post, Extension, Router};
use helpers::handle_anyhow_error;
use mungos::doc;
use types::{monitor_timestamp, User, UserCredentials};
use crate::state::StateExtension;
use super::jwt::JwtExtension;
const BCRYPT_COST: u32 = 10;
/// Routes for username/password auth: account creation and login.
pub fn router() -> Router {
    Router::new()
        .route(
            "/create_user",
            post(|state, jwt, creds| async {
                create_user_handler(state, jwt, creds)
                    .await
                    .map_err(handle_anyhow_error)
            }),
        )
        .route(
            "/login",
            post(|state, jwt, creds| async {
                login_handler(state, jwt, creds)
                    .await
                    .map_err(handle_anyhow_error)
            }),
        )
}
/// Hashes the supplied password and creates the user record, returning a
/// freshly generated jwt. The very first account ever created is
/// auto-enabled as admin with server-creation rights; later accounts start
/// disabled until approved.
async fn create_user_handler(
    Extension(state): StateExtension,
    Extension(jwt): JwtExtension,
    Json(UserCredentials { username, password }): Json<UserCredentials>,
) -> anyhow::Result<String> {
    let hashed = bcrypt::hash(password, BCRYPT_COST).context("failed to hash password")?;
    // An empty users collection means this is the first account.
    let is_first_user = state.db.users.find_one(None, None).await?.is_none();
    let now = monitor_timestamp();
    let user = User {
        username,
        password: Some(hashed),
        enabled: is_first_user,
        admin: is_first_user,
        create_server_permissions: is_first_user,
        created_at: now.clone(),
        updated_at: now,
        ..Default::default()
    };
    let user_id = state
        .db
        .users
        .create_one(user)
        .await
        .context("failed to create user")?;
    jwt.generate(user_id)
        .context("failed to generate jwt for user")
}
/// Verifies username/password credentials and returns a jwt on success.
async fn login_handler(
    Extension(state): StateExtension,
    Extension(jwt): JwtExtension,
    Json(UserCredentials { username, password }): Json<UserCredentials>,
) -> anyhow::Result<String> {
    let user = state
        .db
        .users
        .find_one(doc! { "username": &username }, None)
        .await
        .context("failed at mongo query")?
        .ok_or(anyhow!("did not find user with username {username}"))?;
    // Users created via oauth have no password hash stored.
    let stored_hash = user
        .password
        .ok_or(anyhow!("invalid login, user does not have password login"))?;
    // NOTE(review): the distinct "did not find user" error above lets a
    // caller enumerate usernames — confirm this is acceptable.
    if !bcrypt::verify(password, &stored_hash).context("failed at verify password")? {
        return Err(anyhow!("invalid credentials"));
    }
    jwt.generate(user.id)
        .context("failed at generating jwt for user")
}

109
core/src/auth/mod.rs Normal file
View File

@@ -0,0 +1,109 @@
use std::sync::Arc;
use axum::{
body::Body,
http::{Request, StatusCode},
middleware::Next,
response::Response,
routing::{get, post},
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Serialize};
use types::CoreConfig;
use typeshare::typeshare;
mod github;
mod google;
mod jwt;
mod local;
mod secret;
use crate::state::StateExtension;
pub use self::jwt::{JwtClaims, JwtClient, JwtExtension, RequestUser, RequestUserExtension};
#[typeshare]
#[derive(Serialize)]
// Tells the frontend which login methods this deployment supports
// (served by GET /auth/options below).
struct LoginOptions {
    local: bool,  // username/password auth enabled
    github: bool, // github oauth enabled and fully configured (id + secret)
    google: bool, // google oauth enabled and fully configured (id + secret)
}
/// Builds the `/auth` router. An oauth provider is only mounted when it is
/// enabled in config AND has both a client id and a client secret.
pub fn router(config: &CoreConfig) -> Router {
    let github_ready = config.github_oauth.enabled
        && !config.github_oauth.id.is_empty()
        && !config.github_oauth.secret.is_empty();
    let google_ready = config.google_oauth.enabled
        && !config.google_oauth.id.is_empty()
        && !config.google_oauth.secret.is_empty();
    let mut router = Router::new()
        .route(
            "/options",
            get(|Extension(state): StateExtension| async move {
                // Report availability from the live state config.
                Json(LoginOptions {
                    local: state.config.local_auth,
                    github: state.config.github_oauth.enabled
                        && !state.config.github_oauth.id.is_empty()
                        && !state.config.github_oauth.secret.is_empty(),
                    google: state.config.google_oauth.enabled
                        && !state.config.google_oauth.id.is_empty()
                        && !state.config.google_oauth.secret.is_empty(),
                })
            }),
        )
        .route(
            "/exchange",
            post(|jwt, body| async {
                exchange_for_jwt(jwt, body)
                    .await
                    .map_err(handle_anyhow_error)
            }),
        )
        .nest("/secret", secret::router());
    if config.local_auth {
        router = router.nest("/local", local::router());
    }
    if github_ready {
        router = router.nest("/github", github::router(config));
    }
    if google_ready {
        router = router.nest("/google", google::router(config));
    }
    router
}
#[typeshare]
#[derive(Deserialize)]
// Body of POST /auth/exchange: a short-lived single-use exchange token
// previously minted by the jwt client.
struct TokenExchangeBody {
    token: String,
}
async fn exchange_for_jwt(
Extension(jwt): JwtExtension,
Json(body): Json<TokenExchangeBody>,
) -> anyhow::Result<String> {
let jwt = jwt.redeem_exchange_token(&body.token)?;
Ok(jwt)
}
/// Axum middleware: authenticates the request's jwt (rejecting disabled
/// users) and injects the resolved user into the request extensions for
/// downstream handlers. Any failure maps to 401.
pub async fn auth_request(
    mut req: Request<Body>,
    next: Next<Body>,
) -> Result<Response, (StatusCode, String)> {
    let jwt_client = req.extensions().get::<Arc<JwtClient>>().ok_or((
        StatusCode::UNAUTHORIZED,
        "failed to get jwt client extension".to_string(),
    ))?;
    let user = jwt_client
        .authenticate_check_enabled(&req)
        .await
        // "{e:#}" renders anyhow's error chain for the client; the previous
        // "{e:#?}" leaked the pretty Debug representation of the error.
        .map_err(|e| (StatusCode::UNAUTHORIZED, format!("{e:#}")))?;
    req.extensions_mut().insert(user);
    Ok(next.run(req).await)
}

62
core/src/auth/secret.rs Normal file
View File

@@ -0,0 +1,62 @@
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, Update};
use types::unix_from_monitor_ts;
use crate::state::StateExtension;
use super::JwtExtension;
#[derive(Deserialize)]
// Body of POST /auth/secret/login: api-secret based login for a named user.
pub struct SecretLoginBody {
    username: String,
    // plaintext secret; compared against the user's stored bcrypt hashes
    secret: String,
}
/// Routes for api-secret authentication.
pub fn router() -> Router {
    Router::new().route(
        "/login",
        post(|state, jwt, body| async {
            login(state, jwt, body).await.map_err(handle_anyhow_error)
        }),
    )
}
/// Logs a user in with one of their api secrets.
/// Walks the user's stored secrets in order: expired secrets are pruned
/// from the db as they are encountered, and the first secret whose bcrypt
/// hash matches the supplied plaintext yields a jwt. Falls through to an
/// "invalid secret" error if none match.
pub async fn login(
    Extension(state): StateExtension,
    Extension(jwt): JwtExtension,
    Json(SecretLoginBody { username, secret }): Json<SecretLoginBody>,
) -> anyhow::Result<String> {
    let user = state
        .db
        .users
        .find_one(doc! { "username": &username }, None)
        .await
        .context("failed at mongo query")?
        .ok_or(anyhow!("did not find user with username {username}"))?;
    let ts = unix_timestamp_ms() as i64;
    for s in user.secrets {
        // Secrets may carry an expiry timestamp; expired ones are removed
        // from the user document as a side effect of this login attempt.
        if let Some(expires) = s.expires {
            let expires = unix_from_monitor_ts(&expires)?;
            if expires < ts {
                state
                    .db
                    .users
                    .update_one::<Document>(
                        &user.id,
                        Update::Custom(doc! { "$pull": { "secrets": { "name": s.name } } }),
                    )
                    .await
                    .context("failed to remove expired secret")?;
                continue;
            }
        }
        // bcrypt comparison against this secret's stored hash.
        if bcrypt::verify(&secret, &s.hash).context("failed at verifying hash")? {
            let jwt = jwt
                .generate(user.id)
                .context("failed at generating jwt for user")?;
            return Ok(jwt);
        }
    }
    Err(anyhow!("invalid secret"))
}

199
core/src/cloud/aws.rs Normal file
View File

@@ -0,0 +1,199 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use aws_sdk_ec2::model::{
BlockDeviceMapping, EbsBlockDevice, InstanceNetworkInterfaceSpecification, InstanceStateChange,
InstanceStateName, InstanceStatus, ResourceType, Tag, TagSpecification,
};
pub use aws_sdk_ec2::{
model::InstanceType,
output::{DescribeInstanceStatusOutput, TerminateInstancesOutput},
Client, Region,
};
use types::Server;
/// Builds an EC2 client for the given region using the supplied credentials.
pub async fn create_ec2_client(
    region: String,
    access_key_id: &str,
    secret_access_key: String,
) -> Client {
    // Credentials are handed to the SDK via process-wide env vars.
    // NOTE(review): this races if clients for different accounts are built
    // concurrently — a credentials provider would be safer. TODO confirm.
    std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
    std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
    let config = aws_config::from_env()
        .region(Region::new(region))
        .load()
        .await;
    Client::new(&config)
}
// Handle returned after instance creation: the aws instance id plus a
// partially-populated Server pointing at the new machine's address.
pub struct Ec2Instance {
    pub instance_id: String,
    pub server: Server,
}
// Instance-state polling cadence: check every 2s, give up after 30 tries
// (~60s total).
const POLL_RATE_SECS: u64 = 2;
const MAX_POLL_TRIES: usize = 30;
/// Launches an EC2 instance from the given AMI, then polls until it
/// reports the Running state before returning its id and a Server record
/// with the instance's address (port 8000).
/// this will only resolve after the instance is running
/// should still poll the periphery agent after creation
pub async fn create_instance_with_ami(
    client: &Client,
    instance_name: &str,
    ami_id: &str,
    instance_type: &str,
    subnet_id: &str,
    security_group_ids: Vec<String>,
    volume_size_gb: i32,
    key_pair_name: &str,
    assign_public_ip: bool,
) -> anyhow::Result<Ec2Instance> {
    // Reject typo'd instance types up front rather than failing in aws.
    let instance_type = InstanceType::from(instance_type);
    if let InstanceType::Unknown(t) = instance_type {
        return Err(anyhow!("unknown instance type {t:?}"));
    }
    let res = client
        .run_instances()
        .image_id(ami_id)
        .instance_type(instance_type)
        // Root volume on /dev/sda1 with the requested size in GB.
        .block_device_mappings(
            BlockDeviceMapping::builder()
                .set_device_name(String::from("/dev/sda1").into())
                .set_ebs(
                    EbsBlockDevice::builder()
                        .volume_size(volume_size_gb)
                        .build()
                        .into(),
                )
                .build(),
        )
        .network_interfaces(
            InstanceNetworkInterfaceSpecification::builder()
                .subnet_id(subnet_id)
                .associate_public_ip_address(assign_public_ip)
                .set_groups(security_group_ids.into())
                .device_index(0)
                .build(),
        )
        .key_name(key_pair_name)
        // Tag the instance with its display name so it shows in the console.
        .tag_specifications(
            TagSpecification::builder()
                .tags(Tag::builder().key("Name").value(instance_name).build())
                .resource_type(ResourceType::Instance)
                .build(),
        )
        .min_count(1)
        .max_count(1)
        .send()
        .await
        .context("failed to start builder ec2 instance")?;
    let instance = res
        .instances()
        .ok_or(anyhow!("got None for created instances"))?
        .get(0)
        .ok_or(anyhow!("instances array is empty"))?;
    let instance_id = instance
        .instance_id()
        .ok_or(anyhow!("instance does not have instance_id"))?
        .to_string();
    // Poll every POLL_RATE_SECS until Running, up to MAX_POLL_TRIES (~60s).
    for _ in 0..MAX_POLL_TRIES {
        let state_name = get_ec2_instance_state_name(&client, &instance_id).await?;
        if state_name == Some(InstanceStateName::Running) {
            // Public ip must be re-fetched after the instance is running;
            // the private ip is already in the run_instances response.
            let ip = if assign_public_ip {
                get_ec2_instance_public_ip(client, &instance_id).await?
            } else {
                instance
                    .private_ip_address()
                    .ok_or(anyhow!("instance does not have private ip"))?
                    .to_string()
            };
            // NOTE(review): 8000 is assumed to be the periphery agent's
            // port — confirm against periphery config.
            let server = Server {
                address: format!("http://{ip}:8000"),
                ..Default::default()
            };
            return Ok(Ec2Instance {
                instance_id,
                server,
            });
        }
        tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
    }
    Err(anyhow!("instance not running after polling"))
}
/// Fetches the status record for a single instance. Returns Ok(None) when
/// aws reports no status entries for the id.
pub async fn get_ec2_instance_status(
    client: &Client,
    instance_id: &str,
) -> anyhow::Result<Option<InstanceStatus>> {
    let res = client
        .describe_instance_status()
        .instance_ids(instance_id)
        .send()
        .await
        .context("failed to get instance status from aws")?;
    let statuses = res
        .instance_statuses()
        .ok_or(anyhow!("instance statuses is None"))?;
    Ok(statuses.first().map(|s| s.to_owned()))
}
/// Resolves the instance's lifecycle state name (Pending/Running/...),
/// or Ok(None) when aws has no status entry for the instance yet.
pub async fn get_ec2_instance_state_name(
    client: &Client,
    instance_id: &str,
) -> anyhow::Result<Option<InstanceStateName>> {
    // `match` replaces the previous is_none() check + unwrap() pair.
    let status = match get_ec2_instance_status(client, instance_id).await? {
        Some(status) => status,
        None => return Ok(None),
    };
    let state = status
        .instance_state()
        .ok_or(anyhow!("instance state is None"))?
        .name()
        .ok_or(anyhow!("instance state name is None"))?
        .to_owned();
    Ok(Some(state))
}
/// Looks up the public ip of a running instance via DescribeInstances.
pub async fn get_ec2_instance_public_ip(
    client: &Client,
    instance_id: &str,
) -> anyhow::Result<String> {
    let ip = client
        .describe_instances()
        .instance_ids(instance_id)
        .send()
        .await
        // was "failed to get instance status from aws" — copy-pasted from
        // the status helper; this call describes instances, not statuses.
        .context("failed to describe instances from aws")?
        .reservations()
        .ok_or(anyhow!("instance reservations is None"))?
        .get(0)
        .ok_or(anyhow!("instance reservations is empty"))?
        .instances()
        .ok_or(anyhow!("instances is None"))?
        .get(0)
        .ok_or(anyhow!("instances is empty"))?
        .public_ip_address()
        .ok_or(anyhow!("instance has no public ip"))?
        .to_string();
    Ok(ip)
}
/// Requests termination of the instance and returns the state transition
/// aws reports for it.
pub async fn terminate_ec2_instance(
    client: &Client,
    instance_id: &str,
) -> anyhow::Result<InstanceStateChange> {
    let output = client
        .terminate_instances()
        .instance_ids(instance_id)
        .send()
        .await
        .context("failed to terminate instance from aws")?;
    let change = output
        .terminating_instances()
        .ok_or(anyhow!("terminating instances is None"))?
        .first()
        .ok_or(anyhow!("terminating instances is empty"))?
        .to_owned();
    Ok(change)
}

1
core/src/cloud/mod.rs Normal file
View File

@@ -0,0 +1 @@
pub mod aws;

29
core/src/config.rs Normal file
View File

@@ -0,0 +1,29 @@
use axum_extra::routing::SpaRouter;
use dotenv::dotenv;
use helpers::parse_config_file;
use mungos::Deserialize;
use types::CoreConfig;
#[derive(Deserialize, Debug)]
// Environment variables consumed at startup (parsed with envy).
struct Env {
    // Path to the toml config file (CONFIG_PATH).
    #[serde(default = "default_config_path")]
    pub config_path: String,
    // Directory the frontend bundle is served from (FRONTEND_PATH).
    #[serde(default = "default_frontend_path")]
    pub frontend_path: String,
}
/// Loads .env, parses env vars, reads the core config file, and builds the
/// static-asset router for the frontend bundle. Panics (by design) on any
/// startup misconfiguration.
pub fn load() -> (CoreConfig, SpaRouter) {
    dotenv().ok();
    let env = envy::from_env::<Env>().expect("failed to parse environment variables");
    let config = parse_config_file(&env.config_path).expect("failed to parse config");
    (config, SpaRouter::new("/assets", env.frontend_path))
}
/// Default location of the core config file inside the container.
pub fn default_config_path() -> String {
    String::from("/config/config.toml")
}
/// Default location of the built frontend assets inside the container.
fn default_frontend_path() -> String {
    String::from("/frontend")
}

View File

@@ -1,52 +0,0 @@
import { CoreSecrets } from "@monitor/types";
import {
getBooleanFromEnv,
getNumberFromEnv,
getStringFromEnv,
readJSONFile,
} from "@monitor/util-node";
import { join } from "path";
// Display name for this core instance.
export const CORE_SERVER_NAME = getStringFromEnv(
  "CORE_SERVER_NAME",
  "Monitor Core"
);
// Mutable binding so refreshSecrets can re-read the file at runtime.
export let SECRETS: CoreSecrets = readJSONFile("/secrets/secrets.json");
// Re-reads the secrets file, replacing the in-memory SECRETS object.
export function refreshSecrets() {
  SECRETS = readJSONFile("/secrets/secrets.json");
}
export const LOGGER = getBooleanFromEnv("LOGGER", false);
export const PORT = getNumberFromEnv("PORT", 9000);
export const HOST = getStringFromEnv("HOST", "http://localhost:" + PORT);
export const MONGO_URL = getStringFromEnv(
  "MONGO_URL",
  "mongodb://127.0.0.1:27017/monitor"
);
// Auth / password hashing settings.
export const TOKEN_EXPIRES_IN = getStringFromEnv("TOKEN_EXPIRES_IN", "7d");
export const PASSWORD_SALT_ROUNDS = getNumberFromEnv("PASSWORD_SALT_ROUNDS", 8);
export const SYSROOT = getStringFromEnv("SYSROOT", "/home/ubuntu/"); // the root folder monitor has access to, prepends volumes mounted using useSysroot
export const ROOT = "/monitor-root/"; // the root folder in the container that SYSROOT is mounted on
export const DEPLOYDATA_ROOT = "deployments/";
// Where build and deployment repos are cloned (container vs host views).
export const BUILD_REPO_PATH = join(ROOT, "builds");
export const DEPLOYMENT_REPO_PATH = join(ROOT, "repos");
export const SYS_DEPLOYMENT_REPO_PATH = join(SYSROOT, "repos");
// export const REGISTRY_URL = getStringFromEnv("REGISTRY_URL", "localhost:5000/");
export const FRONTEND_PATH = getStringFromEnv("FRONTEND_PATH", "/frontend");
export const SYSTEM_OPERATOR = "Monitor";
// Log body attached to updates when a permissions check denies an action.
export const PERMISSIONS_DENY_LOG = {
  stderr: "Someone tried to access this route without appropriate permissions",
};
// Page size for update queries.
export const UPDATES_PER_REQUEST = getNumberFromEnv("UPDATES_PER_REQUEST", 10);
export const SERVER_CHECK_TIMEOUT = getNumberFromEnv(
  "SERVER_CHECK_TIMEOUT",
  1000
);
// Stats collection / alerting cadence and thresholds (percent limits).
export const SERVER_STATS_INTERVAL = getNumberFromEnv("SERVER_STATS_INTERVAL_MINUTES", 5) * 60 * 1000; // 5 minute check default
export const CLEAR_ALREADY_ALERTED_INTERVAL = getNumberFromEnv("CLEAR_ALREADY_ALERTED_INTERVAL_HOUR", 24) * 60 * 60 * 1000; // 24 hour default clear interval
export const SLACK_CHANNEL = getStringFromEnv("SLACK_CHANNEL", "");
export const CPU_USAGE_NOTIFY_LIMIT = getNumberFromEnv("CPU_USAGE_NOTIFY_LIMIT", 50);
export const MEM_USAGE_NOTIFY_LIMIT = getNumberFromEnv("MEM_USAGE_NOTIFY_LIMIT", 75);
export const DISK_USAGE_NOTIFY_LIMIT = getNumberFromEnv("DISK_USAGE_NOTIFY_LIMIT", 75);
export const DAILY_UPDATE_UTC_HOUR = getNumberFromEnv("DAILY_UPDATE_UTC_HOUR", 14);

56
core/src/helpers.rs Normal file
View File

@@ -0,0 +1,56 @@
use std::str::FromStr;
use anyhow::anyhow;
use diff::{Diff, OptionDiff};
use helpers::to_monitor_name;
use types::Build;
// Wraps an expression in the Ok variant of the axum handler result type,
// pinning the error type to (StatusCode, String) so type inference works
// in handlers that use `?`.
#[macro_export]
macro_rules! response {
    ($x:expr) => {
        Ok::<_, (axum::http::StatusCode, String)>($x)
    };
}
/// True when the diff records an actual change, i.e. it is neither
/// `NoChange` nor `None`.
pub fn option_diff_is_some<T: Diff>(diff: &OptionDiff<T>) -> bool
where
    <T as Diff>::Repr: PartialEq,
{
    !(diff == &OptionDiff::NoChange || diff == &OptionDiff::None)
}
pub fn any_option_diff_is_some<T: Diff>(diffs: &[&OptionDiff<T>]) -> bool
where
<T as Diff>::Repr: PartialEq,
{
for diff in diffs {
if diff != &&OptionDiff::NoChange && diff != &&OptionDiff::None {
return true;
}
}
return false;
}
/// Parses a comma separated list into a Vec<T>, skipping empty segments
/// (so leading / trailing / doubled commas are tolerated).
pub fn parse_comma_seperated_list<T: FromStr>(comma_sep_list: &str) -> anyhow::Result<Vec<T>> {
    let mut parsed_items = Vec::new();
    for item in comma_sep_list.split(',') {
        if item.is_empty() {
            continue;
        }
        let parsed = item
            .parse()
            .map_err(|_| anyhow!("error parsing string {item} into type T"))?;
        parsed_items.push(parsed);
    }
    Ok(parsed_items)
}
/// Computes the docker image name for a build: prefixed by the docker
/// organization when set, else by the docker account, else unprefixed.
pub fn get_image_name(build: &Build) -> String {
    let name = to_monitor_name(&build.name);
    let namespace = build
        .docker_organization
        .as_ref()
        .or(build.docker_account.as_ref());
    match namespace {
        Some(prefix) => format!("{prefix}/{name}"),
        None => name,
    }
}

45
core/src/main.rs Normal file
View File

@@ -0,0 +1,45 @@
// #![allow(unused)]
use ::helpers::get_socket_addr;
use auth::JwtClient;
use axum::Router;
use state::State;
use tower_http::cors::{Any, CorsLayer};
mod actions;
mod api;
mod auth;
mod cloud;
mod config;
mod helpers;
mod monitoring;
mod state;
mod ws;
#[tokio::main]
async fn main() {
    // Parse env + config file; spa_router serves the built frontend bundle.
    let (config, spa_router) = config::load();
    println!("starting monitor core on port {}...", config.port);
    let app = Router::new()
        .merge(spa_router)
        .nest("/api", api::router())
        .nest("/auth", auth::router(&config))
        .nest("/ws", ws::router())
        // Layers apply to all routes above: jwt client + shared app state.
        .layer(JwtClient::extension(&config))
        .layer(State::extension(config.clone()).await)
        // Fully permissive CORS.
        .layer(
            CorsLayer::new()
                .allow_origin(Any)
                .allow_methods(Any)
                .allow_headers(Any),
        );
    println!("started monitor core on port {}", config.port);
    // Blocks for the lifetime of the process.
    axum::Server::bind(&get_socket_addr(config.port))
        .serve(app.into_make_service())
        .await
        .expect("monitor core axum server crashed");
}

View File

@@ -1,51 +0,0 @@
import fastify from "fastify";
import fastifyCors from "fastify-cors";
import fastifyHelmet from "fastify-helmet";
import { HOST, LOGGER, PORT } from "./config";
import auth from "./plugins/auth";
import db from "./plugins/db";
import ws from "./plugins/ws";
import docker from "./plugins/docker";
import frontend from "./plugins/frontend";
import actionStates from "./plugins/actionStates";
import routes from "./routes";
import slackNotifier from "./plugins/slackNotifier";
import statCollector from "./plugins/statRecorder";
// Boots the fastify server: registers all plugins and routes, then listens.
async function main() {
  const app = fastify({ logger: LOGGER });
  // Helmet CSP is only applied when the configured HOST is https.
  if (HOST.includes("https")) {
    app.register(fastifyHelmet, {
      contentSecurityPolicy: {
        useDefaults: true,
        directives: {
          // allow the websocket endpoint derived from HOST
          "connect-src": ["'self'", HOST.replace("http", "ws") + "/ws"],
          "img-src": ["'self'", "https: data:"],
        },
      },
    });
  }
  // NOTE(review): registration order presumably matters (later plugins use
  // decorations from earlier ones, e.g. db before routes) — confirm.
  app
    .register(fastifyCors)
    .register(db)
    .register(docker)
    .register(auth)
    .register(ws)
    .register(frontend)
    .register(actionStates)
    .register(slackNotifier)
    .register(statCollector)
    .register(routes);
  app.listen(PORT, "0.0.0.0", async (err, address) => {
    if (err) {
      app.log.error(err);
      process.exit(1);
    }
    if (!LOGGER) console.log(`monitor core listening at ${address}`);
  });
}
main();

View File

@@ -1,101 +0,0 @@
import { User } from "@monitor/types";
import {
BUILD,
mergeCommandLogError,
prettyStringify,
} from "@monitor/util";
import { join } from "path";
import { FastifyInstance } from "fastify";
import {
PERMISSIONS_DENY_LOG,
BUILD_REPO_PATH,
SECRETS,
} from "../../config";
import { BUILDING } from "../../plugins/actionStates";
import { addBuildUpdate } from "../../util/updates";
import { WebSocket } from "ws";
import { sendAlert } from "../../util/helpers";
import { dockerBuild, execute } from "@monitor/util-node";
// Runs a full build for buildID: git pull, optional cli build step, and
// optional docker build, recording the merged logs as a build update.
async function build(
  app: FastifyInstance,
  client: WebSocket,
  user: User,
  { buildID, note }: { buildID: string; note?: string }
) {
  // Reject re-entrant builds for the same build id.
  if (app.buildActionStates.busy(buildID)) {
    sendAlert(client, "bad", "build busy, try again in a bit");
    return;
  }
  const build = await app.builds.findById(buildID);
  if (!build) return;
  // permissions < 2 (non-admin) may only build builds they own.
  if (user.permissions! < 2 && !build.owners.includes(user.username)) {
    addBuildUpdate(
      app,
      buildID,
      BUILD,
      "Build (DENIED)",
      PERMISSIONS_DENY_LOG,
      user.username,
      note,
      true
    );
    return;
  }
  // Mark busy and tell subscribed clients the build started.
  app.buildActionStates.set(buildID, BUILDING, true);
  app.broadcast(BUILD, { complete: false, buildID }, app.buildUserFilter(buildID));
  const { cliBuild, dockerBuildArgs } = build;
  try {
    // Refresh the cloned repo before building.
    const pull = await execute(
      `cd ${join(BUILD_REPO_PATH, build.pullName || "")} && git pull origin ${
        build.branch || "main"
      }`
    );
    // Optional command-line build step, run in its configured subdir.
    const cli =
      cliBuild &&
      (await execute(
        `cd ${join(BUILD_REPO_PATH, build.pullName!, cliBuild.path || "")} && ${
          cliBuild.command
        }`
      ));
    // Optional docker build using the configured docker account credentials.
    const docker =
      dockerBuildArgs &&
      (await dockerBuild(
        build.pullName!,
        dockerBuildArgs,
        BUILD_REPO_PATH,
        build.dockerAccount,
        build.dockerAccount && SECRETS.DOCKER_ACCOUNTS[build.dockerAccount]
      ));
    const { command, log, isError } = mergeCommandLogError(
      { name: "pull", cle: pull },
      { name: "cli", cle: cli },
      { name: "docker", cle: docker }
    );
    addBuildUpdate(
      app,
      buildID,
      BUILD,
      command,
      log,
      user.username,
      note,
      isError
    );
  } catch (error) {
    addBuildUpdate(
      app,
      buildID,
      BUILD,
      "Build (ERROR)",
      { stderr: prettyStringify(error) },
      user.username,
      note,
      true
    );
  }
  // Runs after both success and error paths: notify completion, clear busy.
  app.broadcast(BUILD, { complete: true, buildID }, app.buildUserFilter(buildID));
  app.buildActionStates.set(buildID, BUILDING, false);
}
export default build;

View File

@@ -1,49 +0,0 @@
import { Build, User } from "@monitor/types";
import { CLONE_BUILD_REPO, mergeCommandLogError } from "@monitor/util";
import { join } from "path";
import { FastifyInstance } from "fastify";
import { BUILD_REPO_PATH, SECRETS } from "../../config";
import { addBuildUpdate } from "../../util/updates";
import { clone, execute } from "@monitor/util-node";
/**
 * Clones a build's repo into BUILD_REPO_PATH, runs the optional onClone
 * command in its configured subdirectory, and records the merged result
 * as a build update.
 */
async function cloneRepo(
  app: FastifyInstance,
  user: User,
  { pullName, branch, repo, subfolder, onClone, githubAccount, _id }: Build
) {
  const githubToken = githubAccount && SECRETS.GITHUB_ACCOUNTS[githubAccount];
  const cloneResult = await clone(
    repo!,
    join(BUILD_REPO_PATH, pullName!),
    subfolder,
    branch,
    githubToken
  );
  // Only run the post-clone hook when a command is configured; a falsy
  // execute result is normalized to undefined, as before.
  let postCloneResult;
  if (onClone?.command) {
    const hookDir = join(BUILD_REPO_PATH, pullName!, onClone.path || "");
    postCloneResult =
      (await execute(`cd ${hookDir} && ${onClone.command}`)) || undefined;
  }
  const { command, log, isError } = mergeCommandLogError(
    { name: "clone", cle: cloneResult },
    { name: "post clone", cle: postCloneResult }
  );
  addBuildUpdate(
    app,
    _id!,
    CLONE_BUILD_REPO,
    command,
    log,
    user.username,
    "",
    isError
  );
}
export default cloneRepo;

View File

@@ -1,59 +0,0 @@
import { Build, User } from "@monitor/types";
import { FastifyInstance } from "fastify";
import { CREATE_BUILD, prettyStringify } from "@monitor/util";
import { PERMISSIONS_DENY_LOG } from "../../config";
import { toDashedName } from "../../util/helpers";
import { addBuildUpdate, addSystemUpdate } from "../../util/updates";
import cloneRepo from "./clone";
// Creates a build record (requires permissions >= 1), clones its repo if
// one is configured, and records an update either way. Returns the created
// build, or undefined when denied or on error.
async function createBuild(
  app: FastifyInstance,
  user: User,
  message: { build: Build; note?: string }
) {
  if (user.permissions! < 1) {
    addSystemUpdate(
      app,
      CREATE_BUILD,
      "Create Build (DENIED)",
      PERMISSIONS_DENY_LOG,
      user.username,
      message.note,
      true
    );
    return;
  }
  try {
    // The creator becomes the sole initial owner.
    const build = await app.builds.create({
      ...message.build,
      pullName: toDashedName(message.build.name),
      owners: [user.username],
    });
    // Register action-state tracking for the new build id.
    app.buildActionStates.add(build._id!);
    addBuildUpdate(
      app,
      build._id!,
      CREATE_BUILD,
      "Create Build",
      { stdout: "Build Created: " + build.name },
      user.username,
      message.note
    );
    if (build.repo) {
      await cloneRepo(app, user, build);
    }
    return build;
  } catch (err) {
    addSystemUpdate(
      app,
      CREATE_BUILD,
      "Create Build (ERROR)",
      { stderr: prettyStringify(err) },
      user.username,
      message.note,
      true
    );
  }
}
export default createBuild;

View File

@@ -1,68 +0,0 @@
import { User } from "@monitor/types";
import { FastifyInstance } from "fastify";
import { remove } from "fs-extra";
import { join } from "path";
import { DELETE_BUILD, prettyStringify } from "@monitor/util";
import { PERMISSIONS_DENY_LOG, BUILD_REPO_PATH } from "../../config";
import { addSystemUpdate } from "../../util/updates";
import { WebSocket } from "ws";
import { sendAlert } from "../../util/helpers";
// Deletes a build (owners or admins only): removes the db record, detaches
// deployments that referenced it, and removes its cloned repo from disk.
// Returns true on success, undefined when skipped/denied or on error.
async function deleteBuild(
  app: FastifyInstance,
  client: WebSocket,
  user: User,
  { buildID, note }: { buildID: string; note?: string }
) {
  if (app.buildActionStates.busy(buildID)) {
    sendAlert(client, "bad", "build busy, try again in a bit");
    return;
  }
  const build = await app.builds.findById(buildID);
  if (!build) return;
  // permissions < 2 (non-admin) may only delete builds they own.
  if (user.permissions! < 2 && !build.owners.includes(user.username)) {
    addSystemUpdate(
      app,
      DELETE_BUILD,
      "Delete Build (DENIED)",
      PERMISSIONS_DENY_LOG,
      user.username,
      note,
      true
    );
    return;
  }
  app.buildActionStates.set(buildID, "deleting", true);
  app.broadcast(DELETE_BUILD, { buildID, complete: false }, app.buildUserFilter(buildID, build));
  try {
    await app.builds.findByIdAndDelete(buildID);
    // Detach deployments that pointed at this build.
    await app.deployments.updateMany(
      { buildID: build._id },
      { buildID: undefined }
    );
    if (build!.repo) await remove(join(BUILD_REPO_PATH, build.pullName!));
    app.buildActionStates.delete(buildID);
    addSystemUpdate(app, DELETE_BUILD, "Delete Build", {}, user.username, note);
    app.broadcast(
      DELETE_BUILD,
      { buildID, complete: true },
      app.buildUserFilter(buildID, build)
    );
    return true;
  } catch (error) {
    // On failure, clear the deleting flag so the build isn't stuck busy.
    app.buildActionStates.set(buildID, "deleting", false);
    addSystemUpdate(
      app,
      DELETE_BUILD,
      "Delete Build",
      {
        stderr: prettyStringify(error),
      },
      user.username,
      note,
      true
    );
  }
}
export default deleteBuild;

View File

@@ -1,110 +0,0 @@
import { User } from "@monitor/types";
import {
BUILD,
CLONE_BUILD_REPO,
CREATE_BUILD,
DELETE_BUILD,
PULL_BUILD,
UPDATE_BUILD,
} from "@monitor/util";
import { FastifyInstance } from "fastify";
import { remove } from "fs-extra";
import { WebSocket } from "ws";
import { join } from "path";
import { BUILD_REPO_PATH } from "../../config";
import { sendAlert } from "../../util/helpers";
import build from "./build";
import cloneRepo from "./clone";
import createBuild from "./create";
import deleteBuild from "./delete";
import pullRepo from "./pull";
import updateBuild from "./update";
/**
 * Dispatches an incoming build-related ws message. Returns true when the
 * message type was handled here, false so the caller can try other
 * message handlers.
 */
async function buildMessages(
  app: FastifyInstance,
  client: WebSocket,
  message: any,
  user: User
) {
  switch (message.type) {
    case CREATE_BUILD: {
      const created = message.build && (await createBuild(app, user, message));
      if (created) {
        app.broadcast(
          CREATE_BUILD,
          { build: created },
          app.buildUserFilter(created._id!)
        );
      }
      return true;
    }
    case DELETE_BUILD:
      message.buildID && (await deleteBuild(app, client, user, message));
      return true;
    case UPDATE_BUILD: {
      const updated =
        message.build && (await updateBuild(app, client, user, message));
      if (updated) {
        app.broadcast(
          UPDATE_BUILD,
          { build: updated },
          app.buildUserFilter(updated._id)
        );
      } else {
        sendAlert(client, "bad", "update not successful");
      }
      return true;
    }
    case PULL_BUILD:
      message.buildID && (await pullRepo(app, client, user, message));
      return true;
    case CLONE_BUILD_REPO:
      if (message.buildID) {
        if (app.buildActionStates.busy(message.buildID)) {
          sendAlert(client, "bad", "build busy, try again in a bit");
          // was `return;` — the message WAS handled (alert sent), so
          // report true like every other handled path instead of a falsy
          // value the caller would treat as unhandled.
          return true;
        }
        const build = await app.builds.findById(message.buildID);
        if (!build) {
          sendAlert(client, "bad", "could not find build");
          return true;
        }
        app.broadcast(
          CLONE_BUILD_REPO,
          {
            buildID: message.buildID,
            complete: false,
          },
          app.buildUserFilter(message.buildID)
        );
        app.buildActionStates.set(message.buildID, "cloning", true);
        // `.catch()` with no handler does not suppress a rejection; swallow
        // the (expected, e.g. dir-missing) removal error explicitly so the
        // clone can proceed.
        await remove(join(BUILD_REPO_PATH, build.pullName!)).catch(() => {});
        if (build.repo) {
          await cloneRepo(app, user, build);
        } else {
          sendAlert(client, "bad", "build has no repo configured");
        }
        app.buildActionStates.set(message.buildID, "cloning", false);
        app.broadcast(
          CLONE_BUILD_REPO,
          {
            buildID: message.buildID,
            complete: true,
          },
          app.buildUserFilter(message.buildID)
        );
      }
      return true;
    case BUILD:
      message.buildID && (await build(app, client, user, message));
      return true;
    default:
      return false;
  }
}
export default buildMessages;

Some files were not shown because too many files have changed in this diff Show More