Compare commits

...

160 Commits

Author SHA1 Message Date
mbecker20
059716f178 deploy 2.0.0-dev-36 2025-10-05 18:23:43 -07:00
mbecker20
0bee1fe2c5 fix: connect and connect insecure are swapped 2025-10-05 18:20:29 -07:00
mbecker20
1e58c1a958 deploy 2.0.0-dev-35 2025-10-05 17:45:16 -07:00
mbecker20
ed1431db0a improve v1 downgrade 2025-10-05 17:43:46 -07:00
mbecker20
dc769ff159 document periphery_public_key 2025-10-05 17:39:11 -07:00
mbecker20
098f23ac4c configure Core -> Periphery insecure_tls 2025-10-05 17:36:07 -07:00
mbecker20
03f577d22f forgiving periphery public key parsing 2025-10-05 17:02:08 -07:00
mbecker20
95ca217362 deploy 2.0.0-dev-34 2025-10-05 16:52:33 -07:00
mbecker20
6d61045764 support KOMODO_PERIPHERY_PUBLIC_KEY 2025-10-05 16:52:00 -07:00
mbecker20
34e075eaf3 periphery support core_tls_insecure_skip_verify 2025-10-05 16:12:04 -07:00
mbecker20
232dc0bb4e deploy 2.0.0-dev-33 2025-10-05 14:59:23 -07:00
mbecker20
0cc0ee2aab load periphery_public_key 2025-10-05 14:58:52 -07:00
mbecker20
edebe925ff add km maintenance tasks aliases 2025-10-05 14:50:20 -07:00
mbecker20
5fd45bbc7b deploy 2.0.0-dev-32 2025-10-05 14:39:09 -07:00
mbecker20
0a490dadb2 rotation maintenance execution doesn't make individual updates 2025-10-05 14:38:45 -07:00
mbecker20
23847c15bc deploy 2.0.0-dev-31 2025-10-05 14:21:18 -07:00
mbecker20
0d238aee4f onboarding create_builder 2025-10-05 14:20:58 -07:00
mbecker20
98ad6cf5fa create update use uppercase 2025-10-05 13:41:42 -07:00
mbecker20
e35b81630b deploy 2.0.0-dev-30 2025-10-05 13:30:47 -07:00
mbecker20
1215852fe4 onboarding set Server tags 2025-10-05 13:24:38 -07:00
mbecker20
4164b76ff5 onboarded server needs to be enabled 2025-10-05 12:36:04 -07:00
mbecker20
26a9daffeb deploy 2.0.0-dev-29 2025-10-05 05:49:29 -07:00
mbecker20
8bb9f16e9b onboarding save copy server selection 2025-10-05 05:47:51 -07:00
mbecker20
b6eaf76497 Include templates in onboarding selector 2025-10-05 05:44:58 -07:00
mbecker20
073893da0e deploy 2.0.0-dev-28 2025-10-05 05:18:48 -07:00
mbecker20
e71547f1c2 configure server onboarding key 2025-10-05 05:17:56 -07:00
mbecker20
1991627990 move periphery public key to Server info (keep it out of resource sync) 2025-10-05 04:18:59 -07:00
mbecker20
3434d827a3 deploy 2.0.0-dev-27 2025-10-05 02:57:58 -07:00
mbecker20
1ef8b9878a rotate all server keys task 2025-10-05 02:57:27 -07:00
mbecker20
07ddaa8377 tweak 2025-10-05 01:41:44 -07:00
mbecker20
142c08cde4 deploy 2.0.0-dev-26 2025-10-05 01:19:21 -07:00
mbecker20
1aa1422faa periphery private key rotation 2025-10-05 01:18:56 -07:00
mbecker20
1394e8a6b1 Rotate Server private keys 2025-10-05 00:54:56 -07:00
mbecker20
420ee10211 tweaks 2025-10-04 23:59:14 -07:00
mbecker20
e918461dc5 refine onboarding key 2025-10-04 23:36:37 -07:00
mbecker20
4dc9ca27be refactor Periphery onboarding 2025-10-04 16:43:02 -07:00
mbecker20
f49b186f2f consolidate periphery docker apis into single mod 2025-10-04 16:17:32 -07:00
mbecker20
6e039b41f1 deploy 2.0.0-dev-25 2025-10-03 17:51:46 -07:00
mbecker20
e7cd77b022 tweaks 2025-10-03 17:06:14 -07:00
mbecker20
556cbd04c7 server onboarding flow using onboarding key 2025-10-03 17:01:58 -07:00
mbecker20
4e3d181466 default documented setup now uses Periphery -> Core setup 2025-10-03 12:55:06 -07:00
mbecker20
5d4326f46f NOT_FOUND if server not found 2025-10-03 03:17:37 -07:00
mbecker20
4bb486ad0a deploy 2.0.0-dev-24 2025-10-03 02:30:20 -07:00
mbecker20
d29c5112d8 Confirm server public key flow 2025-10-03 02:29:53 -07:00
mbecker20
d41315b8a4 don't navigate to /login for network errors 2025-10-03 01:58:23 -07:00
mbecker20
847404388c deploy 2.0.0-dev-23 2025-10-03 00:48:11 -07:00
mbecker20
eef8ec59b8 deploy 2.0.0-dev-22 2025-10-03 00:19:43 -07:00
mbecker20
9eb32f9ff5 store attempted public keys 2025-10-03 00:13:55 -07:00
mbecker20
859bfe67ef Improve Core side connection handling and fix Periphery -> Core error report 2025-10-02 23:03:58 -07:00
mbecker20
21ea469cd4 add login message 2 sec timeout 2025-10-02 16:00:45 -07:00
mbecker20
7fb902b892 deploy 2.0.0-dev-21 2025-10-02 03:12:59 -07:00
mbecker20
c9c4ac47ee fix clippy 2025-10-02 02:34:23 -07:00
mbecker20
f228cd31f3 deploy 2.0.0-dev-20 2025-10-02 02:33:33 -07:00
mbecker20
4feecb4b97 write key pem files by default when not otherwise provided. 2025-10-02 02:32:13 -07:00
mbecker20
e2680d0942 fix deploy 2025-10-01 21:35:27 -07:00
mbecker20
7422c0730d deploy 2.0.0-dev-19 2025-10-01 21:27:59 -07:00
mbecker20
37ac0dc7e3 update deploy 2025-10-01 21:17:43 -07:00
mbecker20
dccaca1df4 make sure not a config file before include as compose file 2025-10-01 20:32:52 -07:00
mbecker20
886aea4c36 deploy 2.0.0-dev-18 2025-10-01 19:48:40 -07:00
mbecker20
cbca070bae load keys from files 2025-10-01 19:41:32 -07:00
mbecker20
b4bdd401f6 fix unneeded base64 prefix 2025-10-01 02:36:26 -07:00
mbecker20
e546166240 use pkcs8 and spki for private / public key encoding, matching openssl 2025-10-01 02:25:41 -07:00
mbecker20
21689ce0ad periphery support same key gen functions 2025-09-29 23:32:47 -07:00
mbecker20
941787db64 slack client 0.5.0 2025-09-29 12:38:39 -07:00
mbecker20
d4b1aacac3 comment out 2025-09-29 02:21:07 -07:00
mbecker20
30f89461bf deploy 2.0.0-dev-17 2025-09-29 00:57:19 -07:00
mbecker20
a42d1397e9 back to bullseye (for max GLIBC compatibility) 2025-09-29 00:56:19 -07:00
mbecker20
b29313c28f deploy 2.0.0-dev-16 2025-09-29 00:47:17 -07:00
mbecker20
08a246a90c bullseye -> trixie 2025-09-29 00:46:51 -07:00
mbecker20
1a08df28d0 docs and config clean up 2025-09-29 00:06:35 -07:00
mbecker20
a226ffc256 fix json config load from interpolated 2025-09-28 23:20:04 -07:00
mbecker20
b385ee5ec3 start on docs update 2025-09-28 22:59:58 -07:00
mbecker20
c78c34357d remove unnecessary connected to core websocket log 2025-09-28 18:14:57 -07:00
mbecker20
4b7c692f00 deploy 2.0.0-dev-15 2025-09-28 18:02:18 -07:00
mbecker20
1ac98a096e bump async timing util to 1.1.0 to support for timelengths 2025-09-28 17:57:12 -07:00
mbecker20
281a2dc1ce first server configuration works with Periphery -> Core 2025-09-28 14:39:11 -07:00
mbecker20
0fe91378a6 tweak key gen output 2025-09-28 14:12:41 -07:00
mbecker20
11e76d1cf2 deploy 2.0.0-dev-14 2025-09-28 13:10:00 -07:00
mbecker20
a3bcd71105 simplify cache refresh with single periphery call 2025-09-28 13:05:45 -07:00
mbecker20
3ecc56dd76 clean up crypto provider install 2025-09-27 21:40:20 -07:00
mbecker20
7239cbb19b remove extra install crypto provider 2025-09-27 19:37:50 -07:00
mbecker20
a0540f7011 deploy 2.0.0-dev-13 2025-09-27 16:54:00 -07:00
mbecker20
37aea7605e gen types 2025-09-27 14:33:14 -07:00
mbecker20
78be913541 fix stuff after main rebase 2025-09-27 14:26:58 -07:00
mbecker20
c34f5ebf49 update config and compose envs 2025-09-27 14:23:49 -07:00
mbecker20
e5822cefb8 clean up socket handling 2025-09-27 14:23:49 -07:00
mbecker20
4baab194cf centralize the terminal stuff 2025-09-27 14:23:49 -07:00
mbecker20
a896583da6 deploy 2.0.0-dev-12 2025-09-27 14:23:49 -07:00
mbecker20
7b2674c38b deploy 2.0.0-dev-11 2025-09-27 14:23:42 -07:00
mbecker20
d1e32989e3 allow any number of simultaneous inbound / outbound connections (to different Cores) 2025-09-27 14:23:36 -07:00
mbecker20
e802bb3882 periphery support multiple core_public_keys 2025-09-27 14:23:36 -07:00
mbecker20
27a38b1bf5 periphery support multiple simultaneous core connections 2025-09-27 14:23:36 -07:00
mbecker20
2bc8a754be clean up passkey login 2025-09-27 14:23:36 -07:00
mbecker20
7a2a54bec1 dev-10 2025-09-27 14:23:36 -07:00
mbecker20
6a15150d59 don't cleanup server type builders 2025-09-27 14:23:31 -07:00
mbecker20
1b1dca76da deploy 2.0.0-dev-9 2025-09-27 14:23:31 -07:00
mbecker20
a032f0f4ff move system info to server cache 2025-09-27 14:23:25 -07:00
mbecker20
2749d49435 Core -> Periphery connection prefers noise handshake if 'core_public_key' is set 2025-09-27 14:23:25 -07:00
mbecker20
d88e42ef2d add specific server passkey support back 2025-09-27 14:23:25 -07:00
mbecker20
a370e7d121 support passkey auth for Core -> Periphery connection to remove the breaking change 2025-09-27 14:23:25 -07:00
mbecker20
d139ad2b3d always fallback to core config 'periphery_public_key' 2025-09-27 14:23:25 -07:00
mbecker20
8d2d180398 deploy 2.0.0-dev-8 2025-09-27 14:22:48 -07:00
mbecker20
37ca4ca986 fix server update hang 2025-09-27 14:22:42 -07:00
mbecker20
33e73b8543 use warn log 2025-09-27 14:22:42 -07:00
mbecker20
cf6e36e90c periphery server avoid auth fail log spam 2025-09-27 14:22:42 -07:00
mbecker20
9eb8b32f4a create and delete connections on demand 2025-09-27 14:22:42 -07:00
mbecker20
b400add6f1 deploy 2.0.0-dev-7 2025-09-27 14:22:41 -07:00
mbecker20
24adb89d25 execute container exec waits a bit for terminal to init before sending command 2025-09-27 14:22:36 -07:00
mbecker20
4674b2badb deploy 2.0.0-dev-6 2025-09-27 14:22:36 -07:00
mbecker20
65d1a69cb9 Mount ExecuteContainerExec periphery api 2025-09-27 14:22:27 -07:00
mbecker20
0da5718991 store connection channels under the connection 2025-09-27 14:22:27 -07:00
mbecker20
6b26cd120c simplify most of periphery client into bin/core 2025-09-27 14:22:27 -07:00
mbecker20
28e1bb19a4 deploy 2.0.0-dev-5 2025-09-27 14:22:27 -07:00
mbecker20
166107ac07 bail_if_not_connected 2025-09-27 14:22:21 -07:00
mbecker20
d77201880f dashboard Active include GlobalAutoUpdate 2025-09-27 14:22:21 -07:00
mbecker20
1d7629e9b2 Update server address description and placeholders 2025-09-27 14:22:21 -07:00
mbecker20
198f690ca5 Got invalid public key: {public_key} 2025-09-27 14:22:21 -07:00
mbecker20
531c79a144 deploy 2.0.0-dev-4 2025-09-27 14:22:21 -07:00
mbecker20
d685862713 improve Core - Periphery auth error messages 2025-09-27 14:22:09 -07:00
mbecker20
af0f245b5b deploy 2.0.0-dev-3 2025-09-27 14:22:09 -07:00
mbecker20
cba36861b7 deploy 2.0.0-dev-2 2025-09-27 14:22:02 -07:00
mbecker20
2c2c1d47b4 dev-2 2025-09-27 14:22:02 -07:00
mbecker20
3a6b997241 Json and JsonPretty formatting 2025-09-27 14:21:54 -07:00
mbecker20
7122f79b9d add -f json option to key utils (for use with jquery etc. 2025-09-27 14:21:54 -07:00
mbecker20
9bcee8122b tweak 2025-09-27 14:21:54 -07:00
mbecker20
a49c98946e add copy pubkeys 2025-09-27 14:21:54 -07:00
mbecker20
7d222a7241 dev-1 2025-09-27 14:21:54 -07:00
mbecker20
33501dac3e fix Core -> Periphery reconnection 2025-09-27 14:21:44 -07:00
mbecker20
4675dfa736 improve the logging 2025-09-27 14:21:44 -07:00
mbecker20
0be51dc784 move core connection handlers into core binary 2025-09-27 14:21:44 -07:00
mbecker20
52453d1320 set default allowed periphery public key 2025-09-27 14:21:44 -07:00
mbecker20
25da97ac1a basic configure auth 2025-09-27 14:21:44 -07:00
mbecker20
02db5a11d3 pipe through core side private / public key handling 2025-09-27 14:21:44 -07:00
mbecker20
89a5272246 rename passkey -> private_key 2025-09-27 14:21:44 -07:00
mbecker20
ae51ea1ad6 Copy core public key 2025-09-27 14:21:44 -07:00
mbecker20
3bdb4bea16 Core includes public key in CoreInfo 2025-09-27 14:21:44 -07:00
mbecker20
677bb14b5d auth forward error 2025-09-27 14:21:44 -07:00
mbecker20
6700700a80 clean up websocket handlers with many params 2025-09-27 14:21:44 -07:00
mbecker20
996d4aa129 standardize server size header identifier extraction 2025-09-27 14:21:44 -07:00
mbecker20
75894a7282 wire through private keys 2025-09-27 14:21:44 -07:00
mbecker20
2a065edcf1 avoid looping periphery client error 2025-09-27 14:21:44 -07:00
mbecker20
6f3703acfb periphery client makes more sense 2025-09-27 14:21:44 -07:00
mbecker20
59e989ecdf noise library and cli key utilities 2025-09-27 14:21:44 -07:00
mbecker20
951ff34a9e abstract websocket handling implementations on both sides 2025-09-27 14:21:12 -07:00
mbecker20
2d83105500 clean up 2025-09-27 14:21:12 -07:00
mbecker20
3d455f5142 implement noise auth basic 2025-09-27 14:21:12 -07:00
mbecker20
01de8c4a9b use standardized websocket wrappers / traits 2025-09-27 14:21:12 -07:00
mbecker20
d5de338561 outbound connection mode working 2025-09-27 14:21:12 -07:00
mbecker20
58c1afb8ef add login draft for transport 2025-09-27 14:21:12 -07:00
mbecker20
230f357b5a everything over ws working 2025-09-27 14:21:12 -07:00
mbecker20
991c95fff0 execute basically working, still need to clear the response channel upon completion 2025-09-27 14:21:12 -07:00
mbecker20
f6243fe6b1 more cleanup 2025-09-27 14:21:12 -07:00
mbecker20
9feeccba6e container terminal over connection 2025-09-27 14:21:12 -07:00
mbecker20
673c7f3a6b multiplex requests + terminal over single WS 2025-09-27 14:21:12 -07:00
mbecker20
39f900d651 standardize and consolidate logic in transport lib 2025-09-27 14:21:12 -07:00
mbecker20
8a06a0d6ce is work 2025-09-27 14:21:12 -07:00
mbecker20
7789ee4f4a prog 2025-09-27 14:21:12 -07:00
mbecker20
0472b6a7f7 fix after 1.19.4 2025-09-27 14:21:12 -07:00
mbecker20
d1d2227d36 prog 2025-09-27 14:21:11 -07:00
mbecker20
cea7c5fc5e prog on ws connect 2025-09-27 14:21:11 -07:00
220 changed files with 11694 additions and 4993 deletions

View File

@@ -3,8 +3,8 @@
"scope": "rust",
"prefix": "resolve",
"body": [
"impl Resolve<${1}, User> for State {",
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
"impl Resolve<${0}> for ${1} {",
"\tasync fn resolve(self, _: &${0}) -> Result<Self::Response, Self::Error> {",
"\t\ttodo!()",
"\t}",
"}"
@@ -15,9 +15,9 @@
"prefix": "static",
"body": [
"fn ${1}() -> &'static ${2} {",
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
"\t${3}.get_or_init(|| {",
"\t\t${0}",
"\tstatic ${0}: OnceLock<${2}> = OnceLock::new();",
"\t${0}.get_or_init(|| {",
"\t\ttodo!()",
"\t})",
"}"
]

926
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -8,7 +8,7 @@ members = [
]
[workspace.package]
version = "1.19.5"
version = "2.0.0-dev-36"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -26,21 +26,23 @@ environment_file = { path = "lib/environment_file" }
environment = { path = "lib/environment" }
interpolate = { path = "lib/interpolate" }
formatting = { path = "lib/formatting" }
transport = { path = "lib/transport" }
database = { path = "lib/database" }
response = { path = "lib/response" }
command = { path = "lib/command" }
config = { path = "lib/config" }
logger = { path = "lib/logger" }
cache = { path = "lib/cache" }
noise = { path = "lib/noise" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.5.1", default-features = false }
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
serror = { version = "0.5.3", default-features = false }
slack = { version = "1.1.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
async_timing_util = "1.0.0"
async_timing_util = "1.1.0"
partial_derive2 = "0.4.3"
derive_variants = "1.0.0"
mongo_indexed = "2.0.2"
@@ -60,63 +62,70 @@ futures-util = "0.3.31"
arc-swap = "1.7.1"
# SERVER
tokio-tungstenite = { version = "0.27.0", features = ["rustls-tls-native-roots"] }
axum-extra = { version = "0.10.1", features = ["typed-header"] }
tokio-tungstenite = { version = "0.28.0", features = ["rustls-tls-native-roots"] }
axum-extra = { version = "0.10.3", features = ["typed-header"] }
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
axum = { version = "0.8.6", features = ["ws", "json", "macros"] }
# SER/DE
ipnetwork = { version = "0.21.1", features = ["serde"] }
indexmap = { version = "2.11.1", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
indexmap = { version = "2.11.4", features = ["serde"] }
serde = { version = "1.0.227", features = ["derive"] }
strum = { version = "0.27.2", features = ["derive"] }
bson = { version = "2.15.0" } # must keep in sync with mongodb version
serde_yaml_ng = "0.10.0"
serde_json = "1.0.145"
serde_qs = "0.15.0"
toml = "0.9.5"
toml = "0.9.7"
url = "2.5.7"
# ERROR
anyhow = "1.0.99"
thiserror = "2.0.16"
anyhow = "1.0.100"
thiserror = "2.0.17"
# LOGGING
opentelemetry-otlp = { version = "0.30.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.30.0", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.31.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.20", features = ["json"] }
opentelemetry-semantic-conventions = "0.30.0"
tracing-opentelemetry = "0.31.0"
opentelemetry = "0.30.0"
opentelemetry-semantic-conventions = "0.31.0"
tracing-opentelemetry = "0.32.0"
opentelemetry = "0.31.0"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.47", features = ["derive"] }
clap = { version = "4.5.48", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "9.3.1", default-features = false }
jsonwebtoken = { version = "9.3.1", default-features = false } # locked back with octorust
rustls = { version = "0.23.32", features = ["aws-lc-rs"] }
pem-rfc7468 = { version = "0.7.0", features = ["alloc"] }
openidconnect = "4.0.1"
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.17.1"
base64 = "0.22.1"
rustls = "0.23.31"
pkcs8 = "0.10.2"
snow = "0.10.0"
hmac = "0.12.1"
sha1 = "0.10.6"
sha2 = "0.10.9"
rand = "0.9.2"
hex = "0.4.3"
spki = "0.7.3"
der = "0.7.10"
# SYSTEM
portable-pty = "0.9.0"
bollard = "0.19.2"
sysinfo = "0.37.0"
bollard = "0.19.3"
sysinfo = "0.37.1"
# CLOUD
aws-config = "1.8.6"
aws-sdk-ec2 = "1.167.0"
aws-sdk-ec2 = "1.170.1"
aws-credential-types = "1.2.6"
## CRON
@@ -126,14 +135,14 @@ chrono = "0.4.42"
croner = "3.0.0"
# MISC
async-compression = { version = "0.4.30", features = ["tokio", "gzip"] }
async-compression = { version = "0.4.32", features = ["tokio", "gzip"] }
derive_builder = "0.20.2"
shell-escape = "0.1.5"
comfy-table = "7.2.1"
typeshare = "1.0.4"
octorust = "0.10.0"
dashmap = "6.1.0"
wildcard = "0.3.0"
colored = "3.0.0"
regex = "1.11.2"
bytes = "1.10.1"
shell-escape = "0.1.5"
regex = "1.11.3"
bytes = "1.10.1"

2
action/build.ts Normal file
View File

@@ -0,0 +1,2 @@
// Entry point: bump the dev version and trigger the "build-komodo" action
// via the shared runner in ./run.ts.
import { run } from "./run.ts";
await run("build-komodo");

5
action/deno.json Normal file
View File

@@ -0,0 +1,5 @@
{
"imports": {
"@std/toml": "jsr:@std/toml"
}
}

2
action/deploy.ts Executable file
View File

@@ -0,0 +1,2 @@
// Entry point: bump the dev version and trigger the "deploy-komodo" action
// via the shared runner in ./run.ts.
import { run } from "./run.ts";
await run("deploy-komodo");

52
action/run.ts Normal file
View File

@@ -0,0 +1,52 @@
import * as TOML from "@std/toml";
// Bumps the dev-release counter in Cargo.toml, commits/pushes the change,
// and kicks off the given Komodo action (e.g. "deploy-komodo") via `km`.
// `action` is the name of the Komodo action resource to run.
export const run = async (action: string) => {
// Current git branch, passed to the action so it builds the right ref.
const branch = await new Deno.Command("bash", {
args: ["-c", "git rev-parse --abbrev-ref HEAD"],
})
.output()
.then((r) => new TextDecoder("utf-8").decode(r.stdout).trim());
const cargo_toml_str = await Deno.readTextFile("Cargo.toml");
// Read workspace.package.version, e.g. "2.0.0-dev-36".
const prev_version = (
TOML.parse(cargo_toml_str) as {
workspace: { package: { version: string } };
}
).workspace.package.version;
// Split "2.0.0-dev-36" into version="2.0.0", tag="dev", count="36".
// NOTE(review): assumes the version always has the `X.Y.Z-tag-N` shape;
// a plain "1.19.5" would make `count` undefined and `next_count` NaN — confirm.
const [version, tag, count] = prev_version.split("-");
const next_count = Number(count) + 1;
const next_version = `${version}-${tag}-${next_count}`;
// Write the bumped version back via literal string replace, preserving
// all other Cargo.toml formatting.
await Deno.writeTextFile(
"Cargo.toml",
cargo_toml_str.replace(
`version = "${prev_version}"`,
`version = "${next_version}"`
)
);
// Cargo check first here to make sure lock file is updated before commit.
const cmd = `
cargo check
echo ""
git add --all
git commit --all --message "deploy ${version}-${tag}-${next_count}"
echo ""
git push
echo ""
km run -y action ${action} "KOMODO_BRANCH=${branch}&KOMODO_VERSION=${version}&KOMODO_TAG=${tag}-${next_count}"
`
// Collapse the multi-line script into a single `&&` chain so any
// failing step aborts the rest; "//" lines act as comments and are dropped.
.split("\n")
.map((line) => line.trim())
.filter((line) => line.length > 0 && !line.startsWith("//"))
.join(" && ");
// Spawned without awaiting: the child process streams its own output and
// outlives this function. NOTE(review): exit status is not checked — confirm
// that failures are acceptable to surface only via the child's stderr.
new Deno.Command("bash", {
args: ["-c", cmd],
}).spawn();
};

View File

@@ -1,7 +1,7 @@
## Builds the Komodo Core, Periphery, and Util binaries
## for a specific architecture.
FROM rust:1.89.0-bullseye AS builder
FROM rust:1.90.0-bullseye AS builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -27,6 +27,6 @@ COPY --from=builder /builder/target/release/core /core
COPY --from=builder /builder/target/release/periphery /periphery
COPY --from=builder /builder/target/release/km /km
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Binaries"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -3,7 +3,7 @@
## Uses chef for dependency caching to help speed up back-to-back builds.
FROM lukemathwalker/cargo-chef:latest-rust-1.89.0-bullseye AS chef
FROM lukemathwalker/cargo-chef:latest-rust-1.90.0-bullseye AS chef
WORKDIR /builder
# Plan just the RECIPE to see if things have changed
@@ -31,6 +31,6 @@ COPY --from=builder /builder/target/release/core /core
COPY --from=builder /builder/target/release/periphery /periphery
COPY --from=builder /builder/target/release/km /km
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Binaries"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -19,6 +19,7 @@ komodo_client.workspace = true
database.workspace = true
config.workspace = true
logger.workspace = true
noise.workspace = true
# external
futures-util.workspace = true
comfy-table.workspace = true

View File

@@ -1,4 +1,4 @@
FROM rust:1.89.0-bullseye AS builder
FROM rust:1.90.0-bullseye AS builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -20,6 +20,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
CMD [ "km" ]
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo CLI"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -24,6 +24,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
CMD [ "km" ]
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo CLI"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -13,6 +13,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
CMD [ "km" ]
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo CLI"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -2,6 +2,7 @@ use std::path::Path;
use anyhow::Context;
use colored::Colorize;
use database::mungos::mongodb::bson::{Document, doc};
use komodo_client::entities::{
config::cli::args::database::DatabaseCommand, optional_string,
};
@@ -21,6 +22,7 @@ pub async fn handle(command: &DatabaseCommand) -> anyhow::Result<()> {
DatabaseCommand::Copy { yes, index, .. } => {
copy(*index, *yes).await
}
DatabaseCommand::V1Downgrade { yes } => v1_downgrade(*yes).await,
}
}
@@ -318,3 +320,45 @@ async fn copy(index: bool, yes: bool) -> anyhow::Result<()> {
database::utils::copy(&source_db, &target_db).await
}
/// Downgrade the database to V1-compatible data structures so the user can
/// roll back to `komodo-core:1`.
///
/// Prints the target database connection details (URI sanitized), waits for
/// interactive confirmation unless `yes` is set, then clears the V2-only
/// `info` field on every document in the `Server` collection.
async fn v1_downgrade(yes: bool) -> anyhow::Result<()> {
let config = cli_config();
println!(
"\n🦎 {} Database {} 🦎",
"Komodo".bold(),
"V1 Downgrade".purple().bold()
);
println!(
"\n{}\n",
" - Downgrade the database to V1 compatible data structures."
.dimmed()
);
// Echo back whichever connection fields are configured; the URI is
// sanitized so credentials are not printed to the terminal.
if let Some(uri) = optional_string(&config.database.uri) {
println!("{}: {}", " - URI".dimmed(), sanitize_uri(&uri));
}
if let Some(address) = optional_string(&config.database.address) {
println!("{}: {address}", " - Address".dimmed());
}
if let Some(username) = optional_string(&config.database.username) {
println!("{}: {username}", " - Username".dimmed());
}
println!(
"{}: {}\n",
" - Db Name".dimmed(),
config.database.db_name,
);
// Destructive operation: require explicit confirmation unless `yes`.
crate::command::wait_for_enter("run downgrade", yes)?;
let db = database::init(&config.database).await?;
// Null out the V2-only `info` field on all Server documents; V1 does not
// understand this field.
db.collection::<Document>("Server")
.update_many(doc! {}, doc! { "$set": { "info": null } })
.await
.context("Failed to downgrade Server schema")?;
info!("V1 Downgrade complete. Ready to downgrade to komodo-core:1 ✅");
Ok(())
}

View File

@@ -230,6 +230,9 @@ pub async fn handle(
Execution::GlobalAutoUpdate(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RotateAllServerKeys(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::Sleep(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -494,6 +497,10 @@ pub async fn handle(
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::RotateAllServerKeys(request) => client
.execute(request)
.await
.map(|u| ExecutionResult::Single(u.into())),
Execution::Sleep(request) => {
let duration =
Duration::from_millis(request.duration_ms as u64);

View File

@@ -794,7 +794,7 @@ impl PrintTable for ResourceListItem<ServerListItemInfo> {
Cell::new(self.info.state.to_string())
.fg(color)
.add_attribute(Attribute::Bold),
Cell::new(self.info.address),
Cell::new(self.info.address.as_deref().unwrap_or("inbound")),
Cell::new(self.tags.join(", ")),
];
if links {

View File

@@ -41,6 +41,12 @@ async fn app() -> anyhow::Result<()> {
}
Ok(())
}
args::Command::Key { command } => {
noise::key::command::handle(command).await
}
args::Command::Database { command } => {
command::database::handle(command).await
}
args::Command::Container(container) => {
command::container::handle(container).await
}
@@ -54,9 +60,6 @@ async fn app() -> anyhow::Result<()> {
args::Command::Update { command } => {
command::update::handle(command).await
}
args::Command::Database { command } => {
command::database::handle(command).await
}
}
}

View File

@@ -20,12 +20,14 @@ periphery_client.workspace = true
environment_file.workspace = true
interpolate.workspace = true
formatting.workspace = true
transport.workspace = true
database.workspace = true
response.workspace = true
command.workspace = true
config.workspace = true
logger.workspace = true
cache.workspace = true
noise.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
@@ -38,7 +40,6 @@ slack.workspace = true
svi.workspace = true
# external
aws-credential-types.workspace = true
tokio-tungstenite.workspace = true
english-to-cron.workspace = true
openidconnect.workspace = true
jsonwebtoken.workspace = true
@@ -70,6 +71,7 @@ chrono.workspace = true
bcrypt.workspace = true
base64.workspace = true
rustls.workspace = true
bytes.workspace = true
tokio.workspace = true
serde.workspace = true
regex.workspace = true
@@ -81,3 +83,4 @@ rand.workspace = true
hmac.workspace = true
sha2.workspace = true
hex.workspace = true
url.workspace = true

View File

@@ -1,7 +1,7 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.89.0-bullseye AS core-builder
FROM rust:1.90.0-bullseye AS core-builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -58,6 +58,6 @@ ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
CMD [ "core" ]
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -54,6 +54,6 @@ ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
CMD [ "core" ]
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -43,6 +43,6 @@ ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
CMD [ "core" ]
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -1,4 +1,3 @@
use ::slack::types::Block;
use anyhow::{Context, anyhow};
use database::mungos::{find::find_collect, mongodb::bson::doc};
use derive_variants::ExtractVariant;

View File

@@ -1,3 +1,5 @@
use ::slack::types::OwnedBlock as Block;
use super::*;
#[instrument(level = "debug")]
@@ -477,17 +479,20 @@ pub async fn send_alert(
interpolator.interpolate_string(&mut url_interpolated)?;
let slack = ::slack::Client::new(url_interpolated);
slack.send_message(text, blocks).await.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})?;
slack
.send_owned_message_single(&text, blocks.as_deref())
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})?;
}
Ok(())
}

View File

@@ -249,7 +249,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(cleanup_data, &mut update)
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update, build.id, build.name, true).await
@@ -298,7 +298,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
_ = cancel.cancelled() => {
info!("build cancelled during build, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(cleanup_data, &mut update)
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update, build.id, build.name, true).await
},
@@ -344,7 +344,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
// If building on temporary cloud server (AWS),
// this will terminate the server.
cleanup_builder_instance(cleanup_data, &mut update).await;
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.

View File

@@ -203,7 +203,8 @@ impl Resolve<ExecuteArgs> for Deploy {
update.version = version;
update_update(update.clone()).await?;
match periphery_client(&server)?
match periphery_client(&server)
.await?
.request(api::container::Deploy {
deployment,
stop_signal: self.stop_signal,
@@ -331,8 +332,9 @@ pub async fn pull_deployment_inner(
}
let res = async {
let log = match periphery_client(server)?
.request(api::image::PullImage {
let log = match periphery_client(server)
.await?
.request(api::docker::PullImage {
name: image,
account,
token,
@@ -414,7 +416,8 @@ impl Resolve<ExecuteArgs> for StartDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::StartContainer {
name: deployment.name,
})
@@ -461,7 +464,8 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::RestartContainer {
name: deployment.name,
})
@@ -510,7 +514,8 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::PauseContainer {
name: deployment.name,
})
@@ -557,7 +562,8 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::UnpauseContainer {
name: deployment.name,
})
@@ -606,7 +612,8 @@ impl Resolve<ExecuteArgs> for StopDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::StopContainer {
name: deployment.name,
signal: self
@@ -688,7 +695,8 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::container::RemoveContainer {
name: deployment.name,
signal: self

View File

@@ -1,12 +1,14 @@
use std::sync::OnceLock;
use std::{fmt::Write as _, sync::OnceLock};
use anyhow::{Context, anyhow};
use command::run_komodo_command;
use database::mungos::{find::find_collect, mongodb::bson::doc};
use formatting::{bold, format_serror};
use futures::StreamExt;
use komodo_client::{
api::execute::{
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
RotateAllServerKeys,
},
entities::{
deployment::DeploymentState, server::ServerState,
@@ -24,6 +26,7 @@ use crate::{
},
config::core_config,
helpers::update::update_update,
resource::rotate_server_keys,
state::{
db_client, deployment_status_cache, server_status_cache,
stack_status_cache,
@@ -317,3 +320,98 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
Ok(update)
}
}
//
/// Makes sure the method can only be called once at a time
fn global_rotate_lock() -> &'static Mutex<()> {
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
LOCK.get_or_init(Default::default)
}
impl Resolve<ExecuteArgs> for RotateAllServerKeys {
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
anyhow!("This method is admin only.")
.status_code(StatusCode::FORBIDDEN),
);
}
let _lock = global_rotate_lock()
.try_lock()
.context("Rotate All Server Keys already in progress...")?;
let mut update = update.clone();
update_update(update.clone()).await?;
let mut servers = db_client()
.servers
.find(doc! { "config.enabled": true })
.await
.context("Failed to query servers from database")?;
let server_status_cache = server_status_cache();
let mut log = String::new();
while let Some(server) = servers.next().await {
let server = match server {
Ok(server) => server,
Err(e) => {
warn!("Failed to parse Server | {e:#}");
continue;
}
};
let Some(status) = server_status_cache.get(&server.id).await
else {
let _ = write!(
&mut log,
"\nSkipping {}: No Status ⚠️",
bold(&server.name)
);
continue;
};
if !matches!(status.state, ServerState::Ok) {
let _ = write!(
&mut log,
"\nSkipping {}: {} ⚠️",
bold(&server.name),
status.state
);
continue;
}
match rotate_server_keys(&server).await {
Ok(_) => {
let _ = write!(
&mut log,
"\nRotated keys for {} ✅",
bold(&server.name)
);
}
Err(e) => {
update.push_error_log(
"Key Rotation Failure",
format_serror(
&e.context(format!(
"Failed to rotate {} keys",
bold(&server.name)
))
.into(),
),
);
}
}
}
update.push_simple_log("Rotate Server Keys", log);
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -149,6 +149,7 @@ pub enum ExecuteRequest {
ClearRepoCache(ClearRepoCache),
BackupCoreDatabase(BackupCoreDatabase),
GlobalAutoUpdate(GlobalAutoUpdate),
RotateAllServerKeys(RotateAllServerKeys),
}
pub fn router() -> Router {

View File

@@ -105,7 +105,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
@@ -220,7 +220,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
@@ -463,7 +463,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(cleanup_data, &mut update)
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_builder_early_return(update, repo.id, repo.name, true).await
@@ -510,7 +510,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
// If building on temporary cloud server (AWS),
// this will terminate the server.
cleanup_builder_instance(cleanup_data, &mut update).await;
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.

View File

@@ -50,7 +50,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::StartContainer {
@@ -104,7 +104,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::RestartContainer {
@@ -160,7 +160,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::PauseContainer {
@@ -214,7 +214,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::UnpauseContainer {
@@ -270,7 +270,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::StopContainer {
@@ -332,7 +332,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::RemoveContainer {
@@ -387,7 +387,8 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::StartAllContainers {})
.await
.context("failed to start all containers on host")?;
@@ -437,7 +438,8 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::RestartAllContainers {})
.await
.context("failed to restart all containers on host")?;
@@ -489,7 +491,8 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::PauseAllContainers {})
.await
.context("failed to pause all containers on host")?;
@@ -539,7 +542,8 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::UnpauseAllContainers {})
.await
.context("failed to unpause all containers on host")?;
@@ -591,7 +595,8 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
let logs = periphery_client(&server)
.await?
.request(api::container::StopAllContainers {})
.await
.context("failed to stop all containers on host")?;
@@ -641,7 +646,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::container::PruneContainers {})
@@ -686,10 +691,10 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::network::DeleteNetwork {
.request(api::docker::DeleteNetwork {
name: self.name.clone(),
})
.await
@@ -748,10 +753,10 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::network::PruneNetworks {})
.request(api::docker::PruneNetworks {})
.await
.context(format!(
"failed to prune networks on server {}",
@@ -791,10 +796,10 @@ impl Resolve<ExecuteArgs> for DeleteImage {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::image::DeleteImage {
.request(api::docker::DeleteImage {
name: self.name.clone(),
})
.await
@@ -850,10 +855,10 @@ impl Resolve<ExecuteArgs> for PruneImages {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::image::PruneImages {}).await {
match periphery.request(api::docker::PruneImages {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune images",
@@ -891,10 +896,10 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery
.request(api::volume::DeleteVolume {
.request(api::docker::DeleteVolume {
name: self.name.clone(),
})
.await
@@ -953,10 +958,10 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::volume::PruneVolumes {}).await {
match periphery.request(api::docker::PruneVolumes {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune volumes",
@@ -1005,7 +1010,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::build::PruneBuilders {}).await {
@@ -1057,7 +1062,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log =
match periphery.request(api::build::PruneBuildx {}).await {
@@ -1109,7 +1114,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let log = match periphery.request(api::PruneSystem {}).await {
Ok(log) => log,

View File

@@ -155,7 +155,8 @@ impl Resolve<ExecuteArgs> for DeployStack {
compose_config,
commit_hash,
commit_message,
} = periphery_client(&server)?
} = periphery_client(&server)
.await?
.request(ComposeUp {
stack: stack.clone(),
services: self.services,
@@ -749,7 +750,8 @@ pub async fn pull_stack_inner(
Default::default()
};
let res = periphery_client(server)?
let res = periphery_client(server)
.await?
.request(ComposePull {
stack,
services,
@@ -1022,7 +1024,8 @@ impl Resolve<ExecuteArgs> for RunStackService {
Default::default()
};
let log = periphery_client(&server)?
let log = periphery_client(&server)
.await?
.request(ComposeRun {
stack,
repo,

View File

@@ -145,7 +145,8 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
return Ok(Log::default());
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerLog {
name,
tail: cmp::min(tail, MAX_LOG_LENGTH),
@@ -183,7 +184,8 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
return Ok(Log::default());
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerLogSearch {
name,
terms,
@@ -234,7 +236,8 @@ impl Resolve<ReadArgs> for InspectDeploymentContainer {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectContainer { name })
.await?;
Ok(res)
@@ -262,7 +265,8 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
);
}
let server = resource::get::<Server>(&server_id).await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(api::container::GetContainerStats { name })
.await
.context("failed to get stats from periphery")?;
@@ -321,7 +325,9 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
res.not_deployed += 1;
}
DeploymentState::Unknown => {
res.unknown += 1;
if !deployment.template {
res.unknown += 1;
}
}
_ => {
res.unhealthy += 1;

View File

@@ -27,7 +27,9 @@ use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::auth_request, config::core_config, helpers::periphery_client,
auth::auth_request,
config::{core_config, core_public_key},
helpers::periphery_client,
resource,
};
@@ -39,6 +41,7 @@ mod alerter;
mod build;
mod builder;
mod deployment;
mod onboarding_key;
mod permission;
mod procedure;
mod provider;
@@ -106,27 +109,29 @@ enum ReadRequest {
GetServersSummary(GetServersSummary),
GetServer(GetServer),
GetServerState(GetServerState),
GetPeripheryVersion(GetPeripheryVersion),
GetPeripheryInformation(GetPeripheryInformation),
GetServerActionState(GetServerActionState),
GetHistoricalServerStats(GetHistoricalServerStats),
ListServers(ListServers),
ListFullServers(ListFullServers),
ListTerminals(ListTerminals),
// ==== DOCKER ====
GetDockerContainersSummary(GetDockerContainersSummary),
ListAllDockerContainers(ListAllDockerContainers),
ListDockerContainers(ListDockerContainers),
InspectDockerContainer(InspectDockerContainer),
GetResourceMatchingContainer(GetResourceMatchingContainer),
GetContainerLog(GetContainerLog),
SearchContainerLog(SearchContainerLog),
ListComposeProjects(ListComposeProjects),
ListDockerNetworks(ListDockerNetworks),
InspectDockerNetwork(InspectDockerNetwork),
ListDockerImages(ListDockerImages),
InspectDockerImage(InspectDockerImage),
ListDockerImageHistory(ListDockerImageHistory),
InspectDockerVolume(InspectDockerVolume),
GetDockerContainersSummary(GetDockerContainersSummary),
ListAllDockerContainers(ListAllDockerContainers),
ListDockerContainers(ListDockerContainers),
ListDockerNetworks(ListDockerNetworks),
ListDockerImages(ListDockerImages),
ListDockerVolumes(ListDockerVolumes),
ListComposeProjects(ListComposeProjects),
ListTerminals(ListTerminals),
InspectDockerVolume(InspectDockerVolume),
// ==== SERVER STATS ====
GetSystemInformation(GetSystemInformation),
@@ -224,6 +229,9 @@ enum ReadRequest {
ListGitProviderAccounts(ListGitProviderAccounts),
GetDockerRegistryAccount(GetDockerRegistryAccount),
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
// ==== ONBOARDING KEY ====
ListOnboardingKeys(ListOnboardingKeys),
}
pub fn router() -> Router {
@@ -298,6 +306,7 @@ fn core_info() -> &'static GetCoreInfoResponse {
.map(|i| i.namespace.to_string())
.collect(),
timezone: config.timezone.clone(),
public_key: core_public_key().to_string(),
}
})
}
@@ -343,7 +352,8 @@ impl Resolve<ReadArgs> for ListSecrets {
};
if let Some(id) = server_id {
let server = resource::get::<Server>(&id).await?;
let more = periphery_client(&server)?
let more = periphery_client(&server)
.await?
.request(periphery_client::api::ListSecrets {})
.await
.with_context(|| {
@@ -515,7 +525,8 @@ async fn merge_git_providers_for_server(
server_id: &str,
) -> serror::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
let more = periphery_client(&server)
.await?
.request(periphery_client::api::ListGitProviders {})
.await
.with_context(|| {
@@ -553,7 +564,8 @@ async fn merge_docker_registries_for_server(
server_id: &str,
) -> serror::Result<()> {
let server = resource::get::<Server>(server_id).await?;
let more = periphery_client(&server)?
let more = periphery_client(&server)
.await?
.request(periphery_client::api::ListDockerRegistries {})
.await
.with_context(|| {

View File

@@ -0,0 +1,30 @@
use anyhow::{Context, anyhow};
use database::mungos::find::find_collect;
use komodo_client::api::read::{
ListOnboardingKeys, ListOnboardingKeysResponse,
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{api::read::ReadArgs, state::db_client};
//
impl Resolve<ReadArgs> for ListOnboardingKeys {
async fn resolve(
self,
ReadArgs { user: admin }: &ReadArgs,
) -> serror::Result<ListOnboardingKeysResponse> {
if !admin.admin {
return Err(
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
find_collect(&db_client().onboarding_keys, None, None)
.await
.context("Failed to query database for Server onboarding keys")
.map_err(Into::into)
}
}

View File

@@ -142,7 +142,11 @@ impl Resolve<ReadArgs> for GetReposSummary {
}
(RepoState::Ok, _) => res.ok += 1,
(RepoState::Failed, _) => res.failed += 1,
(RepoState::Unknown, _) => res.unknown += 1,
(RepoState::Unknown, _) => {
if !repo.template {
res.unknown += 1
}
}
// will never come off the cache in the building state, since that comes from action states
(RepoState::Cloning, _)
| (RepoState::Pulling, _)

View File

@@ -39,18 +39,17 @@ use komodo_client::{
use periphery_client::api::{
self as periphery,
container::InspectContainer,
image::{ImageHistory, InspectImage},
network::InspectNetwork,
volume::InspectVolume,
docker::{
ImageHistory, InspectImage, InspectNetwork, InspectVolume,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use tokio::sync::Mutex;
use crate::{
helpers::{
periphery_client,
query::{get_all_tags, get_system_info},
},
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
resource,
stack::compose_container_match_regex,
@@ -80,11 +79,8 @@ impl Resolve<ReadArgs> for GetServersSummary {
match server.info.state {
ServerState::Ok => {
// Check for version mismatch
let has_version_mismatch = !server.info.version.is_empty()
&& server.info.version != "Unknown"
&& server.info.version != core_version;
if has_version_mismatch {
if matches!(&server.info.version, Some(version) if version != core_version)
{
res.warning += 1;
} else {
res.healthy += 1;
@@ -94,7 +90,9 @@ impl Resolve<ReadArgs> for GetServersSummary {
res.unhealthy += 1;
}
ServerState::Disabled => {
res.disabled += 1;
if !server.template {
res.disabled += 1;
}
}
}
}
@@ -102,26 +100,6 @@ impl Resolve<ReadArgs> for GetServersSummary {
}
}
impl Resolve<ReadArgs> for GetPeripheryVersion {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetPeripheryVersionResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
)
.await?;
let version = server_status_cache()
.get(&server.id)
.await
.map(|s| s.version.clone())
.unwrap_or(String::from("unknown"));
Ok(GetPeripheryVersionResponse { version })
}
}
impl Resolve<ReadArgs> for GetServer {
async fn resolve(
self,
@@ -225,6 +203,29 @@ impl Resolve<ReadArgs> for GetServerActionState {
}
}
impl Resolve<ReadArgs> for GetPeripheryInformation {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetPeripheryInformationResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
)
.await?;
server_status_cache()
.get(&server.id)
.await
.context("Missing server status")?
.periphery_info
.as_ref()
.cloned()
.context("Server status missing Periphery Info. The Server may be disconnected.")
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
}
}
impl Resolve<ReadArgs> for GetSystemInformation {
async fn resolve(
self,
@@ -235,8 +236,17 @@ impl Resolve<ReadArgs> for GetSystemInformation {
user,
PermissionLevel::Read.into(),
)
.await?;
get_system_info(&server).await.map_err(Into::into)
.await
.status_code(StatusCode::BAD_REQUEST)?;
server_status_cache()
.get(&server.id)
.await
.context("Missing server status")?
.system_info
.as_ref()
.cloned()
.context("Server status missing system Info. The Server may be disconnected.")
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
}
}
@@ -251,15 +261,15 @@ impl Resolve<ReadArgs> for GetSystemStats {
PermissionLevel::Read.into(),
)
.await?;
let status =
server_status_cache().get(&server.id).await.with_context(
|| format!("did not find status for server at {}", server.id),
)?;
let stats = status
.stats
server_status_cache()
.get(&server.id)
.await
.context("Missing server status")?
.system_stats
.as_ref()
.context("server stats not available")?;
Ok(stats.clone())
.cloned()
.context("Server status missing system stats. The Server may be disconnected.")
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
}
}
@@ -289,7 +299,8 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
cached.0.clone()
}
_ => {
let stats = periphery_client(&server)?
let stats = periphery_client(&server)
.await?
.request(periphery::stats::GetSystemProcesses {})
.await?;
lock.insert(
@@ -478,7 +489,8 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectContainer {
name: self.container,
})
@@ -506,7 +518,8 @@ impl Resolve<ReadArgs> for GetContainerLog {
PermissionLevel::Read.logs(),
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(periphery::container::GetContainerLog {
name: container,
tail: cmp::min(tail, MAX_LOG_LENGTH),
@@ -537,7 +550,8 @@ impl Resolve<ReadArgs> for SearchContainerLog {
PermissionLevel::Read.logs(),
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(periphery::container::GetContainerLogSearch {
name: container,
terms,
@@ -657,7 +671,8 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectNetwork { name: self.network })
.await?;
Ok(res)
@@ -706,7 +721,8 @@ impl Resolve<ReadArgs> for InspectDockerImage {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectImage { name: self.image })
.await?;
Ok(res)
@@ -736,7 +752,8 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(ImageHistory { name: self.image })
.await?;
Ok(res)
@@ -785,7 +802,8 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
.into(),
);
}
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectVolume { name: self.volume })
.await?;
Ok(res)
@@ -865,7 +883,8 @@ impl Resolve<ReadArgs> for ListTerminals {
let cache = terminals_cache().get_or_insert(server.id.clone());
let mut cache = cache.lock().await;
if self.fresh || komodo_timestamp() > cache.ttl {
cache.list = periphery_client(&server)?
cache.list = periphery_client(&server)
.await?
.request(periphery_client::api::terminal::ListTerminals {})
.await
.context("Failed to get fresh terminal list")?;

View File

@@ -89,7 +89,8 @@ impl Resolve<ReadArgs> for GetStackLog {
true,
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(GetComposeLog {
project: stack.project_name(false),
services,
@@ -122,7 +123,8 @@ impl Resolve<ReadArgs> for SearchStackLog {
true,
)
.await?;
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(GetComposeLogSearch {
project: stack.project_name(false),
services,
@@ -184,7 +186,8 @@ impl Resolve<ReadArgs> for InspectStackContainer {
"No service found matching '{service}'. Was the stack last deployed manually?"
).into());
};
let res = periphery_client(&server)?
let res = periphery_client(&server)
.await?
.request(InspectContainer { name })
.await?;
Ok(res)
@@ -363,7 +366,11 @@ impl Resolve<ReadArgs> for GetStacksSummary {
StackState::Running => res.running += 1,
StackState::Stopped | StackState::Paused => res.stopped += 1,
StackState::Down => res.down += 1,
StackState::Unknown => res.unknown += 1,
StackState::Unknown => {
if !stack.template {
res.unknown += 1
}
}
_ => res.unhealthy += 1,
}
}

View File

@@ -54,34 +54,20 @@ async fn execute_terminal_inner(
) -> serror::Result<axum::body::Body> {
info!("/terminal/execute request | user: {}", user.username);
let res = async {
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server)?;
let stream = periphery_client(&server)
.await?
.execute_terminal(terminal, command)
.await
.context("Failed to execute command on periphery")?;
let stream = periphery
.execute_terminal(terminal, command)
.await
.context("Failed to execute command on periphery")?;
anyhow::Ok(stream)
}
.await;
let stream = match res {
Ok(stream) => stream,
Err(e) => {
warn!("/terminal/execute request {req_id} error: {e:#}");
return Err(e.into());
}
};
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
Ok(axum::body::Body::from_stream(stream))
}
// ======================
@@ -112,43 +98,25 @@ async fn execute_container_exec_inner(
}: ExecuteContainerExecBody,
user: User,
) -> serror::Result<axum::body::Body> {
info!(
"/terminal/execute/container request | user: {}",
user.username
);
info!("ExecuteContainerExec request | user: {}", user.username);
let res = async {
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(container, shell, command)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
let stream = periphery
.execute_container_exec(container, shell, command)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
anyhow::Ok(stream)
}
.await;
let stream = match res {
Ok(stream) => stream,
Err(e) => {
warn!(
"/terminal/execute/container request {req_id} error: {e:#}"
);
return Err(e.into());
}
};
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
Ok(axum::body::Body::from_stream(stream))
}
// =======================
@@ -178,45 +146,27 @@ async fn execute_deployment_exec_inner(
}: ExecuteDeploymentExecBody,
user: User,
) -> serror::Result<axum::body::Body> {
info!(
"/terminal/execute/deployment request | user: {}",
user.username
);
info!("ExecuteDeploymentExec request | user: {}", user.username);
let res = async {
let deployment = get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let deployment = get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get::<Server>(&deployment.config.server_id).await?;
let server = get::<Server>(&deployment.config.server_id).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(deployment.name, shell, command)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
let stream = periphery
.execute_container_exec(deployment.name, shell, command)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
anyhow::Ok(stream)
}
.await;
let stream = match res {
Ok(stream) => stream,
Err(e) => {
warn!(
"/terminal/execute/deployment request {req_id} error: {e:#}"
);
return Err(e.into());
}
};
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
Ok(axum::body::Body::from_stream(stream))
}
// ==================
@@ -247,53 +197,40 @@ async fn execute_stack_exec_inner(
}: ExecuteStackExecBody,
user: User,
) -> serror::Result<axum::body::Body> {
info!("/terminal/execute/stack request | user: {}", user.username);
info!("ExecuteStackExec request | user: {}", user.username);
let res = async {
let stack = get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let stack = get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get::<Server>(&stack.config.server_id).await?;
let server = get::<Server>(&stack.config.server_id).await?;
let container = stack_status_cache()
.get(&stack.id)
.await
.context("could not get stack status")?
.curr
.services
.iter()
.find(|s| s.service == service)
.context("could not find service")?
.container
.as_ref()
.context("could not find service container")?
.name
.clone();
let container = stack_status_cache()
.get(&stack.id)
.await
.context("could not get stack status")?
.curr
.services
.iter()
.find(|s| s.service == service)
.context("could not find service")?
.container
.as_ref()
.context("could not find service container")?
.name
.clone();
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(container, shell, command)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
let stream = periphery
.execute_container_exec(container, shell, command)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
anyhow::Ok(stream)
}
.await;
let stream = match res {
Ok(stream) => stream,
Err(e) => {
warn!("/terminal/execute/stack request {req_id} error: {e:#}");
return Err(e.into());
}
};
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
Ok(axum::body::Body::from_stream(stream))
}

View File

@@ -16,7 +16,8 @@ impl Resolve<WriteArgs> for CreateAction {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Action> {
resource::create::<Action>(&self.name, self.config, user).await
resource::create::<Action>(&self.name, self.config, None, user)
.await
}
}
@@ -32,7 +33,8 @@ impl Resolve<WriteArgs> for CopyAction {
PermissionLevel::Write.into(),
)
.await?;
resource::create::<Action>(&self.name, config.into(), user).await
resource::create::<Action>(&self.name, config.into(), None, user)
.await
}
}

View File

@@ -16,7 +16,8 @@ impl Resolve<WriteArgs> for CreateAlerter {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Alerter> {
resource::create::<Alerter>(&self.name, self.config, user).await
resource::create::<Alerter>(&self.name, self.config, None, user)
.await
}
}
@@ -32,7 +33,8 @@ impl Resolve<WriteArgs> for CopyAlerter {
PermissionLevel::Write.into(),
)
.await?;
resource::create::<Alerter>(&self.name, config.into(), user).await
resource::create::<Alerter>(&self.name, config.into(), None, user)
.await
}
}

View File

@@ -1,8 +1,10 @@
use std::{path::PathBuf, str::FromStr, time::Duration};
use anyhow::{Context, anyhow};
use database::mongo_indexed::doc;
use database::mungos::mongodb::bson::to_document;
use database::{
mongo_indexed::doc, mungos::mongodb::bson::oid::ObjectId,
};
use formatting::format_serror;
use komodo_client::{
api::write::*,
@@ -21,22 +23,21 @@ use komodo_client::{
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::{
PeripheryClient,
api::build::{
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
},
use periphery_client::api::build::{
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
config::core_config,
connection::PeripheryConnectionArgs,
helpers::{
git_token, periphery_client,
query::get_server_with_state,
update::{add_update, make_update},
},
periphery::PeripheryClient,
permission::get_check_permissions,
resource,
state::{db_client, github_client},
@@ -50,7 +51,8 @@ impl Resolve<WriteArgs> for CreateBuild {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Build> {
resource::create::<Build>(&self.name, self.config, user).await
resource::create::<Build>(&self.name, self.config, None, user)
.await
}
}
@@ -68,7 +70,8 @@ impl Resolve<WriteArgs> for CopyBuild {
.await?;
// reset version to 0.0.0
config.version = Default::default();
resource::create::<Build>(&self.name, config.into(), user).await
resource::create::<Build>(&self.name, config.into(), None, user)
.await
}
}
@@ -428,13 +431,27 @@ async fn get_on_host_periphery(
Err(anyhow!("Files on host doesn't work with AWS builder"))
}
BuilderConfig::Url(config) => {
// TODO: Ensure connection is actually established.
// Builder id no good because it may be active for multiple connections.
let periphery = PeripheryClient::new(
config.address,
config.passkey,
Duration::from_secs(3),
);
periphery.health_check().await?;
Ok(periphery)
PeripheryConnectionArgs::from_url_builder(
&ObjectId::new().to_hex(),
&config,
),
config.insecure_tls,
&config.passkey,
)
.await?;
// Poll for the connection to be established
let mut err = None;
for _ in 0..10 {
tokio::time::sleep(Duration::from_secs(1)).await;
match periphery.health_check().await {
Ok(_) => return Ok(periphery),
Err(e) => err = Some(e),
};
}
Err(err.context("Missing error")?)
}
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
@@ -449,7 +466,7 @@ async fn get_on_host_periphery(
"Builder server is disabled or not reachable"
));
};
periphery_client(&server)
periphery_client(&server).await
}
}
}

View File

@@ -16,7 +16,8 @@ impl Resolve<WriteArgs> for CreateBuilder {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Builder> {
resource::create::<Builder>(&self.name, self.config, user).await
resource::create::<Builder>(&self.name, self.config, None, user)
.await
}
}
@@ -32,7 +33,8 @@ impl Resolve<WriteArgs> for CopyBuilder {
PermissionLevel::Write.into(),
)
.await?;
resource::create::<Builder>(&self.name, config.into(), user).await
resource::create::<Builder>(&self.name, config.into(), None, user)
.await
}
}

View File

@@ -38,8 +38,13 @@ impl Resolve<WriteArgs> for CreateDeployment {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
resource::create::<Deployment>(&self.name, self.config, user)
.await
resource::create::<Deployment>(
&self.name,
self.config,
None,
user,
)
.await
}
}
@@ -56,8 +61,13 @@ impl Resolve<WriteArgs> for CopyDeployment {
PermissionLevel::Read.into(),
)
.await?;
resource::create::<Deployment>(&self.name, config.into(), user)
.await
resource::create::<Deployment>(
&self.name,
config.into(),
None,
user,
)
.await
}
}
@@ -85,7 +95,8 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
.into(),
);
}
let container = periphery_client(&server)?
let container = periphery_client(&server)
.await?
.request(InspectContainer {
name: self.name.clone(),
})
@@ -149,7 +160,8 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
});
}
resource::create::<Deployment>(&self.name, config, user).await
resource::create::<Deployment>(&self.name, config, None, user)
.await
}
}
@@ -231,7 +243,8 @@ impl Resolve<WriteArgs> for RenameDeployment {
if container_state != DeploymentState::NotDeployed {
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
let log = periphery_client(&server)?
let log = periphery_client(&server)
.await?
.request(api::container::RenameContainer {
curr_name: deployment.name.clone(),
new_name: name.clone(),

View File

@@ -23,6 +23,7 @@ mod alerter;
mod build;
mod builder;
mod deployment;
mod onboarding_key;
mod permissions;
mod procedure;
mod provider;
@@ -91,6 +92,8 @@ pub enum WriteRequest {
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
UpdateServerPublicKey(UpdateServerPublicKey),
RotateServerKeys(RotateServerKeys),
// ==== STACK ====
CreateStack(CreateStack),
@@ -185,13 +188,18 @@ pub enum WriteRequest {
UpdateVariableIsSecret(UpdateVariableIsSecret),
DeleteVariable(DeleteVariable),
// ==== PROVIDERS ====
// ==== PROVIDER ====
CreateGitProviderAccount(CreateGitProviderAccount),
UpdateGitProviderAccount(UpdateGitProviderAccount),
DeleteGitProviderAccount(DeleteGitProviderAccount),
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
// ==== ONBOARDING KEY ====
CreateOnboardingKey(CreateOnboardingKey),
UpdateOnboardingKey(UpdateOnboardingKey),
DeleteOnboardingKey(DeleteOnboardingKey),
}
pub fn router() -> Router {

View File

@@ -0,0 +1,168 @@
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::{Document, doc};
use komodo_client::{
api::write::{
CreateOnboardingKey, CreateOnboardingKeyResponse,
DeleteOnboardingKey, DeleteOnboardingKeyResponse,
UpdateOnboardingKey, UpdateOnboardingKeyResponse,
},
entities::{komodo_timestamp, onboarding_key::OnboardingKey},
};
use noise::key::EncodedKeyPair;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::{AddStatusCode, AddStatusCodeError};
use crate::{api::write::WriteArgs, state::db_client};
//
impl Resolve<WriteArgs> for CreateOnboardingKey {
  #[instrument(name = "CreateServerOnboardingKey", skip(self, admin))]
  async fn resolve(
    self,
    WriteArgs { user: admin }: &WriteArgs,
  ) -> serror::Result<CreateOnboardingKeyResponse> {
    // Admin-only endpoint.
    if !admin.admin {
      return Err(
        anyhow!("This call is admin only")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // Use the caller-provided private key when given,
    // otherwise generate a fresh key pair.
    let keys = if let Some(private_key) = self.private_key {
      EncodedKeyPair::from_private_key(&private_key)?
    } else {
      EncodedKeyPair::generate()?
    };
    // Only the public key is persisted; the private key is
    // returned once in the response below and never stored.
    let onboarding_key = OnboardingKey {
      public_key: keys.public.into_inner(),
      name: self.name,
      enabled: true,
      onboarded: Default::default(),
      created_at: komodo_timestamp(),
      expires: self.expires,
      tags: self.tags,
      copy_server: self.copy_server,
      create_builder: self.create_builder,
    };
    let db = db_client();
    // Create the key
    db.onboarding_keys
      .insert_one(&onboarding_key)
      .await
      .context(
        "Failed to create Server onboarding key on database",
      )?;
    // Read the document back (keyed by public key) so the
    // response reflects exactly what was persisted.
    let created = db
      .onboarding_keys
      .find_one(doc! { "public_key": &onboarding_key.public_key })
      .await
      .context("Failed to query database for Server onboarding keys")?
      .context(
        "No Server onboarding key found on database after create",
      )?;
    Ok(CreateOnboardingKeyResponse {
      private_key: keys.private.into_inner(),
      created,
    })
  }
}
//
impl Resolve<WriteArgs> for UpdateOnboardingKey {
  async fn resolve(
    self,
    WriteArgs { user: admin }: &WriteArgs,
  ) -> serror::Result<UpdateOnboardingKeyResponse> {
    // Admin-only endpoint.
    if !admin.admin {
      return Err(
        anyhow!("This call is admin only")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    // Onboarding keys are addressed by their public key.
    let query = doc! { "public_key": &self.public_key };
    // No changes
    // (self.is_none() presumably true when every optional field is
    // unset — TODO confirm; then this is a plain read returning
    // the current document, 404 when missing.)
    if self.is_none() {
      return db_client()
        .onboarding_keys
        .find_one(query)
        .await
        .context("Failed to query database for onboarding key")?
        .context("No matching onboarding key found")
        .status_code(StatusCode::NOT_FOUND);
    }
    // Build a partial $set document from only the provided fields,
    // so unset fields are left untouched on the stored document.
    let mut update = Document::new();
    if let Some(enabled) = self.enabled {
      update.insert("enabled", enabled);
    }
    if let Some(name) = self.name {
      update.insert("name", name);
    }
    if let Some(expires) = self.expires {
      update.insert("expires", expires);
    }
    if let Some(tags) = self.tags {
      update.insert("tags", tags);
    }
    if let Some(copy_server) = self.copy_server {
      update.insert("copy_server", copy_server);
    }
    if let Some(create_builder) = self.create_builder {
      update.insert("create_builder", create_builder);
    }
    db_client()
      .onboarding_keys
      .update_one(query.clone(), doc! { "$set": update })
      .await
      .context("Failed to update onboarding key on database")?;
    // Return the post-update document.
    db_client()
      .onboarding_keys
      .find_one(query)
      .await
      .context("Failed to query database for onboarding key")?
      .context("No matching onboarding key found")
      .status_code(StatusCode::NOT_FOUND)
  }
}
//
impl Resolve<WriteArgs> for DeleteOnboardingKey {
  #[instrument(name = "DeleteServerOnboardingKey", skip(admin))]
  async fn resolve(
    self,
    WriteArgs { user: admin }: &WriteArgs,
  ) -> serror::Result<DeleteOnboardingKeyResponse> {
    // Admin-only endpoint.
    if !admin.admin {
      return Err(
        anyhow!("This call is admin only")
          .status_code(StatusCode::FORBIDDEN),
      );
    }
    let db = db_client();
    let query = doc! { "public_key": &self.public_key };
    // Fetch before deleting so the deleted document can be
    // returned to the caller; 404 when no key matches.
    let creation_key = db
      .onboarding_keys
      .find_one(query.clone())
      .await
      .context("Failed to query database for Server onboarding keys")?
      .context("Server onboarding key matching provided public key not found")
      .status_code(StatusCode::NOT_FOUND)?;
    db.onboarding_keys.delete_one(query).await.context(
      "Failed to delete Server onboarding key from database",
    )?;
    Ok(creation_key)
  }
}

View File

@@ -16,7 +16,8 @@ impl Resolve<WriteArgs> for CreateProcedure {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<CreateProcedureResponse> {
resource::create::<Procedure>(&self.name, self.config, user).await
resource::create::<Procedure>(&self.name, self.config, None, user)
.await
}
}
@@ -33,8 +34,13 @@ impl Resolve<WriteArgs> for CopyProcedure {
PermissionLevel::Write.into(),
)
.await?;
resource::create::<Procedure>(&self.name, config.into(), user)
.await
resource::create::<Procedure>(
&self.name,
config.into(),
None,
user,
)
.await
}
}

View File

@@ -42,7 +42,8 @@ impl Resolve<WriteArgs> for CreateRepo {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Repo> {
resource::create::<Repo>(&self.name, self.config, user).await
resource::create::<Repo>(&self.name, self.config, None, user)
.await
}
}
@@ -58,7 +59,8 @@ impl Resolve<WriteArgs> for CopyRepo {
PermissionLevel::Read.into(),
)
.await?;
resource::create::<Repo>(&self.name, config.into(), user).await
resource::create::<Repo>(&self.name, config.into(), None, user)
.await
}
}
@@ -127,7 +129,8 @@ impl Resolve<WriteArgs> for RenameRepo {
let server =
resource::get::<Server>(&repo.config.server_id).await?;
let log = match periphery_client(&server)?
let log = match periphery_client(&server)
.await?
.request(api::git::RenameRepo {
curr_name: to_path_compatible_name(&repo.name),
new_name: name.clone(),

View File

@@ -1,11 +1,11 @@
use anyhow::Context;
use formatting::format_serror;
use formatting::{bold, format_serror};
use komodo_client::{
api::write::*,
entities::{
NoData, Operation,
permission::PermissionLevel,
server::Server,
server::{Server, ServerInfo},
to_docker_compatible_name,
update::{Update, UpdateStatus},
},
@@ -19,7 +19,7 @@ use crate::{
update::{add_update, make_update, update_update},
},
permission::get_check_permissions,
resource,
resource::{self, update_server_public_key},
};
use super::WriteArgs;
@@ -30,7 +30,16 @@ impl Resolve<WriteArgs> for CreateServer {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Server> {
resource::create::<Server>(&self.name, self.config, user).await
resource::create::<Server>(
&self.name,
self.config,
self.public_key.map(|public_key| ServerInfo {
public_key,
..Default::default()
}),
user,
)
.await
}
}
@@ -47,7 +56,16 @@ impl Resolve<WriteArgs> for CopyServer {
)
.await?;
resource::create::<Server>(&self.name, config.into(), user).await
resource::create::<Server>(
&self.name,
config.into(),
self.public_key.map(|public_key| ServerInfo {
public_key,
..Default::default()
}),
user,
)
.await
}
}
@@ -91,7 +109,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
)
.await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
let mut update =
make_update(&server, Operation::CreateNetwork, user);
@@ -99,7 +117,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
update.id = add_update(update.clone()).await?;
match periphery
.request(api::network::CreateNetwork {
.request(api::docker::CreateNetwork {
name: to_docker_compatible_name(&self.name),
driver: None,
})
@@ -108,7 +126,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"create network",
format_serror(&e.context("failed to create network").into()),
format_serror(&e.context("Failed to create network").into()),
),
};
@@ -132,7 +150,7 @@ impl Resolve<WriteArgs> for CreateTerminal {
)
.await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::CreateTerminal {
@@ -141,7 +159,7 @@ impl Resolve<WriteArgs> for CreateTerminal {
recreate: self.recreate,
})
.await
.context("Failed to create terminal on periphery")?;
.context("Failed to create terminal on Periphery")?;
Ok(NoData {})
}
@@ -160,14 +178,14 @@ impl Resolve<WriteArgs> for DeleteTerminal {
)
.await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteTerminal {
terminal: self.terminal,
})
.await
.context("Failed to delete terminal on periphery")?;
.context("Failed to delete terminal on Periphery")?;
Ok(NoData {})
}
@@ -186,13 +204,76 @@ impl Resolve<WriteArgs> for DeleteAllTerminals {
)
.await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteAllTerminals {})
.await
.context("Failed to delete all terminals on periphery")?;
.context("Failed to delete all terminals on Periphery")?;
Ok(NoData {})
}
}
//
impl Resolve<WriteArgs> for UpdateServerPublicKey {
#[instrument(name = "UpdateServerPublicKey", skip(args))]
async fn resolve(
self,
args: &WriteArgs,
) -> Result<Self::Response, Self::Error> {
let server = get_check_permissions::<Server>(
&self.server,
&args.user,
PermissionLevel::Write.into(),
)
.await?;
update_server_public_key(&server.id, &self.public_key).await?;
let mut update =
make_update(&server, Operation::UpdateServerKey, &args.user);
update.push_simple_log(
"Update Server Public Key",
format!("Public key updated to {}", bold(&self.public_key)),
);
update.finalize();
update.id = add_update(update.clone()).await?;
Ok(update)
}
}
//
impl Resolve<WriteArgs> for RotateServerKeys {
#[instrument(name = "RotateServerPrivateKey", skip(args))]
async fn resolve(
self,
args: &WriteArgs,
) -> Result<Self::Response, Self::Error> {
let server = get_check_permissions::<Server>(
&self.server,
&args.user,
PermissionLevel::Write.into(),
)
.await?;
let periphery = periphery_client(&server).await?;
let public_key = periphery
.request(api::keys::RotatePrivateKey {})
.await
.context("Failed to rotate Periphery private key")?
.public_key;
UpdateServerPublicKey {
server: server.id,
public_key,
}
.resolve(args)
.await
}
}

View File

@@ -51,7 +51,8 @@ impl Resolve<WriteArgs> for CreateStack {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Stack> {
resource::create::<Stack>(&self.name, self.config, user).await
resource::create::<Stack>(&self.name, self.config, None, user)
.await
}
}
@@ -68,7 +69,8 @@ impl Resolve<WriteArgs> for CopyStack {
)
.await?;
resource::create::<Stack>(&self.name, config.into(), user).await
resource::create::<Stack>(&self.name, config.into(), None, user)
.await
}
}
@@ -170,7 +172,8 @@ async fn write_stack_file_contents_on_host(
.into(),
);
}
match periphery_client(&server)?
match periphery_client(&server)
.await?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
@@ -424,7 +427,8 @@ impl Resolve<WriteArgs> for RefreshStackCache {
(vec![], None, None, None, None)
} else if let Some(server) = server {
let GetComposeContentsOnHostResponse { contents, errors } =
match periphery_client(&server)?
match periphery_client(&server)
.await?
.request(GetComposeContentsOnHost {
file_paths: stack.all_file_dependencies(),
name: stack.name.clone(),

View File

@@ -68,8 +68,13 @@ impl Resolve<WriteArgs> for CreateResourceSync {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<ResourceSync> {
resource::create::<ResourceSync>(&self.name, self.config, user)
.await
resource::create::<ResourceSync>(
&self.name,
self.config,
None,
user,
)
.await
}
}
@@ -86,8 +91,13 @@ impl Resolve<WriteArgs> for CopyResourceSync {
PermissionLevel::Write.into(),
)
.await?;
resource::create::<ResourceSync>(&self.name, config.into(), user)
.await
resource::create::<ResourceSync>(
&self.name,
config.into(),
None,
user,
)
.await
}
}

View File

@@ -26,7 +26,7 @@ impl Resolve<WriteArgs> for CreateUserGroup {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin-only")
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -64,7 +64,7 @@ impl Resolve<WriteArgs> for RenameUserGroup {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin-only")
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -93,7 +93,7 @@ impl Resolve<WriteArgs> for DeleteUserGroup {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin-only")
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -129,7 +129,7 @@ impl Resolve<WriteArgs> for AddUserToUserGroup {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin-only")
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -176,7 +176,7 @@ impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin-only")
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -223,7 +223,7 @@ impl Resolve<WriteArgs> for SetUsersInUserGroup {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin-only")
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -273,7 +273,7 @@ impl Resolve<WriteArgs> for SetEveryoneUserGroup {
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(
anyhow!("This call is admin-only")
anyhow!("This call is admin only")
.status_code(StatusCode::FORBIDDEN),
);
}

View File

@@ -84,6 +84,8 @@ pub async fn launch_ec2_instance(
assign_public_ip,
use_public_ip,
user_data,
periphery_public_key: _,
insecure_tls: _,
port: _,
use_https: _,
git_providers: _,

View File

@@ -4,6 +4,8 @@ pub mod aws;
pub enum BuildCleanupData {
/// Nothing to clean up
Server,
/// Cleanup Periphery connection
Url,
/// Clean up AWS instance
Aws { instance_id: String, region: String },
}

View File

@@ -16,6 +16,94 @@ use komodo_client::entities::{
},
logger::LogConfig,
};
use noise::key::{SpkiPublicKey, load_maybe_generate_private_key};
/// Should call in startup to ensure Core errors without valid private key.
///
/// Lazily initialized once and cached for the process lifetime.
pub fn core_private_key() -> &'static String {
  static CORE_PRIVATE_KEY: OnceLock<String> = OnceLock::new();
  CORE_PRIVATE_KEY.get_or_init(|| {
    let config = core_config();
    // A `file:` prefix points at a key on disk; the helper loads it
    // (and presumably generates one at that path if missing — name
    // suggests; TODO confirm). Panics on failure (intentional
    // fail-fast at startup).
    if let Some(path) = config.private_key.strip_prefix("file:") {
      load_maybe_generate_private_key(path).unwrap()
    } else {
      config.private_key.clone()
    }
  })
}
/// Should call in startup to ensure Core errors without valid private key.
///
/// Derives the SPKI public key from [`core_private_key`] and caches it.
/// Panics (unwrap) on an invalid private key — intentional fail-fast.
pub fn core_public_key() -> &'static String {
  static CORE_PUBLIC_KEY: OnceLock<String> = OnceLock::new();
  CORE_PUBLIC_KEY.get_or_init(|| {
    SpkiPublicKey::from_private_key(core_private_key())
      .context("Got invalid private key")
      .unwrap()
      .into_inner()
  })
}
/// Query string identifying this Core instance by hostname,
/// in the form `core=<urlencoded host>` (appended to Periphery
/// connection URLs elsewhere).
/// Panics at first use if config `host` is not a valid URL with a host part.
pub fn core_connection_query() -> &'static String {
  static CORE_HOSTNAME: OnceLock<String> = OnceLock::new();
  CORE_HOSTNAME.get_or_init(|| {
    let host = url::Url::parse(&core_config().host)
      .context("Failed to parse config field 'host' as URL")
      .unwrap()
      .host()
      .context(
        "Failed to parse config field 'host' | missing host part",
      )
      .unwrap()
      .to_string();
    format!("core={}", urlencoding::encode(&host))
  })
}
/// Parses the Core config `periphery_public_keys` list once
/// and caches the result for the process lifetime.
///
/// Each entry is either an inline key / PEM, or a `file:`-prefixed
/// path to one on disk. Entries which fail to read or parse are
/// logged at warn level and skipped rather than failing startup.
/// Returns `None` when `periphery_public_keys` is not configured.
pub fn periphery_public_keys() -> Option<&'static [SpkiPublicKey]> {
  static PERIPHERY_PUBLIC_KEYS: OnceLock<Option<Vec<SpkiPublicKey>>> =
    OnceLock::new();
  PERIPHERY_PUBLIC_KEYS
    .get_or_init(|| {
      core_config().periphery_public_keys.as_ref().map(
        |public_keys| {
          public_keys
            .iter()
            .flat_map(|public_key| {
              // Resolve `file:` entries to their on-disk contents,
              // keeping the path around for error reporting.
              let (path, maybe_pem) = if let Some(path) =
                public_key.strip_prefix("file:")
              {
                match std::fs::read_to_string(path).with_context(
                  || format!("Failed to read periphery public key at {path:?}"),
                ) {
                  Ok(public_key) => (Some(path), public_key),
                  Err(e) => {
                    warn!("{e:#}");
                    return None;
                  }
                }
              } else {
                (None, public_key.clone())
              };
              match SpkiPublicKey::from_maybe_pem(&maybe_pem) {
                Ok(public_key) => Some(public_key),
                Err(e) => {
                  warn!(
                    // Leading space in the optional segment so the
                    // message reads "...public key at \"...\"",
                    // not "...public keyat ...".
                    "Failed to read periphery public key{} | {e:#}",
                    if let Some(path) = path {
                      format!(" at {path:?}")
                    } else {
                      String::new()
                    }
                  );
                  None
                }
              }
            })
            .collect()
        },
      )
    })
    .as_deref()
}
pub fn core_config() -> &'static CoreConfig {
static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
@@ -88,9 +176,11 @@ pub fn core_config() -> &'static CoreConfig {
// recreating CoreConfig here makes sure apply all env overrides applied.
CoreConfig {
// Secret things overridden with file
jwt_secret: maybe_read_item_from_file(env.komodo_jwt_secret_file, env.komodo_jwt_secret).unwrap_or(config.jwt_secret),
private_key: maybe_read_item_from_file(env.komodo_private_key_file, env.komodo_private_key)
.unwrap_or(config.private_key),
passkey: maybe_read_item_from_file(env.komodo_passkey_file, env.komodo_passkey)
.unwrap_or(config.passkey),
.or(config.passkey),
jwt_secret: maybe_read_item_from_file(env.komodo_jwt_secret_file, env.komodo_jwt_secret).unwrap_or(config.jwt_secret),
webhook_secret: maybe_read_item_from_file(env.komodo_webhook_secret_file, env.komodo_webhook_secret)
.unwrap_or(config.webhook_secret),
database: DatabaseConfig {
@@ -177,8 +267,9 @@ pub fn core_config() -> &'static CoreConfig {
port: env.komodo_port.unwrap_or(config.port),
bind_ip: env.komodo_bind_ip.unwrap_or(config.bind_ip),
timezone: env.komodo_timezone.unwrap_or(config.timezone),
first_server: env.komodo_first_server.or(config.first_server),
first_server_name: env.komodo_first_server_name.unwrap_or(config.first_server_name),
periphery_public_keys: env.komodo_periphery_public_keys.or(config.periphery_public_keys),
first_server_address: env.komodo_first_server_address.or(config.first_server_address),
first_server_name: env.komodo_first_server_name.or(config.first_server_name),
frontend_path: env.komodo_frontend_path.unwrap_or(config.frontend_path),
jwt_ttl: env
.komodo_jwt_ttl

View File

@@ -0,0 +1,186 @@
use std::{sync::Arc, time::Duration};
use anyhow::{Context, anyhow};
use periphery_client::CONNECTION_RETRY_SECONDS;
use serror::{deserialize_error_bytes, serialize_error_bytes};
use transport::{
MessageState,
auth::{
AddressConnectionIdentifiers, ClientLoginFlow,
ConnectionIdentifiers,
},
fix_ws_address,
websocket::{Websocket, tungstenite::TungsteniteWebsocket},
};
use crate::{
config::{core_config, core_connection_query},
periphery::ConnectionChannels,
state::periphery_connections,
};
use super::{PeripheryConnection, PeripheryConnectionArgs};
impl PeripheryConnectionArgs<'_> {
  /// Spawn the background task maintaining an outbound
  /// (Core -> Periphery) websocket connection, retrying on failure
  /// until the connection is cancelled.
  ///
  /// - `id`: key under which the connection is registered
  ///   (usually the Server id).
  /// - `insecure`: skip TLS certificate verification (wss only).
  /// - `passkey`: per-connection passkey for legacy auth fallback.
  ///
  /// Returns the channels used to communicate over the connection.
  pub async fn spawn_client_connection(
    self,
    id: String,
    insecure: bool,
    passkey: String,
  ) -> anyhow::Result<Arc<ConnectionChannels>> {
    // Outbound connections require an address.
    let Some(address) = self.address else {
      return Err(anyhow!(
        "Cannot spawn client connection with empty address"
      ));
    };
    let address = fix_ws_address(address);
    let identifiers =
      AddressConnectionIdentifiers::extract(&address)?;
    // Tag the URL with this Core's identity query (`core=<host>`).
    let endpoint = format!("{address}/?{}", core_connection_query());
    // Registering replaces any existing connection at the same id,
    // carrying persistent state (error/channels) over.
    let (connection, mut receiver) =
      periphery_connections().insert(id.clone(), self).await;
    let channels = connection.channels.clone();
    tokio::spawn(async move {
      loop {
        // Race connect against cancellation so the task exits
        // promptly when the connection is removed.
        let ws = tokio::select! {
          ws = TungsteniteWebsocket::connect_maybe_tls_insecure(
            &endpoint,
            // Only skip verification for TLS ("wss") endpoints.
            insecure && endpoint.starts_with("wss"),
          ) => ws,
          _ = connection.cancel.cancelled() => {
            break
          }
        };
        let (mut socket, accept) = match ws {
          Ok(res) => res,
          Err(e) => {
            // Record the error for status reporting, then retry
            // after the standard backoff.
            connection.set_error(e.error).await;
            tokio::time::sleep(Duration::from_secs(
              CONNECTION_RETRY_SECONDS,
            ))
            .await;
            continue;
          }
        };
        let identifiers = identifiers.build(
          accept.as_bytes(),
          core_connection_query().as_bytes(),
        );
        if let Err(e) = connection
          .client_login(&mut socket, identifiers, &passkey)
          .await
        {
          connection.set_error(e).await;
          tokio::time::sleep(Duration::from_secs(
            CONNECTION_RETRY_SECONDS,
          ))
          .await;
          continue;
        };
        // Presumably runs until the socket closes, after which the
        // loop reconnects — TODO confirm handle_socket semantics.
        connection.handle_socket(socket, &mut receiver).await
      }
    });
    Ok(channels)
  }
}
impl PeripheryConnection {
  /// Custom Core -> Periphery side only login wrapper
  /// to implement passkey support for backward compatibility.
  ///
  /// Periphery first sends a single byte choosing the auth flow:
  /// `0` = Noise key-based auth, `1` = legacy passkey auth.
  async fn client_login(
    &self,
    socket: &mut TungsteniteWebsocket,
    identifiers: ConnectionIdentifiers<'_>,
    // for legacy auth
    passkey: &str,
  ) -> anyhow::Result<()> {
    // Get the required auth type (2s timeout so a silent peer
    // cannot stall the login).
    let bytes = socket
      .recv_bytes()
      .with_timeout(Duration::from_secs(2))
      .await?
      .context("Failed to receive login type indicator")?;
    match bytes.iter().as_slice() {
      // Noise auth
      &[0] => {
        self
          .handle_login::<_, ClientLoginFlow>(socket, identifiers)
          .await
      }
      // Passkey auth
      &[1] => handle_passkey_login(socket, passkey).await,
      // NOTE(review): "Receieved" typo in the error string below —
      // runtime text, left unchanged here.
      other => Err(anyhow!(
        "Receieved invalid login type pattern: {other:?}"
      )),
    }
  }
}
/// Legacy passkey login handshake (Core / client side).
///
/// Sends the passkey (falling back to Core config `passkey` when the
/// per-connection passkey is empty), then waits for Periphery's
/// authentication result. Messages are framed with a trailing
/// `MessageState` byte.
async fn handle_passkey_login(
  socket: &mut TungsteniteWebsocket,
  // for legacy auth
  passkey: &str,
) -> anyhow::Result<()> {
  let res = async {
    // Empty per-connection passkey -> use the global Core passkey;
    // error if neither is available.
    let mut passkey = if passkey.is_empty() {
      core_config()
        .passkey
        .as_deref()
        .context("Periphery requires passkey auth")?
        .as_bytes()
        .to_vec()
    } else {
      passkey.as_bytes().to_vec()
    };
    // Append the state byte framing the message as "successful".
    passkey.push(MessageState::Successful.as_byte());
    socket
      .send(passkey.into())
      .await
      .context("Failed to send passkey")?;
    // Receive login state message and return based on value
    let state_msg = socket
      .recv_bytes()
      .await
      .context("Failed to receive authentication state message")?;
    // State byte is the last byte; the rest is a serialized error
    // payload on failure.
    let state = state_msg.last().context(
      "Authentication state message did not contain state byte",
    )?;
    match MessageState::from_byte(*state) {
      MessageState::Successful => anyhow::Ok(()),
      _ => Err(deserialize_error_bytes(
        &state_msg[..(state_msg.len() - 1)],
      )),
    }
  }
  .await;
  if let Err(e) = res {
    // Best-effort: notify the peer of the failure before closing.
    let mut bytes = serialize_error_bytes(&e);
    bytes.push(MessageState::Failed.as_byte());
    if let Err(e) = socket
      .send(bytes.into())
      .await
      .context("Failed to send login failed to client")
    {
      // Log additional error
      warn!("{e:#}");
    }
    // Close socket
    let _ = socket.close(None).await;
    // Return the original error
    Err(e)
  } else {
    Ok(())
  }
}

View File

@@ -0,0 +1,480 @@
use std::{
sync::{
Arc,
atomic::{self, AtomicBool},
},
time::Duration,
};
use anyhow::anyhow;
use bytes::Bytes;
use cache::CloneCache;
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use komodo_client::entities::{
builder::{AwsBuilderConfig, UrlBuilderConfig},
optional_str,
server::Server,
};
use serror::serror_into_anyhow_error;
use tokio::sync::{
RwLock,
mpsc::{Sender, error::SendError},
};
use tokio_util::sync::CancellationToken;
use transport::{
auth::{
ConnectionIdentifiers, LoginFlow, LoginFlowArgs,
PublicKeyValidator,
},
bytes::id_from_transport_bytes,
channel::{BufferedReceiver, buffered_channel},
websocket::{
Websocket, WebsocketMessage, WebsocketReceiver as _,
WebsocketSender as _,
},
};
use crate::{
config::{core_private_key, periphery_public_keys},
periphery::ConnectionChannels,
state::db_client,
};
pub mod client;
pub mod server;
/// All registered Core <-> Periphery connections, keyed by id
/// (usually the Server id).
#[derive(Default)]
pub struct PeripheryConnections(
  CloneCache<String, Arc<PeripheryConnection>>,
);
impl PeripheryConnections {
  /// Insert a recreated connection.
  /// Ensures the fields which must be persisted between
  /// connection recreation are carried over.
  pub async fn insert(
    &self,
    server_id: String,
    args: PeripheryConnectionArgs<'_>,
  ) -> (Arc<PeripheryConnection>, BufferedReceiver<Bytes>) {
    // Remove (not get) the existing entry so the old Arc is
    // dropped from the cache before the replacement goes in.
    let (connection, receiver) = if let Some(existing_connection) =
      self.0.remove(&server_id).await
    {
      existing_connection.with_new_args(args)
    } else {
      PeripheryConnection::new(args)
    };
    self.0.insert(server_id, connection.clone()).await;
    (connection, receiver)
  }
  /// Look up the connection registered under `server_id`, if any.
  pub async fn get(
    &self,
    server_id: &String,
  ) -> Option<Arc<PeripheryConnection>> {
    self.0.get(server_id).await
  }
  /// Remove and cancel connection
  pub async fn remove(
    &self,
    server_id: &String,
  ) -> Option<Arc<PeripheryConnection>> {
    self
      .0
      .remove(server_id)
      .await
      .inspect(|connection| connection.cancel())
  }
}
/// The configurable args of a connection
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct PeripheryConnectionArgs<'a> {
  /// Usually the server id
  pub id: &'a str,
  /// Outbound (Core -> Periphery) address.
  /// None for inbound (Periphery -> Core) connections.
  pub address: Option<&'a str>,
  /// The Periphery public key to pin, if configured.
  periphery_public_key: Option<&'a str>,
}
/// Validates the public key Periphery presents during the Noise
/// handshake against the key pinned in the connection args, falling
/// back to Core config `periphery_public_keys` for inbound connections.
impl PublicKeyValidator for PeripheryConnectionArgs<'_> {
  type ValidationResult = String;
  async fn validate(
    &self,
    public_key: String,
  ) -> anyhow::Result<Self::ValidationResult> {
    let invalid_error = || {
      // Presumably records the rejected key for operator
      // visibility — TODO confirm spawn_update_attempted_public_key.
      spawn_update_attempted_public_key(
        self.id.to_string(),
        Some(public_key.clone()),
      );
      let e = anyhow!("{public_key} is invalid")
        .context(
          "Ensure public key matches configured Periphery Public Key",
        )
        .context("Core failed to validate Periphery public key");
      e
    };
    // Outbound connections (Core dialed out) carry an address.
    let core_to_periphery = self.address.is_some();
    match (self.periphery_public_key, core_to_periphery) {
      // The key matches expected.
      (Some(expected), _) if public_key == expected => Ok(public_key),
      // Explicit auth failed.
      (Some(_), _) => Err(invalid_error()),
      // Core -> Periphery connections with no explicit
      // Periphery public key are not validated.
      (None, true) => Ok(public_key),
      // Periphery -> Core connections with no explicit
      // Periphery public key can fall back to Core config `periphery_public_keys` if defined.
      (None, false) => {
        let expected =
          periphery_public_keys().ok_or_else(invalid_error)?;
        if expected
          .iter()
          .any(|expected| public_key == expected.as_str())
        {
          Ok(public_key)
        } else {
          Err(invalid_error())
        }
      }
    }
  }
}
impl<'a> PeripheryConnectionArgs<'a> {
  /// Connection args for a configured Server (address and pinned
  /// key come from the Server config / info; empty strings -> None).
  pub fn from_server(server: &'a Server) -> Self {
    Self {
      id: &server.id,
      address: optional_str(&server.config.address),
      periphery_public_key: optional_str(&server.info.public_key),
    }
  }
  /// Connection args for a Url builder. `id` is caller-provided
  /// (an ephemeral id, since builders have no Server id).
  pub fn from_url_builder(
    id: &'a str,
    config: &'a UrlBuilderConfig,
  ) -> Self {
    Self {
      id,
      address: optional_str(&config.address),
      periphery_public_key: optional_str(
        &config.periphery_public_key,
      ),
    }
  }
  /// Connection args for an AWS builder instance. The address is
  /// passed separately (known only after the instance launches).
  pub fn from_aws_builder(
    id: &'a str,
    address: &'a str,
    config: &'a AwsBuilderConfig,
  ) -> Self {
    Self {
      id,
      address: Some(address),
      periphery_public_key: optional_str(
        &config.periphery_public_key,
      ),
    }
  }
  /// Clone the borrowed args into an owned form for storage.
  pub fn to_owned(self) -> OwnedPeripheryConnectionArgs {
    OwnedPeripheryConnectionArgs {
      id: self.id.to_string(),
      address: self.address.map(str::to_string),
      periphery_public_key: self
        .periphery_public_key
        .map(str::to_string),
    }
  }
  /// Whether these args are equal to `args` (field-wise PartialEq).
  pub fn matches<'b>(
    self,
    args: impl Into<PeripheryConnectionArgs<'b>>,
  ) -> bool {
    self == args.into()
  }
}
/// Owned form of [`PeripheryConnectionArgs`], stored on the
/// connection itself.
#[derive(Debug, Clone)]
pub struct OwnedPeripheryConnectionArgs {
  /// Usually the Server id.
  pub id: String,
  /// Specify outbound connection address.
  /// Inbound connections have this as None
  pub address: Option<String>,
  /// The public key to expect Periphery to have.
  /// If None, must have 'periphery_public_keys' set
  /// in Core config, or will error
  pub periphery_public_key: Option<String>,
}
impl OwnedPeripheryConnectionArgs {
  /// Borrow these owned args as a [`PeripheryConnectionArgs`] view.
  pub fn borrow(&self) -> PeripheryConnectionArgs<'_> {
    let id = self.id.as_str();
    let address = self.address.as_deref();
    let periphery_public_key = self.periphery_public_key.as_deref();
    PeripheryConnectionArgs {
      id,
      address,
      periphery_public_key,
    }
  }
}
impl From<PeripheryConnectionArgs<'_>>
for OwnedPeripheryConnectionArgs
{
fn from(value: PeripheryConnectionArgs<'_>) -> Self {
value.to_owned()
}
}
impl<'a> From<&'a OwnedPeripheryConnectionArgs>
for PeripheryConnectionArgs<'a>
{
fn from(value: &'a OwnedPeripheryConnectionArgs) -> Self {
value.borrow()
}
}
/// State for a single Core <-> Periphery connection,
/// stored by Server id (see `periphery_connections()`).
#[derive(Debug)]
pub struct PeripheryConnection {
  /// The connection args
  pub args: OwnedPeripheryConnectionArgs,
  /// Send and receive bytes over the connection socket.
  /// Bytes sent here are forwarded to the websocket by `handle_socket`.
  pub sender: Sender<Bytes>,
  /// Cancel the connection
  pub cancel: CancellationToken,
  /// Whether Periphery is currently connected.
  pub connected: AtomicBool,
  // These fields must be maintained if new connection replaces old
  // at the same server id (see `with_new_args`).
  /// Stores latest connection error
  pub error: Arc<RwLock<Option<serror::Serror>>>,
  /// Forward bytes from Periphery to specific channel handlers.
  pub channels: Arc<ConnectionChannels>,
}
impl PeripheryConnection {
  /// Create a fresh connection from args, returning the connection
  /// along with the receiver end of its outgoing-bytes channel
  /// (to be driven by `handle_socket`).
  pub fn new(
    args: impl Into<OwnedPeripheryConnectionArgs>,
  ) -> (Arc<PeripheryConnection>, BufferedReceiver<Bytes>) {
    let (sender, receiever) = buffered_channel();
    (
      PeripheryConnection {
        sender,
        args: args.into(),
        cancel: CancellationToken::new(),
        connected: AtomicBool::new(false),
        // Fresh error / channels state for a brand new connection.
        error: Default::default(),
        channels: Default::default(),
      }
      .into(),
      receiever,
    )
  }
  /// Create a replacement connection at the same server id.
  /// Cancels this connection, and carries over the shared
  /// `error` and `channels` state (unlike `new`).
  pub fn with_new_args(
    &self,
    args: impl Into<OwnedPeripheryConnectionArgs>,
  ) -> (Arc<PeripheryConnection>, BufferedReceiver<Bytes>) {
    // Ensure this connection is cancelled.
    self.cancel();
    let (sender, receiever) = buffered_channel();
    (
      PeripheryConnection {
        sender,
        args: args.into(),
        cancel: CancellationToken::new(),
        connected: AtomicBool::new(false),
        // Keep shared state so in-flight handlers stay attached.
        error: self.error.clone(),
        channels: self.channels.clone(),
      }
      .into(),
      receiever,
    )
  }
  /// Run the login flow `L` over the socket, validating the
  /// Periphery public key against these connection args.
  pub async fn handle_login<W: Websocket, L: LoginFlow>(
    &self,
    socket: &mut W,
    identifiers: ConnectionIdentifiers<'_>,
  ) -> anyhow::Result<()> {
    L::login(LoginFlowArgs {
      socket,
      identifiers,
      private_key: core_private_key(),
      // The args themselves act as the public key validator.
      public_key_validator: self.args.borrow(),
    })
    .await?;
    // Clear attempted public key after successful login
    spawn_update_attempted_public_key(self.args.id.clone(), None);
    Ok(())
  }
  /// Drive the websocket until cancelled or closed:
  /// forwards bytes from `receiver` out over the socket, and routes
  /// incoming binary messages to the registered channel handlers.
  /// Marks the connection connected for the duration.
  pub async fn handle_socket<W: Websocket>(
    &self,
    socket: W,
    receiver: &mut BufferedReceiver<Bytes>,
  ) {
    // Child token: cancelling it stops this socket without
    // cancelling the connection's root token.
    let cancel = self.cancel.child_token();
    self.set_connected(true);
    self.clear_error().await;
    let (mut ws_write, mut ws_read) = socket.split();
    // Writer half: pull outgoing bytes and send over the socket.
    let forward_writes = async {
      loop {
        let next = tokio::select! {
          next = receiver.recv() => next,
          _ = cancel.cancelled() => break,
        };
        let message = match next {
          Some(request) => Bytes::copy_from_slice(request),
          // Sender Dropped (shouldn't happen, a reference is held on 'connection').
          None => break,
        };
        match ws_write.send(message).await {
          // Successful send: buffered bytes no longer needed for replay.
          Ok(_) => receiver.clear_buffer(),
          Err(e) => {
            self.set_error(e.into()).await;
            break;
          }
        }
      }
      // Cancel again if not already
      let _ = ws_write.close(None).await;
      cancel.cancel();
    };
    // Reader half: route incoming messages until close / cancel.
    let handle_reads = async {
      loop {
        let next = tokio::select! {
          next = ws_read.recv() => next,
          _ = cancel.cancelled() => break,
        };
        match next {
          Ok(WebsocketMessage::Binary(bytes)) => {
            self.handle_incoming_bytes(bytes).await
          }
          Ok(WebsocketMessage::Close(_))
          | Ok(WebsocketMessage::Closed) => {
            self.set_error(anyhow!("Connection closed")).await;
            break;
          }
          // NOTE(review): read errors record the error but do not
          // break the loop — presumably treated as recoverable.
          Err(e) => {
            self.set_error(e.into()).await;
          }
        };
      }
      // Cancel again if not already
      cancel.cancel();
    };
    tokio::join!(forward_writes, handle_reads);
    self.set_connected(false);
  }
  /// Route a single incoming message to the channel handler matching
  /// the id embedded in the transport bytes. Failures are logged only.
  pub async fn handle_incoming_bytes(&self, bytes: Bytes) {
    let id = match id_from_transport_bytes(&bytes) {
      Ok(res) => res,
      Err(e) => {
        // TODO: handle better
        warn!("Failed to read id | {e:#}");
        return;
      }
    };
    let Some(channel) = self.channels.get(&id).await else {
      // TODO: handle better
      debug!("Failed to send response | No response channel found");
      return;
    };
    if let Err(e) = channel.send(bytes).await {
      // TODO: handle better
      warn!("Failed to send response | Channel failure | {e:#}");
    }
  }
  /// Queue bytes to be sent out over the connection socket.
  pub async fn send(
    &self,
    value: Bytes,
  ) -> Result<(), SendError<Bytes>> {
    self.sender.send(value).await
  }
  /// Set the connected flag.
  pub fn set_connected(&self, connected: bool) {
    self.connected.store(connected, atomic::Ordering::Relaxed);
  }
  /// Whether Periphery is currently connected.
  pub fn connected(&self) -> bool {
    self.connected.load(atomic::Ordering::Relaxed)
  }
  /// Polls connected 3 times (500ms in between) before bailing.
  /// On bail, returns the stored connection error if one exists,
  /// otherwise a generic "not connected" error.
  pub async fn bail_if_not_connected(&self) -> anyhow::Result<()> {
    const POLL_TIMES: usize = 3;
    for i in 0..POLL_TIMES {
      if self.connected() {
        return Ok(());
      }
      // No sleep after the final failed check.
      if i < POLL_TIMES - 1 {
        tokio::time::sleep(Duration::from_millis(500)).await;
      }
    }
    if let Some(e) = self.error().await {
      Err(serror_into_anyhow_error(e))
    } else {
      Err(anyhow!("Server is not currently connected"))
    }
  }
  /// Get a clone of the latest stored connection error, if any.
  pub async fn error(&self) -> Option<serror::Serror> {
    self.error.read().await.clone()
  }
  /// Store the latest connection error.
  pub async fn set_error(&self, e: anyhow::Error) {
    let mut error = self.error.write().await;
    *error = Some(e.into());
  }
  /// Clear any stored connection error.
  pub async fn clear_error(&self) {
    let mut error = self.error.write().await;
    *error = None;
  }
  /// Cancel the connection's root token.
  pub fn cancel(&self) {
    self.cancel.cancel();
  }
}
/// Spawn task to set the 'attempted_public_key'
/// for easy manual connection acceptance later on.
/// Pass `None` to clear the stored key.
fn spawn_update_attempted_public_key(
  id: String,
  public_key: impl Into<Option<String>>,
) {
  let public_key: Option<String> = public_key.into();
  tokio::spawn(async move {
    // Clearing is represented as an empty string in the database.
    let attempted = public_key.as_deref().unwrap_or_default();
    let update = doc! {
      "$set": {
        "info.attempted_public_key": attempted,
      }
    };
    let res =
      update_one_by_id(&db_client().servers, &id, update, None)
        .await;
    if let Err(e) = res {
      warn!(
        "Failed to update attempted public_key for Server {id} | {e:?}"
      );
    }
  });
}

View File

@@ -0,0 +1,349 @@
use std::{str::FromStr, time::Duration};
use anyhow::{Context, anyhow};
use axum::{
extract::{Query, WebSocketUpgrade},
http::{HeaderMap, StatusCode},
response::Response,
};
use bytes::Bytes;
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
use komodo_client::{
api::write::{CreateBuilder, CreateServer, UpdateResourceMeta},
entities::{
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
onboarding_key::OnboardingKey,
server::{PartialServerConfig, Server},
user::system_user,
},
};
use resolver_api::Resolve;
use serror::{AddStatusCode, AddStatusCodeError};
use transport::{
MessageState, PeripheryConnectionQuery,
auth::{
HeaderConnectionIdentifiers, LoginFlow, LoginFlowArgs,
PublicKeyValidator, ServerLoginFlow,
},
websocket::{Websocket, axum::AxumWebsocket},
};
use crate::{
api::write::WriteArgs,
config::core_private_key,
helpers::query::id_or_name_filter,
resource::KomodoResource,
state::{db_client, periphery_connections},
};
use super::PeripheryConnectionArgs;
/// Entry point for inbound Periphery websocket connections.
/// Dispatches to the existing-Server connection flow, or to the
/// onboarding flow when no Server matches a name-based specifier.
pub async fn handler(
  Query(PeripheryConnectionQuery {
    server: server_query,
  }): Query<PeripheryConnectionQuery>,
  mut headers: HeaderMap,
  ws: WebSocketUpgrade,
) -> serror::Result<Response> {
  // Connection identifiers must be extractable,
  // otherwise the request is unauthorized.
  let identifiers =
    HeaderConnectionIdentifiers::extract(&mut headers)
      .status_code(StatusCode::UNAUTHORIZED)?;
  if server_query.is_empty() {
    return Err(
      anyhow!("Must provide non-empty server specifier")
        .status_code(StatusCode::UNAUTHORIZED),
    );
  }
  // Handle connection vs. onboarding flow.
  let server = Server::coll()
    .find_one(id_or_name_filter(&server_query))
    .await
    .context("Failed to query database for Server")?;
  if let Some(server) = server {
    return existing_server_handler(
      server_query,
      server,
      identifiers,
      ws,
    )
    .await;
  }
  // Only name-based specifiers may onboard. An ObjectId that
  // matched no Server is rejected as unauthorized.
  if ObjectId::from_str(&server_query).is_ok() {
    return Err(
      anyhow!("Must provide name based Server specifier for onboarding flow, name cannot be valid ObjectId (hex)")
        .status_code(StatusCode::UNAUTHORIZED),
    );
  }
  onboard_server_handler(server_query, identifiers, ws).await
}
/// Handle an inbound (Periphery -> Core) websocket connection
/// for an already-existing Server resource.
///
/// Rejects disabled Servers, Servers configured for outbound
/// (Core -> Periphery) connections, and Servers which already
/// have a live connection, then upgrades the websocket and runs
/// the login flow followed by the socket loop.
async fn existing_server_handler(
  server_query: String,
  server: Server,
  identifiers: HeaderConnectionIdentifiers,
  ws: WebSocketUpgrade,
) -> serror::Result<Response> {
  if !server.config.enabled {
    return Err(anyhow!("Server is Disabled."))
      .status_code(StatusCode::BAD_REQUEST);
  }
  // A configured address means Core dials out to Periphery,
  // so an inbound connection here is a misconfiguration.
  if !server.config.address.is_empty() {
    return Err(anyhow!(
      "Server is configured to use a Core -> Periphery connection."
    ))
    .status_code(StatusCode::BAD_REQUEST);
  }
  let connections = periphery_connections();
  // Ensure connected server can't get bumped off the connection.
  // Treat this as authorization issue.
  if let Some(existing_connection) = connections.get(&server.id).await
    && existing_connection.connected()
  {
    return Err(
      anyhow!("A Server '{server_query}' is already connected")
        .status_code(StatusCode::UNAUTHORIZED),
    );
  }
  let (connection, mut receiver) = periphery_connections()
    .insert(
      server.id.clone(),
      PeripheryConnectionArgs::from_server(&server),
    )
    .await;
  Ok(ws.on_upgrade(|socket| async move {
    let query =
      format!("server={}", urlencoding::encode(&server_query));
    let mut socket = AxumWebsocket(socket);
    // Indicator byte 0: standard (existing Server) login flow.
    // FIX: error message previously misspelled "connnection".
    if let Err(e) = socket.send(Bytes::from_owner([0])).await.context(
      "Failed to send the login flow indicator over connection",
    ) {
      connection.set_error(e).await;
      return;
    };
    if let Err(e) = connection
      .handle_login::<_, ServerLoginFlow>(
        &mut socket,
        identifiers.build(query.as_bytes()),
      )
      .await
    {
      connection.set_error(e).await;
      return;
    }
    connection.handle_socket(socket, &mut receiver).await
  }))
}
/// Handle the onboarding flow for a Periphery connecting with a
/// name that matches no existing Server.
///
/// Flow: upgrade the websocket, run the login flow validated against
/// stored onboarding keys, receive the new Server's public key, create
/// the Server (and optionally a Builder), report the result to the
/// client, then close the socket so Periphery reconnects through the
/// existing-Server flow.
async fn onboard_server_handler(
  server_query: String,
  identifiers: HeaderConnectionIdentifiers,
  ws: WebSocketUpgrade,
) -> serror::Result<Response> {
  Ok(ws.on_upgrade(|socket| async move {
    let query =
      format!("server={}", urlencoding::encode(&server_query));
    let mut socket = AxumWebsocket(socket);
    // Indicator byte 1: onboarding login flow.
    // FIX: error message previously misspelled "connnection".
    if let Err(e) = socket.send(Bytes::from_owner([1])).await.context(
      "Failed to send the login flow indicator over connection",
    ).context("Server onboarding error") {
      warn!("{e:#}");
      return;
    };
    // Login validated against stored onboarding keys rather than
    // a Server's expected public key.
    let onboarding_key = match ServerLoginFlow::login(LoginFlowArgs {
      socket: &mut socket,
      identifiers: identifiers.build(query.as_bytes()),
      private_key: core_private_key(),
      public_key_validator: CreationKeyValidator,
    })
    .await
    {
      Ok(onboarding_key) => onboarding_key,
      Err(e) => {
        debug!("Server {server_query} failed to onboard | {e:#}");
        return;
      }
    };
    let res = socket
      .recv_bytes()
      .with_timeout(Duration::from_secs(2))
      .await
      .and_then(|res| {
        res.and_then(|public_key_bytes| {
          String::from_utf8(public_key_bytes.into())
            .context("Public key bytes are not valid utf8")
        })
      });
    // Post onboarding login 1: Receive public key
    let public_key = match res {
      Ok(public_key) => public_key,
      Err(e) => {
        warn!("Server {server_query} failed to onboard | failed to receive Server public key | {e:#}");
        return;
      }
    };
    let server_id = match create_server_maybe_builder(
      server_query,
      public_key,
      onboarding_key.copy_server,
      onboarding_key.tags,
      onboarding_key.create_builder
    ).await {
      Ok(server_id) => server_id,
      Err(e) => {
        warn!("{e:#}");
        if let Err(e) = socket
          .send_error(&e)
          .await
          .context("Failed to send Server creation failed to client")
        {
          // Log additional error
          warn!("{e:#}");
        }
        return;
      }
    };
    if let Err(e) = socket
      .send(MessageState::Successful.into())
      .await
      .context("Failed to send Server creation successful to client")
    {
      // Log additional error
      warn!("{e:#}");
    }
    // Server created, close and trigger reconnect
    // and handling using existing server handler.
    let _ = socket.close(None).await;
    // Add the server to onboarding key "Onboarded"
    let res = db_client()
      .onboarding_keys
      .update_one(
        doc! { "public_key": &onboarding_key.public_key },
        doc! { "$push": { "onboarded": server_id } },
      ).await;
    if let Err(e) = res {
      warn!("Failed to update onboarding key 'onboarded' | {e:?}");
    }
  }))
}
/// Create the onboarded Server (optionally copying config from an
/// existing Server), apply tags, and optionally create a Builder
/// pointed at the new Server. Returns the new Server id.
///
/// Only the Server creation itself is fatal; tag and Builder
/// failures are logged as warnings.
async fn create_server_maybe_builder(
  server_query: String,
  public_key: String,
  copy_server: String,
  tags: Vec<String>,
  create_builder: bool,
) -> anyhow::Result<String> {
  let config = if copy_server.is_empty() {
    PartialServerConfig {
      enabled: Some(true),
      ..Default::default()
    }
  } else {
    // Copy failures fall back to default config with a warning,
    // rather than failing the whole onboarding.
    let config = match db_client().servers.find_one(id_or_name_filter(&copy_server)).await {
      Ok(Some(server)) => server.config,
      Ok(None) => {
        warn!("Server onboarding: Failed to find Server {}", copy_server);
        Default::default()
      }
      Err(e) => {
        warn!("Failed to query database for onboarding key 'copy_server' | {e:?}");
        Default::default()
      }
    };
    // Onboarded Servers connect inbound, so never copy the address.
    PartialServerConfig {
      enabled: Some(true),
      address: None,
      ..config.into()
    }
  };
  let args = WriteArgs {
    user: system_user().to_owned(),
  };
  let server = CreateServer {
    name: server_query.clone(),
    config,
    public_key: Some(public_key),
  }
  .resolve(&args)
  .await
  .map_err(|e| e.error)
  .context("Server onboarding flow failed at Server creation")?;
  // Don't need to fail, only warn on this
  if let Err(e) = (UpdateResourceMeta {
    target: (&server).into(),
    tags: Some(tags),
    description: None,
    template: None,
  })
  .resolve(&args)
  .await
  .map_err(|e| e.error)
  // FIX: context previously said "at Server creation" (copy-paste).
  .context("Server onboarding flow failed at Server meta update")
  {
    warn!("{e:#}");
  };
  if create_builder {
    // Don't need to fail, only warn on this
    if let Err(e) = (CreateBuilder {
      name: server_query,
      config: PartialBuilderConfig::Server(
        PartialServerBuilderConfig {
          server_id: Some(server.id.clone()),
        },
      ),
    })
    .resolve(&args)
    .await
    .map_err(|e| e.error)
    .context("Server onboarding flow failed at Builder creation")
    {
      warn!("{e:#}");
    };
  }
  Ok(server.id)
}
/// Validates an onboarding public key against the onboarding
/// keys stored in the database.
struct CreationKeyValidator;
impl PublicKeyValidator for CreationKeyValidator {
  type ValidationResult = OnboardingKey;
  /// Look up the onboarding key matching `public_key`.
  /// Errors if no match is found or the key is disabled.
  async fn validate(
    &self,
    public_key: String,
  ) -> anyhow::Result<Self::ValidationResult> {
    let key = db_client()
      .onboarding_keys
      .find_one(doc! { "public_key": &public_key })
      .await
      .context("Failed to query database for Server onboarding keys")?
      .context("Matching Server onboarding key not found")?;
    if !key.enabled {
      return Err(anyhow!("Onboarding key is disabled"));
    }
    Ok(key)
  }
}

View File

@@ -1,6 +1,7 @@
use std::sync::{Arc, Mutex};
use anyhow::anyhow;
use cache::CloneCache;
use komodo_client::{
busy::Busy,
entities::{
@@ -12,20 +13,19 @@ use komodo_client::{
},
};
use super::cache::Cache;
#[derive(Default)]
pub struct ActionStates {
pub server: Cache<String, Arc<ActionState<ServerActionState>>>,
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,
pub server: CloneCache<String, Arc<ActionState<ServerActionState>>>,
pub stack: CloneCache<String, Arc<ActionState<StackActionState>>>,
pub deployment:
Cache<String, Arc<ActionState<DeploymentActionState>>>,
pub build: Cache<String, Arc<ActionState<BuildActionState>>>,
pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
CloneCache<String, Arc<ActionState<DeploymentActionState>>>,
pub build: CloneCache<String, Arc<ActionState<BuildActionState>>>,
pub repo: CloneCache<String, Arc<ActionState<RepoActionState>>>,
pub procedure:
Cache<String, Arc<ActionState<ProcedureActionState>>>,
pub action: Cache<String, Arc<ActionState<ActionActionState>>>,
pub sync: Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
CloneCache<String, Arc<ActionState<ProcedureActionState>>>,
pub action: CloneCache<String, Arc<ActionState<ActionActionState>>>,
pub sync:
CloneCache<String, Arc<ActionState<ResourceSyncActionState>>>,
}
/// Need to be able to check "busy" with write lock acquired.

View File

@@ -1,6 +1,7 @@
use std::time::Duration;
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::oid::ObjectId;
use formatting::muted;
use komodo_client::entities::{
Version,
@@ -9,10 +10,7 @@ use komodo_client::entities::{
server::Server,
update::{Log, Update},
};
use periphery_client::{
PeripheryClient,
api::{self, GetVersionResponse},
};
use periphery_client::api::{self, GetVersionResponse};
use crate::{
cloud::{
@@ -22,8 +20,9 @@ use crate::{
terminate_ec2_instance_with_retry,
},
},
config::core_config,
connection::PeripheryConnectionArgs,
helpers::update::update_update,
periphery::PeripheryClient,
resource,
};
@@ -47,27 +46,29 @@ pub async fn get_builder_periphery(
"Builder has not yet configured an address"
));
}
// TODO: Dont use builder id, or will be problems
// with simultaneous spawned builders.
let periphery = PeripheryClient::new(
config.address,
if config.passkey.is_empty() {
core_config().passkey.clone()
} else {
config.passkey
},
Duration::from_secs(3),
);
PeripheryConnectionArgs::from_url_builder(
&ObjectId::new().to_hex(),
&config,
),
config.insecure_tls,
&config.passkey,
)
.await?;
periphery
.health_check()
.await
.context("Url Builder failed health check")?;
Ok((periphery, BuildCleanupData::Server))
Ok((periphery, BuildCleanupData::Url))
}
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
return Err(anyhow!("Builder has not configured a server"));
}
let server = resource::get::<Server>(&config.server_id).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
Ok((periphery, BuildCleanupData::Server))
}
BuilderConfig::Aws(config) => {
@@ -90,10 +91,8 @@ async fn get_aws_builder(
let Ec2Instance { instance_id, ip } =
launch_ec2_instance(&instance_name, &config).await?;
info!("ec2 instance launched");
let log = Log {
stage: "start build instance".to_string(),
stage: "Start Build Instance".to_string(),
success: true,
stdout: start_aws_builder_log(&instance_id, &ip, &config),
start_ts: start_create_ts,
@@ -105,14 +104,21 @@ async fn get_aws_builder(
update_update(update.clone()).await?;
let protocol = if config.use_https { "https" } else { "http" };
let protocol = if config.use_https { "wss" } else { "ws" };
// TODO: Handle ad-hoc (non server) periphery connections. These don't have ids.
let periphery_address =
format!("{protocol}://{ip}:{}", config.port);
let periphery = PeripheryClient::new(
&periphery_address,
&core_config().passkey,
Duration::from_secs(3),
);
PeripheryConnectionArgs::from_aws_builder(
&ObjectId::new().to_hex(),
&periphery_address,
&config,
),
config.insecure_tls,
"",
)
.await?;
let start_connect_ts = komodo_timestamp();
let mut res = Ok(GetVersionResponse {
@@ -166,6 +172,7 @@ async fn get_aws_builder(
#[instrument(skip(update))]
pub async fn cleanup_builder_instance(
periphery: PeripheryClient,
cleanup_data: BuildCleanupData,
update: &mut Update,
) {
@@ -173,10 +180,14 @@ pub async fn cleanup_builder_instance(
BuildCleanupData::Server => {
// Nothing to clean up
}
BuildCleanupData::Url => {
periphery.cleanup().await;
}
BuildCleanupData::Aws {
instance_id,
region,
} => {
periphery.cleanup().await;
let _instance_id = instance_id.clone();
tokio::spawn(async move {
let _ =

View File

@@ -1,83 +0,0 @@
use std::{collections::HashMap, hash::Hash};
use tokio::sync::RwLock;
#[derive(Default)]
pub struct Cache<K: PartialEq + Eq + Hash, T: Clone + Default> {
cache: RwLock<HashMap<K, T>>,
}
impl<
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default,
> Cache<K, T>
{
#[instrument(level = "debug", skip(self))]
pub async fn get(&self, key: &K) -> Option<T> {
self.cache.read().await.get(key).cloned()
}
#[instrument(level = "debug", skip(self))]
pub async fn get_or_insert_default(&self, key: &K) -> T {
let mut lock = self.cache.write().await;
match lock.get(key).cloned() {
Some(item) => item,
None => {
let item: T = Default::default();
lock.insert(key.clone(), item.clone());
item
}
}
}
#[instrument(level = "debug", skip(self))]
pub async fn get_list(&self) -> Vec<T> {
let cache = self.cache.read().await;
cache.values().cloned().collect()
}
#[instrument(level = "debug", skip(self))]
pub async fn insert<Key>(&self, key: Key, val: T)
where
T: std::fmt::Debug,
Key: Into<K> + std::fmt::Debug,
{
self.cache.write().await.insert(key.into(), val);
}
// #[instrument(level = "debug", skip(self, handler))]
// pub async fn update_entry<Key>(
// &self,
// key: Key,
// handler: impl Fn(&mut T),
// ) where
// Key: Into<K> + std::fmt::Debug,
// {
// let mut cache = self.cache.write().await;
// handler(cache.entry(key.into()).or_default());
// }
// #[instrument(level = "debug", skip(self))]
// pub async fn clear(&self) {
// self.cache.write().await.clear();
// }
#[instrument(level = "debug", skip(self))]
pub async fn remove(&self, key: &K) {
self.cache.write().await.remove(key);
}
}
// impl<
// K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
// T: Clone + Default + Busy,
// > Cache<K, T>
// {
// #[instrument(level = "debug", skip(self))]
// pub async fn busy(&self, id: &K) -> bool {
// match self.get(id).await {
// Some(state) => state.busy(),
// None => false,
// }
// }
// }

View File

@@ -1,4 +1,4 @@
use std::{fmt::Write, time::Duration};
use std::fmt::Write;
use anyhow::{Context, anyhow};
use database::mongo_indexed::Document;
@@ -15,15 +15,16 @@ use komodo_client::entities::{
stack::Stack,
user::User,
};
use periphery_client::PeripheryClient;
use rand::Rng;
use crate::{config::core_config, state::db_client};
use crate::{
config::core_config, connection::PeripheryConnectionArgs,
periphery::PeripheryClient, state::db_client,
};
pub mod action_state;
pub mod all_resources;
pub mod builder;
pub mod cache;
pub mod channel;
pub mod maintenance;
pub mod matcher;
@@ -185,24 +186,18 @@ pub async fn registry_token(
//
pub fn periphery_client(
pub async fn periphery_client(
server: &Server,
) -> anyhow::Result<PeripheryClient> {
if !server.config.enabled {
return Err(anyhow!("server not enabled"));
}
let client = PeripheryClient::new(
&server.config.address,
if server.config.passkey.is_empty() {
&core_config().passkey
} else {
&server.config.passkey
},
Duration::from_secs(server.config.timeout_seconds as u64),
);
Ok(client)
PeripheryClient::new(
PeripheryConnectionArgs::from_server(server),
server.config.insecure_tls,
&server.config.passkey,
)
.await
}
#[instrument]

View File

@@ -1209,6 +1209,23 @@ async fn execute_execution(
)
.await?
}
Execution::RotateAllServerKeys(req) => {
let req = ExecuteRequest::RotateAllServerKeys(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RotateAllServerKeys(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.await
.map_err(|e| e.error)
.context("Failed at RotateAllServerKeys"),
&update_id,
)
.await?
}
Execution::Sleep(req) => {
let duration = Duration::from_millis(req.duration_ms as u64);
tokio::time::sleep(duration).await;

View File

@@ -4,7 +4,7 @@ use async_timing_util::{
};
use database::mungos::{find::find_collect, mongodb::bson::doc};
use futures::{StreamExt, stream::FuturesUnordered};
use periphery_client::api::image::PruneImages;
use periphery_client::api::docker::PruneImages;
use crate::{config::core_config, state::db_client};
@@ -41,7 +41,10 @@ async fn prune_images() -> anyhow::Result<()> {
.map(|server| async move {
(
async {
periphery_client(&server)?.request(PruneImages {}).await
periphery_client(&server)
.await?
.request(PruneImages {})
.await
}
.await,
server,
@@ -51,8 +54,8 @@ async fn prune_images() -> anyhow::Result<()> {
while let Some((res, server)) = futures.next().await {
if let Err(e) = res {
error!(
"failed to prune images on server {} ({}) | {e:#}",
warn!(
"failed to prune images on Server {} ({}) | {e:#}",
server.name, server.id
)
}

View File

@@ -1,11 +1,6 @@
use std::{
collections::HashMap,
str::FromStr,
sync::{Arc, OnceLock},
};
use std::{collections::HashMap, str::FromStr};
use anyhow::{Context, anyhow};
use async_timing_util::{ONE_MIN_MS, unix_timestamp_ms};
use database::mungos::{
find::find_collect,
mongodb::{
@@ -30,7 +25,6 @@ use komodo_client::{
repo::Repo,
server::{Server, ServerState},
stack::{Stack, StackServiceNames, StackState},
stats::SystemInformation,
sync::ResourceSync,
tag::Tag,
update::Update,
@@ -39,8 +33,6 @@ use komodo_client::{
variable::Variable,
},
};
use periphery_client::api::stats;
use tokio::sync::Mutex;
use crate::{
config::core_config,
@@ -54,8 +46,6 @@ use crate::{
},
};
use super::periphery_client;
// user: Id or username
#[instrument(level = "debug")]
pub async fn get_user(user: &str) -> anyhow::Result<User> {
@@ -84,10 +74,11 @@ pub async fn get_server_state(server: &Server) -> ServerState {
if !server.config.enabled {
return ServerState::Disabled;
}
// Unwrap ok: Server disabled check above
match super::periphery_client(server)
.unwrap()
.request(periphery_client::api::GetHealth {})
let Ok(periphery) = super::periphery_client(server).await else {
return ServerState::NotOk;
};
match periphery
.request(periphery_client::api::GetVersion {})
.await
{
Ok(_) => ServerState::Ok,
@@ -413,39 +404,6 @@ pub async fn get_variables_and_secrets()
Ok(VariablesAndSecrets { variables, secrets })
}
// This protects the peripheries from spam requests
const SYSTEM_INFO_EXPIRY: u128 = ONE_MIN_MS;
type SystemInfoCache =
Mutex<HashMap<String, Arc<(SystemInformation, u128)>>>;
fn system_info_cache() -> &'static SystemInfoCache {
static SYSTEM_INFO_CACHE: OnceLock<SystemInfoCache> =
OnceLock::new();
SYSTEM_INFO_CACHE.get_or_init(Default::default)
}
pub async fn get_system_info(
server: &Server,
) -> anyhow::Result<SystemInformation> {
let mut lock = system_info_cache().lock().await;
let res = match lock.get(&server.id) {
Some(cached) if cached.1 > unix_timestamp_ms() => {
cached.0.clone()
}
_ => {
let stats = periphery_client(server)?
.request(stats::GetSystemInformation {})
.await?;
lock.insert(
server.id.clone(),
(stats.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
.into(),
);
stats
}
};
Ok(res)
}
/// Get last time procedure / action was run using Update query.
/// Ignored whether run was successful.
pub async fn get_last_run_at<R: KomodoResource>(

View File

@@ -520,6 +520,9 @@ pub async fn init_execution_update(
ExecuteRequest::GlobalAutoUpdate(_data) => {
(Operation::GlobalAutoUpdate, ResourceTarget::system())
}
ExecuteRequest::RotateAllServerKeys(_data) => {
(Operation::RotateAllServerKeys, ResourceTarget::system())
}
};
let mut update = make_update(target, operation, user);

View File

@@ -2,10 +2,11 @@ use std::sync::Arc;
use anyhow::anyhow;
use axum::{Router, http::HeaderMap};
use cache::CloneCache;
use komodo_client::entities::resource::Resource;
use tokio::sync::Mutex;
use crate::{helpers::cache::Cache, resource::KomodoResource};
use crate::resource::KomodoResource;
mod integrations;
mod resources;
@@ -19,7 +20,7 @@ pub fn router() -> Router {
.nest("/gitlab", router::router::<gitlab::Gitlab>())
}
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;
type ListenerLockCache = CloneCache<String, Arc<Mutex<()>>>;
/// Implemented for all resources which can receive a webhook.
trait CustomSecret: KomodoResource {

View File

@@ -1,3 +1,5 @@
#![recursion_limit = "256"]
#[macro_use]
extern crate tracing;
@@ -11,17 +13,19 @@ use tower_http::{
services::{ServeDir, ServeFile},
};
use crate::config::core_config;
use crate::config::{core_config, core_public_key};
mod alert;
mod api;
mod auth;
mod cloud;
mod config;
mod connection;
mod helpers;
mod listener;
mod monitor;
mod network;
mod periphery;
mod permission;
mod resource;
mod schedule;
@@ -36,12 +40,6 @@ async fn app() -> anyhow::Result<()> {
dotenvy::dotenv().ok();
let config = core_config();
logger::init(&config.logging)?;
if let Err(e) =
rustls::crypto::aws_lc_rs::default_provider().install_default()
{
error!("Failed to install default crypto provider | {e:?}");
std::process::exit(1);
};
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));
@@ -55,6 +53,13 @@ async fn app() -> anyhow::Result<()> {
(false, false) => info!("{:?}", config.sanitized()),
}
// Init + log public key. Will crash if invalid private key here.
info!("Public Key: {}", core_public_key());
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.expect("Failed to install default crypto provider");
// Init jwt client to crash on failure
state::jwt_client();
tokio::join!(
@@ -122,9 +127,6 @@ async fn app() -> anyhow::Result<()> {
if config.ssl_enabled {
info!("🔒 Core SSL Enabled");
rustls::crypto::ring::default_provider()
.install_default()
.expect("failed to install default rustls CryptoProvider");
info!("Komodo Core starting on https://{socket_addr}");
let ssl_config = RustlsConfig::from_pem_file(
&config.ssl_cert_file,

View File

@@ -20,7 +20,7 @@ pub async fn alert_deployments(
) {
let mut alerts = Vec::<Alert>::new();
let action_states = action_states();
for status in deployment_status_cache().get_list().await {
for status in deployment_status_cache().get_values().await {
// Don't alert if prev None
let Some(prev) = status.prev else {
continue;

View File

@@ -78,7 +78,7 @@ pub async fn alert_servers(
ts: i64,
mut servers: HashMap<String, Server>,
) {
let server_statuses = server_status_cache().get_list().await;
let server_statuses = server_status_cache().get_values().await;
let (open_alerts, open_disk_alerts) = match get_open_alerts().await
{
@@ -182,17 +182,24 @@ pub async fn alert_servers(
// SERVER VERSION MISMATCH
// ===================
let core_version = env!("CARGO_PKG_VERSION");
let has_version_mismatch = server_status.state == ServerState::Ok
&& !server_status.version.is_empty()
&& server_status.version != "Unknown"
&& server_status.version != core_version;
let mismatched_server_version =
if server_status.state != ServerState::Ok {
None
} else if let Some(version) =
server_status.periphery_info.as_ref().map(|i| &i.version)
&& version != core_version
{
Some(version)
} else {
None
};
let version_alert = server_alerts.as_ref().and_then(|alerts| {
alerts.get(&AlertDataVariant::ServerVersionMismatch)
});
match (has_version_mismatch, version_alert) {
(true, None) => {
match (mismatched_server_version, version_alert) {
(Some(version), None) => {
// Only open version mismatch alert if not in maintenance and buffer is ready
if !in_maintenance
&& buffer.ready_to_open(
@@ -211,7 +218,7 @@ pub async fn alert_servers(
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.config.region),
server_version: server_status.version.clone(),
server_version: version.clone(),
core_version: core_version.to_string(),
},
};
@@ -220,27 +227,27 @@ pub async fn alert_servers(
.push((alert, server.config.send_version_mismatch_alerts))
}
}
(true, Some(alert)) => {
(Some(version), Some(alert)) => {
// Update existing alert with current version info
let mut alert = alert.clone();
alert.data = AlertData::ServerVersionMismatch {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.config.region),
server_version: server_status.version.clone(),
server_version: version.clone(),
core_version: core_version.to_string(),
};
// Don't send notification for updates
alerts_to_update.push((alert, false));
}
(false, Some(alert)) => {
(None, Some(alert)) => {
// Version is now correct, close the alert
alert_ids_to_close.push((
alert.clone(),
server.config.send_version_mismatch_alerts,
));
}
(false, None) => {
(None, None) => {
// Reset buffer state when no mismatch and no alert
buffer.reset(
server_status.id.clone(),
@@ -282,7 +289,7 @@ pub async fn alert_servers(
name: server.name.clone(),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
@@ -304,7 +311,7 @@ pub async fn alert_servers(
name: server.name.clone(),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
@@ -320,7 +327,7 @@ pub async fn alert_servers(
name: server.name.clone(),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
@@ -361,12 +368,12 @@ pub async fn alert_servers(
name: server.name.clone(),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.mem_total_gb)
.unwrap_or(0.0),
used_gb: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
@@ -388,12 +395,12 @@ pub async fn alert_servers(
name: server.name.clone(),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.mem_total_gb)
.unwrap_or(0.0),
used_gb: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
@@ -409,12 +416,12 @@ pub async fn alert_servers(
name: server.name.clone(),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.mem_total_gb)
.unwrap_or(0.0),
used_gb: server_status
.stats
.system_stats
.as_ref()
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
@@ -452,7 +459,7 @@ pub async fn alert_servers(
)
{
let disk =
server_status.stats.as_ref().and_then(|stats| {
server_status.system_stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
let alert = Alert {
@@ -487,7 +494,7 @@ pub async fn alert_servers(
// modify alert level only if it has increased and not in maintenance
if !in_maintenance && health.level < alert.level {
let disk =
server_status.stats.as_ref().and_then(|stats| {
server_status.system_stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = health.level;
@@ -505,9 +512,10 @@ pub async fn alert_servers(
}
(SeverityLevel::Ok, Some(alert), true) => {
let mut alert = alert.clone();
let disk = server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
let disk =
server_status.system_stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = health.level;
alert.data = AlertData::ServerDisk {
id: server_status.id.clone(),

View File

@@ -19,7 +19,7 @@ pub async fn alert_stacks(
) {
let action_states = action_states();
let mut alerts = Vec::<Alert>::new();
for status in stack_status_cache().get_list().await {
for status in stack_status_cache().get_values().await {
// Don't alert if prev None
let Some(prev) = status.prev else {
continue;

View File

@@ -7,11 +7,11 @@ use komodo_client::entities::{
},
repo::Repo,
server::{
Server, ServerConfig, ServerHealth, ServerHealthState,
ServerState,
PeripheryInformation, Server, ServerConfig, ServerHealth,
ServerHealthState, ServerState,
},
stack::{ComposeProject, Stack, StackState},
stats::{SingleDiskUsage, SystemStats},
stats::{SingleDiskUsage, SystemInformation, SystemStats},
};
use serror::Serror;
@@ -103,20 +103,23 @@ type DockerLists = (
pub async fn insert_server_status(
server: &Server,
state: ServerState,
version: String,
stats: Option<SystemStats>,
periphery_info: Option<PeripheryInformation>,
system_info: Option<SystemInformation>,
system_stats: Option<SystemStats>,
(containers, networks, images, volumes, projects): DockerLists,
err: impl Into<Option<Serror>>,
) {
let health = stats.as_ref().map(|s| get_server_health(server, s));
let health =
system_stats.as_ref().map(|s| get_server_health(server, s));
server_status_cache()
.insert(
server.id.clone(),
CachedServerStatus {
id: server.id.clone(),
state,
version,
stats,
periphery_info,
system_info,
system_stats,
health,
containers,
networks,

View File

@@ -1,51 +0,0 @@
use komodo_client::entities::{
docker::{
container::ContainerListItem, image::ImageListItem,
network::NetworkListItem, volume::VolumeListItem,
},
stack::ComposeProject,
};
use periphery_client::{
PeripheryClient,
api::{GetDockerLists, GetDockerListsResponse},
};
pub async fn get_docker_lists(
periphery: &PeripheryClient,
) -> anyhow::Result<(
Vec<ContainerListItem>,
Vec<NetworkListItem>,
Vec<ImageListItem>,
Vec<VolumeListItem>,
Vec<ComposeProject>,
)> {
let GetDockerListsResponse {
containers,
networks,
images,
volumes,
projects,
} = periphery.request(GetDockerLists {}).await?;
// TODO: handle the errors
let (
mut containers,
mut networks,
mut images,
mut volumes,
mut projects,
) = (
containers.unwrap_or_default(),
networks.unwrap_or_default(),
images.unwrap_or_default(),
volumes.unwrap_or_default(),
projects.unwrap_or_default(),
);
containers.sort_by(|a, b| a.name.cmp(&b.name));
networks.sort_by(|a, b| a.name.cmp(&b.name));
images.sort_by(|a, b| a.name.cmp(&b.name));
volumes.sort_by(|a, b| a.name.cmp(&b.name));
projects.sort_by(|a, b| a.name.cmp(&b.name));
Ok((containers, networks, images, volumes, projects))
}

View File

@@ -1,29 +1,37 @@
use std::sync::{Arc, OnceLock};
use async_timing_util::wait_until_timelength;
use cache::CloneCache;
use database::mungos::{find::find_collect, mongodb::bson::doc};
use futures::future::join_all;
use helpers::insert_stacks_status_unknown;
use komodo_client::entities::{
deployment::DeploymentState,
build::Build,
deployment::{Deployment, DeploymentState},
docker::{
container::ContainerListItem, image::ImageListItem,
network::NetworkListItem, volume::VolumeListItem,
},
komodo_timestamp, optional_string,
server::{Server, ServerHealth, ServerState},
stack::{ComposeProject, StackService, StackState},
stats::SystemStats,
repo::Repo,
server::{PeripheryInformation, Server, ServerHealth, ServerState},
stack::{ComposeProject, Stack, StackService, StackState},
stats::{SystemInformation, SystemStats},
};
use periphery_client::api::{
self, PollStatusResponse, git::GetLatestCommit,
};
use periphery_client::api::{self, git::GetLatestCommit};
use serror::Serror;
use tokio::sync::Mutex;
use crate::{
config::core_config,
helpers::{cache::Cache, periphery_client},
helpers::periphery_client,
monitor::{alert::check_alerts, record::record_server_stats},
state::{db_client, deployment_status_cache, repo_status_cache},
state::{
db_client, deployment_status_cache, periphery_connections,
repo_status_cache,
},
};
use self::helpers::{
@@ -33,7 +41,6 @@ use self::helpers::{
mod alert;
mod helpers;
mod lists;
mod record;
mod resources;
@@ -47,9 +54,10 @@ pub struct History<Curr: Default, Prev> {
pub struct CachedServerStatus {
pub id: String,
pub state: ServerState,
pub version: String,
pub stats: Option<SystemStats>,
pub health: Option<ServerHealth>,
pub periphery_info: Option<PeripheryInformation>,
pub system_info: Option<SystemInformation>,
pub system_stats: Option<SystemStats>,
pub containers: Option<Vec<ContainerListItem>>,
pub networks: Option<Vec<NetworkListItem>>,
pub images: Option<Vec<ImageListItem>>,
@@ -122,8 +130,8 @@ async fn refresh_server_cache(ts: i64) {
/// Makes sure cache for server doesn't update too frequently / simultaneously.
/// If forced, will still block against simultaneous update.
fn update_cache_for_server_controller()
-> &'static Cache<String, Arc<Mutex<i64>>> {
static CACHE: OnceLock<Cache<String, Arc<Mutex<i64>>>> =
-> &'static CloneCache<String, Arc<Mutex<i64>>> {
static CACHE: OnceLock<CloneCache<String, Arc<Mutex<i64>>>> =
OnceLock::new();
CACHE.get_or_init(Default::default)
}
@@ -154,64 +162,34 @@ pub async fn update_cache_for_server(server: &Server, force: bool) {
*lock = now;
let (deployments, builds, repos, stacks) = tokio::join!(
find_collect(
&db_client().deployments,
doc! { "config.server_id": &server.id },
None,
),
find_collect(&db_client().builds, doc! {}, None,),
find_collect(
&db_client().repos,
doc! { "config.server_id": &server.id },
None,
),
find_collect(
&db_client().stacks,
doc! { "config.server_id": &server.id },
None,
)
);
let deployments = deployments.inspect_err(|e| error!("failed to get deployments list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
let builds = builds.inspect_err(|e| error!("failed to get builds list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
let repos = repos.inspect_err(|e| error!("failed to get repos list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
let stacks = stacks.inspect_err(|e| error!("failed to get stacks list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
let resources = UpdateCacheResources::load(server).await;
// Handle server disabled
if !server.config.enabled {
insert_deployments_status_unknown(deployments).await;
insert_stacks_status_unknown(stacks).await;
insert_repos_status_unknown(repos).await;
resources.insert_status_unknown().await;
insert_server_status(
server,
ServerState::Disabled,
String::from("unknown"),
None,
None,
None,
(None, None, None, None, None),
None,
)
.await;
periphery_connections().remove(&server.id).await;
return;
}
let Ok(periphery) = periphery_client(server) else {
error!(
"somehow periphery not ok to create. should not be reached."
);
return;
};
let version = match periphery.request(api::GetVersion {}).await {
Ok(version) => version.version,
let periphery = match periphery_client(server).await {
Ok(periphery) => periphery,
Err(e) => {
insert_deployments_status_unknown(deployments).await;
insert_stacks_status_unknown(stacks).await;
insert_repos_status_unknown(repos).await;
resources.insert_status_unknown().await;
insert_server_status(
server,
ServerState::NotOk,
String::from("Unknown"),
None,
None,
None,
(None, None, None, None, None),
Serror::from(&e),
@@ -221,82 +199,75 @@ pub async fn update_cache_for_server(server: &Server, force: bool) {
}
};
let stats = if server.config.stats_monitoring {
match periphery.request(api::stats::GetSystemStats {}).await {
Ok(stats) => Some(filter_volumes(server, stats)),
Err(e) => {
insert_deployments_status_unknown(deployments).await;
insert_stacks_status_unknown(stacks).await;
insert_repos_status_unknown(repos).await;
insert_server_status(
server,
ServerState::NotOk,
String::from("unknown"),
None,
(None, None, None, None, None),
Serror::from(&e),
)
.await;
return;
}
let PollStatusResponse {
periphery_info,
system_info,
system_stats,
mut containers,
networks,
images,
volumes,
projects,
} = match periphery
.request(api::PollStatus {
include_stats: server.config.stats_monitoring,
})
.await
{
Ok(info) => info,
Err(e) => {
resources.insert_status_unknown().await;
insert_server_status(
server,
ServerState::NotOk,
None,
None,
None,
(None, None, None, None, None),
Serror::from(&e),
)
.await;
return;
}
} else {
None
};
match lists::get_docker_lists(&periphery).await {
Ok((mut containers, networks, images, volumes, projects)) => {
containers.iter_mut().for_each(|container| {
container.server_id = Some(server.id.clone())
});
tokio::join!(
resources::update_deployment_cache(
server.name.clone(),
deployments,
&containers,
&images,
&builds,
),
resources::update_stack_cache(
server.name.clone(),
stacks,
&containers,
&images
),
);
insert_server_status(
server,
ServerState::Ok,
version,
stats,
(
Some(containers.clone()),
Some(networks),
Some(images),
Some(volumes),
Some(projects),
),
None,
)
.await;
}
Err(e) => {
insert_deployments_status_unknown(deployments).await;
insert_stacks_status_unknown(stacks).await;
insert_server_status(
server,
ServerState::Ok,
version,
stats,
(None, None, None, None, None),
Some(e.into()),
)
.await;
}
}
containers.iter_mut().for_each(|container| {
container.server_id = Some(server.id.clone())
});
tokio::join!(
resources::update_deployment_cache(
server.name.clone(),
resources.deployments,
&containers,
&images,
&resources.builds,
),
resources::update_stack_cache(
server.name.clone(),
resources.stacks,
&containers,
&images
),
);
insert_server_status(
server,
ServerState::Ok,
Some(periphery_info),
Some(system_info),
system_stats.map(|stats| filter_volumes(server, stats)),
(
Some(containers.clone()),
Some(networks),
Some(images),
Some(volumes),
Some(projects),
),
None,
)
.await;
let status_cache = repo_status_cache();
for repo in repos {
for repo in resources.repos {
let (latest_hash, latest_message) = periphery
.request(GetLatestCommit {
name: repo.name.clone(),
@@ -320,6 +291,54 @@ pub async fn update_cache_for_server(server: &Server, force: bool) {
}
}
struct UpdateCacheResources {
stacks: Vec<Stack>,
deployments: Vec<Deployment>,
builds: Vec<Build>,
repos: Vec<Repo>,
}
impl UpdateCacheResources {
pub async fn load(server: &Server) -> Self {
let (stacks, deployments, builds, repos) = tokio::join!(
find_collect(
&db_client().stacks,
doc! { "config.server_id": &server.id },
None,
),
find_collect(
&db_client().deployments,
doc! { "config.server_id": &server.id },
None,
),
find_collect(&db_client().builds, doc! {}, None,),
find_collect(
&db_client().repos,
doc! { "config.server_id": &server.id },
None,
),
);
let stacks = stacks.inspect_err(|e| error!("failed to get stacks list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
let deployments = deployments.inspect_err(|e| error!("failed to get deployments list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
let builds = builds.inspect_err(|e| error!("failed to get builds list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
let repos = repos.inspect_err(|e| error!("failed to get repos list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
Self {
stacks,
deployments,
builds,
repos,
}
}
pub async fn insert_status_unknown(self) {
insert_stacks_status_unknown(self.stacks).await;
insert_deployments_status_unknown(self.deployments).await;
insert_repos_status_unknown(self.repos).await;
}
}
fn filter_volumes(
server: &Server,
mut stats: SystemStats,

View File

@@ -6,11 +6,11 @@ use crate::state::{db_client, server_status_cache};
#[instrument(level = "debug")]
pub async fn record_server_stats(ts: i64) {
let status = server_status_cache().get_list().await;
let status = server_status_cache().get_values().await;
let records = status
.into_iter()
.filter_map(|status| {
let stats = status.stats.as_ref()?;
let stats = status.system_stats.as_ref()?;
let TotalDiskUsage {
used_gb: disk_used_gb,

View File

@@ -0,0 +1,205 @@
use std::{sync::Arc, time::Duration};
use anyhow::{Context, anyhow};
use bytes::Bytes;
use cache::CloneCache;
use periphery_client::api;
use resolver_api::HasResponse;
use serde::{Serialize, de::DeserializeOwned};
use serde_json::json;
use serror::deserialize_error_bytes;
use tokio::sync::mpsc::{self, Sender};
use tracing::warn;
use transport::{
MessageState,
bytes::{from_transport_bytes, to_transport_bytes},
};
use uuid::Uuid;
use crate::{
connection::{PeripheryConnection, PeripheryConnectionArgs},
state::periphery_connections,
};
pub mod terminal;
pub type ConnectionChannels = CloneCache<Uuid, Sender<Bytes>>;
#[derive(Debug)]
pub struct PeripheryClient {
pub id: String,
channels: Arc<ConnectionChannels>,
}
impl PeripheryClient {
pub async fn new(
args: PeripheryConnectionArgs<'_>,
insecure_tls: bool,
// deprecated.
passkey: &str,
) -> anyhow::Result<PeripheryClient> {
let connections = periphery_connections();
let id = args.id.to_string();
// Spawn client side connection if one doesn't exist.
let Some(connection) = connections.get(&id).await else {
if args.address.is_none() {
return Err(anyhow!("Server {id} is not connected"));
}
let channels = args
.spawn_client_connection(
id.clone(),
insecure_tls,
passkey.to_string(),
)
.await?;
return Ok(PeripheryClient { id, channels });
};
// Ensure the connection args are unchanged.
if args.matches(&connection.args) {
return Ok(PeripheryClient {
id,
channels: connection.channels.clone(),
});
}
// The args have changed.
if args.address.is_none() {
// Periphery -> Core connection
// Remove this connection, wait and see if client reconnects
connections.remove(&id).await;
tokio::time::sleep(Duration::from_millis(500)).await;
let connection = connections
.get(&id)
.await
.with_context(|| format!("Server {id} is not connected"))?;
Ok(PeripheryClient {
id,
channels: connection.channels.clone(),
})
} else {
// Core -> Periphery connection
let channels = args
.spawn_client_connection(
id.clone(),
insecure_tls,
passkey.to_string(),
)
.await?;
Ok(PeripheryClient { id, channels })
}
}
pub async fn cleanup(self) -> Option<Arc<PeripheryConnection>> {
periphery_connections().remove(&self.id).await
}
#[tracing::instrument(level = "debug", skip(self))]
pub async fn health_check(&self) -> anyhow::Result<()> {
self.request(api::GetHealth {}).await?;
Ok(())
}
#[tracing::instrument(
name = "PeripheryRequest",
skip(self),
level = "debug"
)]
pub async fn request<T>(
&self,
request: T,
) -> anyhow::Result<T::Response>
where
T: std::fmt::Debug + Serialize + HasResponse,
T::Response: DeserializeOwned,
{
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
// Polls connected 3 times before bailing
connection.bail_if_not_connected().await?;
let id = Uuid::new_v4();
let (response_sender, mut response_receiever) =
mpsc::channel(1000);
self.channels.insert(id, response_sender).await;
let req_type = T::req_type();
let data = serde_json::to_vec(&json!({
"type": req_type,
"params": request
}))
.context("Failed to serialize request to bytes")?;
if let Err(e) = connection
.send(to_transport_bytes(data, id, MessageState::Request))
.await
.context("Failed to send request over channel")
{
// cleanup
self.channels.remove(&id).await;
return Err(e);
}
// Poll for the associated response
loop {
let next = tokio::select! {
msg = response_receiever.recv() => msg,
// Periphery will send InProgress every 5s to avoid timeout
_ = tokio::time::sleep(Duration::from_secs(10)) => {
return Err(anyhow!("Response timed out"));
}
};
let bytes = match next {
Some(bytes) => bytes,
None => {
return Err(anyhow!(
"Sender dropped before response was recieved"
));
}
};
let (state, data) = match from_transport_bytes(bytes) {
Ok((data, _, state)) if !data.is_empty() => (state, data),
// Ignore no data cases
Ok(_) => continue,
Err(e) => {
warn!(
"Server {} | Received invalid message | {e:#}",
self.id
);
continue;
}
};
match state {
// TODO: improve the allocation in .to_vec
MessageState::Successful => {
// cleanup
self.channels.remove(&id).await;
return serde_json::from_slice(&data)
.context("Failed to parse successful response");
}
MessageState::Failed => {
// cleanup
self.channels.remove(&id).await;
return Err(deserialize_error_bytes(&data));
}
MessageState::InProgress => continue,
// Shouldn't be received by this receiver
other => {
// TODO: delete log
warn!(
"Server {} | Got other message over over response channel: {other:?}",
self.id
);
continue;
}
}
}
}
}

View File

@@ -0,0 +1,210 @@
use std::{
pin::Pin,
sync::Arc,
task::{self, Poll},
};
use anyhow::Context;
use bytes::Bytes;
use cache::CloneCache;
use futures::Stream;
use periphery_client::api::terminal::{
ConnectContainerExec, ConnectTerminal, END_OF_OUTPUT,
ExecuteContainerExec, ExecuteTerminal,
};
use tokio::sync::mpsc::{Receiver, Sender, channel};
use transport::bytes::data_from_transport_bytes;
use uuid::Uuid;
use crate::{
periphery::PeripheryClient, state::periphery_connections,
};
impl PeripheryClient {
pub async fn connect_terminal(
&self,
terminal: String,
) -> anyhow::Result<(Uuid, Sender<Bytes>, Receiver<Bytes>)> {
tracing::trace!(
"request | type: ConnectTerminal | terminal name: {terminal}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let id = self
.request(ConnectTerminal { terminal })
.await
.context("Failed to create terminal connection")?;
let (sender, receiever) = channel(1024);
connection.channels.insert(id, sender).await;
Ok((id, connection.sender.clone(), receiever))
}
pub async fn connect_container_exec(
&self,
container: String,
shell: String,
) -> anyhow::Result<(Uuid, Sender<Bytes>, Receiver<Bytes>)> {
tracing::trace!(
"request | type: ConnectContainerExec | container name: {container} | shell: {shell}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let id = self
.request(ConnectContainerExec { container, shell })
.await
.context("Failed to create container exec connection")?;
let (sender, receiever) = channel(1000);
connection.channels.insert(id, sender).await;
Ok((id, connection.sender.clone(), receiever))
}
/// Executes command on specified terminal,
/// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
/// sentinal value as the expected final line of the stream.
///
/// Example final line:
/// ```text
/// __KOMODO_EXIT_CODE:0
/// ```
///
/// This means the command exited with code 0 (success).
///
/// If this value is NOT the final item before stream closes, it means
/// the terminal exited mid command, before giving status. Example: running `exit`.
#[tracing::instrument(level = "debug", skip(self))]
pub async fn execute_terminal(
&self,
terminal: String,
command: String,
) -> anyhow::Result<
impl Stream<Item = anyhow::Result<Bytes>> + 'static,
> {
tracing::trace!(
"sending request | type: ExecuteTerminal | terminal name: {terminal} | command: {command}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let id = self
.request(ExecuteTerminal { terminal, command })
.await
.context("Failed to create execute terminal connection")?;
let (sender, receiver) = channel(1000);
connection.channels.insert(id, sender).await;
Ok(ReceiverStream {
id,
receiver,
channels: connection.channels.clone(),
})
}
/// Executes command on specified container,
/// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
/// sentinal value as the expected final line of the stream.
///
/// Example final line:
/// ```text
/// __KOMODO_EXIT_CODE:0
/// ```
///
/// This means the command exited with code 0 (success).
///
/// If this value is NOT the final item before stream closes, it means
/// the container shell exited mid command, before giving status. Example: running `exit`.
#[tracing::instrument(level = "debug", skip(self))]
pub async fn execute_container_exec(
&self,
container: String,
shell: String,
command: String,
) -> anyhow::Result<ReceiverStream> {
tracing::trace!(
"sending request | type: ExecuteContainerExec | container: {container} | shell: {shell} | command: {command}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let id = self
.request(ExecuteContainerExec {
container,
shell,
command,
})
.await
.context("Failed to create execute terminal connection")?;
let (sender, receiver) = channel(1000);
connection.channels.insert(id, sender).await;
Ok(ReceiverStream {
id,
receiver,
channels: connection.channels.clone(),
})
}
}
pub struct ReceiverStream {
id: Uuid,
channels: Arc<CloneCache<Uuid, Sender<Bytes>>>,
receiver: Receiver<Bytes>,
}
impl Stream for ReceiverStream {
type Item = anyhow::Result<Bytes>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Self::Item>> {
match self
.receiver
.poll_recv(cx)
.map(|bytes| bytes.map(data_from_transport_bytes))
{
Poll::Ready(Some(Ok(bytes))) if bytes == END_OF_OUTPUT => {
self.cleanup();
Poll::Ready(None)
}
Poll::Ready(Some(Ok(bytes))) => Poll::Ready(Some(Ok(bytes))),
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
Poll::Ready(None) => {
self.cleanup();
Poll::Ready(None)
}
Poll::Pending => Poll::Pending,
}
}
}
impl ReceiverStream {
fn cleanup(&self) {
// Not the prettiest but it should be fine
let channels = self.channels.clone();
let id = self.id;
tokio::spawn(async move {
channels.remove(&id).await;
});
}
}

View File

@@ -267,7 +267,7 @@ impl super::KomodoResource for Deployment {
);
return Ok(());
}
let periphery = match periphery_client(&server) {
let periphery = match periphery_client(&server).await {
Ok(periphery) => periphery,
Err(e) => {
// This case won't ever happen, as periphery_client only fallible if the server is disabled.

View File

@@ -79,6 +79,7 @@ pub use refresh::{
pub use repo::{
refresh_repo_state_cache, spawn_repo_state_refresh_loop,
};
pub use server::{rotate_server_keys, update_server_public_key};
/// Implement on each Komodo resource for common methods
pub trait KomodoResource {
@@ -240,10 +241,10 @@ pub async fn get<T: KomodoResource>(
T::coll()
.find_one(id_or_name_filter(id_or_name))
.await
.context("failed to query db for resource")?
.context("Failed to query db for resource")?
.with_context(|| {
format!(
"did not find any {} matching {id_or_name}",
"Did not find any {} matching {id_or_name}",
T::resource_type()
)
})
@@ -396,7 +397,7 @@ pub async fn list_full_for_user_using_document<T: KomodoResource>(
)
.await
.with_context(|| {
format!("failed to pull {}s from mongo", T::resource_type())
format!("Failed to pull {}s from mongo", T::resource_type())
})
}
@@ -416,7 +417,7 @@ pub async fn get_id_to_resource_map<T: KomodoResource>(
let res = find_collect(T::coll(), None, None)
.await
.with_context(|| {
format!("failed to pull {}s from mongo", T::resource_type())
format!("Failed to pull {}s from mongo", T::resource_type())
})?
.into_iter()
.filter(|resource| {
@@ -459,6 +460,7 @@ pub async fn get_id_to_resource_map<T: KomodoResource>(
pub async fn create<T: KomodoResource>(
name: &str,
mut config: T::PartialConfig,
info: Option<T::Info>,
user: &User,
) -> serror::Result<Resource<T::Config, T::Info>> {
if !T::user_can_create(user) {
@@ -517,7 +519,11 @@ pub async fn create<T: KomodoResource>(
template: Default::default(),
tags: Default::default(),
config: config.into(),
info: T::default_info().await?,
info: if let Some(info) = info {
info
} else {
T::default_info().await?
},
base_permission: PermissionLevel::None.into(),
updated_at: start_ts,
};
@@ -526,11 +532,11 @@ pub async fn create<T: KomodoResource>(
.insert_one(&resource)
.await
.with_context(|| {
format!("failed to add {} to db", T::resource_type())
format!("Failed to add {} to db", T::resource_type())
})?
.inserted_id
.as_object_id()
.context("inserted_id is not ObjectId")?
.context("Inserted_id is not ObjectId")?
.to_string();
let resource = get::<T>(&resource_id).await?;
@@ -547,18 +553,18 @@ pub async fn create<T: KomodoResource>(
let mut update = make_update(target, T::create_operation(), user);
update.start_ts = start_ts;
update.push_simple_log(
&format!("create {}", T::resource_type()),
&format!("Create {}", T::resource_type()),
format!(
"created {}\nid: {}\nname: {}",
"Created {}\nid: {}\nname: {}",
T::resource_type(),
resource.id,
resource.name
),
);
update.push_simple_log(
"config",
"Config",
serde_json::to_string_pretty(&resource.config)
.context("failed to serialize resource config to JSON")?,
.context("Failed to serialize resource config to JSON")?,
);
T::post_create(&resource, &mut update).await?;
@@ -754,7 +760,7 @@ pub async fn remove_tag_from_all<T: KomodoResource>(
T::coll()
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } })
.await
.context("failed to remove tag from resources")?;
.context("Failed to remove tag from resources")?;
Ok(())
}
@@ -871,7 +877,7 @@ pub async fn delete<T: KomodoResource>(
async {
if let Err(e) = T::post_delete(&resource, &mut update).await {
update
.push_error_log("post delete", format_serror(&e.into()));
.push_error_log("Post delete", format_serror(&e.into()));
}
},
delete_from_alerters::<T>(&resource.id)
@@ -944,7 +950,7 @@ where
.await
{
warn!(
"failed to delete_many permissions matching target {target:?} | {e:#}"
"Failed to delete_many permissions matching target {target:?} | {e:#}"
);
}
}
@@ -979,7 +985,7 @@ where
},
)
.await
.context("failed to remove resource from users recently viewed")
.context("Failed to remove resource from users recently viewed")
{
warn!("{e:#}");
}

View File

@@ -774,6 +774,13 @@ async fn validate_config(
));
}
}
Execution::RotateAllServerKeys(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot trigger rotate all server keys"
));
}
}
Execution::Sleep(_) => {}
}
}

View File

@@ -169,7 +169,7 @@ impl super::KomodoResource for Repo {
}
let server = super::get::<Server>(&repo.config.server_id).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
match periphery
.request(DeleteRepo {

View File

@@ -1,30 +1,41 @@
use std::str::FromStr;
use anyhow::Context;
use database::mungos::mongodb::{Collection, bson::doc};
use database::mungos::mongodb::{
Collection,
bson::{doc, oid::ObjectId},
};
use indexmap::IndexSet;
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant, komodo_timestamp,
optional_string,
permission::SpecificPermission,
resource::Resource,
server::{
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
ServerListItem, ServerListItemInfo, ServerQuerySpecifics,
ServerInfo, ServerListItem, ServerListItemInfo,
ServerQuerySpecifics,
},
update::Update,
user::User,
};
use periphery_client::api;
use crate::{
config::core_config,
helpers::query::get_system_info,
helpers::periphery_client,
monitor::update_cache_for_server,
state::{action_states, db_client, server_status_cache},
state::{
action_states, db_client, periphery_connections,
server_status_cache,
},
};
impl super::KomodoResource for Server {
type Config = ServerConfig;
type PartialConfig = PartialServerConfig;
type ConfigDiff = ServerConfigDiff;
type Info = ();
type Info = ServerInfo;
type ListItem = ServerListItem;
type QuerySpecifics = ServerQuerySpecifics;
@@ -57,11 +68,21 @@ impl super::KomodoResource for Server {
server: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let status = server_status_cache().get(&server.id).await;
let (terminals_disabled, container_exec_disabled) =
get_system_info(&server)
.await
.map(|i| (i.terminals_disabled, i.container_exec_disabled))
.unwrap_or((true, true));
let (
version,
public_key,
terminals_disabled,
container_exec_disabled,
) = match status.as_ref().and_then(|s| s.periphery_info.as_ref())
{
Some(info) => (
Some(info.version.clone()),
Some(info.public_key.clone()),
info.terminals_disabled,
info.container_exec_disabled,
),
None => (None, None, true, true),
};
ServerListItem {
name: server.name,
id: server.id,
@@ -70,12 +91,11 @@ impl super::KomodoResource for Server {
resource_type: ResourceTargetVariant::Server,
info: ServerListItemInfo {
state: status.as_ref().map(|s| s.state).unwrap_or_default(),
version: status
.map(|s| s.version.clone())
.unwrap_or(String::from("Unknown")),
region: server.config.region,
address: server.config.address,
external_address: server.config.external_address,
address: optional_string(server.config.address),
external_address: optional_string(
server.config.external_address,
),
send_unreachable_alerts: server
.config
.send_unreachable_alerts,
@@ -85,6 +105,11 @@ impl super::KomodoResource for Server {
send_version_mismatch_alerts: server
.config
.send_version_mismatch_alerts,
version,
public_key,
attempted_public_key: optional_string(
server.info.attempted_public_key,
),
terminals_disabled,
container_exec_disabled,
},
@@ -212,6 +237,14 @@ impl super::KomodoResource for Server {
.await
.context("failed to close deleted server alerts")?;
let _ = db_client()
.onboarding_keys
.update_many(
doc! { "onboarded": &id },
doc! { "$pull": { "onboarded": &id } },
)
.await;
Ok(())
}
@@ -219,7 +252,41 @@ impl super::KomodoResource for Server {
resource: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
server_status_cache().remove(&resource.id).await;
tokio::join!(
server_status_cache().remove(&resource.id),
periphery_connections().remove(&resource.id),
);
Ok(())
}
}
pub async fn update_server_public_key(
server_id: &str,
public_key: &str,
) -> anyhow::Result<()> {
db_client()
.servers
.update_one(
doc! { "_id": ObjectId::from_str(server_id)? },
doc! { "$set": { "info.public_key": public_key } },
)
.await
.context("Failed to update Server public key on database")?;
Ok(())
}
/// Rotates Periphery keys and updates
/// `server.info.public_key` to match new public key.
/// Does so without making a specific update.
///
/// Errors if the Periphery client cannot be created, the rotation
/// request fails, or persisting the new public key fails.
pub async fn rotate_server_keys(
  server: &Server,
) -> anyhow::Result<()> {
  // `server` is already a reference — pass it directly
  // (avoids a needless `&&Server` re-borrow, matching other call sites).
  let periphery = periphery_client(server).await?;
  let public_key = periphery
    .request(api::keys::RotatePrivateKey {})
    .await
    .context("Failed to rotate Periphery private key")?
    .public_key;
  // Persist the fresh public key so future connections verify against it.
  update_server_public_key(&server.id, &public_key).await?;
  Ok(())
}

View File

@@ -327,7 +327,7 @@ impl super::KomodoResource for Stack {
return Ok(());
}
let periphery = match periphery_client(&server) {
let periphery = match periphery_client(&server).await {
Ok(periphery) => periphery,
Err(e) => {
// This case won't ever happen, as periphery_client is only fallible if the server is disabled.

View File

@@ -7,11 +7,12 @@ use komodo_client::{
user::User,
},
};
use periphery_client::{PeripheryClient, api::compose::*};
use periphery_client::api::compose::*;
use crate::{
helpers::{periphery_client, update::update_update},
monitor::update_cache_for_server,
periphery::PeripheryClient,
state::action_states,
};
@@ -55,7 +56,7 @@ pub async fn execute_compose<T: ExecuteCompose>(
// Send update here for frontend to recheck action state
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let periphery = periphery_client(&server).await?;
if !services.is_empty() {
update.logs.push(Log::simple(

View File

@@ -3,14 +3,17 @@ use std::str::FromStr;
use colored::Colorize;
use database::mungos::{
find::find_collect,
mongodb::bson::{Document, doc, oid::ObjectId, to_document},
mongodb::bson::{
Document, doc, oid::ObjectId, to_bson, to_document,
},
};
use futures::future::join_all;
use komodo_client::{
api::{
auth::SignUpLocalUser,
execute::{
BackupCoreDatabase, Execution, GlobalAutoUpdate, RunAction,
BackupCoreDatabase, Execution, GlobalAutoUpdate,
RotateAllServerKeys, RunAction,
},
write::{
CreateBuilder, CreateProcedure, CreateServer, CreateTag,
@@ -22,7 +25,7 @@ use komodo_client::{
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
komodo_timestamp,
procedure::{EnabledExecution, ProcedureConfig, ProcedureStage},
server::{PartialServerConfig, Server},
server::{PartialServerConfig, Server, ServerInfo},
sync::ResourceSync,
tag::TagColor,
update::Log,
@@ -106,9 +109,10 @@ pub async fn on_startup() {
tokio::join!(
in_progress_update_cleanup(),
open_alert_cleanup(),
clean_up_server_templates(),
ensure_first_server_and_builder(),
ensure_init_user_and_resources(),
clean_up_server_templates(),
init_server_info(),
);
}
@@ -198,51 +202,58 @@ async fn open_alert_cleanup() {
/// Ensures a default server / builder exists with the defined address
async fn ensure_first_server_and_builder() {
let config = core_config();
let Some(address) = config.first_server.clone() else {
if config.first_server_name.is_none()
&& config.first_server_address.is_none()
{
// If neither defined, early return
return;
};
}
// Maybe create first Server / Builder
let db = db_client();
let Ok(server) = db
.servers
.find_one(Document::new())
.await
.inspect_err(|e| error!("Failed to initialize 'first_server'. Failed to query db. {e:?}"))
// If any Server exists, exit early.
let Ok(None) =
db.servers.find_one(Document::new()).await.inspect_err(|e| {
error!(
"Failed to initialize first Server. Failed to query db. {e:?}"
)
})
else {
return;
};
let server = if let Some(server) = server {
server
} else {
match (CreateServer {
name: config.first_server_name.clone(),
config: PartialServerConfig {
address: Some(address),
enabled: Some(true),
..Default::default()
},
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await
{
Ok(server) => server,
Err(e) => {
error!(
"Failed to initialize 'first_server'. Failed to CreateServer. {:#}",
e.error
);
return;
}
// Use the same name for Server and Builder
let name = config.first_server_name.as_deref().unwrap_or("Local");
let server = match (CreateServer {
name: name.to_string(),
config: PartialServerConfig {
address: config.first_server_address.clone(),
enabled: Some(true),
..Default::default()
},
public_key: None,
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await
{
Ok(server) => server,
Err(e) => {
error!(
"Failed to initialize first Server. Failed to CreateServer. {:#}",
e.error
);
return;
}
};
// If any builder exists, exit early.
let Ok(None) = db.builders
.find_one(Document::new()).await
.inspect_err(|e| error!("Failed to initialize 'first_builder' | Failed to query db | {e:?}")) else {
return;
};
if let Err(e) = (CreateBuilder {
name: config.first_server_name.clone(),
// Same name as Server
name: name.to_string(),
config: PartialBuilderConfig::Server(
PartialServerBuilderConfig {
server_id: Some(server.id),
@@ -413,6 +424,58 @@ async fn ensure_init_user_and_resources() {
);
}
}.await;
// RotateAllServerKeys
async {
let Ok(config) = ProcedureConfig::builder()
.stages(vec![ProcedureStage {
name: String::from("Stage 1"),
enabled: true,
executions: vec![
EnabledExecution {
execution: Execution::RotateAllServerKeys(RotateAllServerKeys {}),
enabled: true
}
]
}])
.schedule(String::from("Every day at 06:00"))
.build()
.inspect_err(|e| error!("Failed to initialize Server key rotation Procedure | Failed to build Procedure | {e:?}")) else {
return;
};
let procedure = match (CreateProcedure {
name: String::from("Rotate Server Keys"),
config: config.into(),
})
.resolve(&write_args)
.await
{
Ok(procedure) => procedure,
Err(e) => {
error!(
"Failed to initialize Server key rotation Procedure | Failed to create Procedure | {:#}",
e.error
);
return;
}
};
if let Err(e) = (UpdateResourceMeta {
target: ResourceTarget::Procedure(procedure.id),
tags: Some(default_tags.clone()),
description: Some(String::from(
"Rotates all currently connected Server keys.",
)),
template: None,
})
.resolve(&write_args)
.await
{
warn!(
"Failed to update Server key rotation Procedure tags / description | {:#}",
e.error
);
}
}.await;
}
/// v1.17.5 removes the ServerTemplate resource.
@@ -457,3 +520,25 @@ async fn clean_up_server_templates() {
},
);
}
/// v2 adds ServerInfo to ServerSchema.
/// Need to ensure it is initialized from null.
async fn init_server_info() {
let default_server_info = match to_bson(&ServerInfo::default()) {
Ok(info) => info,
Err(e) => {
error!("Failed to serialize ServerInfo to bson | {e:?}");
return;
}
};
if let Err(e) = db_client()
.servers
.update_many(
doc! { "info": null },
doc! { "$set": { "info": default_server_info } },
)
.await
{
error!("Failed to migrate ServerInfo to v2 | {e:?}");
}
}

View File

@@ -5,7 +5,7 @@ use std::{
use anyhow::Context;
use arc_swap::ArcSwap;
use database::Client;
use cache::CloneCache;
use komodo_client::entities::{
action::ActionState,
build::BuildState,
@@ -22,9 +22,9 @@ use octorust::auth::{
use crate::{
auth::jwt::JwtClient,
config::core_config,
connection::PeripheryConnections,
helpers::{
action_state::ActionStates, all_resources::AllResourcesById,
cache::Cache,
},
monitor::{
CachedDeploymentStatus, CachedRepoStatus, CachedServerStatus,
@@ -32,9 +32,9 @@ use crate::{
},
};
static DB_CLIENT: OnceLock<Client> = OnceLock::new();
static DB_CLIENT: OnceLock<database::Client> = OnceLock::new();
pub fn db_client() -> &'static Client {
pub fn db_client() -> &'static database::Client {
DB_CLIENT
.get()
.expect("db_client accessed before initialized")
@@ -42,11 +42,13 @@ pub fn db_client() -> &'static Client {
/// Must be called in app startup sequence.
pub async fn init_db_client() {
let client = Client::new(&core_config().database)
let client = database::Client::new(&core_config().database)
.await
.context("failed to initialize database client")
.unwrap();
DB_CLIENT.set(client).expect("db_clint");
DB_CLIENT
.set(client)
.expect("db_client initialized more than once");
}
pub fn jwt_client() -> &'static JwtClient {
@@ -60,6 +62,94 @@ pub fn jwt_client() -> &'static JwtClient {
})
}
/// server id => connection
pub fn periphery_connections() -> &'static PeripheryConnections {
static CONNECTIONS: OnceLock<PeripheryConnections> =
OnceLock::new();
CONNECTIONS.get_or_init(Default::default)
}
/// Lazily initialized global [`ActionStates`].
pub fn action_states() -> &'static ActionStates {
  static STATES: OnceLock<ActionStates> = OnceLock::new();
  STATES.get_or_init(ActionStates::default)
}
/// Server id => cached server status.
pub type ServerStatusCache =
  CloneCache<String, Arc<CachedServerStatus>>;
/// Lazily initialized global [`ServerStatusCache`].
pub fn server_status_cache() -> &'static ServerStatusCache {
  static CACHE: OnceLock<ServerStatusCache> = OnceLock::new();
  CACHE.get_or_init(ServerStatusCache::default)
}
/// Stack id => cached stack status history.
pub type StackStatusCache =
  CloneCache<String, Arc<History<CachedStackStatus, StackState>>>;
/// Lazily initialized global [`StackStatusCache`].
pub fn stack_status_cache() -> &'static StackStatusCache {
  static CACHE: OnceLock<StackStatusCache> = OnceLock::new();
  CACHE.get_or_init(StackStatusCache::default)
}
/// Cache of ids to status
pub type DeploymentStatusCache = CloneCache<
  String,
  Arc<History<CachedDeploymentStatus, DeploymentState>>,
>;
/// Lazily initialized global [`DeploymentStatusCache`].
pub fn deployment_status_cache() -> &'static DeploymentStatusCache {
  static CACHE: OnceLock<DeploymentStatusCache> = OnceLock::new();
  CACHE.get_or_init(DeploymentStatusCache::default)
}
/// Build id => current build state.
pub type BuildStateCache = CloneCache<String, BuildState>;
/// Lazily initialized global [`BuildStateCache`].
pub fn build_state_cache() -> &'static BuildStateCache {
  static CACHE: OnceLock<BuildStateCache> = OnceLock::new();
  CACHE.get_or_init(BuildStateCache::default)
}
/// Repo id => cached repo status.
pub type RepoStatusCache = CloneCache<String, Arc<CachedRepoStatus>>;
/// Lazily initialized global [`RepoStatusCache`].
pub fn repo_status_cache() -> &'static RepoStatusCache {
  static CACHE: OnceLock<RepoStatusCache> = OnceLock::new();
  CACHE.get_or_init(RepoStatusCache::default)
}
/// Repo id => current repo state.
pub type RepoStateCache = CloneCache<String, RepoState>;
/// Lazily initialized global [`RepoStateCache`].
pub fn repo_state_cache() -> &'static RepoStateCache {
  static CACHE: OnceLock<RepoStateCache> = OnceLock::new();
  CACHE.get_or_init(RepoStateCache::default)
}
/// Procedure id => current procedure state.
pub type ProcedureStateCache = CloneCache<String, ProcedureState>;
/// Lazily initialized global [`ProcedureStateCache`].
pub fn procedure_state_cache() -> &'static ProcedureStateCache {
  static CACHE: OnceLock<ProcedureStateCache> = OnceLock::new();
  CACHE.get_or_init(ProcedureStateCache::default)
}
/// Action id => current action state.
pub type ActionStateCache = CloneCache<String, ActionState>;
/// Lazily initialized global [`ActionStateCache`].
pub fn action_state_cache() -> &'static ActionStateCache {
  static CACHE: OnceLock<ActionStateCache> = OnceLock::new();
  CACHE.get_or_init(ActionStateCache::default)
}
/// Lazily initialized, atomically swappable snapshot of all resources by id.
pub fn all_resources_cache() -> &'static ArcSwap<AllResourcesById> {
  static ALL_RESOURCES_CACHE: OnceLock<ArcSwap<AllResourcesById>> =
    OnceLock::new();
  ALL_RESOURCES_CACHE.get_or_init(|| Default::default())
}
pub fn github_client()
-> Option<&'static HashMap<String, octorust::Client>> {
static GITHUB_CLIENT: OnceLock<
@@ -129,83 +219,3 @@ pub fn github_client()
})
.as_ref()
}
pub fn action_states() -> &'static ActionStates {
static ACTION_STATES: OnceLock<ActionStates> = OnceLock::new();
ACTION_STATES.get_or_init(ActionStates::default)
}
/// Cache of ids to status
pub type DeploymentStatusCache = Cache<
String,
Arc<History<CachedDeploymentStatus, DeploymentState>>,
>;
/// Cache of ids to status
pub fn deployment_status_cache() -> &'static DeploymentStatusCache {
static DEPLOYMENT_STATUS_CACHE: OnceLock<DeploymentStatusCache> =
OnceLock::new();
DEPLOYMENT_STATUS_CACHE.get_or_init(Default::default)
}
pub type StackStatusCache =
Cache<String, Arc<History<CachedStackStatus, StackState>>>;
pub fn stack_status_cache() -> &'static StackStatusCache {
static STACK_STATUS_CACHE: OnceLock<StackStatusCache> =
OnceLock::new();
STACK_STATUS_CACHE.get_or_init(Default::default)
}
pub type ServerStatusCache = Cache<String, Arc<CachedServerStatus>>;
pub fn server_status_cache() -> &'static ServerStatusCache {
static SERVER_STATUS_CACHE: OnceLock<ServerStatusCache> =
OnceLock::new();
SERVER_STATUS_CACHE.get_or_init(Default::default)
}
pub type RepoStatusCache = Cache<String, Arc<CachedRepoStatus>>;
pub fn repo_status_cache() -> &'static RepoStatusCache {
static REPO_STATUS_CACHE: OnceLock<RepoStatusCache> =
OnceLock::new();
REPO_STATUS_CACHE.get_or_init(Default::default)
}
pub type BuildStateCache = Cache<String, BuildState>;
pub fn build_state_cache() -> &'static BuildStateCache {
static BUILD_STATE_CACHE: OnceLock<BuildStateCache> =
OnceLock::new();
BUILD_STATE_CACHE.get_or_init(Default::default)
}
pub type RepoStateCache = Cache<String, RepoState>;
pub fn repo_state_cache() -> &'static RepoStateCache {
static REPO_STATE_CACHE: OnceLock<RepoStateCache> = OnceLock::new();
REPO_STATE_CACHE.get_or_init(Default::default)
}
pub type ProcedureStateCache = Cache<String, ProcedureState>;
pub fn procedure_state_cache() -> &'static ProcedureStateCache {
static PROCEDURE_STATE_CACHE: OnceLock<ProcedureStateCache> =
OnceLock::new();
PROCEDURE_STATE_CACHE.get_or_init(Default::default)
}
pub type ActionStateCache = Cache<String, ActionState>;
pub fn action_state_cache() -> &'static ActionStateCache {
static ACTION_STATE_CACHE: OnceLock<ActionStateCache> =
OnceLock::new();
ACTION_STATE_CACHE.get_or_init(Default::default)
}
pub fn all_resources_cache() -> &'static ArcSwap<AllResourcesById> {
static ALL_RESOURCES: OnceLock<ArcSwap<AllResourcesById>> =
OnceLock::new();
ALL_RESOURCES.get_or_init(Default::default)
}

View File

@@ -144,6 +144,7 @@ pub trait ExecuteResourceSync: ResourceSyncTrait {
let id = match crate::resource::create::<Self>(
&resource.name,
resource.config,
None,
sync_user(),
)
.await

View File

@@ -692,6 +692,7 @@ impl ResourceSyncTrait for Procedure {
Execution::ClearRepoCache(_) => {}
Execution::BackupCoreDatabase(_) => {}
Execution::GlobalAutoUpdate(_) => {}
Execution::RotateAllServerKeys(_) => {}
Execution::Sleep(_) => {}
}
}
@@ -822,6 +823,7 @@ impl ExecuteResourceSync for Procedure {
let id = match crate::resource::create::<Procedure>(
&name,
resource.config.clone(),
None,
sync_user(),
)
.await

View File

@@ -810,7 +810,8 @@ impl ToToml for Procedure {
| Execution::Sleep(_)
| Execution::ClearRepoCache(_)
| Execution::BackupCoreDatabase(_)
| Execution::GlobalAutoUpdate(_) => {}
| Execution::GlobalAutoUpdate(_)
| Execution::RotateAllServerKeys(_) => {}
}
}
}

View File

@@ -21,7 +21,7 @@ pub async fn terminal(
) -> impl IntoResponse {
ws.on_upgrade(|socket| async move {
let Some((mut client_socket, user)) =
super::ws_login(socket).await
super::user_ws_login(socket).await
else {
return;
};

View File

@@ -22,7 +22,7 @@ pub async fn terminal(
) -> impl IntoResponse {
ws.on_upgrade(|socket| async move {
let Some((mut client_socket, user)) =
super::ws_login(socket).await
super::user_ws_login(socket).await
else {
return;
};

View File

@@ -1,23 +1,29 @@
use crate::{
auth::{auth_api_key_check_enabled, auth_jwt_check_enabled},
helpers::query::get_user,
periphery::PeripheryClient,
state::periphery_connections,
};
use anyhow::anyhow;
use axum::{
Router,
extract::ws::{CloseFrame, Message, Utf8Bytes, WebSocket},
extract::ws::{Message, WebSocket},
routing::get,
};
use bytes::Bytes;
use futures::{SinkExt, StreamExt};
use komodo_client::{
entities::{server::Server, user::User},
ws::WsLoginMessage,
};
use tokio::net::TcpStream;
use tokio_tungstenite::{
MaybeTlsStream, WebSocketStream, tungstenite,
};
use periphery_client::api::terminal::DisconnectTerminal;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio_util::sync::CancellationToken;
use transport::{
MessageState,
bytes::{data_from_transport_bytes, to_transport_bytes},
};
use uuid::Uuid;
mod container;
mod deployment;
@@ -27,6 +33,9 @@ mod update;
pub fn router() -> Router {
Router::new()
// Periphery facing
.route("/periphery", get(crate::connection::server::handler))
// User facing
.route("/update", get(update::handler))
.route("/terminal", get(terminal::handler))
.route("/container/terminal", get(container::terminal))
@@ -35,7 +44,7 @@ pub fn router() -> Router {
}
#[instrument(level = "debug")]
async fn ws_login(
async fn user_ws_login(
mut socket: WebSocket,
) -> Option<(WebSocket, User)> {
let login_msg = match socket.recv().await {
@@ -131,7 +140,8 @@ async fn handle_container_terminal(
container: String,
shell: String,
) {
let periphery = match crate::helpers::periphery_client(server) {
let periphery = match crate::helpers::periphery_client(server).await
{
Ok(periphery) => periphery,
Err(e) => {
debug!("couldn't get periphery | {e:#}");
@@ -145,34 +155,40 @@ async fn handle_container_terminal(
trace!("connecting to periphery container exec websocket");
let periphery_socket = match periphery
.connect_container_exec(container, shell)
.await
{
Ok(ws) => ws,
Err(e) => {
debug!(
"Failed connect to periphery container exec websocket | {e:#}"
);
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
let (periphery_connection_id, periphery_sender, periphery_receiver) =
match periphery.connect_container_exec(container, shell).await {
Ok(ws) => ws,
Err(e) => {
debug!(
"Failed connect to periphery container exec websocket | {e:#}"
);
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connected to periphery container exec websocket");
core_periphery_forward_ws(client_socket, periphery_socket).await
forward_ws_channel(
periphery,
client_socket,
periphery_connection_id,
periphery_sender,
periphery_receiver,
)
.await
}
async fn core_periphery_forward_ws(
async fn forward_ws_channel(
periphery: PeripheryClient,
client_socket: axum::extract::ws::WebSocket,
periphery_socket: WebSocketStream<MaybeTlsStream<TcpStream>>,
periphery_connection_id: Uuid,
periphery_sender: Sender<Bytes>,
mut periphery_receiver: Receiver<Bytes>,
) {
let (mut periphery_send, mut periphery_receive) =
periphery_socket.split();
let (mut core_send, mut core_receive) = client_socket.split();
let cancel = CancellationToken::new();
@@ -188,15 +204,42 @@ async fn core_periphery_forward_ws(
}
};
match res {
Some(Ok(msg)) => {
if let Err(e) =
periphery_send.send(axum_to_tungstenite(msg)).await
Some(Ok(Message::Binary(data))) => {
if let Err(e) = periphery_sender
.send(to_transport_bytes(
data.into(),
periphery_connection_id,
MessageState::Terminal,
))
.await
{
debug!("Failed to send terminal message | {e:?}",);
cancel.cancel();
break;
};
}
Some(Ok(Message::Text(data))) => {
let data: Bytes = data.into();
if let Err(e) = periphery_sender
.send(to_transport_bytes(
data.into(),
periphery_connection_id,
MessageState::Terminal,
))
.await
{
debug!("Failed to send terminal message | {e:?}",);
cancel.cancel();
break;
};
}
// TODO: Disconnect from periphery when client disconnects
Some(Ok(Message::Close(_frame))) => {
cancel.cancel();
break;
}
// Ignore
Some(Ok(_)) => {}
Some(Err(_e)) => {
cancel.cancel();
break;
@@ -212,31 +255,23 @@ async fn core_periphery_forward_ws(
let periphery_to_core = async {
loop {
let res = tokio::select! {
res = periphery_receive.next() => res,
res = periphery_receiver.recv() => res.map(data_from_transport_bytes),
_ = cancel.cancelled() => {
trace!("periphery to core read: cancelled from inside");
break;
}
};
match res {
Some(Ok(msg)) => {
if let Err(e) =
core_send.send(tungstenite_to_axum(msg)).await
Some(Ok(bytes)) => {
if let Err(e) = core_send.send(Message::Binary(bytes)).await
{
debug!("{e:?}");
cancel.cancel();
break;
};
}
Some(Err(e)) => {
let _ = core_send
.send(Message::text(format!(
"ERROR: Failed to receive message from periphery | {e:?}"
)))
.await;
cancel.cancel();
break;
}
// No data, ignore
Some(Err(_e)) => {}
None => {
let _ = core_send.send(Message::text("STREAM EOF")).await;
cancel.cancel();
@@ -247,44 +282,21 @@ async fn core_periphery_forward_ws(
};
tokio::join!(core_to_periphery, periphery_to_core);
}
fn axum_to_tungstenite(msg: Message) -> tungstenite::Message {
match msg {
Message::Text(text) => tungstenite::Message::Text(
// TODO: improve this conversion cost from axum ws library
tungstenite::Utf8Bytes::from(text.to_string()),
),
Message::Binary(bytes) => tungstenite::Message::Binary(bytes),
Message::Ping(bytes) => tungstenite::Message::Ping(bytes),
Message::Pong(bytes) => tungstenite::Message::Pong(bytes),
Message::Close(close_frame) => {
tungstenite::Message::Close(close_frame.map(|cf| {
tungstenite::protocol::CloseFrame {
code: cf.code.into(),
reason: tungstenite::Utf8Bytes::from(cf.reason.to_string()),
}
}))
}
}
}
fn tungstenite_to_axum(msg: tungstenite::Message) -> Message {
match msg {
tungstenite::Message::Text(text) => {
Message::Text(Utf8Bytes::from(text.to_string()))
}
tungstenite::Message::Binary(bytes) => Message::Binary(bytes),
tungstenite::Message::Ping(bytes) => Message::Ping(bytes),
tungstenite::Message::Pong(bytes) => Message::Pong(bytes),
tungstenite::Message::Close(close_frame) => {
Message::Close(close_frame.map(|cf| CloseFrame {
code: cf.code.into(),
reason: Utf8Bytes::from(cf.reason.to_string()),
}))
}
tungstenite::Message::Frame(_) => {
unreachable!()
}
// Cleanup
if let Err(e) = periphery
.request(DisconnectTerminal {
id: periphery_connection_id,
})
.await
{
warn!(
"Failed to disconnect Periphery terminal forwarding | {e:#}",
)
}
if let Some(connection) =
periphery_connections().get(&periphery.id).await
{
connection.channels.remove(&periphery_connection_id).await;
}
}

View File

@@ -26,7 +26,7 @@ pub async fn terminal(
) -> impl IntoResponse {
ws.on_upgrade(|socket| async move {
let Some((mut client_socket, user)) =
super::ws_login(socket).await
super::user_ws_login(socket).await
else {
return;
};

View File

@@ -10,7 +10,7 @@ use komodo_client::{
use crate::{
helpers::periphery_client, permission::get_check_permissions,
ws::core_periphery_forward_ws,
ws::forward_ws_channel,
};
#[instrument(name = "ConnectTerminal", skip(ws))]
@@ -22,7 +22,7 @@ pub async fn handler(
) -> impl IntoResponse {
ws.on_upgrade(|socket| async move {
let Some((mut client_socket, user)) =
super::ws_login(socket).await
super::user_ws_login(socket).await
else {
return;
};
@@ -45,7 +45,7 @@ pub async fn handler(
}
};
let periphery = match periphery_client(&server) {
let periphery = match periphery_client(&server).await {
Ok(periphery) => periphery,
Err(e) => {
debug!("couldn't get periphery | {e:#}");
@@ -59,21 +59,31 @@ pub async fn handler(
trace!("connecting to periphery terminal websocket");
let periphery_socket =
match periphery.connect_terminal(terminal).await {
Ok(ws) => ws,
Err(e) => {
debug!("Failed connect to periphery terminal | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
let (
periphery_connection_id,
periphery_sender,
periphery_receiver,
) = match periphery.connect_terminal(terminal).await {
Ok(ws) => ws,
Err(e) => {
debug!("Failed connect to periphery terminal | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connected to periphery terminal websocket");
core_periphery_forward_ws(client_socket, periphery_socket).await
forward_ws_channel(
periphery,
client_socket,
periphery_connection_id,
periphery_sender,
periphery_receiver,
)
.await
})
}

View File

@@ -23,7 +23,7 @@ pub async fn handler(ws: WebSocketUpgrade) -> impl IntoResponse {
// handle http -> ws upgrade
ws.on_upgrade(|socket| async move {
let Some((socket, user)) = super::ws_login(socket).await else {
let Some((socket, user)) = super::user_ws_login(socket).await else {
return
};

View File

@@ -21,11 +21,13 @@ environment_file.workspace = true
environment.workspace = true
interpolate.workspace = true
formatting.workspace = true
transport.workspace = true
response.workspace = true
command.workspace = true
config.workspace = true
logger.workspace = true
cache.workspace = true
noise.workspace = true
git.workspace = true
# mogh
serror = { workspace = true, features = ["axum"] }
@@ -34,12 +36,13 @@ derive_variants.workspace = true
resolver_api.workspace = true
run_command.workspace = true
# external
pin-project-lite.workspace = true
serde_yaml_ng.workspace = true
tokio-stream.workspace = true
portable-pty.workspace = true
shell-escape.workspace = true
axum-server.workspace = true
urlencoding.workspace = true
serde_json.workspace = true
serde_yaml_ng.workspace = true
tokio-util.workspace = true
arc-swap.workspace = true
colored.workspace = true
@@ -57,5 +60,3 @@ axum.workspace = true
clap.workspace = true
envy.workspace = true
uuid.workspace = true
rand.workspace = true
shell-escape.workspace = true

View File

@@ -1,6 +1,6 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
FROM rust:1.89.0-bullseye AS builder
FROM rust:1.90.0-bullseye AS builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -24,8 +24,13 @@ COPY --from=builder /builder/target/release/periphery /usr/local/bin/periphery
EXPOSE 8120
# Can mount config file to /config/*config*.toml and it will be picked up.
ENV PERIPHERY_CONFIG_PATHS="/config"
# Change the default in container to /config/keys to match Core
ENV PERIPHERY_PRIVATE_KEY="file:/config/keys/periphery.key"
CMD [ "periphery" ]
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Periphery"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -27,8 +27,13 @@ RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/periphery && rm -r /app/arch
EXPOSE 8120
# Can mount config file to /config/*config*.toml and it will be picked up.
ENV PERIPHERY_CONFIG_PATHS="/config"
# Change the default in container to /config/keys to match Core
ENV PERIPHERY_PRIVATE_KEY="file:/config/keys/periphery.key"
CMD [ "periphery" ]
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Periphery"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -16,8 +16,13 @@ COPY --from=binaries /periphery /usr/local/bin/periphery
EXPOSE 8120
# Can mount config file to /config/*config*.toml and it will be picked up.
ENV PERIPHERY_CONFIG_PATHS="/config"
# Change the default in container to /config/keys to match Core
ENV PERIPHERY_PRIVATE_KEY="file:/config/keys/periphery.key"
CMD [ "periphery" ]
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
LABEL org.opencontainers.image.description="Komodo Periphery"
LABEL org.opencontainers.image.licenses=GPL-3.0
LABEL org.opencontainers.image.licenses="GPL-3.0"

View File

@@ -25,12 +25,14 @@ use resolver_api::Resolve;
use tokio::fs;
use crate::{
build::{parse_build_args, parse_secret_args, write_dockerfile},
config::periphery_config,
docker::docker_login,
helpers::{parse_extra_args, parse_labels},
helpers::{format_extra_args, format_labels},
};
mod helpers;
use helpers::*;
impl Resolve<super::Args> for GetDockerfileContentsOnHost {
#[instrument(name = "GetDockerfileContentsOnHost", level = "debug")]
async fn resolve(
@@ -278,11 +280,11 @@ impl Resolve<super::Args> for build::Build {
let command_secret_args =
parse_secret_args(&secret_args, &build_path).await?;
let labels = parse_labels(
let labels = format_labels(
&environment_vars_from_str(labels).context("Invalid labels")?,
);
let extra_args = parse_extra_args(extra_args);
let extra_args = format_extra_args(extra_args);
let buildx = if *use_buildx { " buildx" } else { "" };

View File

@@ -1,26 +1,55 @@
use std::{fmt::Write, path::PathBuf};
use std::{
fmt::Write,
path::{Path, PathBuf},
};
use anyhow::{Context, anyhow};
use command::run_komodo_command;
use formatting::format_serror;
use komodo_client::entities::{
RepoExecutionArgs, repo::Repo, stack::Stack,
FileContents, RepoExecutionArgs,
repo::Repo,
stack::{Stack, StackRemoteFileContents},
to_path_compatible_name,
update::Log,
};
use periphery_client::api::{
compose::ComposeUpResponse, git::PullOrCloneRepo,
};
use resolver_api::Resolve;
use tokio::fs;
use crate::config::periphery_config;
use crate::{
api::Args, config::periphery_config, docker::docker_login,
};
pub mod up;
pub mod write;
use super::docker_compose;
pub fn docker_compose() -> &'static str {
if periphery_config().legacy_compose_cli {
"docker-compose"
} else {
"docker compose"
/// Logs into the Stack's configured image registry, when both a
/// registry provider and account are set. A failed login is pushed
/// to `logs` as an error entry rather than propagated, so callers
/// continue either way.
pub async fn maybe_login_registry(
  stack: &Stack,
  registry_token: Option<String>,
  logs: &mut Vec<Log>,
) {
  let provider = &stack.config.registry_provider;
  let account = &stack.config.registry_account;
  // No registry configured => nothing to do.
  if provider.is_empty() || account.is_empty() {
    return;
  }
  let login_result =
    docker_login(provider, account, registry_token.as_deref())
      .await
      .with_context(|| {
        format!("Domain: '{}' | Account: '{}'", provider, account)
      })
      .context("Failed to login to image registry");
  if let Err(e) = login_result {
    logs.push(Log::error(
      "Login to Registry",
      format_serror(&e.into()),
    ));
  }
}
@@ -53,7 +82,7 @@ pub fn env_file_args(
Ok(res)
}
pub async fn down(
pub async fn compose_down(
project: &str,
services: &[String],
res: &mut ComposeUpResponse,
@@ -89,6 +118,7 @@ pub async fn pull_or_clone_stack(
stack: &Stack,
repo: Option<&Repo>,
git_token: Option<String>,
req_args: &Args,
) -> anyhow::Result<PathBuf> {
if stack.config.files_on_host {
return Err(anyhow!(
@@ -135,9 +165,78 @@ pub async fn pull_or_clone_stack(
skip_secret_interp: Default::default(),
replacers: Default::default(),
}
.resolve(&crate::api::Args)
.resolve(req_args)
.await
.map_err(|e| e.error)?;
Ok(root)
}
/// Validates a Stack's file dependencies under `run_directory`:
/// records any missing files on `res.missing_files` (with an error log),
/// then reads each existing file and appends its contents to
/// `res.file_contents`. Read failures are logged and recorded on
/// `res.remote_errors`.
pub async fn validate_files(
  stack: &Stack,
  run_directory: &Path,
  res: &mut ComposeUpResponse,
) {
  // Pair each declared file dependency with its resolved absolute path.
  let file_paths = stack
    .all_file_dependencies()
    .into_iter()
    .map(|file| {
      (
        // This will remove any intermediate unneeded '/./' in the path
        run_directory
          .join(&file.path)
          .components()
          .collect::<PathBuf>(),
        file,
      )
    })
    .collect::<Vec<_>>();
  // First validate no missing files
  for (full_path, file) in &file_paths {
    if !full_path.exists() {
      res.missing_files.push(file.path.clone());
    }
  }
  // All missing files are collected before bailing, so the error
  // message lists every one of them at once.
  if !res.missing_files.is_empty() {
    res.logs.push(Log::error(
      "Validate Files",
      format_serror(
        &anyhow!(
          "Missing files: {}", res.missing_files.join(", ")
        )
        .context("Ensure the run_directory and all file paths are correct.")
        .context("A file doesn't exist after writing stack.")
        .into(),
      ),
    ));
    return;
  }
  // Read each file's contents into the response.
  for (full_path, file) in file_paths {
    let file_contents =
      match fs::read_to_string(&full_path).await.with_context(|| {
        format!("Failed to read file contents at {full_path:?}")
      }) {
        Ok(res) => res,
        Err(e) => {
          let error = format_serror(&e.into());
          res
            .logs
            .push(Log::error("Read Compose File", error.clone()));
          // This should only happen for repo stacks, ie remote error
          res.remote_errors.push(FileContents {
            path: file.path,
            contents: error,
          });
          // NOTE(review): returns on the first read failure, so any
          // remaining files are not read/reported — confirm this
          // early-exit (vs. continuing the loop) is intentional.
          return;
        }
      };
    res.file_contents.push(StackRemoteFileContents {
      path: file.path,
      contents: file_contents,
      services: file.services,
      requires: file.requires,
    });
  }
}

Some files were not shown because too many files have changed in this diff Show More