Compare commits

...

139 Commits

Author SHA1 Message Date
mbecker20
c5401de1c5 tweak user level tab view 2025-10-28 11:42:29 -07:00
mbecker20
7a3d9e0ef6 tweak description 2025-10-28 00:32:39 -07:00
mbecker20
595e3ece42 deploy 2.0.0-dev-86 2025-10-27 21:05:13 -07:00
mbecker20
a3bc895755 fix terminal disconnect 2025-10-27 21:04:46 -07:00
mbecker20
3e3def03ec terminal init properly lexes init command 2025-10-27 21:01:15 -07:00
mbecker20
bc672d9649 deploy 2.0.0-dev-85 2025-10-27 20:01:18 -07:00
mbecker20
ea6dee4d51 clippy lint 2025-10-27 19:13:43 -07:00
mbecker20
b985f18c74 deploy 2.0.0-dev-84 2025-10-27 19:12:54 -07:00
mbecker20
45909b2f04 pid1 reaper doesn't work, init: true should be required in compose 2025-10-27 19:06:50 -07:00
mbecker20
2b5a54ce89 deploy 2.0.0-dev-83 2025-10-27 18:31:56 -07:00
mbecker20
a18f33b95e formalize the terminal message variants 2025-10-27 18:31:30 -07:00
mbecker20
f35b00ea95 bump clap dependency 2025-10-27 16:18:30 -07:00
mbecker20
70fab08520 clean up terminal modules 2025-10-27 16:17:20 -07:00
mbecker20
0331780a5f rename variables shell -> command 2025-10-27 11:08:57 -07:00
mbecker20
06cdfd2bbc Terminal -> Terminals tabs 2025-10-27 02:53:06 -07:00
mbecker20
1555202569 Create Terminal don't auto set request after changed 2025-10-27 02:42:06 -07:00
mbecker20
5139622aad deploy 2.0.0-dev-82 2025-10-27 02:28:48 -07:00
mbecker20
61ce2ee3db improve new terminal 2025-10-27 02:04:15 -07:00
mbecker20
3171c14f2b comment on spawn process reaper 2025-10-27 01:41:06 -07:00
mbecker20
521db748d8 deploy 2.0.0-dev-81 2025-10-27 01:27:42 -07:00
mbecker20
35bf224080 deploy 2.0.0-dev-80 2025-10-27 01:21:44 -07:00
mbecker20
e0b31cfe51 CreateTerminal only shows resources which are actually available to connect to 2025-10-27 00:44:56 -07:00
mbecker20
0a890078b0 deploy 2.0.0-dev-79 2025-10-27 00:38:08 -07:00
mbecker20
df97ced7a4 deploy 2.0.0-dev-78 2025-10-27 00:03:26 -07:00
mbecker20
d4e5e2e6d8 add execute_<>_terminal convenience methods 2025-10-26 23:35:17 -07:00
mbecker20
19aa60dcb5 deploy 2.0.0-dev-77 2025-10-26 23:21:15 -07:00
mbecker20
fc19c53e6f deploy 2.0.0-dev-76 2025-10-26 23:00:59 -07:00
mbecker20
4f0af960db Big Terminal refactor + most commands run directly / bypass 'sh -c "..."' 2025-10-26 23:00:35 -07:00
mbecker20
e2ec5258fb add "New" kb shortcut 2025-10-23 23:55:24 -07:00
mbecker20
49b6545a02 reorder cli command list 2025-10-23 23:53:10 -07:00
mbecker20
0aabaa9e62 deploy 2.0.0-dev-75 2025-10-23 12:23:10 -07:00
mbecker20
dc65986eab binaries still built with bullseye for compat, but final images use trixie 2025-10-23 12:22:50 -07:00
mbecker20
1d8f28437d km attach <CONTAINER> 2025-10-23 12:22:02 -07:00
mbecker20
c1502e89c2 deploy 2.0.0-dev-74 2025-10-23 11:51:40 -07:00
mbecker20
0bd15fc442 ResourceQuery.names supports names or ids 2025-10-23 11:23:37 -07:00
mbecker20
5a3621b02e km exec 2025-10-23 01:55:50 -07:00
mbecker20
38192e2dac deploy 2.0.0-dev-73 2025-10-23 00:56:15 -07:00
mbecker20
5d271d5547 use Ping timeout to handle reconnect if for some reason network cuts but ws doesn't receive Close 2025-10-23 00:55:51 -07:00
mbecker20
11fb67a35b ssh use cancel token so stdout.write_all isn't cancelled mid-write, which leads to undefined behavior 2025-10-23 00:14:17 -07:00
mbecker20
a80499dcc4 improve stack config files responsive 2025-10-22 19:02:30 -07:00
mbecker20
8c76b8487f alert responsive, better Server terminal disabled 2025-10-22 13:48:08 -07:00
mbecker20
2b32d9042a deploy 2.0.0-dev-72 2025-10-22 01:00:19 -07:00
mbecker20
dc48f1f2ca deploy 2.0.0-dev-71 2025-10-22 00:50:02 -07:00
mbecker20
8e7b7bdcf1 deploy 2.0.0-dev-70 2025-10-22 00:44:54 -07:00
mbecker20
f11d64f72e add 'init' param to make 'execute_terminal' in single call possible 2025-10-22 00:44:33 -07:00
mbecker20
2ffae85180 dashboard table section headers link to resources page 2025-10-22 00:03:12 -07:00
mbecker20
bd79d0f1e0 km ssh <SERVER> [COMMAND] -n [NAME] 2025-10-21 23:55:36 -07:00
mbecker20
e890b1f675 deploy 2.0.0-dev-69 2025-10-21 23:32:18 -07:00
mbecker20
3b7de25c30 Shift + X - Terminals, Shift + N - New (Resource, Terminal) 2025-10-21 16:11:27 -07:00
mbecker20
793bb99f31 nav to terminal on create 2025-10-21 16:00:50 -07:00
mbecker20
d465c9f273 deploy 2.0.0-dev-68 2025-10-21 15:51:38 -07:00
mbecker20
ce641a8974 terminal page 2025-10-21 15:51:18 -07:00
mbecker20
1b89ceb122 deploy 2.0.0-dev-67 2025-10-21 02:50:21 -07:00
mbecker20
2dbc011d26 remove unneeded log on client terminal disconnect 2025-10-21 02:33:19 -07:00
mbecker20
246da88ae1 deploy 2.0.0-dev-66 2025-10-21 02:29:12 -07:00
mbecker20
a8c16f64b1 km ssh 2025-10-21 02:28:42 -07:00
mbecker20
a5b711a348 stack tabs localstorage increment 2025-10-20 20:35:08 -07:00
mbecker20
9666e9ad83 Fix monitoring table with proper server version component 2025-10-20 03:01:07 -07:00
mbecker20
7479640c73 add hover information for mysterious server header icons 2025-10-20 02:53:18 -07:00
mbecker20
4823825035 give websocket indicator info on hover 2025-10-20 02:35:12 -07:00
mbecker20
23897a7acf clippy 2025-10-20 02:16:52 -07:00
mbecker20
20d5588b5c deploy 2.0.0-dev-65 2025-10-20 02:15:15 -07:00
mbecker20
f7e15ccde5 progress on terminals page 2025-10-20 02:14:51 -07:00
mbecker20
cf7623b1fc combine all resources / table view into dashboard 2025-10-20 01:40:27 -07:00
mbecker20
d3c464c05d start Terminals management page 2025-10-20 00:42:45 -07:00
mbecker20
5c9d416aa4 prog on docs update 2025-10-19 23:33:41 -07:00
mbecker20
aabcd88312 update connect-servers docs 2025-10-19 23:07:50 -07:00
mbecker20
9d2624c6bc clarify root directory in periphery config file 2025-10-19 23:07:19 -07:00
mbecker20
ee11fb0b6c clean up setup script 2025-10-19 23:07:02 -07:00
mbecker20
45adfbddd0 mounting custom CA 2025-10-19 23:06:48 -07:00
mbecker20
d26d035dc6 clean up docs intro 2025-10-19 22:03:17 -07:00
mbecker20
e673ba0adf deploy 2.0.0-dev-64 2025-10-19 21:48:15 -07:00
mbecker20
f876facfa7 improve git status message / failure propagation 2025-10-19 21:47:29 -07:00
mbecker20
3a47d57478 container class px-[1.2rem] 2025-10-19 20:31:40 -07:00
mbecker20
a707028277 responsive tweaks 2025-10-19 20:07:30 -07:00
mbecker20
0c6276c677 fix Resources / Containers mobile 2025-10-19 19:51:28 -07:00
mbecker20
fc9c6706f1 keep more descriptive settings header mobile 2025-10-19 13:24:44 -07:00
mbecker20
7674269ce9 fix user dropdown not showing username mobile 2025-10-19 13:11:34 -07:00
mbecker20
3b511c5adc improve server terminal mobile responsiveness 2025-10-19 13:00:30 -07:00
mbecker20
87221a10e9 fix mobile ContainerTerminal responsiveness 2025-10-19 12:56:11 -07:00
mbecker20
450cb6a148 fix stack config files mobile responsiveness 2025-10-19 12:46:51 -07:00
mbecker20
f252cefb21 responsive server docker tab 2025-10-19 12:37:26 -07:00
mbecker20
7855e9d688 run dkf 2025-10-19 12:30:59 -07:00
mbecker20
feb263c15f more type safe tabs 2025-10-19 12:27:55 -07:00
mbecker20
4f8d1c22cc rest of tabs also use mobile friendly 2025-10-19 12:11:11 -07:00
mbecker20
60bd47834e deploy 2.0.0-dev-63 2025-10-19 11:48:09 -07:00
mbecker20
4d632a6b61 improve resources mobile tabs responsiveness 2025-10-19 11:47:47 -07:00
mbecker20
381dd76723 deploy 2.0.0-dev-62 2025-10-19 01:37:10 -07:00
mbecker20
077e28a5fe fix ConfigList too wide on mobile 2025-10-19 01:36:50 -07:00
mbecker20
6b02aaed7d hide core pubkey copy if origin not https 2025-10-19 01:28:45 -07:00
mbecker20
e466944c05 improve mobile settings view 2025-10-19 01:24:41 -07:00
mbecker20
8ff94b7465 deploy 2.0.0-dev-61 2025-10-19 00:35:26 -07:00
mbecker20
b17df5ed7b show host public ip 2025-10-19 00:34:52 -07:00
mbecker20
207dc30206 cli is distroless, no shell / update-ca-certificates 2025-10-18 22:12:44 -07:00
mbecker20
c3eb386bdb fix copy entrypoint 2025-10-18 22:07:16 -07:00
mbecker20
4279e46892 deploy 2.0.0-dev-60 2025-10-18 12:59:19 -07:00
mbecker20
8d3d2fee12 use entrypoint scripts to make update-ca-certificates consistent when using custom CMD 2025-10-18 12:58:55 -07:00
mbecker20
1df36c4266 deploy 2.0.0-dev-59 2025-10-18 11:36:07 -07:00
mbecker20
36f7ad33c7 core and periphery images auto run update-ca-certificates on start, only need to mount in. 2025-10-18 11:35:45 -07:00
mbecker20
ec34b2c139 deploy 2.0.0-dev-58 2025-10-18 11:02:11 -07:00
mbecker20
d14c28d1f2 new otel instrumentation 2025-10-18 11:01:47 -07:00
mbecker20
68f7a0e9ce all info menu to top of settings 2025-10-18 00:45:59 -07:00
mbecker20
50f0376f0a Add Core title and public key to top of Settings 2025-10-18 00:01:41 -07:00
mbecker20
bbd53747ad fix km ps -h description 2025-10-17 17:17:18 -07:00
mbecker20
6a2adf1f83 tweak logs 2025-10-16 01:06:37 -07:00
mbecker20
128b15b94f deploy 2.0.0-dev-57 2025-10-16 00:59:46 -07:00
mbecker20
8d74b377b7 more otel refinements 2025-10-16 00:59:20 -07:00
mbecker20
d7e972e5c6 stack ui doesn't show project missing when deploying 2025-10-15 23:49:26 -07:00
mbecker20
e5cb4aac5a Fix: Webhook triggered checks linked repo branch for build, stack, sync 2025-10-15 18:06:43 -07:00
mbecker20
d0f62f8326 rework tracing events / improve opentelemetry output 2025-10-15 01:41:18 -07:00
mbecker20
47c4091a4b onboarding key uses recognizable key 2025-10-14 16:57:35 -07:00
mbecker20
973480e2b3 remove all the unnecessary instrument debug 2025-10-14 00:33:53 -07:00
mbecker20
b9e1cc87d2 remove instrument from validate_cancel_repo_build 2025-10-13 23:52:55 -07:00
mbecker20
05d20c8603 deploy 2.0.0-dev-56 2025-10-13 22:05:07 -07:00
mbecker20
fe2d68a001 fix config loading 2025-10-13 22:04:42 -07:00
mbecker20
26fd5b2a6d deploy 2.0.0-dev-55 2025-10-13 20:30:40 -07:00
mbecker20
76457bcb61 apply env / shell interpolation as *final* config loading stage, to include env vars. 2025-10-13 20:26:13 -07:00
mbecker20
ebd2c2238d bump deps 2025-10-13 19:51:05 -07:00
mbecker20
b7fc1bef7b refine default env 2025-10-13 13:53:12 -07:00
mbecker20
50b9f2e1bf deploy 2.0.0-dev-54 2025-10-13 13:06:23 -07:00
mbecker20
41ce86f6ab deploy 2.0.0-dev-53 2025-10-12 20:00:47 -07:00
mbecker20
7a21c01e52 tweak 2025-10-12 19:59:09 -07:00
mbecker20
e63e282510 small clean up 2025-10-12 19:56:15 -07:00
mbecker20
5456b36c18 deploy 2.0.0-dev-52 2025-10-12 13:55:39 -07:00
mbecker20
fcfb58a7e9 periphery with server disabled can initialize core public key file 2025-10-12 13:55:15 -07:00
mbecker20
2203004a74 move periphery in memory state to state.rs 2025-10-12 13:15:52 -07:00
mbecker20
996fb49823 periphery server_enabled /version route 2025-10-12 12:56:29 -07:00
mbecker20
35d22c77a2 Core add non authed /version route 2025-10-12 12:55:14 -07:00
mbecker20
44ab89600f Simplify Option + Result into one encoding layer 2025-10-12 03:24:00 -07:00
mbecker20
0900e48cb8 ntfy / pushover url interpolation 2025-10-12 01:34:07 -07:00
mbecker20
c530a46a27 deploy 2.0.0-dev-51 2025-10-12 01:09:35 -07:00
mbecker20
f69c8db3ea pass through whether Periphery docker daemon connection is ok 2025-10-12 01:08:45 -07:00
mbecker20
48f2f651e1 periphery runs with logs if couldn't connect to docker daemon 2025-10-12 01:05:20 -07:00
mbecker20
bdb5b4185e rename some websocket fields 2025-10-12 00:28:55 -07:00
mbecker20
42a7b8c19b move connection queries to periphery_client 2025-10-12 00:08:59 -07:00
mbecker20
ded17e4840 more encoding refine 2025-10-12 00:05:16 -07:00
mbecker20
80fb1e6889 more on encoding 2025-10-11 14:11:07 -07:00
mbecker20
1dc861f538 fix periphery keys init when config.private_key is not explicitly defined. 2025-10-11 12:00:29 -07:00
mbecker20
3da63395fd fix EncodedOption docs 2025-10-10 00:32:05 -07:00
309 changed files with 13407 additions and 8379 deletions

604
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -8,7 +8,7 @@ members = [
]
[workspace.package]
version = "2.0.0-dev-50"
version = "2.0.0-dev-86"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -39,9 +39,8 @@ noise = { path = "lib/noise" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.5.3", default-features = false }
slack = { version = "1.1.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
slack = { version = "2.0.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
async_timing_util = "1.1.0"
@@ -54,12 +53,11 @@ mungos = "3.2.2"
svi = "1.2.0"
# ASYNC
reqwest = { version = "0.12.23", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
tokio = { version = "1.47.1", features = ["full"] }
reqwest = { version = "0.12.24", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
tokio = { version = "1.48.0", features = ["full"] }
tokio-util = { version = "0.7.16", features = ["io", "codec"] }
tokio-stream = { version = "0.1.17", features = ["sync"] }
pin-project-lite = "0.2.16"
futures = "0.3.31"
futures-util = "0.3.31"
arc-swap = "1.7.1"
@@ -72,7 +70,7 @@ axum = { version = "0.8.6", features = ["ws", "json", "macros"] }
# SER/DE
ipnetwork = { version = "0.21.1", features = ["serde"] }
indexmap = { version = "2.11.4", features = ["serde"] }
indexmap = { version = "2.12.0", features = ["serde"] }
serde = { version = "1.0.227", features = ["derive"] }
strum = { version = "0.27.2", features = ["derive"] }
bson = { version = "2.15.0" } # must keep in sync with mongodb version
@@ -96,14 +94,14 @@ opentelemetry = "0.31.0"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.48", features = ["derive"] }
clap = { version = "4.5.50", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "10.0.0", features = ["aws_lc_rs"] } # locked back with octorust
rustls = { version = "0.23.32", features = ["aws-lc-rs"] }
jsonwebtoken = { version = "10.1.0", features = ["aws_lc_rs"] } # locked back with octorust
rustls = { version = "0.23.34", features = ["aws-lc-rs"] }
pem-rfc7468 = { version = "0.7.0", features = ["alloc"] }
openidconnect = "4.0.1"
urlencoding = "2.1.3"
@@ -120,13 +118,17 @@ spki = "0.7.3"
der = "0.7.10"
# SYSTEM
hickory-resolver = "0.25.2"
portable-pty = "0.9.0"
shell-escape = "0.1.5"
crossterm = "0.29.0"
bollard = "0.19.3"
sysinfo = "0.37.1"
shlex = "1.3.0"
# CLOUD
aws-config = "1.8.8"
aws-sdk-ec2 = "1.172.0"
aws-sdk-ec2 = "1.176.0"
aws-credential-types = "1.2.8"
## CRON
@@ -138,11 +140,10 @@ croner = "3.0.0"
# MISC
async-compression = { version = "0.4.32", features = ["tokio", "gzip"] }
derive_builder = "0.20.2"
shell-escape = "0.1.5"
comfy-table = "7.2.1"
typeshare = "1.0.4"
dashmap = "6.1.0"
wildcard = "0.3.0"
colored = "3.0.0"
regex = "1.11.3"
bytes = "1.10.1"
bytes = "1.10.1"
regex = "1.12.2"

4
action/deploy-fe.ts Normal file
View File

@@ -0,0 +1,4 @@
const cmd = "km run -y action deploy-komodo-fe-change";
new Deno.Command("bash", {
args: ["-c", cmd],
}).spawn();

View File

@@ -23,7 +23,9 @@ noise.workspace = true
# external
futures-util.workspace = true
comfy-table.workspace = true
tokio-util.workspace = true
serde_json.workspace = true
crossterm.workspace = true
serde_qs.workspace = true
wildcard.workspace = true
tracing.workspace = true

View File

@@ -61,7 +61,8 @@ async fn list_containers(
.map(|s| (s.id.clone(), s))
.collect::<HashMap<_, _>>())),
client.read(ListAllDockerContainers {
servers: Default::default()
servers: Default::default(),
containers: Default::default(),
}),
)?;
@@ -145,7 +146,8 @@ pub async fn inspect_container(
.map(|s| (s.id.clone(), s))
.collect::<HashMap<_, _>>())),
client.read(ListAllDockerContainers {
servers: Default::default()
servers: Default::default(),
containers: Default::default()
}),
)?;

View File

@@ -7,7 +7,7 @@ use komodo_client::{
api::read::{
ListActions, ListAlerters, ListBuilders, ListBuilds,
ListDeployments, ListProcedures, ListRepos, ListResourceSyncs,
ListSchedules, ListServers, ListStacks, ListTags,
ListSchedules, ListServers, ListStacks, ListTags, ListTerminals,
},
entities::{
ResourceTargetVariant,
@@ -35,6 +35,7 @@ use komodo_client::{
ResourceSyncListItem, ResourceSyncListItemInfo,
ResourceSyncState,
},
terminal::Terminal,
},
};
use serde::Serialize;
@@ -74,15 +75,18 @@ pub async fn handle(list: &args::list::List) -> anyhow::Result<()> {
Some(ListCommand::Syncs(filters)) => {
list_resources::<ResourceSyncListItem>(filters, false).await
}
Some(ListCommand::Terminals(filters)) => {
list_terminals(filters).await
}
Some(ListCommand::Schedules(filters)) => {
list_schedules(filters).await
}
Some(ListCommand::Builders(filters)) => {
list_resources::<BuilderListItem>(filters, false).await
}
Some(ListCommand::Alerters(filters)) => {
list_resources::<AlerterListItem>(filters, false).await
}
Some(ListCommand::Schedules(filters)) => {
list_schedules(filters).await
}
}
}
@@ -189,6 +193,26 @@ where
Ok(())
}
/// List all Terminals visible to the user and print them as a table.
///
/// NOTE: unlike the resource listers, tag / template filters from
/// `filters` are not applied here — only `format` and `links` are used.
async fn list_terminals(
  filters: &ResourceFilters,
) -> anyhow::Result<()> {
  let client = crate::command::komodo_client().await?;
  let terminals = client
    .read(ListTerminals {
      // No specific target: list terminals across all resources.
      target: None,
      // Resolve resource ids to human readable names for display.
      use_names: true,
    })
    .await?;
  // Skip printing entirely when there is nothing to show,
  // rather than emitting an empty table.
  if !terminals.is_empty() {
    print_items(terminals, filters.format, filters.links)?;
  }
  Ok(())
}
async fn list_schedules(
filters: &ResourceFilters,
) -> anyhow::Result<()> {
@@ -1134,6 +1158,28 @@ impl PrintTable for ResourceListItem<AlerterListItemInfo> {
}
}
/// Table rendering for [`Terminal`] rows in `km list terminals`.
impl PrintTable for Terminal {
  fn header(_links: bool) -> &'static [&'static str] {
    &["Terminal", "Target", "Command", "Size", "Created"]
  }
  fn row(self, _links: bool) -> Vec<comfy_table::Cell> {
    vec![
      Cell::new(self.name).add_attribute(Attribute::Bold),
      Cell::new(format!("{:?}", self.target)),
      Cell::new(self.command),
      // Keep one decimal place for sub-KiB sizes so they don't
      // render as "0 KiB"; round to whole KiB otherwise.
      // (The previous `{:.}` spec had an empty precision, which is
      // not a valid std::fmt format string.)
      Cell::new(if self.stored_size_kb < 1.0 {
        format!("{:.1} KiB", self.stored_size_kb)
      } else {
        format!("{:.0} KiB", self.stored_size_kb)
      }),
      Cell::new(
        format_timetamp(self.created_at)
          .unwrap_or_else(|_| String::from("Invalid created at")),
      ),
    ]
  }
}
impl PrintTable for Schedule {
fn header(links: bool) -> &'static [&'static str] {
if links {
@@ -1146,7 +1192,7 @@ impl PrintTable for Schedule {
let next_run = if let Some(ts) = self.next_scheduled_run {
Cell::new(
format_timetamp(ts)
.unwrap_or(String::from("Invalid next ts")),
.unwrap_or_else(|_| String::from("Invalid next ts")),
)
.add_attribute(Attribute::Bold)
} else {

View File

@@ -18,6 +18,7 @@ pub mod container;
pub mod database;
pub mod execute;
pub mod list;
pub mod terminal;
pub mod update;
async fn komodo_client() -> anyhow::Result<&'static KomodoClient> {

View File

@@ -0,0 +1,334 @@
use anyhow::{Context, anyhow};
use colored::Colorize;
use komodo_client::{
api::{
read::{ListAllDockerContainers, ListServers},
terminal::InitTerminal,
},
entities::{
config::cli::args::terminal::{Attach, Connect, Exec},
server::ServerQuery,
terminal::{
ContainerTerminalMode, TerminalRecreateMode,
TerminalResizeMessage, TerminalStdinMessage,
},
},
ws::terminal::TerminalWebsocket,
};
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
use tokio_util::sync::CancellationToken;
/// Connect an interactive terminal to a Server and forward local
/// stdin/stdout over the websocket until the session ends.
pub async fn handle_connect(args: &Connect) -> anyhow::Result<()> {
  let Connect {
    server,
    name,
    command,
    recreate,
  } = args;
  // `--recreate` forces a fresh terminal; otherwise the backend only
  // recreates it when the requested command differs from the existing one.
  let recreate_mode = if *recreate {
    TerminalRecreateMode::Always
  } else {
    TerminalRecreateMode::DifferentCommand
  };
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_server_terminal(
        server.to_string(),
        Some(name.to_string()),
        Some(InitTerminal {
          command: command.clone(),
          recreate: recreate_mode,
          mode: None,
        }),
      )
      .await
  })
  .await
}
/// Open an interactive exec-style terminal in a container
/// (`km exec`), forwarding local stdin/stdout over the websocket.
///
/// The server may be omitted on the CLI; in that case it is resolved
/// by locating which server runs the named container.
pub async fn handle_exec(
  Exec {
    server,
    container,
    shell,
    recreate,
  }: &Exec,
) -> anyhow::Result<()> {
  // Resolve the target server when the user didn't specify one.
  let server = get_server(server.clone(), container).await?;
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_container_terminal(
        server,
        container.to_string(),
        // No explicit terminal name: let the backend derive one.
        None,
        Some(InitTerminal {
          command: Some(shell.to_string()),
          // `--recreate` forces a fresh terminal; otherwise only
          // recreate when the command changed.
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          // Exec mode: run a new shell process inside the container.
          mode: Some(ContainerTerminalMode::Exec),
        }),
      )
      .await
  })
  .await
}
/// Attach to a container's primary process (`km attach <CONTAINER>`),
/// forwarding local stdin/stdout over the websocket.
///
/// The server may be omitted on the CLI; in that case it is resolved
/// by locating which server runs the named container.
pub async fn handle_attach(
  Attach {
    server,
    container,
    recreate,
  }: &Attach,
) -> anyhow::Result<()> {
  // Resolve the target server when the user didn't specify one.
  let server = get_server(server.clone(), container).await?;
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_container_terminal(
        server,
        container.to_string(),
        // No explicit terminal name: let the backend derive one.
        None,
        Some(InitTerminal {
          // Attach mode runs no new command, it joins the
          // container's existing process.
          command: None,
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          mode: Some(ContainerTerminalMode::Attach),
        }),
      )
      .await
  })
  .await
}
/// Resolve which Server a container runs on when the user did not
/// specify one explicitly.
///
/// Errors when no container matches, or when the name is ambiguous
/// across multiple servers (listing the candidates for the user).
async fn get_server(
  server: Option<String>,
  container: &str,
) -> anyhow::Result<String> {
  // An explicitly provided server always wins.
  if let Some(server) = server {
    return Ok(server);
  }
  let client = super::komodo_client().await?;
  let mut containers = client
    .read(ListAllDockerContainers {
      servers: Default::default(),
      containers: vec![container.to_string()],
    })
    .await?;
  match containers.len() {
    0 => Err(anyhow!(
      "Did not find any container matching {container}"
    )),
    1 => containers
      .pop()
      .context("Shouldn't happen")?
      .server_id
      .context("Container doesn't have server_id"),
    _ => {
      // Ambiguous: the same container name exists on several
      // servers. List them so the user can disambiguate.
      let server_ids = containers
        .into_iter()
        .flat_map(|container| container.server_id)
        .collect::<Vec<_>>();
      let servers = client
        .read(ListServers {
          query: ServerQuery::builder().names(server_ids).build(),
        })
        .await?
        .into_iter()
        .map(|server| format!("\t- {}", server.name.bold()))
        .collect::<Vec<_>>()
        .join("\n");
      Err(anyhow!(
        "Multiple containers matching '{}' on Servers:\n{servers}",
        container.bold(),
      ))
    }
  }
}
/// Drive a full-duplex terminal session: connect the websocket, put the
/// local terminal in raw mode, then concurrently forward stdin and
/// SIGWINCH resizes to the remote, and remote stdout to the local
/// terminal, until any direction finishes or errors.
///
/// Never returns normally — always exits the process at the end (see
/// comment before `std::process::exit`).
async fn handle_terminal_forwarding<
  C: Future<Output = anyhow::Result<TerminalWebsocket>>,
>(
  connect: C,
) -> anyhow::Result<()> {
  // Need to forward multiple sources (stdin + resize events) into the
  // single ws write half, so funnel them through one mpsc channel.
  let (write_tx, mut write_rx) =
    tokio::sync::mpsc::channel::<TerminalStdinMessage>(1024);
  // ================
  //  SETUP RESIZING
  // ================
  // Subscribe to SIGWINCH for resize messages
  let mut sigwinch = tokio::signal::unix::signal(
    tokio::signal::unix::SignalKind::window_change(),
  )
  .context("failed to register SIGWINCH handler")?;
  // Send first resize message, bailing if it fails to get the size.
  write_tx.send(resize_message()?).await?;
  // Shared cancellation: whichever forwarding loop exits first cancels
  // the others so tokio::join! below can complete.
  let cancel = CancellationToken::new();
  let forward_resize = async {
    while future_or_cancel(sigwinch.recv(), &cancel)
      .await
      .flatten()
      .is_some()
    {
      // Best-effort: a failed size read just skips this resize.
      if let Ok(resize_message) = resize_message()
        && write_tx.send(resize_message).await.is_err()
      {
        break;
      }
    }
    cancel.cancel();
  };
  let forward_stdin = async {
    let mut stdin = tokio::io::stdin();
    let mut buf = [0u8; 8192];
    while let Some(Ok(n)) =
      future_or_cancel(stdin.read(&mut buf), &cancel).await
    {
      // EOF
      if n == 0 {
        break;
      }
      let bytes = &buf[..n];
      // Check for disconnect sequence (alt + q)
      // [197, 147] is 0xC5 0x93, the UTF-8 encoding of 'œ'.
      if bytes == [197, 147] {
        break;
      }
      // Forward bytes
      if write_tx
        .send(TerminalStdinMessage::Forward(bytes.to_vec()))
        .await
        .is_err()
      {
        break;
      };
    }
    cancel.cancel();
  };
  // =====================
  //  CONNECT AND FORWARD
  // =====================
  let (mut ws_write, mut ws_read) = connect.await?.split();
  // Drain the funnel channel into the websocket write half.
  let forward_write = async {
    while let Some(message) =
      future_or_cancel(write_rx.recv(), &cancel).await.flatten()
    {
      if let Err(e) = ws_write.send_stdin_message(message).await {
        cancel.cancel();
        return Some(e);
      };
    }
    cancel.cancel();
    None
  };
  // Copy remote stdout bytes to the local terminal.
  let forward_read = async {
    let mut stdout = tokio::io::stdout();
    while let Some(msg) =
      future_or_cancel(ws_read.receive_stdout(), &cancel).await
    {
      let bytes = match msg {
        Ok(Some(bytes)) => bytes,
        // Clean remote close.
        Ok(None) => break,
        Err(e) => {
          cancel.cancel();
          return Some(e.context("Websocket read error"));
        }
      };
      if let Err(e) = stdout
        .write_all(&bytes)
        .await
        .context("Failed to write text to stdout")
      {
        cancel.cancel();
        return Some(e);
      }
      // Flush so interactive output appears immediately; flush
      // failures are ignored on purpose.
      let _ = stdout.flush().await;
    }
    cancel.cancel();
    None
  };
  // Raw mode only while forwarding; the guard restores the terminal
  // even if a loop errors.
  let guard = RawModeGuard::enable_raw_mode()?;
  let (_, _, write_error, read_error) = tokio::join!(
    forward_resize,
    forward_stdin,
    forward_write,
    forward_read
  );
  drop(guard);
  if let Some(e) = write_error {
    eprintln!("\nFailed to forward stdin | {e:#}");
  }
  if let Some(e) = read_error {
    eprintln!("\nFailed to forward stdout | {e:#}");
  }
  println!("\n\n{} {}", "connection".bold(), "closed".red().bold());
  // It doesn't seem to exit by itself after the raw mode stuff.
  std::process::exit(0)
}
/// Build a resize message from the current dimensions of the local
/// terminal, erroring if the size cannot be queried.
fn resize_message() -> anyhow::Result<TerminalStdinMessage> {
  let size = crossterm::terminal::size()
    .context("Failed to get terminal size")?;
  let (cols, rows) = size;
  let resize = TerminalResizeMessage { rows, cols };
  Ok(TerminalStdinMessage::Resize(resize))
}
/// RAII guard holding the local terminal in raw mode: raw mode is
/// enabled on construction and disabled again when the guard drops.
struct RawModeGuard;
impl RawModeGuard {
  /// Enable raw mode, returning a guard that restores cooked mode
  /// on drop.
  fn enable_raw_mode() -> anyhow::Result<Self> {
    crossterm::terminal::enable_raw_mode()
      .context("Failed to enable terminal raw mode")?;
    Ok(Self)
  }
}
impl Drop for RawModeGuard {
  fn drop(&mut self) {
    // Can't propagate from Drop — log the failure instead.
    if let Err(e) = crossterm::terminal::disable_raw_mode() {
      eprintln!("Failed to disable terminal raw mode | {e:?}");
    }
  }
}
/// Await `fut`, resolving to `None` if `cancel` fires first,
/// `Some(output)` otherwise.
///
/// `tokio::select!` without `biased` polls arms in random order, so
/// arm ordering here carries no priority.
async fn future_or_cancel<T, F: Future<Output = T>>(
  fut: F,
  cancel: &CancellationToken,
) -> Option<T> {
  tokio::select! {
    _ = cancel.cancelled() => None,
    output = fut => Some(output),
  }
}

View File

@@ -28,7 +28,7 @@ pub fn cli_env() -> &'static Env {
{
Ok(env) => env,
Err(e) => {
panic!("{e:?}");
panic!("{e:?}")
}
}
})
@@ -261,12 +261,18 @@ pub fn cli_config() -> &'static CliConfig {
.komodo_cli_logging_pretty
.unwrap_or(config.cli_logging.pretty),
location: false,
ansi: env
.komodo_cli_logging_ansi
.unwrap_or(config.cli_logging.ansi),
otlp_endpoint: env
.komodo_cli_logging_otlp_endpoint
.unwrap_or(config.cli_logging.otlp_endpoint),
opentelemetry_service_name: env
.komodo_cli_logging_opentelemetry_service_name
.unwrap_or(config.cli_logging.opentelemetry_service_name),
opentelemetry_scope_name: env
.komodo_cli_logging_opentelemetry_scope_name
.unwrap_or(config.cli_logging.opentelemetry_scope_name),
},
profile: config.profile,
}

View File

@@ -2,6 +2,7 @@
extern crate tracing;
use anyhow::Context;
use colored::Colorize;
use komodo_client::entities::config::cli::args;
use crate::config::cli_config;
@@ -41,12 +42,6 @@ async fn app() -> anyhow::Result<()> {
}
Ok(())
}
args::Command::Key { command } => {
noise::key::command::handle(command).await
}
args::Command::Database { command } => {
command::database::handle(command).await
}
args::Command::Container(container) => {
command::container::handle(container).await
}
@@ -60,6 +55,21 @@ async fn app() -> anyhow::Result<()> {
args::Command::Update { command } => {
command::update::handle(command).await
}
args::Command::Connect(connect) => {
command::terminal::handle_connect(connect).await
}
args::Command::Exec(exec) => {
command::terminal::handle_exec(exec).await
}
args::Command::Attach(attach) => {
command::terminal::handle_attach(attach).await
}
args::Command::Key { command } => {
noise::key::command::handle(command).await
}
args::Command::Database { command } => {
command::database::handle(command).await
}
}
}
@@ -69,7 +79,18 @@ async fn main() -> anyhow::Result<()> {
tokio::signal::unix::SignalKind::terminate(),
)?;
tokio::select! {
res = tokio::spawn(app()) => res?,
_ = term_signal.recv() => Ok(()),
res = tokio::spawn(app()) => match res {
Ok(Err(e)) => {
eprintln!("{}: {e}", "ERROR".red());
std::process::exit(1)
}
Err(e) => {
eprintln!("{}: {e}", "ERROR".red());
std::process::exit(1)
},
Ok(_) => {}
},
_ = term_signal.recv() => {},
}
Ok(())
}

View File

@@ -45,6 +45,7 @@ aws-credential-types.workspace = true
english-to-cron.workspace = true
openidconnect.workspace = true
jsonwebtoken.workspace = true
futures-util.workspace = true
axum-server.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
@@ -54,6 +55,7 @@ axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
serde_yaml_ng.workspace = true
serde_qs.workspace = true
typeshare.workspace = true
chrono-tz.workspace = true
indexmap.workspace = true
@@ -63,7 +65,6 @@ colored.workspace = true
dashmap.workspace = true
tracing.workspace = true
reqwest.workspace = true
futures.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
croner.workspace = true
@@ -74,12 +75,12 @@ rustls.workspace = true
bytes.workspace = true
tokio.workspace = true
serde.workspace = true
strum.workspace = true
regex.workspace = true
axum.workspace = true
toml.workspace = true
uuid.workspace = true
envy.workspace = true
rand.workspace = true
hmac.workspace = true
sha2.workspace = true
hex.workspace = true

View File

@@ -1,7 +1,7 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.90.0-bullseye AS core-builder
FROM rust:1.90.0-trixie AS core-builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -26,7 +26,7 @@ RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .
@@ -48,6 +48,9 @@ RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Hint at the port
EXPOSE 9120
@@ -55,7 +58,7 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
CMD [ "core" ]
CMD [ "/bin/bash", "-c", "update-ca-certificates && core" ]
# Label to prevent Komodo from stopping with StopAllContainers
LABEL komodo.skip="true"

View File

@@ -13,7 +13,7 @@ FROM ${AARCH64_BINARIES} AS aarch64
FROM ${FRONTEND_IMAGE} AS frontend
# Final Image
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .
@@ -28,7 +28,7 @@ COPY --from=x86_64 /core /app/core/linux/amd64
COPY --from=aarch64 /core /app/core/linux/arm64
RUN mv /app/core/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/core
# Same for util
# Same for km
COPY --from=x86_64 /km /app/km/linux/amd64
COPY --from=aarch64 /km /app/km/linux/arm64
RUN mv /app/km/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/km
@@ -44,6 +44,9 @@ RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Hint at the port
EXPOSE 9120
@@ -51,6 +54,7 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
ENTRYPOINT [ "entrypoint.sh" ]
CMD [ "core" ]
# Label to prevent Komodo from stopping with StopAllContainers

View File

@@ -14,7 +14,7 @@ COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link komodo_client && yarn && yarn build
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .
@@ -33,6 +33,9 @@ RUN mkdir /action-cache && \
cd /action-cache && \
deno install jsr:@std/yaml jsr:@std/toml
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
# Hint at the port
EXPOSE 9120
@@ -40,6 +43,7 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
ENTRYPOINT [ "entrypoint.sh" ]
CMD [ "core" ]
# Label to prevent Komodo from stopping with StopAllContainers

View File

@@ -4,7 +4,6 @@ use serde::Serialize;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
alert: &Alert,
@@ -50,7 +49,7 @@ pub async fn send_alert(
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | **{name}**{region} is now **reachable**\n{link}"
"{level} | **{name}**{region} is now **connected**\n{link}"
)
}
SeverityLevel::Critical => {
@@ -241,31 +240,33 @@ pub async fn send_alert(
}
AlertData::None {} => Default::default(),
};
if !content.is_empty() {
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
send_message(&url_interpolated, &content)
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})?;
if content.is_empty() {
return Ok(());
}
Ok(())
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
send_message(&url_interpolated, &content)
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})
}
async fn send_message(

View File

@@ -1,7 +1,7 @@
use anyhow::{Context, anyhow};
use database::mungos::{find::find_collect, mongodb::bson::doc};
use derive_variants::ExtractVariant;
use futures::future::join_all;
use futures_util::future::join_all;
use interpolate::Interpolator;
use komodo_client::entities::{
ResourceTargetVariant,
@@ -11,7 +11,6 @@ use komodo_client::entities::{
komodo_timestamp,
stack::StackState,
};
use tracing::Instrument;
use crate::helpers::query::get_variables_and_secrets;
use crate::helpers::{
@@ -24,40 +23,32 @@ mod ntfy;
mod pushover;
mod slack;
#[instrument(level = "debug")]
pub async fn send_alerts(alerts: &[Alert]) {
if alerts.is_empty() {
return;
}
let span =
info_span!("send_alerts", alerts = format!("{alerts:?}"));
async {
let Ok(alerters) = find_collect(
&db_client().alerters,
doc! { "config.enabled": true },
None,
)
.await
.inspect_err(|e| {
error!(
let Ok(alerters) = find_collect(
&db_client().alerters,
doc! { "config.enabled": true },
None,
)
.await
.inspect_err(|e| {
error!(
"ERROR sending alerts | failed to get alerters from db | {e:#}"
)
}) else {
return;
};
}) else {
return;
};
let handles = alerts
.iter()
.map(|alert| send_alert_to_alerters(&alerters, alert));
let handles = alerts
.iter()
.map(|alert| send_alert_to_alerters(&alerters, alert));
join_all(handles).await;
}
.instrument(span)
.await
join_all(handles).await;
}
#[instrument(level = "debug")]
async fn send_alert_to_alerters(alerters: &[Alerter], alert: &Alert) {
if alerters.is_empty() {
return;
@@ -161,7 +152,6 @@ pub async fn send_alert_to_alerter(
}
}
#[instrument(level = "debug")]
async fn send_custom_alert(
url: &str,
alert: &Alert,
@@ -294,7 +284,7 @@ fn standard_alert_content(alert: &Alert) -> String {
let link = resource_link(ResourceTargetVariant::Server, id);
match alert.level {
SeverityLevel::Ok => {
format!("{level} | {name}{region} is now reachable\n{link}")
format!("{level} | {name}{region} is now connected\n{link}")
}
SeverityLevel::Critical => {
let err = err

View File

@@ -2,17 +2,38 @@ use std::sync::OnceLock;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
email: Option<&str>,
alert: &Alert,
) -> anyhow::Result<()> {
let content = standard_alert_content(alert);
if !content.is_empty() {
send_message(url, email, content).await?;
if content.is_empty() {
return Ok(());
}
Ok(())
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
send_message(&url_interpolated, email, content)
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})
}
async fn send_message(
@@ -22,7 +43,7 @@ async fn send_message(
) -> anyhow::Result<()> {
let mut request = http_client()
.post(url)
.header("Title", "ntfy Alert")
.header("Title", "Komodo Alert")
.body(content);
if let Some(email) = email {
@@ -43,9 +64,7 @@ async fn send_message(
)
})?;
Err(anyhow!(
"Failed to send message to ntfy | {} | {}",
status,
text
"Failed to send message to ntfy | {status} | {text}",
))
}
}

View File

@@ -2,16 +2,35 @@ use std::sync::OnceLock;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
alert: &Alert,
) -> anyhow::Result<()> {
let content = standard_alert_content(alert);
if !content.is_empty() {
send_message(url, content).await?;
if content.is_empty() {
return Ok(());
}
Ok(())
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
send_message(&url_interpolated, content).await.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})
}
async fn send_message(

View File

@@ -2,7 +2,6 @@ use ::slack::types::OwnedBlock as Block;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
alert: &Alert,
@@ -64,11 +63,11 @@ pub async fn send_alert(
match alert.level {
SeverityLevel::Ok => {
let text =
format!("{level} | *{name}*{region} is now *reachable*");
format!("{level} | *{name}*{region} is now *connected*");
let blocks = vec![
Block::header(level),
Block::section(format!(
"*{name}*{region} is now *reachable*"
"*{name}*{region} is now *connnected*"
)),
];
(text, blocks.into())
@@ -468,31 +467,32 @@ pub async fn send_alert(
}
AlertData::None {} => Default::default(),
};
if !text.is_empty() {
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
let slack = ::slack::Client::new(url_interpolated);
slack
.send_owned_message_single(&text, blocks.as_deref())
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})?;
if text.is_empty() {
return Ok(());
}
let VariablesAndSecrets { variables, secrets } =
get_variables_and_secrets().await?;
let mut url_interpolated = url.to_string();
let mut interpolator =
Interpolator::new(Some(&variables), &secrets);
interpolator.interpolate_string(&mut url_interpolated)?;
let slack = ::slack::Client::new(url_interpolated);
slack
.send_owned_message_single(&text, None, blocks.as_deref())
.await
.map_err(|e| {
let replacers = interpolator
.secret_replacers
.into_iter()
.collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {sanitized_error}"
))
})?;
Ok(())
}

View File

@@ -88,7 +88,6 @@ async fn variant_handler(
handler(headers, Json(req)).await
}
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
async fn handler(
headers: HeaderMap,
Json(request): Json<AuthRequest>,
@@ -125,7 +124,6 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
}
impl Resolve<AuthArgs> for GetLoginOptions {
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
async fn resolve(
self,
_: &AuthArgs,
@@ -135,7 +133,6 @@ impl Resolve<AuthArgs> for GetLoginOptions {
}
impl Resolve<AuthArgs> for ExchangeForJwt {
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
async fn resolve(
self,
_: &AuthArgs,
@@ -148,7 +145,6 @@ impl Resolve<AuthArgs> for ExchangeForJwt {
}
impl Resolve<AuthArgs> for GetUser {
#[instrument(name = "GetUser", level = "debug", skip(self))]
async fn resolve(
self,
AuthArgs { headers }: &AuthArgs,

View File

@@ -1,12 +1,11 @@
use std::{
collections::HashSet,
path::{Path, PathBuf},
str::FromStr,
sync::OnceLock,
};
use anyhow::Context;
use command::run_komodo_command;
use command::run_komodo_standard_command;
use config::merge_objects;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_document,
@@ -24,6 +23,7 @@ use komodo_client::{
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
random_string,
update::Update,
user::action_user,
},
@@ -38,7 +38,6 @@ use crate::{
config::core_config,
helpers::{
query::{VariablesAndSecrets, get_variables_and_secrets},
random_string,
update::update_update,
},
permission::get_check_permissions,
@@ -59,10 +58,18 @@ impl super::BatchExecute for BatchRunAction {
}
impl Resolve<ExecuteArgs> for BatchRunAction {
#[instrument(name = "BatchRunAction", skip(self, user), fields(user_id = user.id))]
#[instrument(
"BatchRunAction",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchRunAction>(&self.pattern, user)
@@ -72,10 +79,19 @@ impl Resolve<ExecuteArgs> for BatchRunAction {
}
impl Resolve<ExecuteArgs> for RunAction {
#[instrument(name = "RunAction", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunAction",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
action = self.action,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut action = get_check_permissions::<Action>(
&self.action,
@@ -162,7 +178,7 @@ impl Resolve<ExecuteArgs> for RunAction {
""
};
let mut res = run_komodo_command(
let mut res = run_komodo_standard_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
@@ -213,7 +229,6 @@ impl Resolve<ExecuteArgs> for RunAction {
update_update(update.clone()).await?;
if !update.success && action.config.failure_alert {
warn!("action unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
@@ -236,6 +251,7 @@ impl Resolve<ExecuteArgs> for RunAction {
}
}
#[instrument("Interpolate", skip(contents, update, secret))]
async fn interpolate(
contents: &mut String,
update: &mut Update,
@@ -321,6 +337,7 @@ main()
/// Cleans up file at given path.
/// ALSO if $DENO_DIR is set,
/// will clean up the generated file matching "file"
#[instrument("CleanupRun")]
async fn cleanup_run(file: String, path: &Path) {
if let Err(e) = fs::remove_file(path).await {
warn!(
@@ -340,7 +357,7 @@ fn deno_dir() -> Option<&'static Path> {
DENO_DIR
.get_or_init(|| {
let deno_dir = std::env::var("DENO_DIR").ok()?;
PathBuf::from_str(&deno_dir).ok()
Some(PathBuf::from(&deno_dir))
})
.as_deref()
}

View File

@@ -1,6 +1,8 @@
use anyhow::{Context, anyhow};
use formatting::format_serror;
use futures::{TryStreamExt, stream::FuturesUnordered};
use futures_util::{
StreamExt, TryStreamExt, stream::FuturesUnordered,
};
use komodo_client::{
api::execute::{SendAlert, TestAlerter},
entities::{
@@ -22,10 +24,19 @@ use crate::{
use super::ExecuteArgs;
impl Resolve<ExecuteArgs> for TestAlerter {
#[instrument(name = "TestAlerter", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"TestAlerter",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
alerter = self.alerter,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let alerter = get_check_permissions::<Alerter>(
&self.alerter,
@@ -79,15 +90,24 @@ impl Resolve<ExecuteArgs> for TestAlerter {
//
impl Resolve<ExecuteArgs> for SendAlert {
#[instrument(name = "SendAlert", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"SendAlert",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
request = format!("{self:?}"),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let alerters = list_full_for_user::<Alerter>(
Default::default(),
user,
PermissionLevel::Execute.into(),
PermissionLevel::Read.into(),
&[],
)
.await?
@@ -102,6 +122,28 @@ impl Resolve<ExecuteArgs> for SendAlert {
})
.collect::<Vec<_>>();
let alerters = if user.admin {
alerters
} else {
// Only keep alerters with execute permissions
alerters
.into_iter()
.map(|alerter| async move {
get_check_permissions::<Alerter>(
&alerter.id,
user,
PermissionLevel::Execute.into(),
)
.await
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.flatten()
.collect()
};
if alerters.is_empty() {
return Err(anyhow!(
"Could not find any valid alerters to send to, this required Execute permissions on the Alerter"

View File

@@ -14,7 +14,7 @@ use database::mungos::{
},
};
use formatting::format_serror;
use futures::future::join_all;
use futures_util::future::join_all;
use interpolate::Interpolator;
use komodo_client::{
api::execute::{
@@ -37,12 +37,13 @@ use komodo_client::{
use periphery_client::api;
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
use uuid::Uuid;
use crate::{
alert::send_alerts,
helpers::{
build_git_token,
builder::{cleanup_builder_instance, get_builder_periphery},
builder::{cleanup_builder_instance, connect_builder_periphery},
channel::build_cancel_channel,
query::{
VariablesAndSecrets, get_deployment_state,
@@ -66,10 +67,18 @@ impl super::BatchExecute for BatchRunBuild {
}
impl Resolve<ExecuteArgs> for BatchRunBuild {
#[instrument(name = "BatchRunBuild", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchRunBuild",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchRunBuild>(&self.pattern, user)
@@ -79,10 +88,19 @@ impl Resolve<ExecuteArgs> for BatchRunBuild {
}
impl Resolve<ExecuteArgs> for RunBuild {
#[instrument(name = "RunBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunBuild",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
build = self.build,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut build = get_check_permissions::<Build>(
&self.build,
@@ -186,7 +204,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
});
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) = match get_builder_periphery(
let (periphery, cleanup_data) = match connect_builder_periphery(
build.name.clone(),
Some(build.config.version),
builder,
@@ -247,11 +265,11 @@ impl Resolve<ExecuteArgs> for RunBuild {
replacers: Default::default(),
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
debug!("Build cancelled during clone, cleaning up builder");
update.push_error_log("Build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
info!("Builder cleaned up");
return handle_early_return(update, build.id, build.name, true).await
},
};
@@ -296,8 +314,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
additional_tags: Default::default(),
}) => res.context("failed at call to periphery to build"),
_ = cancel.cancelled() => {
info!("build cancelled during build, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
info!("Build cancelled during build, cleaning up builder");
update.push_error_log("Build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update, build.id, build.name, true).await
@@ -312,7 +330,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
Err(e) => {
warn!("error in build | {e:#}");
update.push_error_log(
"build",
"Build Error",
format_serror(&e.context("failed to build").into()),
)
}
@@ -370,7 +388,6 @@ impl Resolve<ExecuteArgs> for RunBuild {
handle_post_build_redeploy(&build.id).await;
});
} else {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
@@ -395,7 +412,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
}
}
#[instrument(skip(update))]
#[instrument("HandleEarlyReturn", skip(update))]
async fn handle_early_return(
mut update: Update,
build_id: String,
@@ -419,7 +436,6 @@ async fn handle_early_return(
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("build unsuccessful, alerting...");
let target = update.target.clone();
let version = update.version;
tokio::spawn(async move {
@@ -489,10 +505,19 @@ pub async fn validate_cancel_build(
}
impl Resolve<ExecuteArgs> for CancelBuild {
#[instrument(name = "CancelBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"CancelBuild",
skip(user, update),
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
build = self.build,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let build = get_check_permissions::<Build>(
&self.build,
@@ -549,7 +574,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
}
}
#[instrument]
#[instrument("PostBuildRedeploy")]
async fn handle_post_build_redeploy(build_id: &str) {
let Ok(redeploy_deployments) = find_collect(
&db_client().deployments,
@@ -585,7 +610,11 @@ async fn handle_post_build_redeploy(build_id: &str) {
stop_signal: None,
stop_time: None,
}
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
}
.await;
@@ -611,6 +640,7 @@ async fn handle_post_build_redeploy(build_id: &str) {
/// This will make sure that a build with non-none image registry has an account attached,
/// and will check the core config for a token matching requirements.
/// Otherwise it is left to periphery.
#[instrument("ValidateRegistryTokens")]
async fn validate_account_extract_registry_tokens(
Build {
config: BuildConfig { image_registry, .. },

View File

@@ -49,10 +49,18 @@ impl super::BatchExecute for BatchDeploy {
}
impl Resolve<ExecuteArgs> for BatchDeploy {
#[instrument(name = "BatchDeploy", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchDeploy",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDeploy>(&self.pattern, user)
@@ -61,6 +69,7 @@ impl Resolve<ExecuteArgs> for BatchDeploy {
}
}
#[instrument("SetupDeploy", skip_all)]
async fn setup_deployment_execution(
deployment: &str,
user: &User,
@@ -87,10 +96,21 @@ async fn setup_deployment_execution(
}
impl Resolve<ExecuteArgs> for Deploy {
#[instrument(name = "Deploy", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"Deploy",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
stop_signal = format!("{:?}", self.stop_signal),
stop_time = self.stop_time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (mut deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -243,6 +263,14 @@ fn pull_cache() -> &'static PullCache {
PULL_CACHE.get_or_init(Default::default)
}
#[instrument(
"PullDeploymentInner",
skip_all,
fields(
deployment = deployment.id,
server = server.id
)
)]
pub async fn pull_deployment_inner(
deployment: Deployment,
server: &Server,
@@ -358,10 +386,19 @@ pub async fn pull_deployment_inner(
}
impl Resolve<ExecuteArgs> for PullDeployment {
#[instrument(name = "PullDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PullDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -392,10 +429,19 @@ impl Resolve<ExecuteArgs> for PullDeployment {
}
impl Resolve<ExecuteArgs> for StartDeployment {
#[instrument(name = "StartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StartDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -440,10 +486,19 @@ impl Resolve<ExecuteArgs> for StartDeployment {
}
impl Resolve<ExecuteArgs> for RestartDeployment {
#[instrument(name = "RestartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RestartDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -490,10 +545,19 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
}
impl Resolve<ExecuteArgs> for PauseDeployment {
#[instrument(name = "PauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PauseDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -538,10 +602,19 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
}
impl Resolve<ExecuteArgs> for UnpauseDeployment {
#[instrument(name = "UnpauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"UnpauseDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -588,10 +661,21 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
}
impl Resolve<ExecuteArgs> for StopDeployment {
#[instrument(name = "StopDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StopDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
@@ -655,10 +739,18 @@ impl super::BatchExecute for BatchDestroyDeployment {
}
impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
#[instrument(name = "BatchDestroyDeployment", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchDestroyDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDestroyDeployment>(
@@ -671,10 +763,21 @@ impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
}
impl Resolve<ExecuteArgs> for DestroyDeployment {
#[instrument(name = "DestroyDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DestroyDeployment",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
deployment = self.deployment,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;

View File

@@ -1,13 +1,13 @@
use std::{fmt::Write as _, sync::OnceLock};
use anyhow::{Context, anyhow};
use command::run_komodo_command;
use command::run_komodo_standard_command;
use database::{
bson::{Document, doc},
mungos::find::find_collect,
};
use formatting::{bold, format_serror};
use futures::{StreamExt, stream::FuturesOrdered};
use futures_util::{StreamExt, stream::FuturesOrdered};
use komodo_client::{
api::execute::{
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
@@ -45,13 +45,17 @@ fn clear_repo_cache_lock() -> &'static Mutex<()> {
impl Resolve<ExecuteArgs> for ClearRepoCache {
#[instrument(
name = "ClearRepoCache",
skip(user, update),
fields(user_id = user.id, update_id = update.id)
"ClearRepoCache",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -120,13 +124,17 @@ fn backup_database_lock() -> &'static Mutex<()> {
impl Resolve<ExecuteArgs> for BackupCoreDatabase {
#[instrument(
name = "BackupCoreDatabase",
skip(user, update),
fields(user_id = user.id, update_id = update.id)
"BackupCoreDatabase",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -143,7 +151,7 @@ impl Resolve<ExecuteArgs> for BackupCoreDatabase {
update_update(update.clone()).await?;
let res = run_komodo_command(
let res = run_komodo_standard_command(
"Backup Core Database",
None,
"km database backup --yes",
@@ -169,13 +177,17 @@ fn global_update_lock() -> &'static Mutex<()> {
impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
#[instrument(
name = "GlobalAutoUpdate",
skip(user, update),
fields(user_id = user.id, update_id = update.id)
"GlobalAutoUpdate",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -335,13 +347,17 @@ fn global_rotate_lock() -> &'static Mutex<()> {
impl Resolve<ExecuteArgs> for RotateAllServerKeys {
#[instrument(
name = "RotateAllServerKeys",
skip(user, update),
fields(user_id = user.id, update_id = update.id)
"RotateAllServerKeys",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(
@@ -445,13 +461,18 @@ impl Resolve<ExecuteArgs> for RotateAllServerKeys {
impl Resolve<ExecuteArgs> for RotateCoreKeys {
#[instrument(
name = "RotateCoreKeys",
skip(user, update),
fields(user_id = user.id, update_id = update.id)
"RotateCoreKeys",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
force = self.force,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
if !user.admin {
return Err(

View File

@@ -1,4 +1,4 @@
use std::{pin::Pin, time::Instant};
use std::pin::Pin;
use anyhow::Context;
use axum::{
@@ -8,7 +8,7 @@ use axum_extra::{TypedHeader, headers::ContentType};
use database::mungos::by_id::find_one_by_id;
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures::future::join_all;
use futures_util::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
@@ -23,6 +23,7 @@ use response::JsonString;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use strum::Display;
use typeshare::typeshare;
use uuid::Uuid;
@@ -51,6 +52,9 @@ pub use {
};
pub struct ExecuteArgs {
/// The execution id.
/// Unique for every /execute call.
pub id: Uuid,
pub user: User,
pub update: Update,
}
@@ -59,7 +63,7 @@ pub struct ExecuteArgs {
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[variant_derive(Debug)]
#[variant_derive(Debug, Display)]
#[args(ExecuteArgs)]
#[response(JsonString)]
#[error(serror::Error)]
@@ -203,7 +207,7 @@ pub fn inner_handler(
>,
> {
Box::pin(async move {
let req_id = Uuid::new_v4();
let task_id = Uuid::new_v4();
// Need to validate no cancel is active before any update is created.
// This ensures no double update created if Cancel is called more than once for the same request.
@@ -219,14 +223,14 @@ pub fn inner_handler(
// here either.
if update.operation == Operation::None {
return Ok(ExecutionResult::Batch(
task(req_id, request, user, update).await?,
task(task_id, request, user, update).await?,
));
}
// Spawn a task for the execution which continues
// running after this method returns.
let handle =
tokio::spawn(task(req_id, request, user, update.clone()));
tokio::spawn(task(task_id, request, user, update.clone()));
// Spawns another task to monitor the first for failures,
// and add the log to Update about it (which primary task can't do because it errored out)
@@ -235,11 +239,11 @@ pub fn inner_handler(
async move {
let log = match handle.await {
Ok(Err(e)) => {
warn!("/execute request {req_id} task error: {e:#}",);
warn!("/execute request {task_id} task error: {e:#}",);
Log::error("Task Error", format_serror(&e.into()))
}
Err(e) => {
warn!("/execute request {req_id} spawn error: {e:?}",);
warn!("/execute request {task_id} spawn error: {e:?}",);
Log::error("Spawn Error", format!("{e:#?}"))
}
_ => return,
@@ -273,40 +277,33 @@ pub fn inner_handler(
})
}
#[instrument(
name = "ExecuteRequest",
skip(user, update),
fields(
user_id = user.id,
update_id = update.id,
request = format!("{:?}", request.extract_variant()))
)
]
async fn task(
req_id: Uuid,
id: Uuid,
request: ExecuteRequest,
user: User,
update: Update,
) -> anyhow::Result<String> {
info!("/execute request {req_id} | user: {}", user.username);
let timer = Instant::now();
let variant = request.extract_variant();
let res = match request.resolve(&ExecuteArgs { user, update }).await
{
Err(e) => Err(e.error),
Ok(JsonString::Err(e)) => Err(
anyhow::Error::from(e).context("failed to serialize response"),
),
Ok(JsonString::Ok(res)) => Ok(res),
};
info!(
"/execute request {id} | {variant} | user: {}",
user.username
);
let res =
match request.resolve(&ExecuteArgs { user, update, id }).await {
Err(e) => Err(e.error),
Ok(JsonString::Err(e)) => Err(
anyhow::Error::from(e)
.context("failed to serialize response"),
),
Ok(JsonString::Ok(res)) => Ok(res),
};
if let Err(e) = &res {
warn!("/execute request {req_id} error: {e:#}");
warn!("/execute request {id} error: {e:#}");
}
let elapsed = timer.elapsed();
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
res
}
@@ -315,6 +312,7 @@ trait BatchExecute {
fn single_request(name: String) -> ExecuteRequest;
}
#[instrument("BatchExecute", skip(user))]
async fn batch_execute<E: BatchExecute>(
pattern: &str,
user: &User,
@@ -327,6 +325,7 @@ async fn batch_execute<E: BatchExecute>(
&[],
)
.await?;
let futures = resources.into_iter().map(|resource| {
let user = user.clone();
async move {

View File

@@ -38,7 +38,11 @@ impl super::BatchExecute for BatchRunProcedure {
}
impl Resolve<ExecuteArgs> for BatchRunProcedure {
#[instrument(name = "BatchRunProcedure", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchRunProcedure",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
@@ -51,10 +55,19 @@ impl Resolve<ExecuteArgs> for BatchRunProcedure {
}
impl Resolve<ExecuteArgs> for RunProcedure {
#[instrument(name = "RunProcedure", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunProcedure",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
procedure = self.procedure,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
Ok(
resolve_inner(self.procedure, user.clone(), update.clone())
@@ -146,7 +159,6 @@ fn resolve_inner(
update_update(update.clone()).await?;
if !update.success && procedure.config.failure_alert {
warn!("procedure unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {

View File

@@ -30,7 +30,7 @@ use crate::{
alert::send_alerts,
api::write::WriteArgs,
helpers::{
builder::{cleanup_builder_instance, get_builder_periphery},
builder::{cleanup_builder_instance, connect_builder_periphery},
channel::repo_cancel_channel,
git_token, periphery_client,
query::{VariablesAndSecrets, get_variables_and_secrets},
@@ -51,10 +51,18 @@ impl super::BatchExecute for BatchCloneRepo {
}
impl Resolve<ExecuteArgs> for BatchCloneRepo {
#[instrument(name = "BatchCloneRepo", skip( user), fields(user_id = user.id))]
#[instrument(
"BatchCloneRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchCloneRepo>(&self.pattern, user)
@@ -64,10 +72,19 @@ impl Resolve<ExecuteArgs> for BatchCloneRepo {
}
impl Resolve<ExecuteArgs> for CloneRepo {
#[instrument(name = "CloneRepo", skip( user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"CloneRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
@@ -165,10 +182,18 @@ impl super::BatchExecute for BatchPullRepo {
}
impl Resolve<ExecuteArgs> for BatchPullRepo {
#[instrument(name = "BatchPullRepo", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchPullRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
@@ -178,10 +203,19 @@ impl Resolve<ExecuteArgs> for BatchPullRepo {
}
impl Resolve<ExecuteArgs> for PullRepo {
#[instrument(name = "PullRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PullRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
@@ -275,7 +309,11 @@ impl Resolve<ExecuteArgs> for PullRepo {
}
}
#[instrument(skip_all, fields(update_id = update.id))]
#[instrument(
"HandleRepoEarlyReturn",
skip_all,
fields(update_id = update.id)
)]
async fn handle_repo_update_return(
update: Update,
) -> serror::Result<Update> {
@@ -297,7 +335,7 @@ async fn handle_repo_update_return(
Ok(update)
}
#[instrument]
#[instrument("UpdateLastPulledTime")]
async fn update_last_pulled_time(repo_name: &str) {
let res = db_client()
.repos
@@ -321,10 +359,18 @@ impl super::BatchExecute for BatchBuildRepo {
}
impl Resolve<ExecuteArgs> for BatchBuildRepo {
#[instrument(name = "BatchBuildRepo", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchBuildRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchBuildRepo>(&self.pattern, user)
@@ -334,10 +380,19 @@ impl Resolve<ExecuteArgs> for BatchBuildRepo {
}
impl Resolve<ExecuteArgs> for BuildRepo {
#[instrument(name = "BuildRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"BuildRepo",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
&self.repo,
@@ -419,7 +474,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) = match get_builder_periphery(
let (periphery, cleanup_data) = match connect_builder_periphery(
repo.name.clone(),
None,
builder,
@@ -531,7 +586,6 @@ impl Resolve<ExecuteArgs> for BuildRepo {
update_update(update.clone()).await?;
if !update.success {
warn!("repo build unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
@@ -554,7 +608,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
}
}
#[instrument(skip(update))]
#[instrument("HandleRepoBuildEarlyReturn", skip(update))]
async fn handle_builder_early_return(
mut update: Update,
repo_id: String,
@@ -578,7 +632,6 @@ async fn handle_builder_early_return(
}
update_update(update.clone()).await?;
if !update.success && !is_cancel {
warn!("repo build unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
@@ -599,7 +652,6 @@ async fn handle_builder_early_return(
Ok(update)
}
#[instrument(skip_all)]
pub async fn validate_cancel_repo_build(
request: &ExecuteRequest,
) -> anyhow::Result<()> {
@@ -649,10 +701,19 @@ pub async fn validate_cancel_repo_build(
}
impl Resolve<ExecuteArgs> for CancelRepoBuild {
#[instrument(name = "CancelRepoBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"CancelRepoBuild",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
repo = self.repo,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let repo = get_check_permissions::<Repo>(
&self.repo,
@@ -709,6 +770,13 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
}
}
#[instrument(
"Interpolate",
skip_all,
fields(
skip_secret_interp = repo.config.skip_secret_interp
)
)]
async fn interpolate(
repo: &mut Repo,
update: &mut Update,

View File

@@ -22,10 +22,20 @@ use crate::{
use super::ExecuteArgs;
impl Resolve<ExecuteArgs> for StartContainer {
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StartContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -76,10 +86,20 @@ impl Resolve<ExecuteArgs> for StartContainer {
}
impl Resolve<ExecuteArgs> for RestartContainer {
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RestartContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -132,10 +152,20 @@ impl Resolve<ExecuteArgs> for RestartContainer {
}
impl Resolve<ExecuteArgs> for PauseContainer {
#[instrument(name = "PauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PauseContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -186,10 +216,20 @@ impl Resolve<ExecuteArgs> for PauseContainer {
}
impl Resolve<ExecuteArgs> for UnpauseContainer {
#[instrument(name = "UnpauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"UnpauseContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -242,10 +282,22 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
}
impl Resolve<ExecuteArgs> for StopContainer {
#[instrument(name = "StopContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StopContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -298,10 +350,22 @@ impl Resolve<ExecuteArgs> for StopContainer {
}
impl Resolve<ExecuteArgs> for DestroyContainer {
#[instrument(name = "DestroyContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DestroyContainer",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
container = self.container,
signal = format!("{:?}", self.signal),
time = self.time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let DestroyContainer {
server,
@@ -360,10 +424,19 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
}
impl Resolve<ExecuteArgs> for StartAllContainers {
#[instrument(name = "StartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StartAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -411,10 +484,19 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
}
impl Resolve<ExecuteArgs> for RestartAllContainers {
#[instrument(name = "RestartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RestartAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -464,10 +546,19 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
}
impl Resolve<ExecuteArgs> for PauseAllContainers {
#[instrument(name = "PauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PauseAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -515,10 +606,19 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
}
impl Resolve<ExecuteArgs> for UnpauseAllContainers {
#[instrument(name = "UnpauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"UnpauseAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -568,10 +668,19 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
}
impl Resolve<ExecuteArgs> for StopAllContainers {
#[instrument(name = "StopAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StopAllContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -619,10 +728,19 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
}
impl Resolve<ExecuteArgs> for PruneContainers {
#[instrument(name = "PruneContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneContainers",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -675,10 +793,20 @@ impl Resolve<ExecuteArgs> for PruneContainers {
}
impl Resolve<ExecuteArgs> for DeleteNetwork {
#[instrument(name = "DeleteNetwork", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DeleteNetwork",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
network = self.name
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -726,10 +854,19 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
}
impl Resolve<ExecuteArgs> for PruneNetworks {
#[instrument(name = "PruneNetworks", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneNetworks",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -780,10 +917,20 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
}
impl Resolve<ExecuteArgs> for DeleteImage {
#[instrument(name = "DeleteImage", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DeleteImage",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
image = self.name,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -828,10 +975,19 @@ impl Resolve<ExecuteArgs> for DeleteImage {
}
impl Resolve<ExecuteArgs> for PruneImages {
#[instrument(name = "PruneImages", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneImages",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -880,10 +1036,20 @@ impl Resolve<ExecuteArgs> for PruneImages {
}
impl Resolve<ExecuteArgs> for DeleteVolume {
#[instrument(name = "DeleteVolume", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DeleteVolume",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
volume = self.name,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -931,10 +1097,19 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
}
impl Resolve<ExecuteArgs> for PruneVolumes {
#[instrument(name = "PruneVolumes", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneVolumes",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -983,10 +1158,19 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
}
impl Resolve<ExecuteArgs> for PruneDockerBuilders {
#[instrument(name = "PruneDockerBuilders", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneDockerBuilders",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -1035,10 +1219,19 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
}
impl Resolve<ExecuteArgs> for PruneBuildx {
#[instrument(name = "PruneBuildx", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneBuildx",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,
@@ -1087,10 +1280,19 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
}
impl Resolve<ExecuteArgs> for PruneSystem {
#[instrument(name = "PruneSystem", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PruneSystem",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
server = self.server,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
&self.server,

View File

@@ -22,6 +22,7 @@ use komodo_client::{
};
use periphery_client::api::compose::*;
use resolver_api::Resolve;
use uuid::Uuid;
use crate::{
api::write::WriteArgs,
@@ -54,10 +55,18 @@ impl super::BatchExecute for BatchDeployStack {
}
impl Resolve<ExecuteArgs> for BatchDeployStack {
#[instrument(name = "BatchDeployStack", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchDeployStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDeployStack>(&self.pattern, user)
@@ -67,10 +76,21 @@ impl Resolve<ExecuteArgs> for BatchDeployStack {
}
impl Resolve<ExecuteArgs> for DeployStack {
#[instrument(name = "DeployStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DeployStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
stop_time = self.stop_time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (mut stack, server) = get_stack_and_server(
&self.stack,
@@ -281,10 +301,18 @@ impl super::BatchExecute for BatchDeployStackIfChanged {
}
impl Resolve<ExecuteArgs> for BatchDeployStackIfChanged {
#[instrument(name = "BatchDeployStackIfChanged", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchDeployStackIfChanged",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchDeployStackIfChanged>(
@@ -297,10 +325,20 @@ impl Resolve<ExecuteArgs> for BatchDeployStackIfChanged {
}
impl Resolve<ExecuteArgs> for DeployStackIfChanged {
#[instrument(name = "DeployStackIfChanged", skip(user, update), fields(user_id = user.id))]
#[instrument(
"DeployStackIfChanged",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
stop_time = self.stop_time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let stack = get_check_permissions::<Stack>(
&self.stack,
@@ -358,6 +396,7 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
.resolve(&ExecuteArgs {
user: user.clone(),
update,
id: *id,
})
.await
}
@@ -467,6 +506,14 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
}
}
#[instrument(
"DeployStackServices",
skip_all,
fields(
stack = stack,
services = format!("{services:?}")
)
)]
async fn deploy_services(
stack: String,
services: Vec<String>,
@@ -488,10 +535,19 @@ async fn deploy_services(
.resolve(&ExecuteArgs {
user: user.clone(),
update,
id: Uuid::new_v4(),
})
.await
}
#[instrument(
"RestartStackServices",
skip_all,
fields(
stack = stack,
services = format!("{services:?}")
)
)]
async fn restart_services(
stack: String,
services: Vec<String>,
@@ -510,6 +566,7 @@ async fn restart_services(
.resolve(&ExecuteArgs {
user: user.clone(),
update,
id: Uuid::new_v4(),
})
.await
}
@@ -526,6 +583,11 @@ async fn restart_services(
/// Changes to config files after restart is applied should
/// be taken as the deployed contents, otherwise next changed check
/// will restart service again for no reason.
#[instrument(
"UpdateStackDeployedContents",
skip_all,
fields(stack = id)
)]
async fn update_deployed_contents_with_latest(
id: &str,
contents: Option<Vec<StackRemoteFileContents>>,
@@ -663,10 +725,18 @@ impl super::BatchExecute for BatchPullStack {
}
impl Resolve<ExecuteArgs> for BatchPullStack {
#[instrument(name = "BatchPullStack", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchPullStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchPullStack>(&self.pattern, user)
@@ -700,6 +770,14 @@ async fn maybe_pull_stack(
Ok(())
}
#[instrument(
"PullStackInner",
skip_all,
fields(
stack = stack.id,
services = format!("{services:?}"),
)
)]
pub async fn pull_stack_inner(
mut stack: Stack,
services: Vec<String>,
@@ -769,10 +847,20 @@ pub async fn pull_stack_inner(
}
impl Resolve<ExecuteArgs> for PullStack {
#[instrument(name = "PullStack", skip(user, update), fields(user_id = user.id))]
#[instrument(
"PullStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (stack, server) = get_stack_and_server(
&self.stack,
@@ -822,10 +910,20 @@ impl Resolve<ExecuteArgs> for PullStack {
}
impl Resolve<ExecuteArgs> for StartStack {
#[instrument(name = "StartStack", skip(user, update), fields(user_id = user.id))]
#[instrument(
"StartStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<StartStack>(
&self.stack,
@@ -841,10 +939,20 @@ impl Resolve<ExecuteArgs> for StartStack {
}
impl Resolve<ExecuteArgs> for RestartStack {
#[instrument(name = "RestartStack", skip(user, update), fields(user_id = user.id))]
#[instrument(
"RestartStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<RestartStack>(
&self.stack,
@@ -862,10 +970,20 @@ impl Resolve<ExecuteArgs> for RestartStack {
}
impl Resolve<ExecuteArgs> for PauseStack {
#[instrument(name = "PauseStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"PauseStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<PauseStack>(
&self.stack,
@@ -881,10 +999,20 @@ impl Resolve<ExecuteArgs> for PauseStack {
}
impl Resolve<ExecuteArgs> for UnpauseStack {
#[instrument(name = "UnpauseStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"UnpauseStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<UnpauseStack>(
&self.stack,
@@ -900,10 +1028,20 @@ impl Resolve<ExecuteArgs> for UnpauseStack {
}
impl Resolve<ExecuteArgs> for StopStack {
#[instrument(name = "StopStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"StopStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<StopStack>(
&self.stack,
@@ -931,10 +1069,18 @@ impl super::BatchExecute for BatchDestroyStack {
}
impl Resolve<ExecuteArgs> for BatchDestroyStack {
#[instrument(name = "BatchDestroyStack", skip(user), fields(user_id = user.id))]
#[instrument(
"BatchDestroyStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
pattern = self.pattern,
)
)]
async fn resolve(
self,
ExecuteArgs { user, .. }: &ExecuteArgs,
ExecuteArgs { user, id, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
super::batch_execute::<BatchDestroyStack>(&self.pattern, user)
.await
@@ -943,10 +1089,22 @@ impl Resolve<ExecuteArgs> for BatchDestroyStack {
}
impl Resolve<ExecuteArgs> for DestroyStack {
#[instrument(name = "DestroyStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"DestroyStack",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
services = format!("{:?}", self.services),
remove_orphans = self.remove_orphans,
stop_time = self.stop_time,
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
execute_compose::<DestroyStack>(
&self.stack,
@@ -962,10 +1120,21 @@ impl Resolve<ExecuteArgs> for DestroyStack {
}
impl Resolve<ExecuteArgs> for RunStackService {
#[instrument(name = "RunStackService", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunStackService",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
stack = self.stack,
service = self.service,
request = format!("{self:?}"),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let (mut stack, server) = get_stack_and_server(
&self.stack,

View File

@@ -49,10 +49,21 @@ use crate::{
use super::ExecuteArgs;
impl Resolve<ExecuteArgs> for RunSync {
#[instrument(name = "RunSync", skip(user, update), fields(user_id = user.id, update_id = update.id))]
#[instrument(
"RunSync",
skip_all,
fields(
id = id.to_string(),
operator = user.id,
update_id = update.id,
sync = self.sync,
resource_type = format!("{:?}", self.resource_type),
resources = format!("{:?}", self.resources),
)
)]
async fn resolve(
self,
ExecuteArgs { user, update }: &ExecuteArgs,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> serror::Result<Update> {
let RunSync {
sync,

View File

@@ -9,14 +9,14 @@ use komodo_client::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
entities::{
deployment::Deployment, server::Server, stack::Stack,
sync::ResourceSync,
deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, sync::ResourceSync,
},
};
use resolver_api::Resolve;
use crate::{
config::core_config, permission::get_resource_ids_for_user,
config::core_config, permission::list_resource_ids_for_user,
state::db_client,
};
@@ -31,14 +31,29 @@ impl Resolve<ReadArgs> for ListAlerts {
) -> serror::Result<ListAlertsResponse> {
let mut query = self.query.unwrap_or_default();
if !user.admin && !core_config().transparent_mode {
let server_ids =
get_resource_ids_for_user::<Server>(user).await?;
let stack_ids =
get_resource_ids_for_user::<Stack>(user).await?;
let deployment_ids =
get_resource_ids_for_user::<Deployment>(user).await?;
let sync_ids =
get_resource_ids_for_user::<ResourceSync>(user).await?;
let (server_ids, stack_ids, deployment_ids, sync_ids) = tokio::try_join!(
list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
)?;
// All of the vecs will be non-none if !admin and !transparent mode.
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },

View File

@@ -11,8 +11,10 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags,
permission::{get_check_permissions, list_resource_ids_for_user},
resource,
state::db_client,
};
use super::ReadArgs;
@@ -82,9 +84,11 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetAlertersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Alerter,
>(user)
let query = match list_resource_ids_for_user::<Alerter>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
{
Some(ids) => doc! {

View File

@@ -6,7 +6,7 @@ use database::mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use futures::TryStreamExt;
use futures_util::TryStreamExt;
use komodo_client::{
api::read::*,
entities::{

View File

@@ -11,8 +11,10 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags,
permission::{get_check_permissions, list_resource_ids_for_user},
resource,
state::db_client,
};
use super::ReadArgs;
@@ -82,9 +84,11 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetBuildersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Builder,
>(user)
let query = match list_resource_ids_for_user::<Builder>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
{
Some(ids) => doc! {

View File

@@ -51,6 +51,7 @@ mod server;
mod stack;
mod sync;
mod tag;
mod terminal;
mod toml;
mod update;
mod user;
@@ -114,6 +115,8 @@ enum ReadRequest {
GetHistoricalServerStats(GetHistoricalServerStats),
ListServers(ListServers),
ListFullServers(ListFullServers),
// ==== TERMINAL ====
ListTerminals(ListTerminals),
// ==== DOCKER ====
@@ -249,7 +252,6 @@ async fn variant_handler(
handler(user, Json(req)).await
}
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ReadRequest>,

View File

@@ -1,4 +1,4 @@
use futures::future::join_all;
use futures_util::future::join_all;
use komodo_client::{
api::read::*,
entities::{

View File

@@ -25,11 +25,10 @@ use komodo_client::{
network::Network,
volume::Volume,
},
komodo_timestamp,
permission::PermissionLevel,
server::{
Server, ServerActionState, ServerListItem, ServerState,
TerminalInfo,
Server, ServerActionState, ServerListItem, ServerQuery,
ServerState,
},
stack::{Stack, StackServiceNames},
stats::{SystemInformation, SystemProcess},
@@ -50,7 +49,7 @@ use tokio::sync::Mutex;
use crate::{
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
permission::{get_check_permissions, list_resources_for_user},
resource,
stack::compose_container_match_regex,
state::{action_states, db_client, server_status_cache},
@@ -398,18 +397,12 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListAllDockerContainersResponse> {
let servers = resource::list_for_user::<Server>(
Default::default(),
ServerQuery::builder().names(self.servers.clone()).build(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?
.into_iter()
.filter(|server| {
self.servers.is_empty()
|| self.servers.contains(&server.id)
|| self.servers.contains(&server.name)
});
.await?;
let mut containers = Vec::<ContainerListItem>::new();
@@ -417,9 +410,17 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(more_containers) = &cache.containers {
containers.extend(more_containers.clone());
}
let Some(more) = &cache.containers else {
continue;
};
let more = more
.iter()
.filter(|container| {
self.containers.is_empty()
|| self.containers.contains(&container.name)
})
.cloned();
containers.extend(more);
}
Ok(containers)
@@ -586,12 +587,12 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
}
// then check stacks
let stacks =
resource::list_full_for_user_using_document::<Stack>(
doc! { "config.server_id": &server.id },
user,
)
.await?;
let stacks = list_resources_for_user::<Stack>(
doc! { "config.server_id": &server.id },
user,
PermissionLevel::Read.into(),
)
.await?;
// check matching stack
for stack in stacks {
@@ -832,68 +833,46 @@ impl Resolve<ReadArgs> for ListComposeProjects {
}
}
#[derive(Default)]
struct TerminalCacheItem {
list: Vec<TerminalInfo>,
ttl: i64,
}
// impl Resolve<ReadArgs> for ListAllTerminals {
// async fn resolve(
// self,
// args: &ReadArgs,
// ) -> Result<Self::Response, Self::Error> {
// // match self.tar
// let mut terminals = resource::list_full_for_user::<Server>(
// self.query, &args.user, &all_tags,
// )
// .await?
// .into_iter()
// .map(|server| async move {
// (
// list_terminals_inner(&server, self.fresh).await,
// (server.id, server.name),
// )
// })
// .collect::<FuturesUnordered<_>>()
// .collect::<Vec<_>>()
// .await
// .into_iter()
// .flat_map(|(terminals, server)| {
// let terminals = terminals.ok()?;
// Some((terminals, server))
// })
// .flat_map(|(terminals, (server_id, server_name))| {
// terminals.into_iter().map(move |info| {
// TerminalInfoWithServer::from_terminal_info(
// &server_id,
// &server_name,
// info,
// )
// })
// })
// .collect::<Vec<_>>();
const TERMINAL_CACHE_TIMEOUT: i64 = 30_000;
// terminals.sort_by(|a, b| {
// a.server_name.cmp(&b.server_name).then(a.name.cmp(&b.name))
// });
#[derive(Default)]
struct TerminalCache(
std::sync::Mutex<
HashMap<String, Arc<tokio::sync::Mutex<TerminalCacheItem>>>,
>,
);
impl TerminalCache {
fn get_or_insert(
&self,
server_id: String,
) -> Arc<tokio::sync::Mutex<TerminalCacheItem>> {
if let Some(cached) =
self.0.lock().unwrap().get(&server_id).cloned()
{
return cached;
}
let to_cache =
Arc::new(tokio::sync::Mutex::new(TerminalCacheItem::default()));
self.0.lock().unwrap().insert(server_id, to_cache.clone());
to_cache
}
}
fn terminals_cache() -> &'static TerminalCache {
static TERMINALS: OnceLock<TerminalCache> = OnceLock::new();
TERMINALS.get_or_init(Default::default)
}
impl Resolve<ReadArgs> for ListTerminals {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListTerminalsResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let cache = terminals_cache().get_or_insert(server.id.clone());
let mut cache = cache.lock().await;
if self.fresh || komodo_timestamp() > cache.ttl {
cache.list = periphery_client(&server)
.await?
.request(periphery_client::api::terminal::ListTerminals {
container: None,
})
.await
.context("Failed to get fresh terminal list")?;
cache.ttl = komodo_timestamp() + TERMINAL_CACHE_TIMEOUT;
Ok(cache.list.clone())
} else {
Ok(cache.list.clone())
}
}
}
// Ok(terminals)
// }
// }

View File

@@ -0,0 +1,247 @@
use anyhow::Context as _;
use futures_util::{
FutureExt, StreamExt as _, stream::FuturesUnordered,
};
use komodo_client::{
api::read::{ListTerminals, ListTerminalsResponse},
entities::{
deployment::Deployment,
permission::PermissionLevel,
server::Server,
stack::Stack,
terminal::{Terminal, TerminalTarget},
user::User,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::periphery_client, permission::get_check_permissions,
resource,
};
use super::ReadArgs;
//
// Resolve ListTerminals read requests.
//
// With no `self.target`, falls through to `list_all_terminals_for_user`
// to aggregate Terminals across every resource visible to the user.
// Otherwise, authorizes the specific target (Read permission with the
// `terminal` specifier) and queries only that target's Server.
impl Resolve<ReadArgs> for ListTerminals {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListTerminalsResponse> {
// No target: list everything this user can see.
let Some(target) = self.target else {
return list_all_terminals_for_user(user, self.use_names).await;
};
match &target {
TerminalTarget::Server { server } => {
// `server` is optional in the API type but required here:
// fail early with 400 Bad Request instead of a lookup error.
let server = server
.as_ref()
.context("Must provide 'target.params.server'")
.status_code(StatusCode::BAD_REQUEST)?;
let server = get_check_permissions::<Server>(
server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
list_terminals_on_server(&server, Some(target)).await
}
TerminalTarget::Container { server, .. } => {
// Container terminals are authorized against the host Server.
let server = get_check_permissions::<Server>(
server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
list_terminals_on_server(&server, Some(target)).await
}
TerminalTarget::Stack { stack, .. } => {
// Permission is checked on the Stack itself; its configured
// Server is then loaded directly, with no separate Server
// permission check.
let server = get_check_permissions::<Stack>(
stack,
user,
PermissionLevel::Read.terminal(),
)
.await?
.config
.server_id;
let server = resource::get::<Server>(&server).await?;
list_terminals_on_server(&server, Some(target)).await
}
TerminalTarget::Deployment { deployment } => {
// Same pattern as Stack: authorize the Deployment, then load
// its configured Server without a separate permission check.
let server = get_check_permissions::<Deployment>(
deployment,
user,
PermissionLevel::Read.terminal(),
)
.await?
.config
.server_id;
let server = resource::get::<Server>(&server).await?;
list_terminals_on_server(&server, Some(target)).await
}
}
}
}
/// Aggregates every Terminal visible to `user` across all Servers.
///
/// Visibility rules (as implemented below):
/// - Server / Container terminals require terminal permission on the
///   Server itself.
/// - Stack / Deployment terminals require terminal permission on the
///   Stack / Deployment; the hosting Server is still queried even when
///   the user lacks direct Server permission.
///
/// `use_names` rewrites target ids into resource names in the response.
/// Results are sorted by target, then terminal name.
async fn list_all_terminals_for_user(
user: &User,
use_names: bool,
) -> serror::Result<Vec<Terminal>> {
// Fetch all three permitted resource lists concurrently.
let (mut servers, stacks, deployments) = tokio::try_join!(
resource::list_full_for_user::<Server>(
Default::default(),
user,
PermissionLevel::Read.terminal(),
&[]
)
.map(|res| res.map(|servers| servers
.into_iter()
// true denotes user actually has permission on this Server.
.map(|server| (server, true))
.collect::<Vec<_>>())),
resource::list_full_for_user::<Stack>(
Default::default(),
user,
PermissionLevel::Read.terminal(),
&[]
),
resource::list_full_for_user::<Deployment>(
Default::default(),
user,
PermissionLevel::Read.terminal(),
&[]
),
)?;
// Ensure any missing servers are present to query
// These are pushed with `false`: the user has no direct Server
// permission, so only Stack / Deployment terminals hosted on them
// will survive the permission filter below.
for stack in &stacks {
if !stack.config.server_id.is_empty()
&& !servers
.iter()
.any(|(server, _)| server.id == stack.config.server_id)
{
let server =
resource::get::<Server>(&stack.config.server_id).await?;
servers.push((server, false));
}
}
for deployment in &deployments {
if !deployment.config.server_id.is_empty()
&& !servers
.iter()
.any(|(server, _)| server.id == deployment.config.server_id)
{
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
servers.push((server, false));
}
}
// Query every Server's periphery concurrently (unordered — the final
// sort below restores a deterministic ordering).
let mut terminals = servers
.into_iter()
.map(|(server, server_permission)| async move {
(
list_terminals_on_server(&server, None).await,
(server.id, server.name, server_permission),
)
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.flat_map(
|(terminals, (server_id, server_name, server_permission))| {
// NOTE(review): `.ok()?` silently drops Servers whose
// periphery request failed (e.g. unreachable agents) —
// presumably intentional best-effort behavior; confirm.
let terminals = terminals
.ok()?
.into_iter()
.filter_map(|mut terminal| {
// Only keep terminals with appropriate perms.
// Also rewrites each target's id/name per `use_names`.
match terminal.target.clone() {
TerminalTarget::Server { .. } => server_permission
.then(|| {
terminal.target = TerminalTarget::Server {
server: Some(if use_names {
server_name.clone()
} else {
server_id.clone()
}),
};
terminal
}),
TerminalTarget::Container { container, .. } => {
server_permission.then(|| {
terminal.target = TerminalTarget::Container {
server: if use_names {
server_name.clone()
} else {
server_id.clone()
},
container,
};
terminal
})
}
// Stack / Deployment terminals are kept only when the
// resource appeared in the user's permitted list above.
TerminalTarget::Stack { stack, service } => {
stacks.iter().find(|s| s.id == stack).map(|s| {
terminal.target = TerminalTarget::Stack {
stack: if use_names {
s.name.clone()
} else {
s.id.clone()
},
service,
};
terminal
})
}
TerminalTarget::Deployment { deployment } => {
deployments.iter().find(|d| d.id == deployment).map(
|d| {
terminal.target = TerminalTarget::Deployment {
deployment: if use_names {
d.name.clone()
} else {
d.id.clone()
},
};
terminal
},
)
}
}
})
.collect::<Vec<_>>();
Some(terminals)
},
)
.flatten()
.collect::<Vec<_>>();
// Deterministic order: by target, then by terminal name.
terminals.sort_by(|a, b| {
a.target.cmp(&b.target).then(a.name.cmp(&b.name))
});
Ok(terminals)
}
/// Fetches the Terminal list for a single Server from its periphery
/// agent, optionally narrowed to one `target`.
///
/// Errors (connection or request failure) are annotated with the
/// Server's name and id before being returned.
async fn list_terminals_on_server(
  server: &Server,
  target: Option<TerminalTarget>,
) -> serror::Result<Vec<Terminal>> {
  // Resolve a periphery client for this Server first.
  let periphery = periphery_client(server).await?;
  // Ask the agent for its terminal list.
  let response = periphery
    .request(periphery_client::api::terminal::ListTerminals {
      target,
    })
    .await;
  // Attach which Server failed, then convert into the serror type.
  response
    .with_context(|| {
      format!(
        "Failed to get Terminal list from Server {} ({})",
        server.name, server.id
      )
    })
    .map_err(Into::into)
}

View File

@@ -29,7 +29,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
permission::{get_check_permissions, get_resource_ids_for_user},
permission::{get_check_permissions, list_resource_ids_for_user},
state::db_client,
};
@@ -45,99 +45,137 @@ impl Resolve<ReadArgs> for ListUpdates {
let query = if user.admin || core_config().transparent_mode {
self.query
} else {
let server_query = get_resource_ids_for_user::<Server>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
get_resource_ids_for_user::<Deployment>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = get_resource_ids_for_user::<Stack>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = get_resource_ids_for_user::<Build>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = get_resource_ids_for_user::<Repo>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
get_resource_ids_for_user::<Procedure>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = get_resource_ids_for_user::<Action>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = get_resource_ids_for_user::<Builder>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = get_resource_ids_for_user::<Alerter>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query = get_resource_ids_for_user::<
ResourceSync,
>(user)
let server_query = list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = list_resource_ids_for_user::<Build>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = list_resource_ids_for_user::<Repo>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query = list_resource_ids_for_user::<Procedure>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = list_resource_ids_for_user::<Action>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = list_resource_ids_for_user::<Builder>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = list_resource_ids_for_user::<Alerter>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query =
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = self.query.unwrap_or_default();
query.extend(doc! {

View File

@@ -1,27 +1,15 @@
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{
api::terminal::*,
entities::{
deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, user::User,
},
};
use komodo_client::{api::terminal::*, entities::user::User};
use serror::Json;
use uuid::Uuid;
use crate::{
auth::auth_request, helpers::periphery_client,
permission::get_check_permissions, resource::get,
state::stack_status_cache,
auth::auth_request, helpers::terminal::setup_target_for_user,
};
pub fn router() -> Router {
Router::new()
.route("/execute", post(execute_terminal))
.route("/execute/container", post(execute_container_exec))
.route("/execute/deployment", post(execute_deployment_exec))
.route("/execute/stack", post(execute_stack_exec))
.layer(middleware::from_fn(auth_request))
}
@@ -29,211 +17,34 @@ pub fn router() -> Router {
// ExecuteTerminal
// =================
async fn execute_terminal(
Extension(user): Extension<User>,
Json(request): Json<ExecuteTerminalBody>,
) -> serror::Result<axum::body::Body> {
execute_terminal_inner(Uuid::new_v4(), request, user).await
}
#[instrument(
name = "ExecuteTerminal",
skip(user),
skip_all,
fields(
user_id = user.id,
operator = user.id,
target,
terminal,
init = format!("{init:?}")
)
)]
async fn execute_terminal_inner(
req_id: Uuid,
ExecuteTerminalBody {
server,
async fn execute_terminal(
Extension(user): Extension<User>,
Json(ExecuteTerminalBody {
target,
terminal,
command,
}: ExecuteTerminalBody,
user: User,
init,
}): Json<ExecuteTerminalBody>,
) -> serror::Result<axum::body::Body> {
info!("/terminal/execute request | user: {}", user.username);
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let stream = periphery_client(&server)
.await?
.execute_terminal(terminal, command)
.await
.context("Failed to execute command on periphery")?;
Ok(axum::body::Body::from_stream(stream))
}
// ======================
// ExecuteContainerExec
// ======================
async fn execute_container_exec(
Extension(user): Extension<User>,
Json(request): Json<ExecuteContainerExecBody>,
) -> serror::Result<axum::body::Body> {
execute_container_exec_inner(Uuid::new_v4(), request, user).await
}
#[instrument(
name = "ExecuteContainerExec",
skip(user),
fields(
user_id = user.id,
)
)]
async fn execute_container_exec_inner(
req_id: Uuid,
ExecuteContainerExecBody {
server,
container,
shell,
command,
recreate,
}: ExecuteContainerExecBody,
user: User,
) -> serror::Result<axum::body::Body> {
info!("ExecuteContainerExec request | user: {}", user.username);
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
let (target, terminal, periphery) =
setup_target_for_user(target, terminal, init, &user).await?;
let stream = periphery
.execute_container_exec(container, shell, command, recreate)
.execute_terminal(target, terminal, command)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
Ok(axum::body::Body::from_stream(stream))
}
// =======================
// ExecuteDeploymentExec
// =======================
async fn execute_deployment_exec(
Extension(user): Extension<User>,
Json(request): Json<ExecuteDeploymentExecBody>,
) -> serror::Result<axum::body::Body> {
execute_deployment_exec_inner(Uuid::new_v4(), request, user).await
}
#[instrument(
name = "ExecuteDeploymentExec",
skip(user),
fields(
user_id = user.id,
)
)]
async fn execute_deployment_exec_inner(
req_id: Uuid,
ExecuteDeploymentExecBody {
deployment,
shell,
command,
recreate,
}: ExecuteDeploymentExecBody,
user: User,
) -> serror::Result<axum::body::Body> {
info!("ExecuteDeploymentExec request | user: {}", user.username);
let deployment = get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get::<Server>(&deployment.config.server_id).await?;
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(deployment.name, shell, command, recreate)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
Ok(axum::body::Body::from_stream(stream))
}
// ==================
// ExecuteStackExec
// ==================
async fn execute_stack_exec(
Extension(user): Extension<User>,
Json(request): Json<ExecuteStackExecBody>,
) -> serror::Result<axum::body::Body> {
execute_stack_exec_inner(Uuid::new_v4(), request, user).await
}
#[instrument(
name = "ExecuteStackExec",
skip(user),
fields(
user_id = user.id,
)
)]
async fn execute_stack_exec_inner(
req_id: Uuid,
ExecuteStackExecBody {
stack,
service,
shell,
command,
recreate,
}: ExecuteStackExecBody,
user: User,
) -> serror::Result<axum::body::Body> {
info!("ExecuteStackExec request | user: {}", user.username);
let stack = get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get::<Server>(&stack.config.server_id).await?;
let container = stack_status_cache()
.get(&stack.id)
.await
.context("could not get stack status")?
.curr
.services
.iter()
.find(|s| s.service == service)
.context("could not find service")?
.container
.as_ref()
.context("could not find service container")?
.name
.clone();
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(container, shell, command, recreate)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
.context("Failed to execute command on Terminal")?;
Ok(axum::body::Body::from_stream(stream))
}

View File

@@ -9,6 +9,7 @@ use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_bson,
};
use derive_variants::EnumVariants;
use komodo_client::entities::random_string;
use komodo_client::{
api::user::*,
entities::{api_key::ApiKey, komodo_timestamp, user::User},
@@ -21,9 +22,7 @@ use typeshare::typeshare;
use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::{query::get_user, random_string},
state::db_client,
auth::auth_request, helpers::query::get_user, state::db_client,
};
use super::Variant;
@@ -66,7 +65,6 @@ async fn variant_handler(
handler(user, Json(req)).await
}
#[instrument(name = "UserHandler", level = "debug", skip(user))]
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<UserRequest>,
@@ -89,11 +87,6 @@ async fn handler(
const RECENTLY_VIEWED_MAX: usize = 10;
impl Resolve<UserArgs> for PushRecentlyViewed {
#[instrument(
name = "PushRecentlyViewed",
level = "debug",
skip(user)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
@@ -131,11 +124,6 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
}
impl Resolve<UserArgs> for SetLastSeenUpdate {
#[instrument(
name = "SetLastSeenUpdate",
level = "debug",
skip(user)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
@@ -158,7 +146,11 @@ const SECRET_LENGTH: usize = 40;
const BCRYPT_COST: u32 = 10;
impl Resolve<UserArgs> for CreateApiKey {
#[instrument(name = "CreateApiKey", level = "debug", skip(user))]
#[instrument(
"CreateApiKey",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,
@@ -188,7 +180,11 @@ impl Resolve<UserArgs> for CreateApiKey {
}
impl Resolve<UserArgs> for DeleteApiKey {
#[instrument(name = "DeleteApiKey", level = "debug", skip(user))]
#[instrument(
"DeleteApiKey",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
UserArgs { user }: &UserArgs,

View File

@@ -11,7 +11,15 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateAction {
#[instrument(name = "CreateAction", skip(user))]
#[instrument(
"CreateAction",
skip_all,
fields(
operator = user.id,
action = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -22,7 +30,15 @@ impl Resolve<WriteArgs> for CreateAction {
}
impl Resolve<WriteArgs> for CopyAction {
#[instrument(name = "CopyAction", skip(user))]
#[instrument(
"CopyAction",
skip_all,
fields(
operator = user.id,
action = self.name,
copy_action = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -39,7 +55,15 @@ impl Resolve<WriteArgs> for CopyAction {
}
impl Resolve<WriteArgs> for UpdateAction {
#[instrument(name = "UpdateAction", skip(user))]
#[instrument(
"UpdateAction",
skip_all,
fields(
operator = user.id,
action = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -49,7 +73,15 @@ impl Resolve<WriteArgs> for UpdateAction {
}
impl Resolve<WriteArgs> for RenameAction {
#[instrument(name = "RenameAction", skip(user))]
#[instrument(
"RenameAction",
skip_all,
fields(
operator = user.id,
action = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -59,8 +91,18 @@ impl Resolve<WriteArgs> for RenameAction {
}
impl Resolve<WriteArgs> for DeleteAction {
#[instrument(name = "DeleteAction", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Action> {
Ok(resource::delete::<Action>(&self.id, args).await?)
#[instrument(
"DeleteAction",
skip_all,
fields(
operator = user.id,
action = self.id
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Action> {
Ok(resource::delete::<Action>(&self.id, user).await?)
}
}

View File

@@ -10,6 +10,14 @@ use serror::AddStatusCodeError;
use crate::{api::write::WriteArgs, state::db_client};
impl Resolve<WriteArgs> for CloseAlert {
#[instrument(
"CloseAlert",
skip_all,
fields(
operator = admin.id,
alert_id = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,

View File

@@ -11,7 +11,15 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateAlerter {
#[instrument(name = "CreateAlerter", skip(user))]
#[instrument(
"CreateAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -22,7 +30,15 @@ impl Resolve<WriteArgs> for CreateAlerter {
}
impl Resolve<WriteArgs> for CopyAlerter {
#[instrument(name = "CopyAlerter", skip(user))]
#[instrument(
"CopyAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.name,
copy_alerter = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -39,17 +55,32 @@ impl Resolve<WriteArgs> for CopyAlerter {
}
impl Resolve<WriteArgs> for DeleteAlerter {
#[instrument(name = "DeleteAlerter", skip(args))]
#[instrument(
"DeleteAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.id,
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Alerter> {
Ok(resource::delete::<Alerter>(&self.id, args).await?)
Ok(resource::delete::<Alerter>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateAlerter {
#[instrument(name = "UpdateAlerter", skip(user))]
#[instrument(
"UpdateAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.id,
update = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -62,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateAlerter {
}
impl Resolve<WriteArgs> for RenameAlerter {
#[instrument(name = "RenameAlerter", skip(user))]
#[instrument(
"RenameAlerter",
skip_all,
fields(
operator = user.id,
alerter = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -1,4 +1,4 @@
use std::{path::PathBuf, str::FromStr, time::Duration};
use std::{path::PathBuf, time::Duration};
use anyhow::{Context, anyhow};
use database::mungos::mongodb::bson::to_document;
@@ -42,7 +42,15 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateBuild {
#[instrument(name = "CreateBuild", skip(user))]
#[instrument(
"CreateBuild",
skip_all,
fields(
operator = user.id,
build = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -53,7 +61,15 @@ impl Resolve<WriteArgs> for CreateBuild {
}
impl Resolve<WriteArgs> for CopyBuild {
#[instrument(name = "CopyBuild", skip(user))]
#[instrument(
"CopyBuild",
skip_all,
fields(
operator = user.id,
build = self.name,
copy_build = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -72,14 +88,32 @@ impl Resolve<WriteArgs> for CopyBuild {
}
impl Resolve<WriteArgs> for DeleteBuild {
#[instrument(name = "DeleteBuild", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Build> {
Ok(resource::delete::<Build>(&self.id, args).await?)
#[instrument(
"DeleteBuild",
skip_all,
fields(
operator = user.id,
build = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Build> {
Ok(resource::delete::<Build>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateBuild {
#[instrument(name = "UpdateBuild", skip(user))]
#[instrument(
"UpdateBuild",
skip_all,
fields(
operator = user.id,
build = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -89,7 +123,15 @@ impl Resolve<WriteArgs> for UpdateBuild {
}
impl Resolve<WriteArgs> for RenameBuild {
#[instrument(name = "RenameBuild", skip(user))]
#[instrument(
"RenameBuild",
skip_all,
fields(
operator = user.id,
build = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -99,7 +141,14 @@ impl Resolve<WriteArgs> for RenameBuild {
}
impl Resolve<WriteArgs> for WriteBuildFileContents {
#[instrument(name = "WriteBuildFileContents", skip(args))]
#[instrument(
"WriteBuildFileContents",
skip_all,
fields(
operator = args.user.id,
build = self.build,
)
)]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let build = get_check_permissions::<Build>(
&self.build,
@@ -171,6 +220,7 @@ impl Resolve<WriteArgs> for WriteBuildFileContents {
}
}
#[instrument("WriteDockerfileContentsGit", skip_all)]
async fn write_dockerfile_contents_git(
req: WriteBuildFileContents,
args: &WriteArgs,
@@ -317,11 +367,6 @@ async fn write_dockerfile_contents_git(
}
impl Resolve<WriteArgs> for RefreshBuildCache {
#[instrument(
name = "RefreshBuildCache",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -345,23 +390,28 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
None
};
let (
remote_path,
remote_contents,
remote_error,
latest_hash,
latest_message,
) = if build.config.files_on_host {
let RemoteDockerfileContents {
path,
contents,
error,
hash,
message,
} = if build.config.files_on_host {
// =============
// FILES ON HOST
// =============
match get_on_host_dockerfile(&build).await {
Ok(FileContents { path, contents }) => {
(Some(path), Some(contents), None, None, None)
}
Err(e) => {
(None, None, Some(format_serror(&e.into())), None, None)
RemoteDockerfileContents {
path: Some(path),
contents: Some(contents),
..Default::default()
}
}
Err(e) => RemoteDockerfileContents {
error: Some(format_serror(&e.into())),
..Default::default()
},
}
} else if let Some(repo) = &repo {
let Some(res) = get_git_remote(&build, repo.into()).await?
@@ -381,7 +431,7 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
// =============
// UI BASED FILE
// =============
(None, None, None, None, None)
RemoteDockerfileContents::default()
};
let info = BuildInfo {
@@ -389,11 +439,11 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
built_hash: build.info.built_hash,
built_message: build.info.built_message,
built_contents: build.info.built_contents,
remote_path,
remote_contents,
remote_error,
latest_hash,
latest_message,
remote_path: path,
remote_contents: contents,
remote_error: error,
latest_hash: hash,
latest_message: message,
};
let info = to_document(&info)
@@ -485,15 +535,7 @@ async fn get_on_host_dockerfile(
async fn get_git_remote(
build: &Build,
mut clone_args: RepoExecutionArgs,
) -> anyhow::Result<
Option<(
Option<String>,
Option<String>,
Option<String>,
Option<String>,
Option<String>,
)>,
> {
) -> anyhow::Result<Option<RemoteDockerfileContents>> {
if clone_args.provider.is_empty() {
// Nothing to do here
return Ok(None);
@@ -520,10 +562,19 @@ async fn get_git_remote(
access_token,
)
.await
.context("failed to clone build repo")?;
.context("Failed to clone Build repo")?;
let relative_path = PathBuf::from_str(&build.config.build_path)
.context("Invalid build path")?
// Ensure clone / pull successful,
// propogate error log -> 'errored' and return.
if let Some(failure) = res.logs.iter().find(|log| !log.success) {
return Ok(Some(RemoteDockerfileContents {
path: Some(format!("Failed at: {}", failure.stage)),
error: Some(failure.combined()),
..Default::default()
}));
}
let relative_path = PathBuf::from(&build.config.build_path)
.join(&build.config.dockerfile_path);
let full_path = repo_path.join(&relative_path);
@@ -534,11 +585,20 @@ async fn get_git_remote(
Ok(contents) => (Some(contents), None),
Err(e) => (None, Some(format_serror(&e.into()))),
};
Ok(Some((
Some(relative_path.display().to_string()),
Ok(Some(RemoteDockerfileContents {
path: Some(relative_path.display().to_string()),
contents,
error,
res.commit_hash,
res.commit_message,
)))
hash: res.commit_hash,
message: res.commit_message,
}))
}
#[derive(Default)]
pub struct RemoteDockerfileContents {
pub path: Option<String>,
pub contents: Option<String>,
pub error: Option<String>,
pub hash: Option<String>,
pub message: Option<String>,
}

View File

@@ -11,7 +11,15 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateBuilder {
#[instrument(name = "CreateBuilder", skip(user))]
#[instrument(
"CreateBuilder",
skip_all,
fields(
operator = user.id,
builder = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -22,7 +30,15 @@ impl Resolve<WriteArgs> for CreateBuilder {
}
impl Resolve<WriteArgs> for CopyBuilder {
#[instrument(name = "CopyBuilder", skip(user))]
#[instrument(
"CopyBuilder",
skip_all,
fields(
operator = user.id,
builder = self.name,
copy_builder = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -39,17 +55,32 @@ impl Resolve<WriteArgs> for CopyBuilder {
}
impl Resolve<WriteArgs> for DeleteBuilder {
#[instrument(name = "DeleteBuilder", skip(args))]
#[instrument(
"DeleteBuilder",
skip_all,
fields(
operator = user.id,
builder = self.id,
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Builder> {
Ok(resource::delete::<Builder>(&self.id, args).await?)
Ok(resource::delete::<Builder>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateBuilder {
#[instrument(name = "UpdateBuilder", skip(user))]
#[instrument(
"UpdateBuilder",
skip_all,
fields(
operator = user.id,
builder = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -62,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateBuilder {
}
impl Resolve<WriteArgs> for RenameBuilder {
#[instrument(name = "RenameBuilder", skip(user))]
#[instrument(
"RenameBuilder",
skip_all,
fields(
operator = user.id,
builder = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -33,7 +33,15 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateDeployment {
#[instrument(name = "CreateDeployment", skip(user))]
#[instrument(
"CreateDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -49,7 +57,15 @@ impl Resolve<WriteArgs> for CreateDeployment {
}
impl Resolve<WriteArgs> for CopyDeployment {
#[instrument(name = "CopyDeployment", skip(user))]
#[instrument(
"CopyDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.name,
copy_deployment = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -72,7 +88,15 @@ impl Resolve<WriteArgs> for CopyDeployment {
}
impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
#[instrument(name = "CreateDeploymentFromContainer", skip(user))]
#[instrument(
"CreateDeploymentFromContainer",
skip_all,
fields(
operator = user.id,
server = self.server,
deployment = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -166,17 +190,32 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
}
impl Resolve<WriteArgs> for DeleteDeployment {
#[instrument(name = "DeleteDeployment", skip(args))]
#[instrument(
"DeleteDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.id
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
Ok(resource::delete::<Deployment>(&self.id, args).await?)
Ok(resource::delete::<Deployment>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateDeployment {
#[instrument(name = "UpdateDeployment", skip(user))]
#[instrument(
"UpdateDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -189,7 +228,15 @@ impl Resolve<WriteArgs> for UpdateDeployment {
}
impl Resolve<WriteArgs> for RenameDeployment {
#[instrument(name = "RenameDeployment", skip(user))]
#[instrument(
"RenameDeployment",
skip_all,
fields(
operator = user.id,
deployment = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -1,5 +1,3 @@
use std::time::Instant;
use anyhow::Context;
use axum::{
Extension, Router, extract::Path, middleware, routing::post,
@@ -11,6 +9,7 @@ use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use strum::Display;
use typeshare::typeshare;
use uuid::Uuid;
@@ -35,6 +34,7 @@ mod service_user;
mod stack;
mod sync;
mod tag;
mod terminal;
mod user;
mod user_group;
mod variable;
@@ -47,7 +47,7 @@ pub struct WriteArgs {
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
)]
#[variant_derive(Debug)]
#[variant_derive(Debug, Display)]
#[args(WriteArgs)]
#[response(Response)]
#[error(serror::Error)]
@@ -90,9 +90,6 @@ pub enum WriteRequest {
UpdateServer(UpdateServer),
RenameServer(RenameServer),
CreateNetwork(CreateNetwork),
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
UpdateServerPublicKey(UpdateServerPublicKey),
RotateServerKeys(RotateServerKeys),
@@ -168,6 +165,12 @@ pub enum WriteRequest {
CommitSync(CommitSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
// ==== TERMINAL ====
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
// ==== TAG ====
CreateTag(CreateTag),
DeleteTag(DeleteTag),
@@ -230,31 +233,22 @@ async fn handler(
res?
}
#[instrument(
name = "WriteRequest",
skip(user, request),
fields(
user_id = user.id,
request = format!("{:?}", request.extract_variant())
)
)]
async fn task(
req_id: Uuid,
request: WriteRequest,
user: User,
) -> serror::Result<axum::response::Response> {
info!("/write request | user: {}", user.username);
let timer = Instant::now();
let variant = request.extract_variant();
info!("/write request | {variant} | user: {}", user.username);
let res = request.resolve(&WriteArgs { user }).await;
if let Err(e) = &res {
warn!("/write request {req_id} error: {:#}", e.error);
warn!(
"/write request {req_id} | {variant} | error: {:#}",
e.error
);
}
let elapsed = timer.elapsed();
debug!("/write request {req_id} | resolve time: {elapsed:?}");
res.map(|res| res.0)
}

View File

@@ -6,7 +6,9 @@ use komodo_client::{
DeleteOnboardingKey, DeleteOnboardingKeyResponse,
UpdateOnboardingKey, UpdateOnboardingKeyResponse,
},
entities::{komodo_timestamp, onboarding_key::OnboardingKey},
entities::{
komodo_timestamp, onboarding_key::OnboardingKey, random_string,
},
};
use noise::key::EncodedKeyPair;
use reqwest::StatusCode;
@@ -18,7 +20,18 @@ use crate::{api::write::WriteArgs, state::db_client};
//
impl Resolve<WriteArgs> for CreateOnboardingKey {
#[instrument(name = "CreateServerOnboardingKey", skip(self, admin))]
#[instrument(
"CreateOnboardingKey",
skip_all,
fields(
operator = admin.id,
name = self.name,
expires = self.expires,
tags = format!("{:?}", self.tags),
copy_server = self.copy_server,
create_builder = self.create_builder,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -29,13 +42,16 @@ impl Resolve<WriteArgs> for CreateOnboardingKey {
.status_code(StatusCode::FORBIDDEN),
);
}
let keys = if let Some(private_key) = self.private_key {
EncodedKeyPair::from_private_key(&private_key)?
let private_key = if let Some(private_key) = self.private_key {
private_key
} else {
EncodedKeyPair::generate()?
format!("O-{}", random_string(30))
};
let public_key = EncodedKeyPair::from_private_key(&private_key)?
.public
.into_inner();
let onboarding_key = OnboardingKey {
public_key: keys.public.into_inner(),
public_key,
name: self.name,
enabled: true,
onboarded: Default::default(),
@@ -62,7 +78,7 @@ impl Resolve<WriteArgs> for CreateOnboardingKey {
"No Server onboarding key found on database after create",
)?;
Ok(CreateOnboardingKeyResponse {
private_key: keys.private.into_inner(),
private_key,
created,
})
}
@@ -71,6 +87,15 @@ impl Resolve<WriteArgs> for CreateOnboardingKey {
//
impl Resolve<WriteArgs> for UpdateOnboardingKey {
#[instrument(
"UpdateOnboardingKey",
skip_all,
fields(
operator = admin.id,
public_key = self.public_key,
update = format!("{:?}", self),
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -140,7 +165,14 @@ impl Resolve<WriteArgs> for UpdateOnboardingKey {
//
impl Resolve<WriteArgs> for DeleteOnboardingKey {
#[instrument(name = "DeleteServerOnboardingKey", skip(admin))]
#[instrument(
"DeleteOnboardingKey",
skip_all,
fields(
operator = admin.id,
public_key = self.public_key,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,

View File

@@ -8,6 +8,7 @@ use database::mungos::{
options::UpdateOptions,
},
};
use derive_variants::ExtractVariant as _;
use komodo_client::{
api::write::*,
entities::{
@@ -22,7 +23,15 @@ use crate::{helpers::query::get_user, state::db_client};
use super::WriteArgs;
impl Resolve<WriteArgs> for UpdateUserAdmin {
#[instrument(name = "UpdateUserAdmin", skip(super_admin))]
#[instrument(
"UpdateUserAdmin",
skip_all,
fields(
operator = super_admin.id,
target_user = self.user_id,
admin = self.admin,
)
)]
async fn resolve(
self,
WriteArgs { user: super_admin }: &WriteArgs,
@@ -60,7 +69,17 @@ impl Resolve<WriteArgs> for UpdateUserAdmin {
}
impl Resolve<WriteArgs> for UpdateUserBasePermissions {
#[instrument(name = "UpdateUserBasePermissions", skip(admin))]
#[instrument(
"UpdateUserBasePermissions",
skip_all,
fields(
operator = admin.id,
target_user = self.user_id,
enabled = self.enabled,
create_servers = self.create_servers,
create_builds = self.create_builds,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -117,7 +136,16 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
}
impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
#[instrument(name = "UpdatePermissionOnResourceType", skip(admin))]
#[instrument(
"UpdatePermissionOnResourceType",
skip_all,
fields(
operator = admin.id,
user_target = format!("{:?}", self.user_target),
resource_type = self.resource_type.to_string(),
permission = format!("{:?}", self.permission),
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -185,7 +213,17 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
}
impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
#[instrument(name = "UpdatePermissionOnTarget", skip(admin))]
#[instrument(
"UpdatePermissionOnTarget",
skip_all,
fields(
operator = admin.id,
user_target = format!("{:?}", self.user_target),
resource_type = self.resource_target.extract_variant().to_string(),
resource_id = self.resource_target.extract_variant_id().1,
permission = format!("{:?}", self.permission),
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,

View File

@@ -11,7 +11,15 @@ use crate::{permission::get_check_permissions, resource};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateProcedure {
#[instrument(name = "CreateProcedure", skip(user))]
#[instrument(
"CreateProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.name,
config = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -22,7 +30,15 @@ impl Resolve<WriteArgs> for CreateProcedure {
}
impl Resolve<WriteArgs> for CopyProcedure {
#[instrument(name = "CopyProcedure", skip(user))]
#[instrument(
"CopyProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.name,
copy_procedure = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -45,7 +61,15 @@ impl Resolve<WriteArgs> for CopyProcedure {
}
impl Resolve<WriteArgs> for UpdateProcedure {
#[instrument(name = "UpdateProcedure", skip(user))]
#[instrument(
"UpdateProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -58,7 +82,15 @@ impl Resolve<WriteArgs> for UpdateProcedure {
}
impl Resolve<WriteArgs> for RenameProcedure {
#[instrument(name = "RenameProcedure", skip(user))]
#[instrument(
"RenameProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -71,11 +103,18 @@ impl Resolve<WriteArgs> for RenameProcedure {
}
impl Resolve<WriteArgs> for DeleteProcedure {
#[instrument(name = "DeleteProcedure", skip(args))]
#[instrument(
"DeleteProcedure",
skip_all,
fields(
operator = user.id,
procedure = self.id
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteProcedureResponse> {
Ok(resource::delete::<Procedure>(&self.id, args).await?)
Ok(resource::delete::<Procedure>(&self.id, user).await?)
}
}

View File

@@ -10,7 +10,9 @@ use komodo_client::{
provider::{DockerRegistryAccount, GitProviderAccount},
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::{
helpers::update::{add_update, make_update},
@@ -20,25 +22,41 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateGitProviderAccount {
#[instrument(
"CreateGitProviderAccount",
skip_all,
fields(
operator = user.id,
domain = self.account.domain,
username = self.account.username,
https = self.account.https.unwrap_or(true),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<CreateGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can create git provider accounts")
.into(),
anyhow!("Only admins can create git provider accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
let mut account: GitProviderAccount = self.account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string.").into());
return Err(
anyhow!("Domain cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string.").into());
return Err(
anyhow!("Username cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
let mut update = make_update(
@@ -51,14 +69,14 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
.git_accounts
.insert_one(&account)
.await
.context("failed to create git provider account on db")?
.context("Failed to create git provider account on db")?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.context("Inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create git provider account",
"Create git provider account",
format!(
"Created git provider account for {} with username {}",
account.domain, account.username
@@ -70,7 +88,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create git provider account | {e:#}")
error!("Failed to add update for create git provider account | {e:#}")
})
.ok();
@@ -79,14 +97,25 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
}
impl Resolve<WriteArgs> for UpdateGitProviderAccount {
#[instrument(
"UpdateGitProviderAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
domain = self.account.domain,
username = self.account.username,
https = self.account.https.unwrap_or(true),
)
)]
async fn resolve(
mut self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can update git provider accounts")
.into(),
anyhow!("Only admins can update git provider accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -94,8 +123,8 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
&& domain.is_empty()
{
return Err(
anyhow!("cannot update git provider with empty domain")
.into(),
anyhow!("Cannot update git provider with empty domain")
.status_code(StatusCode::BAD_REQUEST),
);
}
@@ -103,8 +132,8 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
&& username.is_empty()
{
return Err(
anyhow!("cannot update git provider with empty username")
.into(),
anyhow!("Cannot update git provider with empty username")
.status_code(StatusCode::BAD_REQUEST),
);
}
@@ -118,7 +147,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
);
let account = to_document(&self.account).context(
"failed to serialize partial git provider account to bson",
"Failed to serialize partial git provider account to bson",
)?;
let db = db_client();
update_one_by_id(
@@ -128,17 +157,17 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
None,
)
.await
.context("failed to update git provider account on db")?;
.context("Failed to update git provider account on db")?;
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
.await
.context("failed to query db for git accounts")?
.context("Failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(anyhow!("No account found with given id").into());
};
update.push_simple_log(
"update git provider account",
"Update git provider account",
format!(
"Updated git provider account for {} with username {}",
account.domain, account.username
@@ -150,7 +179,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update git provider account | {e:#}")
error!("Failed to add update for update git provider account | {e:#}")
})
.ok();
@@ -159,14 +188,22 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
}
impl Resolve<WriteArgs> for DeleteGitProviderAccount {
#[instrument(
"DeleteGitProviderAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteGitProviderAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can delete git provider accounts")
.into(),
anyhow!("Only admins can delete git provider accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -179,16 +216,19 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
let db = db_client();
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
.await
.context("failed to query db for git accounts")?
.context("Failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(
anyhow!("No account found with given id")
.status_code(StatusCode::BAD_REQUEST),
);
};
delete_one_by_id(&db.git_accounts, &self.id, None)
.await
.context("failed to delete git account on db")?;
update.push_simple_log(
"delete git provider account",
"Delete git provider account",
format!(
"Deleted git provider account for {} with username {}",
account.domain, account.username
@@ -200,7 +240,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete git provider account | {e:#}")
error!("Failed to add update for delete git provider account | {e:#}")
})
.ok();
@@ -209,6 +249,15 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
}
impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
#[instrument(
"CreateDockerRegistryAccount",
skip_all,
fields(
operator = user.id,
domain = self.account.domain,
username = self.account.username,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -216,20 +265,26 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
if !user.admin {
return Err(
anyhow!(
"only admins can create docker registry account accounts"
"Only admins can create docker registry account accounts"
)
.into(),
.status_code(StatusCode::FORBIDDEN),
);
}
let mut account: DockerRegistryAccount = self.account.into();
if account.domain.is_empty() {
return Err(anyhow!("domain cannot be empty string.").into());
return Err(
anyhow!("Domain cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
if account.username.is_empty() {
return Err(anyhow!("username cannot be empty string.").into());
return Err(
anyhow!("Username cannot be empty string.")
.status_code(StatusCode::BAD_REQUEST),
);
}
let mut update = make_update(
@@ -243,15 +298,15 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
.insert_one(&account)
.await
.context(
"failed to create docker registry account account on db",
"Failed to create docker registry account account on db",
)?
.inserted_id
.as_object_id()
.context("inserted id is not ObjectId")?
.context("Inserted id is not ObjectId")?
.to_string();
update.push_simple_log(
"create docker registry account",
"Create docker registry account",
format!(
"Created docker registry account account for {} with username {}",
account.domain, account.username
@@ -263,7 +318,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for create docker registry account | {e:#}")
error!("Failed to add update for create docker registry account | {e:#}")
})
.ok();
@@ -272,14 +327,24 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
}
impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
#[instrument(
"UpdateDockerRegistryAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
domain = self.account.domain,
username = self.account.username,
)
)]
async fn resolve(
mut self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can update docker registry accounts")
.into(),
anyhow!("Only admins can update docker registry accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -288,9 +353,9 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
{
return Err(
anyhow!(
"cannot update docker registry account with empty domain"
"Cannot update docker registry account with empty domain"
)
.into(),
.status_code(StatusCode::BAD_REQUEST),
);
}
@@ -299,9 +364,9 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
{
return Err(
anyhow!(
"cannot update docker registry account with empty username"
"Cannot update docker registry account with empty username"
)
.into(),
.status_code(StatusCode::BAD_REQUEST),
);
}
@@ -314,7 +379,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
);
let account = to_document(&self.account).context(
"failed to serialize partial docker registry account account to bson",
"Failed to serialize partial docker registry account account to bson",
)?;
let db = db_client();
@@ -326,19 +391,19 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
)
.await
.context(
"failed to update docker registry account account on db",
"Failed to update docker registry account account on db",
)?;
let Some(account) =
find_one_by_id(&db.registry_accounts, &self.id)
.await
.context("failed to query db for registry accounts")?
.context("Failed to query db for registry accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(anyhow!("No account found with given id").into());
};
update.push_simple_log(
"update docker registry account",
"Update docker registry account",
format!(
"Updated docker registry account account for {} with username {}",
account.domain, account.username
@@ -350,7 +415,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for update docker registry account | {e:#}")
error!("Failed to add update for update docker registry account | {e:#}")
})
.ok();
@@ -359,14 +424,22 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
}
impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
#[instrument(
"DeleteDockerRegistryAccount",
skip_all,
fields(
operator = user.id,
id = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<DeleteDockerRegistryAccountResponse> {
if !user.admin {
return Err(
anyhow!("only admins can delete docker registry accounts")
.into(),
anyhow!("Only admins can delete docker registry accounts")
.status_code(StatusCode::FORBIDDEN),
);
}
@@ -380,16 +453,19 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
let Some(account) =
find_one_by_id(&db.registry_accounts, &self.id)
.await
.context("failed to query db for git accounts")?
.context("Failed to query db for git accounts")?
else {
return Err(anyhow!("no account found with given id").into());
return Err(
anyhow!("No account found with given id")
.status_code(StatusCode::BAD_REQUEST),
);
};
delete_one_by_id(&db.registry_accounts, &self.id, None)
.await
.context("failed to delete registry account on db")?;
.context("Failed to delete registry account on db")?;
update.push_simple_log(
"delete registry account",
"Delete registry account",
format!(
"Deleted registry account for {} with username {}",
account.domain, account.username
@@ -401,7 +477,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
add_update(update)
.await
.inspect_err(|e| {
error!("failed to add update for delete docker registry account | {e:#}")
error!("Failed to add update for delete docker registry account | {e:#}")
})
.ok();

View File

@@ -32,7 +32,15 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateRepo {
#[instrument(name = "CreateRepo", skip(user))]
#[instrument(
"CreateRepo",
skip_all,
fields(
operator = user.id,
repo = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -43,7 +51,15 @@ impl Resolve<WriteArgs> for CreateRepo {
}
impl Resolve<WriteArgs> for CopyRepo {
#[instrument(name = "CopyRepo", skip(user))]
#[instrument(
"CopyRepo",
skip_all,
fields(
operator = user.id,
repo = self.name,
copy_repo = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -60,14 +76,32 @@ impl Resolve<WriteArgs> for CopyRepo {
}
impl Resolve<WriteArgs> for DeleteRepo {
#[instrument(name = "DeleteRepo", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Repo> {
Ok(resource::delete::<Repo>(&self.id, args).await?)
#[instrument(
"DeleteRepo",
skip_all,
fields(
operator = user.id,
repo = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Repo> {
Ok(resource::delete::<Repo>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateRepo {
#[instrument(name = "UpdateRepo", skip(user))]
#[instrument(
"UpdateRepo",
skip_all,
fields(
operator = user.id,
repo = self.id,
update = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -77,7 +111,15 @@ impl Resolve<WriteArgs> for UpdateRepo {
}
impl Resolve<WriteArgs> for RenameRepo {
#[instrument(name = "RenameRepo", skip(user))]
#[instrument(
"RenameRepo",
skip_all,
fields(
operator = user.id,
repo = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -154,11 +196,6 @@ impl Resolve<WriteArgs> for RenameRepo {
}
impl Resolve<WriteArgs> for RefreshRepoCache {
#[instrument(
name = "RefreshRepoCache",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -1,4 +1,5 @@
use anyhow::anyhow;
use derive_variants::ExtractVariant as _;
use komodo_client::{
api::write::{UpdateResourceMeta, UpdateResourceMetaResponse},
entities::{
@@ -7,14 +8,27 @@ use komodo_client::{
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCodeError;
use crate::resource::{self, ResourceMetaUpdate};
use super::WriteArgs;
impl Resolve<WriteArgs> for UpdateResourceMeta {
#[instrument(name = "UpdateResourceMeta", skip(args))]
#[instrument(
"UpdateResourceMeta",
skip_all,
fields(
operator = args.user.id,
resource_type = self.target.extract_variant().to_string(),
resource_id = self.target.extract_variant_id().1,
description = self.description,
template = self.template,
tags = format!("{:?}", self.tags),
)
)]
async fn resolve(
self,
args: &WriteArgs,
@@ -28,7 +42,7 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
ResourceTarget::System(_) => {
return Err(
anyhow!("cannot update meta of System resource target")
.into(),
.status_code(StatusCode::BAD_REQUEST),
);
}
ResourceTarget::Server(id) => {

View File

@@ -3,7 +3,7 @@ use formatting::{bold, format_serror};
use komodo_client::{
api::write::*,
entities::{
NoData, Operation,
Operation,
permission::PermissionLevel,
server::{Server, ServerInfo},
to_docker_compatible_name,
@@ -25,7 +25,15 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateServer {
#[instrument(name = "CreateServer", skip(user))]
#[instrument(
"CreateServer",
skip_all,
fields(
operator = user.id,
server = self.name,
config = serde_json::to_string(&self.config).unwrap()
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -44,7 +52,15 @@ impl Resolve<WriteArgs> for CreateServer {
}
impl Resolve<WriteArgs> for CopyServer {
#[instrument(name = "CopyServer", skip(user))]
#[instrument(
"CopyServer",
skip_all,
fields(
operator = user.id,
server = self.name,
copy_server = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -70,14 +86,32 @@ impl Resolve<WriteArgs> for CopyServer {
}
impl Resolve<WriteArgs> for DeleteServer {
#[instrument(name = "DeleteServer", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Server> {
Ok(resource::delete::<Server>(&self.id, args).await?)
#[instrument(
"DeleteServer",
skip_all,
fields(
operator = user.id,
server = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Server> {
Ok(resource::delete::<Server>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateServer {
#[instrument(name = "UpdateServer", skip(user))]
#[instrument(
"UpdateServer",
skip_all,
fields(
operator = user.id,
server = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -87,7 +121,15 @@ impl Resolve<WriteArgs> for UpdateServer {
}
impl Resolve<WriteArgs> for RenameServer {
#[instrument(name = "RenameServer", skip(user))]
#[instrument(
"RenameServer",
skip_all,
fields(
operator = user.id,
server = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -97,7 +139,15 @@ impl Resolve<WriteArgs> for RenameServer {
}
impl Resolve<WriteArgs> for CreateNetwork {
#[instrument(name = "CreateNetwork", skip(user))]
#[instrument(
"CreateNetwork",
skip_all,
fields(
operator = user.id,
server = self.server,
network = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -137,88 +187,18 @@ impl Resolve<WriteArgs> for CreateNetwork {
}
}
// (Removed in this diff — superseded by the TerminalTarget-based
// CreateTerminal in the new terminal module.)
impl Resolve<WriteArgs> for CreateTerminal {
  #[instrument(name = "CreateTerminal", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    // Requires Write-level terminal permission on the target server.
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Write.terminal(),
    )
    .await?;
    // Forward the create request to the Periphery agent on that server.
    let periphery = periphery_client(&server).await?;
    periphery
      .request(api::terminal::CreateTerminal {
        name: self.name,
        command: self.command,
        recreate: self.recreate,
      })
      .await
      .context("Failed to create terminal on Periphery")?;
    Ok(NoData {})
  }
}
// (Removed in this diff — superseded by the TerminalTarget-based
// DeleteTerminal in the new terminal module.)
impl Resolve<WriteArgs> for DeleteTerminal {
  #[instrument(name = "DeleteTerminal", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    // Requires Write-level terminal permission on the target server.
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Write.terminal(),
    )
    .await?;
    // Forward the delete request to the Periphery agent on that server.
    let periphery = periphery_client(&server).await?;
    periphery
      .request(api::terminal::DeleteTerminal {
        terminal: self.terminal,
      })
      .await
      .context("Failed to delete terminal on Periphery")?;
    Ok(NoData {})
  }
}
// (Removed in this diff — superseded by the rewritten DeleteAllTerminals
// in the new terminal module.)
impl Resolve<WriteArgs> for DeleteAllTerminals {
  #[instrument(name = "DeleteAllTerminals", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    // Requires Write-level terminal permission on the target server.
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Write.terminal(),
    )
    .await?;
    // Clears every terminal on the server's Periphery agent at once.
    let periphery = periphery_client(&server).await?;
    periphery
      .request(api::terminal::DeleteAllTerminals {})
      .await
      .context("Failed to delete all terminals on Periphery")?;
    Ok(NoData {})
  }
}
//
impl Resolve<WriteArgs> for UpdateServerPublicKey {
#[instrument(name = "UpdateServerPublicKey", skip(args))]
#[instrument(
"UpdateServerPublicKey",
skip_all,
fields(
operator = args.user.id,
server = self.server,
public_key = self.public_key,
)
)]
async fn resolve(
self,
args: &WriteArgs,
@@ -249,7 +229,14 @@ impl Resolve<WriteArgs> for UpdateServerPublicKey {
//
impl Resolve<WriteArgs> for RotateServerKeys {
#[instrument(name = "RotateServerPrivateKey", skip(args))]
#[instrument(
"RotateServerKeys",
skip_all,
fields(
operator = args.user.id,
server = self.server,
)
)]
async fn resolve(
self,
args: &WriteArgs,

View File

@@ -19,7 +19,15 @@ use crate::{api::user::UserArgs, state::db_client};
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateServiceUser {
#[instrument(name = "CreateServiceUser", skip(user))]
#[instrument(
"CreateServiceUser",
skip_all,
fields(
operator = user.id,
username = self.username,
description = self.description,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -63,7 +71,15 @@ impl Resolve<WriteArgs> for CreateServiceUser {
}
impl Resolve<WriteArgs> for UpdateServiceUserDescription {
#[instrument(name = "UpdateServiceUserDescription", skip(user))]
#[instrument(
"UpdateServiceUserDescription",
skip_all,
fields(
operator = user.id,
username = self.username,
description = self.description,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -99,7 +115,16 @@ impl Resolve<WriteArgs> for UpdateServiceUserDescription {
}
impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
#[instrument(name = "CreateApiKeyForServiceUser", skip(user))]
#[instrument(
"CreateApiKeyForServiceUser",
skip_all,
fields(
operator = user.id,
service_user = self.user_id,
name = self.name,
expires = self.expires,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -125,7 +150,14 @@ impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
}
impl Resolve<WriteArgs> for DeleteApiKeyForServiceUser {
#[instrument(name = "DeleteApiKeyForServiceUser", skip(user))]
#[instrument(
"DeleteApiKeyForServiceUser",
skip_all,
fields(
operator = user.id,
key = self.key,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -42,7 +42,15 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateStack {
#[instrument(name = "CreateStack", skip(user))]
#[instrument(
"CreateStack",
skip_all,
fields(
operator = user.id,
stack = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -53,7 +61,15 @@ impl Resolve<WriteArgs> for CreateStack {
}
impl Resolve<WriteArgs> for CopyStack {
#[instrument(name = "CopyStack", skip(user))]
#[instrument(
"CopyStack",
skip_all,
fields(
operator = user.id,
stack = self.name,
copy_stack = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -71,14 +87,32 @@ impl Resolve<WriteArgs> for CopyStack {
}
impl Resolve<WriteArgs> for DeleteStack {
#[instrument(name = "DeleteStack", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Stack> {
Ok(resource::delete::<Stack>(&self.id, args).await?)
#[instrument(
"DeleteStack",
skip_all,
fields(
operator = user.id,
stack = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Stack> {
Ok(resource::delete::<Stack>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateStack {
#[instrument(name = "UpdateStack", skip(user))]
#[instrument(
"UpdateStack",
skip_all,
fields(
operator = user.id,
stack = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -88,7 +122,15 @@ impl Resolve<WriteArgs> for UpdateStack {
}
impl Resolve<WriteArgs> for RenameStack {
#[instrument(name = "RenameStack", skip(user))]
#[instrument(
"RenameStack",
skip_all,
fields(
operator = user.id,
stack = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -98,7 +140,15 @@ impl Resolve<WriteArgs> for RenameStack {
}
impl Resolve<WriteArgs> for WriteStackFileContents {
#[instrument(name = "WriteStackFileContents", skip(user))]
#[instrument(
"WriteStackFileContents",
skip_all,
fields(
operator = user.id,
stack = self.stack,
path = self.file_path,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -147,6 +197,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
}
}
#[instrument("WriteStackFileContentsOnHost", skip_all)]
async fn write_stack_file_contents_on_host(
stack: Stack,
file_path: String,
@@ -219,6 +270,7 @@ async fn write_stack_file_contents_on_host(
Ok(update)
}
#[instrument("WriteStackFileContentsGit", skip_all)]
async fn write_stack_file_contents_git(
mut stack: Stack,
file_path: &str,
@@ -360,11 +412,6 @@ async fn write_stack_file_contents_git(
}
impl Resolve<WriteArgs> for RefreshStackCache {
#[instrument(
name = "RefreshStackCache",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -33,6 +33,7 @@ use komodo_client::{
},
};
use resolver_api::Resolve;
use tracing::Instrument;
use crate::{
alert::send_alerts,
@@ -56,7 +57,15 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateResourceSync {
#[instrument(name = "CreateResourceSync", skip(user))]
#[instrument(
"CreateResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.name,
config = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -72,7 +81,15 @@ impl Resolve<WriteArgs> for CreateResourceSync {
}
impl Resolve<WriteArgs> for CopyResourceSync {
#[instrument(name = "CopyResourceSync", skip(user))]
#[instrument(
"CopyResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.name,
copy_sync = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -95,17 +112,32 @@ impl Resolve<WriteArgs> for CopyResourceSync {
}
impl Resolve<WriteArgs> for DeleteResourceSync {
#[instrument(name = "DeleteResourceSync", skip(args))]
#[instrument(
"DeleteResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.id,
)
)]
async fn resolve(
self,
args: &WriteArgs,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<ResourceSync> {
Ok(resource::delete::<ResourceSync>(&self.id, args).await?)
Ok(resource::delete::<ResourceSync>(&self.id, user).await?)
}
}
impl Resolve<WriteArgs> for UpdateResourceSync {
#[instrument(name = "UpdateResourceSync", skip(user))]
#[instrument(
"UpdateResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.id,
update = serde_json::to_string(&self.config).unwrap(),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -118,7 +150,15 @@ impl Resolve<WriteArgs> for UpdateResourceSync {
}
impl Resolve<WriteArgs> for RenameResourceSync {
#[instrument(name = "RenameResourceSync", skip(user))]
#[instrument(
"RenameResourceSync",
skip_all,
fields(
operator = user.id,
sync = self.id,
new_name = self.name
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -131,7 +171,16 @@ impl Resolve<WriteArgs> for RenameResourceSync {
}
impl Resolve<WriteArgs> for WriteSyncFileContents {
#[instrument(name = "WriteSyncFileContents", skip(args))]
#[instrument(
"WriteSyncFileContents",
skip_all,
fields(
operator = args.user.id,
sync = self.sync,
resource_path = self.resource_path,
file_path = self.file_path,
)
)]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let sync = get_check_permissions::<ResourceSync>(
&self.sync,
@@ -176,6 +225,7 @@ impl Resolve<WriteArgs> for WriteSyncFileContents {
}
}
#[instrument("WriteSyncFileContentsOnHost", skip_all)]
async fn write_sync_file_contents_on_host(
req: WriteSyncFileContents,
args: &WriteArgs,
@@ -238,6 +288,7 @@ async fn write_sync_file_contents_on_host(
Ok(update)
}
#[instrument("WriteSyncFileContentsGit", skip_all)]
async fn write_sync_file_contents_git(
req: WriteSyncFileContents,
args: &WriteArgs,
@@ -389,7 +440,14 @@ async fn write_sync_file_contents_git(
}
impl Resolve<WriteArgs> for CommitSync {
#[instrument(name = "CommitSync", skip(args))]
#[instrument(
"CommitSync",
skip_all,
fields(
operator = args.user.id,
sync = self.sync,
)
)]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let WriteArgs { user } = args;
@@ -476,7 +534,9 @@ impl Resolve<WriteArgs> for CommitSync {
.sync_directory
.join(to_path_compatible_name(&sync.name))
.join(&resource_path);
let span = info_span!("CommitSyncOnHost");
if let Err(e) = secret_file::write_async(&file_path, &res.toml)
.instrument(span)
.await
.with_context(|| {
format!("Failed to write resource file to {file_path:?}",)
@@ -569,6 +629,7 @@ impl Resolve<WriteArgs> for CommitSync {
}
}
#[instrument("CommitSyncGit", skip_all)]
async fn commit_git_sync(
mut args: RepoExecutionArgs,
resource_path: &Path,
@@ -613,11 +674,6 @@ async fn commit_git_sync(
}
impl Resolve<WriteArgs> for RefreshResourceSyncPending {
#[instrument(
name = "RefreshResourceSyncPending",
level = "debug",
skip(user)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -27,7 +27,15 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateTag {
#[instrument(name = "CreateTag", skip(user))]
#[instrument(
"CreateTag",
skip_all,
fields(
operator = user.id,
tag = self.name,
color = format!("{:?}", self.color),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -68,7 +76,15 @@ impl Resolve<WriteArgs> for CreateTag {
}
impl Resolve<WriteArgs> for RenameTag {
#[instrument(name = "RenameTag", skip(user))]
#[instrument(
"RenameTag",
skip_all,
fields(
operator = user.id,
tag = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -93,7 +109,15 @@ impl Resolve<WriteArgs> for RenameTag {
}
impl Resolve<WriteArgs> for UpdateTagColor {
#[instrument(name = "UpdateTagColor", skip(user))]
#[instrument(
"UpdateTagColor",
skip_all,
fields(
operator = user.id,
tag = self.tag,
color = format!("{:?}", self.color),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -114,7 +138,14 @@ impl Resolve<WriteArgs> for UpdateTagColor {
}
impl Resolve<WriteArgs> for DeleteTag {
#[instrument(name = "DeleteTag", skip(user))]
#[instrument(
"DeleteTag",
skip_all,
fields(
operator = user.id,
tag_id = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -0,0 +1,309 @@
use anyhow::Context as _;
use futures_util::{StreamExt as _, stream::FuturesUnordered};
use komodo_client::{
api::write::*,
entities::{
NoData, deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, terminal::TerminalTarget,
user::User,
},
};
use periphery_client::api;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::{
periphery_client,
query::get_all_tags,
terminal::{
create_container_terminal_inner,
get_deployment_periphery_container,
get_stack_service_periphery_container,
},
},
permission::get_check_permissions,
resource,
};
use super::WriteArgs;
//
impl Resolve<WriteArgs> for CreateTerminal {
  // Creates a terminal for any supported target: a shell directly on a
  // Server, an exec session in an arbitrary Container, a Stack service's
  // container, or a Deployment's container. Each arm delegates to a
  // helper which performs its own permission check before contacting
  // the Periphery agent.
  #[instrument(
    "CreateTerminal",
    skip_all,
    fields(
      operator = user.id,
      terminal = self.name,
      target = format!("{:?}", self.target),
      command = self.command,
      mode = format!("{:?}", self.mode),
      recreate = format!("{:?}", self.recreate),
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    // `clone` keeps `self` intact so each helper can consume the full
    // request (name / command / mode / recreate).
    match self.target.clone() {
      TerminalTarget::Server { server } => {
        // Server id is optional on the wire; missing => HTTP 400.
        let server = server
          .context("Must provide 'target.params.server'")
          .status_code(StatusCode::BAD_REQUEST)?;
        create_server_terminal(self, server, user).await?;
      }
      TerminalTarget::Container { server, container } => {
        create_container_terminal(self, server, container, user)
          .await?;
      }
      TerminalTarget::Stack { stack, service } => {
        // Service name is optional on the wire; missing => HTTP 400.
        let service = service
          .context("Must provide 'target.params.service'")
          .status_code(StatusCode::BAD_REQUEST)?;
        create_stack_service_terminal(self, stack, service, user)
          .await?;
      }
      TerminalTarget::Deployment { deployment } => {
        create_deployment_terminal(self, deployment, user).await?;
      }
    };
    Ok(NoData {})
  }
}
/// Open a terminal shell directly on the host server.
///
/// Checks the caller's terminal permission on the Server, then forwards
/// a `CreateServerTerminal` request to its Periphery agent.
///
/// NOTE(review): this checks `PermissionLevel::Read.terminal()` where
/// the previous implementation required `Write` — confirm the downgrade
/// is intentional.
async fn create_server_terminal(
  req: CreateTerminal,
  server: String,
  user: &User,
) -> anyhow::Result<()> {
  // Resolves the server id/name into the full Server document while
  // enforcing the terminal permission.
  let server = get_check_permissions::<Server>(
    &server,
    user,
    PermissionLevel::Read.terminal(),
  )
  .await?;
  // Only the terminal-shape fields are forwarded; target/mode were
  // already consumed by the dispatching resolver.
  let CreateTerminal {
    name,
    command,
    recreate,
    ..
  } = req;
  periphery_client(&server)
    .await?
    .request(api::terminal::CreateServerTerminal {
      name,
      command,
      recreate,
    })
    .await
    .context("Failed to create Server Terminal on Periphery")?;
  Ok(())
}
/// Open a terminal attached to an arbitrary container on a server.
///
/// Authorizes the caller's terminal permission on the Server, then
/// delegates to the shared container-terminal helper.
async fn create_container_terminal(
  req: CreateTerminal,
  server: String,
  container: String,
  user: &User,
) -> anyhow::Result<()> {
  let host = get_check_permissions::<Server>(
    &server,
    user,
    PermissionLevel::Read.terminal(),
  )
  .await?;
  let client = periphery_client(&host).await?;
  create_container_terminal_inner(req, &client, container).await
}
/// Open a terminal attached to the container backing a Stack service.
///
/// Permission is checked against the Stack (inside the lookup helper);
/// the helper also resolves the service's container name and the
/// Periphery client for the stack's server.
async fn create_stack_service_terminal(
  req: CreateTerminal,
  stack: String,
  service: String,
  user: &User,
) -> anyhow::Result<()> {
  let (_, client, container) =
    get_stack_service_periphery_container(&stack, &service, user)
      .await?;
  create_container_terminal_inner(req, &client, container).await
}
/// Open a terminal attached to the container backing a Deployment.
///
/// Permission is checked against the Deployment (inside the lookup
/// helper), which also resolves the container name and the Periphery
/// client for the deployment's server.
async fn create_deployment_terminal(
  req: CreateTerminal,
  deployment: String,
  user: &User,
) -> anyhow::Result<()> {
  let (_, client, container) =
    get_deployment_periphery_container(&deployment, user).await?;
  create_container_terminal_inner(req, &client, container).await
}
//
impl Resolve<WriteArgs> for DeleteTerminal {
  // Deletes a named terminal for any supported target. First resolves
  // the Server that hosts the terminal (enforcing terminal permission
  // on whichever resource the target names), then forwards the delete
  // to that server's Periphery agent.
  //
  // NOTE(review): every arm checks `PermissionLevel::Read.terminal()`
  // for a delete operation — the previous implementation required
  // `Write`. Confirm the downgrade is intentional.
  #[instrument(
    "DeleteTerminal",
    skip_all,
    fields(
      operator = user.id,
      target = format!("{:?}", self.target),
      terminal = self.terminal,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    let server = match &self.target {
      TerminalTarget::Server { server } => {
        // Server id is optional on the wire; missing => HTTP 400.
        let server = server
          .as_ref()
          .context("Must provide 'target.params.server'")
          .status_code(StatusCode::BAD_REQUEST)?;
        get_check_permissions::<Server>(
          server,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
      }
      TerminalTarget::Container { server, .. } => {
        get_check_permissions::<Server>(
          server,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
      }
      TerminalTarget::Stack { stack, .. } => {
        // Permission is checked on the Stack; its configured server id
        // is then fetched without a second permission check.
        let server = get_check_permissions::<Stack>(
          stack,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
        .config
        .server_id;
        resource::get::<Server>(&server).await?
      }
      TerminalTarget::Deployment { deployment } => {
        // Same pattern as the Stack arm, via the Deployment config.
        let server = get_check_permissions::<Deployment>(
          deployment,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
        .config
        .server_id;
        resource::get::<Server>(&server).await?
      }
    };
    // Forward the delete to the Periphery agent on the resolved server.
    let periphery = periphery_client(&server).await?;
    periphery
      .request(api::terminal::DeleteTerminal {
        target: self.target,
        terminal: self.terminal,
      })
      .await
      .context("Failed to delete terminal on Periphery")?;
    Ok(NoData {})
  }
}
//
impl Resolve<WriteArgs> for DeleteAllTerminals {
  // Deletes every terminal on a single server's Periphery agent.
  //
  // NOTE(review): checks `PermissionLevel::Read.terminal()` where the
  // previous implementation required `Write` — confirm intentional.
  #[instrument(
    "DeleteAllTerminals",
    skip_all,
    fields(
      operator = user.id,
      server = self.server,
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Read.terminal(),
    )
    .await?;
    // Forward the bulk delete to the server's Periphery agent.
    let periphery = periphery_client(&server).await?;
    periphery
      .request(api::terminal::DeleteAllTerminals {})
      .await
      .context("Failed to delete all terminals on Periphery")?;
    Ok(NoData {})
  }
}
//
impl Resolve<WriteArgs> for BatchDeleteAllTerminals {
  // Deletes all terminals on every server matching `self.query`,
  // fanning the Periphery requests out concurrently. Per-server
  // failures are logged as warnings and do NOT fail the batch — this
  // is best-effort cleanup, and the response is always `NoData`.
  #[instrument(
    "BatchDeleteAllTerminals",
    skip_all,
    fields(
      operator = user.id,
      query = format!("{:?}", self.query),
    )
  )]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Tags are only needed to filter the server query; skip the
    // lookup entirely when the query carries no tags.
    let all_tags = if self.query.tags.is_empty() {
      vec![]
    } else {
      get_all_tags(None).await?
    };
    // Only servers the operator holds terminal permission on are
    // included in the fan-out.
    resource::list_full_for_user::<Server>(
      self.query,
      user,
      PermissionLevel::Read.terminal(),
      &all_tags,
    )
    .await?
    .into_iter()
    .map(|server| async move {
      // Inner async block collects either failure into one Result so
      // a single warn! covers both client setup and request errors.
      let res = async {
        let periphery = periphery_client(&server).await?;
        periphery
          .request(api::terminal::DeleteAllTerminals {})
          .await
          .context("Failed to delete all terminals on Periphery")?;
        anyhow::Ok(())
      }
      .await;
      if let Err(e) = res {
        warn!(
          "Failed to delete all terminals on {} ({}) | {e:#}",
          server.name, server.id
        )
      }
    })
    // FuturesUnordered drives all server requests concurrently;
    // collecting to Vec<()> just awaits completion of every one.
    .collect::<FuturesUnordered<_>>()
    .collect::<Vec<_>>()
    .await;
    Ok(NoData {})
  }
}

View File

@@ -24,7 +24,14 @@ use super::WriteArgs;
//
impl Resolve<WriteArgs> for CreateLocalUser {
#[instrument(name = "CreateLocalUser", skip(admin, self), fields(admin_id = admin.id, username = self.username))]
#[instrument(
"CreateLocalUser",
skip_all,
fields(
admin_id = admin.id,
username = self.username
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -101,7 +108,14 @@ impl Resolve<WriteArgs> for CreateLocalUser {
//
impl Resolve<WriteArgs> for UpdateUserUsername {
#[instrument(name = "UpdateUserUsername", skip(user), fields(user_id = user.id))]
#[instrument(
"UpdateUserUsername",
skip_all,
fields(
operator = user.id,
new_username = self.username,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -152,7 +166,11 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
//
impl Resolve<WriteArgs> for UpdateUserPassword {
#[instrument(name = "UpdateUserPassword", skip(user, self), fields(user_id = user.id))]
#[instrument(
"UpdateUserPassword",
skip_all,
fields(operator = user.id)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -175,7 +193,14 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
//
impl Resolve<WriteArgs> for DeleteUser {
#[instrument(name = "DeleteUser", skip(admin), fields(user = self.user))]
#[instrument(
"DeleteUser",
skip_all,
fields(
admin_id = admin.id,
user_to_delete = self.user
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,

View File

@@ -19,7 +19,14 @@ use crate::state::db_client;
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateUserGroup {
#[instrument(name = "CreateUserGroup", skip(admin), fields(admin = admin.username))]
#[instrument(
"CreateUserGroup",
skip_all,
fields(
operator = admin.id,
group = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -57,7 +64,15 @@ impl Resolve<WriteArgs> for CreateUserGroup {
}
impl Resolve<WriteArgs> for RenameUserGroup {
#[instrument(name = "RenameUserGroup", skip(admin), fields(admin = admin.username))]
#[instrument(
"RenameUserGroup",
skip_all,
fields(
operator = admin.id,
group = self.id,
new_name = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -86,7 +101,14 @@ impl Resolve<WriteArgs> for RenameUserGroup {
}
impl Resolve<WriteArgs> for DeleteUserGroup {
#[instrument(name = "DeleteUserGroup", skip(admin), fields(admin = admin.username))]
#[instrument(
"DeleteUserGroup",
skip_all,
fields(
operator = admin.id,
group = self.id,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -122,7 +144,15 @@ impl Resolve<WriteArgs> for DeleteUserGroup {
}
impl Resolve<WriteArgs> for AddUserToUserGroup {
#[instrument(name = "AddUserToUserGroup", skip(admin), fields(admin = admin.username))]
#[instrument(
"AddUserToUserGroup",
skip_all,
fields(
operator = admin.id,
group = self.user_group,
user = self.user,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -169,7 +199,15 @@ impl Resolve<WriteArgs> for AddUserToUserGroup {
}
impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
#[instrument(name = "RemoveUserFromUserGroup", skip(admin), fields(admin = admin.username))]
#[instrument(
"RemoveUserFromUserGroup",
skip_all,
fields(
operator = admin.id,
group = self.user_group,
user = self.user,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -216,7 +254,15 @@ impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
}
impl Resolve<WriteArgs> for SetUsersInUserGroup {
#[instrument(name = "SetUsersInUserGroup", skip(admin), fields(admin = admin.username))]
#[instrument(
"SetUsersInUserGroup",
skip_all,
fields(
operator = admin.id,
group = self.user_group,
users = format!("{:?}", self.users)
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -266,7 +312,15 @@ impl Resolve<WriteArgs> for SetUsersInUserGroup {
}
impl Resolve<WriteArgs> for SetEveryoneUserGroup {
#[instrument(name = "SetEveryoneUserGroup", skip(admin), fields(admin = admin.username))]
#[instrument(
"SetEveryoneUserGroup",
skip_all,
fields(
operator = admin.id,
group = self.user_group,
everyone = self.everyone,
)
)]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,

View File

@@ -19,7 +19,16 @@ use crate::{
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateVariable {
#[instrument(name = "CreateVariable", skip(user, self), fields(name = &self.name))]
#[instrument(
"CreateVariable",
skip_all,
fields(
operator = user.id,
variable = self.name,
description = self.description,
is_secret = self.is_secret,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -68,7 +77,14 @@ impl Resolve<WriteArgs> for CreateVariable {
}
impl Resolve<WriteArgs> for UpdateVariableValue {
#[instrument(name = "UpdateVariableValue", skip(user, self), fields(name = &self.name))]
#[instrument(
"UpdateVariableValue",
skip_all,
fields(
operator = user.id,
variable = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -125,7 +141,15 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
}
impl Resolve<WriteArgs> for UpdateVariableDescription {
#[instrument(name = "UpdateVariableDescription", skip(user))]
#[instrument(
"UpdateVariableDescription",
skip_all,
fields(
operator = user.id,
variable = self.name,
description = self.description,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -149,7 +173,15 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
}
impl Resolve<WriteArgs> for UpdateVariableIsSecret {
#[instrument(name = "UpdateVariableIsSecret", skip(user))]
#[instrument(
"UpdateVariableIsSecret",
skip_all,
fields(
operator = user.id,
variable = self.name,
is_secret = self.is_secret,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
@@ -173,6 +205,14 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
}
impl Resolve<WriteArgs> for DeleteVariable {
#[instrument(
"DeleteVariable",
skip_all,
fields(
operator = user.id,
variable = self.name,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,

View File

@@ -1,17 +1,15 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
use komodo_client::entities::{
config::core::{CoreConfig, OauthCredentials},
random_string,
};
use reqwest::StatusCode;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use tokio::sync::Mutex;
use crate::{
auth::STATE_PREFIX_LENGTH, config::core_config,
helpers::random_string,
};
use crate::{auth::STATE_PREFIX_LENGTH, config::core_config};
pub fn github_oauth_client() -> &'static Option<GithubOauthClient> {
static GITHUB_OAUTH_CLIENT: OnceLock<Option<GithubOauthClient>> =
@@ -76,7 +74,6 @@ impl GithubOauthClient {
.into()
}
#[instrument(level = "debug", skip(self))]
pub async fn get_login_redirect_url(
&self,
redirect: Option<String>,
@@ -95,7 +92,6 @@ impl GithubOauthClient {
redirect_url
}
#[instrument(level = "debug", skip(self))]
pub async fn check_state(&self, state: &str) -> bool {
let mut contained = false;
self.states.lock().await.retain(|s| {
@@ -109,7 +105,6 @@ impl GithubOauthClient {
contained
}
#[instrument(level = "debug", skip(self))]
pub async fn get_access_token(
&self,
code: &str,
@@ -130,7 +125,6 @@ impl GithubOauthClient {
.context("failed to get github access token using code")
}
#[instrument(level = "debug", skip(self))]
pub async fn get_github_user(
&self,
token: &str,
@@ -141,7 +135,6 @@ impl GithubOauthClient {
.context("failed to get github user using access token")
}
#[instrument(level = "debug", skip(self))]
async fn get<R: DeserializeOwned>(
&self,
endpoint: &str,

View File

@@ -5,7 +5,7 @@ use axum::{
use database::mongo_indexed::Document;
use database::mungos::mongodb::bson::doc;
use komodo_client::entities::{
komodo_timestamp,
komodo_timestamp, random_string,
user::{User, UserConfig},
};
use reqwest::StatusCode;
@@ -14,7 +14,6 @@ use serror::AddStatusCode;
use crate::{
config::core_config,
helpers::random_string,
state::{db_client, jwt_client},
};
@@ -53,7 +52,6 @@ struct CallbackQuery {
code: String,
}
#[instrument(name = "GithubCallback", level = "debug")]
async fn callback(
Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {

View File

@@ -1,18 +1,16 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use jsonwebtoken::{DecodingKey, Validation, decode};
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
use jsonwebtoken::dangerous::insecure_decode;
use komodo_client::entities::{
config::core::{CoreConfig, OauthCredentials},
random_string,
};
use reqwest::StatusCode;
use serde::{Deserialize, de::DeserializeOwned};
use tokio::sync::Mutex;
use crate::{
auth::STATE_PREFIX_LENGTH, config::core_config,
helpers::random_string,
};
use crate::{auth::STATE_PREFIX_LENGTH, config::core_config};
pub fn google_oauth_client() -> &'static Option<GoogleOauthClient> {
static GOOGLE_OAUTH_CLIENT: OnceLock<Option<GoogleOauthClient>> =
@@ -85,7 +83,6 @@ impl GoogleOauthClient {
.into()
}
#[instrument(level = "debug", skip(self))]
pub async fn get_login_redirect_url(
&self,
redirect: Option<String>,
@@ -104,7 +101,6 @@ impl GoogleOauthClient {
redirect_url
}
#[instrument(level = "debug", skip(self))]
pub async fn check_state(&self, state: &str) -> bool {
let mut contained = false;
self.states.lock().await.retain(|s| {
@@ -118,7 +114,6 @@ impl GoogleOauthClient {
contained
}
#[instrument(level = "debug", skip(self))]
pub async fn get_access_token(
&self,
code: &str,
@@ -139,24 +134,15 @@ impl GoogleOauthClient {
.context("failed to get google access token using code")
}
#[instrument(level = "debug", skip(self))]
pub fn get_google_user(
&self,
id_token: &str,
) -> anyhow::Result<GoogleUser> {
let mut v = Validation::new(Default::default());
v.insecure_disable_signature_validation();
v.validate_aud = false;
let res = decode::<GoogleUser>(
id_token,
&DecodingKey::from_secret(b""),
&v,
)
.context("failed to decode google id token")?;
let res = insecure_decode::<GoogleUser>(id_token)
.context("failed to decode google id token")?;
Ok(res.claims)
}
#[instrument(level = "debug", skip(self))]
async fn post<R: DeserializeOwned>(
&self,
endpoint: &str,

View File

@@ -5,14 +5,16 @@ use axum::{
};
use database::mongo_indexed::Document;
use database::mungos::mongodb::bson::doc;
use komodo_client::entities::user::{User, UserConfig};
use komodo_client::entities::{
random_string,
user::{User, UserConfig},
};
use reqwest::StatusCode;
use serde::Deserialize;
use serror::AddStatusCode;
use crate::{
config::core_config,
helpers::random_string,
state::{db_client, jwt_client},
};
@@ -52,7 +54,6 @@ struct CallbackQuery {
error: Option<String>,
}
#[instrument(name = "GoogleCallback", level = "debug")]
async fn callback(
Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {

View File

@@ -9,13 +9,12 @@ use jsonwebtoken::{
DecodingKey, EncodingKey, Header, Validation, decode, encode,
};
use komodo_client::{
api::auth::JwtResponse, entities::config::core::CoreConfig,
api::auth::JwtResponse,
entities::{config::core::CoreConfig, random_string},
};
use serde::{Deserialize, Serialize};
use tokio::sync::Mutex;
use crate::helpers::random_string;
type ExchangeTokenMap = Mutex<HashMap<String, (JwtResponse, u128)>>;
#[derive(Serialize, Deserialize, Clone)]
@@ -75,7 +74,6 @@ impl JwtClient {
.context("failed to decode token claims")
}
#[instrument(level = "debug", skip_all)]
pub async fn create_exchange_token(
&self,
jwt: JwtResponse,
@@ -91,7 +89,7 @@ impl JwtClient {
);
exchange_token
}
#[instrument(level = "debug", skip(self))]
pub async fn redeem_exchange_token(
&self,
exchange_token: &str,

View File

@@ -22,7 +22,7 @@ use crate::{
};
impl Resolve<AuthArgs> for SignUpLocalUser {
#[instrument(name = "SignUpLocalUser", skip(self))]
#[instrument("SignUpLocalUser", skip(self))]
async fn resolve(
self,
_: &AuthArgs,
@@ -104,7 +104,6 @@ impl Resolve<AuthArgs> for SignUpLocalUser {
}
impl Resolve<AuthArgs> for LoginLocalUser {
#[instrument(name = "LoginLocalUser", level = "debug", skip(self))]
async fn resolve(
self,
_: &AuthArgs,

View File

@@ -31,7 +31,6 @@ struct RedirectQuery {
redirect: Option<String>,
}
#[instrument(level = "debug")]
pub async fn auth_request(
headers: HeaderMap,
mut req: Request,
@@ -44,7 +43,6 @@ pub async fn auth_request(
Ok(next.run(req).await)
}
#[instrument(level = "debug")]
pub async fn get_user_id_from_headers(
headers: &HeaderMap,
) -> anyhow::Result<String> {
@@ -77,7 +75,6 @@ pub async fn get_user_id_from_headers(
}
}
#[instrument(level = "debug")]
pub async fn authenticate_check_enabled(
headers: &HeaderMap,
) -> anyhow::Result<User> {
@@ -90,7 +87,6 @@ pub async fn authenticate_check_enabled(
}
}
#[instrument(level = "debug")]
pub async fn auth_jwt_get_user_id(
jwt: &str,
) -> anyhow::Result<String> {
@@ -102,7 +98,6 @@ pub async fn auth_jwt_get_user_id(
}
}
#[instrument(level = "debug")]
pub async fn auth_jwt_check_enabled(
jwt: &str,
) -> anyhow::Result<User> {
@@ -110,7 +105,6 @@ pub async fn auth_jwt_check_enabled(
check_enabled(user_id).await
}
#[instrument(level = "debug")]
pub async fn auth_api_key_get_user_id(
key: &str,
secret: &str,
@@ -135,7 +129,6 @@ pub async fn auth_api_key_get_user_id(
}
}
#[instrument(level = "debug")]
pub async fn auth_api_key_check_enabled(
key: &str,
secret: &str,
@@ -144,7 +137,6 @@ pub async fn auth_api_key_check_enabled(
check_enabled(user_id).await
}
#[instrument(level = "debug")]
async fn check_enabled(user_id: String) -> anyhow::Result<User> {
let user = get_user(&user_id).await?;
if user.enabled {

View File

@@ -8,7 +8,7 @@ use client::oidc_client;
use dashmap::DashMap;
use database::mungos::mongodb::bson::{Document, doc};
use komodo_client::entities::{
komodo_timestamp,
komodo_timestamp, random_string,
user::{User, UserConfig},
};
use openidconnect::{
@@ -23,7 +23,6 @@ use serror::AddStatusCode;
use crate::{
config::core_config,
helpers::random_string,
state::{db_client, jwt_client},
};
@@ -75,7 +74,6 @@ pub fn router() -> Router {
)
}
#[instrument(name = "OidcRedirect", level = "debug")]
async fn login(
Query(RedirectQuery { redirect }): Query<RedirectQuery>,
) -> anyhow::Result<Redirect> {
@@ -138,7 +136,6 @@ struct CallbackQuery {
error: Option<String>,
}
#[instrument(name = "OidcCallback", level = "debug")]
async fn callback(
Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {

View File

@@ -57,7 +57,6 @@ impl aws_credential_types::provider::ProvideCredentials
}
}
#[instrument]
async fn create_ec2_client(region: String) -> Client {
let region = Region::new(region);
let config = aws_config::defaults(BehaviorVersion::latest())
@@ -68,7 +67,7 @@ async fn create_ec2_client(region: String) -> Client {
Client::new(&config)
}
#[instrument]
#[instrument("LaunchEc2Instance")]
pub async fn launch_ec2_instance(
name: &str,
config: &AwsBuilderConfig,
@@ -170,7 +169,7 @@ pub async fn launch_ec2_instance(
const MAX_TERMINATION_TRIES: usize = 5;
const TERMINATION_WAIT_SECS: u64 = 15;
#[instrument]
#[instrument("TerminateEc2Instance")]
pub async fn terminate_ec2_instance_with_retry(
region: String,
instance_id: &str,
@@ -210,7 +209,7 @@ pub async fn terminate_ec2_instance_with_retry(
unreachable!()
}
#[instrument(skip(client))]
#[instrument("TerminateEc2InstanceInner", skip_all)]
async fn terminate_ec2_instance_inner(
client: &Client,
instance_id: &str,
@@ -229,7 +228,6 @@ async fn terminate_ec2_instance_inner(
}
/// Automatically retries 5 times, waiting 2 sec in between
#[instrument(level = "debug")]
async fn get_ec2_instance_status(
client: &Client,
instance_id: &str,
@@ -261,7 +259,6 @@ async fn get_ec2_instance_status(
}
}
#[instrument(level = "debug")]
async fn get_ec2_instance_state_name(
client: &Client,
instance_id: &str,
@@ -281,7 +278,6 @@ async fn get_ec2_instance_state_name(
}
/// Automatically retries 5 times, waiting 2 sec in between
#[instrument(level = "debug")]
async fn get_ec2_instance_public_ip(
client: &Client,
instance_id: &str,

View File

@@ -349,12 +349,16 @@ pub fn core_config() -> &'static CoreConfig {
location: env
.komodo_logging_location
.unwrap_or(config.logging.location),
ansi: env.komodo_logging_ansi.unwrap_or(config.logging.ansi),
otlp_endpoint: env
.komodo_logging_otlp_endpoint
.unwrap_or(config.logging.otlp_endpoint),
opentelemetry_service_name: env
.komodo_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name),
opentelemetry_scope_name: env
.komodo_logging_opentelemetry_scope_name
.unwrap_or(config.logging.opentelemetry_scope_name),
},
pretty_startup_config: env
.komodo_pretty_startup_config

View File

@@ -102,6 +102,15 @@ impl PeripheryConnectionArgs<'_> {
impl PeripheryConnection {
/// Custom Core -> Periphery side only login wrapper
/// to implement passkey support for backward compatibility
#[instrument(
"PeripheryLogin",
skip(self, socket, identifiers),
fields(
server_id = self.args.id,
address = self.args.address,
direction = "CoreToPeriphery"
)
)]
async fn client_login(
&self,
socket: &mut TungsteniteWebsocket,
@@ -124,6 +133,7 @@ impl PeripheryConnection {
}
}
#[instrument("V1PasskeyPeripheryLoginFlow", skip(socket, passkey))]
async fn handle_passkey_login(
socket: &mut TungsteniteWebsocket,
// for legacy auth
@@ -142,7 +152,7 @@ async fn handle_passkey_login(
};
socket
.send(LoginMessage::V1Passkey(passkey))
.send_message(LoginMessage::V1Passkey(passkey))
.await
.context("Failed to send Login V1Passkey message")?;

View File

@@ -10,8 +10,8 @@ use anyhow::anyhow;
use cache::CloneCache;
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use encoding::{
CastBytes as _, Decode as _, EncodedJsonMessage, EncodedOption,
EncodedResult, WithChannel,
CastBytes as _, Decode as _, EncodedJsonMessage, EncodedResponse,
WithChannel,
};
use komodo_client::entities::{
builder::{AwsBuilderConfig, UrlBuilderConfig},
@@ -19,7 +19,7 @@ use komodo_client::entities::{
server::Server,
};
use periphery_client::transport::{
EncodedTransportMessage, TransportMessage,
EncodedTransportMessage, ResponseMessage, TransportMessage,
};
use serror::serror_into_anyhow_error;
use tokio::sync::RwLock;
@@ -31,7 +31,7 @@ use transport::{
},
channel::{BufferedReceiver, Sender, buffered_channel},
websocket::{
Websocket, WebsocketMessage, WebsocketReceiver as _,
Websocket, WebsocketReceiver as _, WebsocketReceiverExt,
WebsocketSender as _,
},
};
@@ -109,6 +109,7 @@ pub struct PeripheryConnectionArgs<'a> {
impl PublicKeyValidator for PeripheryConnectionArgs<'_> {
type ValidationResult = String;
#[instrument("ValidatePeripheryPublicKey", skip(self))]
async fn validate(
&self,
public_key: String,
@@ -253,12 +254,11 @@ impl<'a> From<&'a OwnedPeripheryConnectionArgs>
}
/// Sends None as InProgress ping.
pub type ResponseChannels = CloneCache<
Uuid,
Sender<EncodedOption<EncodedResult<EncodedJsonMessage>>>,
>;
pub type ResponseChannels =
CloneCache<Uuid, Sender<EncodedResponse<EncodedJsonMessage>>>;
pub type TerminalChannels = CloneCache<Uuid, Sender<Vec<u8>>>;
pub type TerminalChannels =
CloneCache<Uuid, Sender<anyhow::Result<Vec<u8>>>>;
#[derive(Debug)]
pub struct PeripheryConnection {
@@ -328,6 +328,11 @@ impl PeripheryConnection {
)
}
#[instrument(
"StandardPeripheryLoginFlow",
skip(self, socket, identifiers),
fields(expected_public_key = self.args.periphery_public_key)
)]
pub async fn handle_login<W: Websocket, L: LoginFlow>(
&self,
socket: &mut W,
@@ -362,10 +367,24 @@ impl PeripheryConnection {
let forward_writes = async {
loop {
let Ok(message) = receiver.recv().await else {
break;
let message = match tokio::time::timeout(
Duration::from_secs(5),
receiver.recv(),
)
.await
{
Ok(Ok(message)) => message,
Ok(Err(_)) => break,
// Handle sending Ping
Err(_) => {
if let Err(e) = ws_write.ping().await {
self.set_error(e).await;
break;
}
continue;
}
};
match ws_write.send_inner(message.into_bytes()).await {
match ws_write.send(message.into_bytes()).await {
Ok(_) => receiver.clear_buffer(),
Err(e) => {
self.set_error(e).await;
@@ -380,19 +399,13 @@ impl PeripheryConnection {
let handle_reads = async {
loop {
match ws_read.recv_inner().await {
Ok(WebsocketMessage::Message(message)) => {
self.handle_incoming_message(message).await
}
Ok(WebsocketMessage::Close(_))
| Ok(WebsocketMessage::Closed) => {
self.set_error(anyhow!("Connection closed")).await;
break;
}
match ws_read.recv_message().await {
Ok(message) => self.handle_incoming_message(message).await,
Err(e) => {
self.set_error(e).await;
break;
}
};
}
}
// Cancel again if not already
cancel.cancel();
@@ -405,38 +418,31 @@ impl PeripheryConnection {
pub async fn handle_incoming_message(
&self,
message: EncodedTransportMessage,
message: TransportMessage,
) {
let message: TransportMessage = match message.decode() {
Ok(res) => res,
Err(e) => {
warn!("Failed to parse Message bytes | {e:#}");
return;
}
};
match message {
TransportMessage::Response(data) => match data.decode() {
Ok(WithChannel {
channel: channel_id,
data,
}) => {
let Some(channel) = self.responses.get(&channel_id).await
else {
warn!(
"Failed to forward Response message | No response channel found at {channel_id}"
);
return;
};
if let Err(e) = channel.send(data).await {
warn!(
"Failed to send response | Channel failure at {channel_id} | {e:#}"
);
TransportMessage::Response(data) => {
match data.decode().map(ResponseMessage::into_inner) {
Ok(WithChannel { channel, data }) => {
let Some(response_channel) =
self.responses.get(&channel).await
else {
warn!(
"Failed to forward Response message | No response channel found at {channel}"
);
return;
};
if let Err(e) = response_channel.send(data).await {
warn!(
"Failed to forward Response | Response channel failure at {channel} | {e:#}"
);
}
}
Err(e) => {
warn!("Failed to read Response message | {e:#}");
}
}
Err(e) => {
warn!("Failed to read Response message | {e:#}");
}
},
}
TransportMessage::Terminal(data) => match data.decode() {
Ok(WithChannel {
channel: channel_id,

View File

@@ -17,11 +17,13 @@ use komodo_client::{
user::system_user,
},
};
use periphery_client::transport::LoginMessage;
use periphery_client::{
api::PeripheryConnectionQuery, transport::LoginMessage,
};
use resolver_api::Resolve;
use serror::{AddStatusCode, AddStatusCodeError};
use tracing::Instrument;
use transport::{
PeripheryConnectionQuery,
auth::{
HeaderConnectionIdentifiers, LoginFlow, LoginFlowArgs,
PublicKeyValidator, ServerLoginFlow,
@@ -124,7 +126,7 @@ async fn existing_server_handler(
let mut socket = AxumWebsocket(socket);
if let Err(e) = socket
.send(LoginMessage::OnboardingFlow(false))
.send_message(LoginMessage::OnboardingFlow(false))
.await
.context("Failed to send Login OnboardingFlow false message")
{
@@ -132,13 +134,23 @@ async fn existing_server_handler(
return;
};
if let Err(e) = connection
.handle_login::<_, ServerLoginFlow>(
&mut socket,
identifiers.build(query.as_bytes()),
)
.await
{
let span = info_span!(
"PeripheryLogin",
server_id = server.id,
direction = "PeripheryToCore"
);
let login = async {
connection
.handle_login::<_, ServerLoginFlow>(
&mut socket,
identifiers.build(query.as_bytes()),
)
.await
}
.instrument(span)
.await;
if let Err(e) = login {
connection.set_error(e).await;
return;
}
@@ -157,7 +169,7 @@ async fn onboard_server_handler(
format!("server={}", urlencoding::encode(&server_query));
let mut socket = AxumWebsocket(socket);
if let Err(e) = socket.send(LoginMessage::OnboardingFlow(true)).await.context(
if let Err(e) = socket.send_message(LoginMessage::OnboardingFlow(true)).await.context(
"Failed to send Login OnboardingFlow true message",
).context("Server onboarding error") {
warn!("{e:#}");
@@ -214,7 +226,7 @@ async fn onboard_server_handler(
};
if let Err(e) = socket
.send(LoginMessage::Success)
.send_message(LoginMessage::Success)
.await
.context("Failed to send Login Onboarding Successful message")
{

View File

@@ -31,8 +31,16 @@ use super::periphery_client;
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 60;
#[instrument(skip_all, fields(builder_id = builder.id, update_id = update.id))]
pub async fn get_builder_periphery(
#[instrument(
"ConnectBuilderPeriphery",
skip_all,
fields(
resource_name,
builder_id = builder.id,
update_id = update.id
)
)]
pub async fn connect_builder_periphery(
// build: &Build,
resource_name: String,
version: Option<Version>,
@@ -76,7 +84,14 @@ pub async fn get_builder_periphery(
}
}
#[instrument(skip_all, fields(resource_name, update_id = update.id))]
#[instrument(
"GetAwsBuilder",
skip_all,
fields(
resource_name,
update_id = update.id,
)
)]
async fn get_aws_builder(
resource_name: &str,
version: Option<Version>,
@@ -168,7 +183,11 @@ async fn get_aws_builder(
)
}
#[instrument(skip(update))]
#[instrument(
"CleanupBuilderInstance",
skip_all,
fields(update_id = update.id)
)]
pub async fn cleanup_builder_instance(
periphery: PeripheryClient,
cleanup_data: BuildCleanupData,

View File

@@ -15,7 +15,6 @@ use komodo_client::entities::{
stack::Stack,
user::User,
};
use rand::Rng;
use crate::{
config::core_config, connection::PeripheryConnectionArgs,
@@ -31,6 +30,7 @@ pub mod matcher;
pub mod procedure;
pub mod prune;
pub mod query;
pub mod terminal;
pub mod update;
// pub mod resource;
@@ -47,14 +47,6 @@ pub fn empty_or_only_spaces(word: &str) -> bool {
true
}
/// Build a random alphanumeric `String` of exactly `length` characters.
pub fn random_string(length: usize) -> String {
  // Hoist the RNG out of the per-character closure so it is created once.
  let mut rng = rand::rng();
  std::iter::repeat_with(|| {
    char::from(rng.sample(rand::distr::Alphanumeric))
  })
  .take(length)
  .collect()
}
/// First checks db for token, then checks core config.
/// Only errors if db call errors.
/// Returns (token, use_https)
@@ -199,7 +191,14 @@ pub async fn periphery_client(
.await
}
#[instrument]
#[instrument(
"CreatePermission",
skip(user),
fields(
operator = user.id,
username = user.username
)
)]
pub async fn create_permission<T>(
user: &User,
target: T,

View File

@@ -3,7 +3,7 @@ use std::time::{Duration, Instant};
use anyhow::{Context, anyhow};
use database::mungos::by_id::find_one_by_id;
use formatting::{Color, bold, colored, format_serror, muted};
use futures::future::join_all;
use futures_util::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
@@ -20,6 +20,7 @@ use komodo_client::{
};
use resolver_api::Resolve;
use tokio::sync::Mutex;
use uuid::Uuid;
use crate::{
api::{
@@ -32,7 +33,6 @@ use crate::{
use super::update::{init_execution_update, update_update};
#[instrument(skip_all)]
pub async fn execute_procedure(
procedure: &Procedure,
update: &Mutex<Update>,
@@ -51,7 +51,7 @@ pub async fn execute_procedure(
)
.await;
let timer = Instant::now();
execute_stage(
execute_procedure_stage(
stage
.executions
.iter()
@@ -86,9 +86,8 @@ pub async fn execute_procedure(
Ok(())
}
#[allow(dependency_on_unit_never_type_fallback)]
#[instrument(skip(update))]
async fn execute_stage(
#[instrument("ExecuteProcedureStage", skip_all)]
async fn execute_procedure_stage(
_executions: Vec<Execution>,
parent_id: &str,
parent_name: &str,
@@ -218,6 +217,10 @@ async fn execute_stage(
Ok(())
}
#[instrument(
"ExecuteProcedureExecution",
skip(parent_id, parent_name)
)]
async fn execute_execution(
execution: Execution,
// used to prevent recursive procedure
@@ -225,6 +228,7 @@ async fn execute_execution(
parent_name: &str,
) -> anyhow::Result<()> {
let user = procedure_user().to_owned();
let id = Uuid::new_v4();
let update = match execution {
Execution::None(_) => return Ok(()),
Execution::RunProcedure(req) => {
@@ -239,7 +243,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RunProcedure"),
@@ -262,7 +266,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RunAction"),
@@ -285,7 +289,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RunBuild"),
@@ -308,7 +312,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at CancelBuild"),
@@ -325,7 +329,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at Deploy"),
@@ -348,7 +352,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PullDeployment"),
@@ -365,7 +369,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StartDeployment"),
@@ -382,7 +386,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RestartDeployment"),
@@ -399,7 +403,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PauseDeployment"),
@@ -416,7 +420,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at UnpauseDeployment"),
@@ -433,7 +437,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StopDeployment"),
@@ -450,7 +454,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RemoveDeployment"),
@@ -473,7 +477,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at CloneRepo"),
@@ -496,7 +500,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PullRepo"),
@@ -519,7 +523,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at BuildRepo"),
@@ -542,7 +546,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at CancelRepoBuild"),
@@ -559,7 +563,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StartContainer"),
@@ -576,7 +580,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RestartContainer"),
@@ -593,7 +597,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PauseContainer"),
@@ -610,7 +614,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at UnpauseContainer"),
@@ -627,7 +631,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StopContainer"),
@@ -644,7 +648,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RemoveContainer"),
@@ -661,7 +665,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StartAllContainers"),
@@ -678,7 +682,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RestartAllContainers"),
@@ -695,7 +699,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PauseAllContainers"),
@@ -712,7 +716,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at UnpauseAllContainers"),
@@ -729,7 +733,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StopAllContainers"),
@@ -746,7 +750,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PruneContainers"),
@@ -763,7 +767,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at DeleteNetwork"),
@@ -780,7 +784,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PruneNetworks"),
@@ -797,7 +801,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at DeleteImage"),
@@ -814,7 +818,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PruneImages"),
@@ -831,7 +835,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at DeleteVolume"),
@@ -848,7 +852,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PruneVolumes"),
@@ -865,7 +869,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PruneDockerBuilders"),
@@ -882,7 +886,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PruneBuildx"),
@@ -899,7 +903,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PruneSystem"),
@@ -916,7 +920,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RunSync"),
@@ -939,7 +943,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at DeployStack"),
@@ -962,7 +966,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at DeployStackIfChanged"),
@@ -985,7 +989,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PullStack"),
@@ -1008,7 +1012,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StartStack"),
@@ -1025,7 +1029,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RestartStack"),
@@ -1042,7 +1046,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at PauseStack"),
@@ -1059,7 +1063,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at UnpauseStack"),
@@ -1076,7 +1080,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at StopStack"),
@@ -1093,7 +1097,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at DestroyStack"),
@@ -1110,7 +1114,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RunStackService"),
@@ -1133,7 +1137,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at TestAlerter"),
@@ -1150,7 +1154,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at SendAlert"),
@@ -1167,7 +1171,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at ClearRepoCache"),
@@ -1184,7 +1188,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at BackupCoreDatabase"),
@@ -1201,7 +1205,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at GlobalAutoUpdate"),
@@ -1218,7 +1222,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RotateAllServerKeys"),
@@ -1235,7 +1239,7 @@ async fn execute_execution(
let update_id = update.id.clone();
handle_resolve_result(
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs { user, update, id })
.await
.map_err(|e| e.error)
.context("Failed at RotateCoreKeys"),
@@ -1288,7 +1292,6 @@ async fn handle_resolve_result(
}
/// ASSUMES FIRST LOG IS ALREADY CREATED
#[instrument(level = "debug")]
async fn add_line_to_update(update: &Mutex<Update>, line: &str) {
let mut lock = update.lock().await;
let log = &mut lock.logs[0];

View File

@@ -3,7 +3,7 @@ use async_timing_util::{
ONE_DAY_MS, Timelength, unix_timestamp_ms, wait_until_timelength,
};
use database::mungos::{find::find_collect, mongodb::bson::doc};
use futures::{StreamExt, stream::FuturesUnordered};
use futures_util::{StreamExt, stream::FuturesUnordered};
use periphery_client::api::docker::PruneImages;
use crate::{config::core_config, state::db_client};

View File

@@ -47,7 +47,6 @@ use crate::{
};
// user: Id or username
#[instrument(level = "debug")]
pub async fn get_user(user: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user) {
return Ok(user);
@@ -60,7 +59,6 @@ pub async fn get_user(user: &str) -> anyhow::Result<User> {
.with_context(|| format!("no user found with {user}"))
}
#[instrument(level = "debug")]
pub async fn get_server_with_state(
server_id_or_name: &str,
) -> anyhow::Result<(Server, ServerState)> {
@@ -69,7 +67,6 @@ pub async fn get_server_with_state(
Ok((server, state))
}
#[instrument(level = "debug")]
pub async fn get_server_state(server: &Server) -> ServerState {
if !server.config.enabled {
return ServerState::Disabled;
@@ -86,7 +83,6 @@ pub async fn get_server_state(server: &Server) -> ServerState {
}
}
#[instrument(level = "debug")]
pub async fn get_deployment_state(
id: &String,
) -> anyhow::Result<DeploymentState> {
@@ -182,7 +178,6 @@ pub fn get_stack_state_from_containers(
StackState::Unhealthy
}
#[instrument(level = "debug")]
pub async fn get_stack_state(
stack: &Stack,
) -> anyhow::Result<StackState> {
@@ -198,7 +193,6 @@ pub async fn get_stack_state(
Ok(state)
}
#[instrument(level = "debug")]
pub async fn get_tag(id_or_name: &str) -> anyhow::Result<Tag> {
let query = match ObjectId::from_str(id_or_name) {
Ok(id) => doc! { "_id": id },
@@ -212,7 +206,6 @@ pub async fn get_tag(id_or_name: &str) -> anyhow::Result<Tag> {
.with_context(|| format!("no tag found matching {id_or_name}"))
}
#[instrument(level = "debug")]
pub async fn get_tag_check_owner(
id_or_name: &str,
user: &User,
@@ -244,7 +237,6 @@ pub async fn get_id_to_tags(
Ok(res)
}
#[instrument(level = "debug")]
pub async fn get_user_user_groups(
user_id: &str,
) -> anyhow::Result<Vec<UserGroup>> {
@@ -262,7 +254,6 @@ pub async fn get_user_user_groups(
.context("failed to query db for user groups")
}
#[instrument(level = "debug")]
pub async fn get_user_user_group_ids(
user_id: &str,
) -> anyhow::Result<Vec<String>> {

View File

@@ -0,0 +1,345 @@
use anyhow::{Context as _, anyhow};
use komodo_client::{
api::{terminal::InitTerminal, write::CreateTerminal},
entities::{
deployment::Deployment,
permission::PermissionLevel,
server::Server,
stack::Stack,
terminal::{ContainerTerminalMode, TerminalTarget},
user::User,
},
};
use periphery_client::api;
use crate::{
helpers::periphery_client, periphery::PeripheryClient,
permission::get_check_permissions, resource,
state::stack_status_cache,
};
/// Resolves a [TerminalTarget] on behalf of `user`: checks Terminal
/// permission on the underlying resource, optionally initializes the
/// terminal on Periphery (when `init` is provided), and returns the
/// normalized (id-based) target, the resolved terminal name, and a
/// Periphery client for the backing server.
pub async fn setup_target_for_user(
  target: TerminalTarget,
  terminal: Option<String>,
  init: Option<InitTerminal>,
  user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
  match target {
    TerminalTarget::Deployment { deployment } => {
      setup_deployment_target_for_user(
        deployment, terminal, init, user,
      )
      .await
    }
    TerminalTarget::Stack { stack, service } => {
      // Stack targets require an explicit service to attach to.
      let service =
        service.context("Missing 'target.params.service'")?;
      setup_stack_service_target_for_user(
        stack, service, terminal, init, user,
      )
      .await
    }
    TerminalTarget::Container { server, container } => {
      setup_container_target_for_user(
        server, container, terminal, init, user,
      )
      .await
    }
    TerminalTarget::Server { server } => {
      // Server targets require the server to be specified.
      let server =
        server.context("Missing 'target.params.server'")?;
      setup_server_target_for_user(server, terminal, init, user)
        .await
    }
  }
}
async fn setup_server_target_for_user(
server: String,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
let server = get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let terminal = terminal.unwrap_or_else(|| {
init
.as_ref()
.and_then(|init| init.command.clone())
.unwrap_or_else(|| String::from("term"))
});
let periphery = periphery_client(&server).await?;
if let Some(init) = init {
periphery
.request(api::terminal::CreateServerTerminal {
name: terminal.clone(),
command: init.command,
recreate: init.recreate,
})
.await
.context("Failed to create Server Terminal on Periphery")?;
}
Ok((
TerminalTarget::Server {
server: Some(server.id),
},
terminal,
periphery,
))
}
/// Checks the user's Terminal permission on the Server, resolves the
/// terminal name, optionally creates the container terminal on
/// Periphery, and returns the id-based Container target.
async fn setup_container_target_for_user(
  server: String,
  container: String,
  terminal: Option<String>,
  init: Option<InitTerminal>,
  user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
  let server = get_check_permissions::<Server>(
    &server,
    user,
    PermissionLevel::Read.terminal(),
  )
  .await?;
  let terminal = default_container_terminal_name(
    terminal,
    &container,
    init.as_ref(),
  );
  let periphery = periphery_client(&server).await?;
  let target = TerminalTarget::Container {
    server: server.id,
    container: container.clone(),
  };
  // No init requested: nothing to create on Periphery.
  let Some(init) = init else {
    return Ok((target, terminal, periphery));
  };
  create_container_terminal_inner(
    CreateTerminal {
      name: terminal.clone(),
      target: target.clone(),
      command: init.command,
      mode: init.mode,
      recreate: init.recreate,
    },
    &periphery,
    container,
  )
  .await?;
  Ok((target, terminal, periphery))
}
async fn setup_stack_service_target_for_user(
stack: String,
service: String,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
let (target, periphery, container) =
get_stack_service_periphery_container(&stack, &service, user)
.await?;
let terminal = default_container_terminal_name(
terminal,
&container,
init.as_ref(),
);
if let Some(init) = init {
create_container_terminal_inner(
CreateTerminal {
name: terminal.clone(),
target: target.clone(),
command: init.command,
mode: init.mode,
recreate: init.recreate,
},
&periphery,
container,
)
.await?;
}
Ok((target, terminal, periphery))
}
async fn setup_deployment_target_for_user(
deployment: String,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
let (target, periphery, container) =
get_deployment_periphery_container(&deployment, user).await?;
let terminal = default_container_terminal_name(
terminal,
&container,
init.as_ref(),
);
if let Some(init) = init {
create_container_terminal_inner(
CreateTerminal {
name: terminal.clone(),
target: target.clone(),
command: init.command,
mode: init.mode,
recreate: init.recreate,
},
&periphery,
container,
)
.await?;
}
Ok((target, terminal, periphery))
}
/// Resolves the terminal name for a container-based terminal.
///
/// Precedence: explicit `terminal` name, then the init command, then
/// the init mode's string form, finally the container name itself.
fn default_container_terminal_name(
  terminal: Option<String>,
  container: &str,
  init: Option<&InitTerminal>,
) -> String {
  terminal.unwrap_or_else(|| {
    // `init` is already `Option<&InitTerminal>`, so no extra
    // `.as_ref()` is needed (clippy: useless_asref).
    init
      .map(|init| {
        init.command.clone().unwrap_or_else(|| {
          init.mode.unwrap_or_default().as_ref().to_string()
        })
      })
      .unwrap_or_else(|| container.to_string())
  })
}
pub async fn create_container_terminal_inner(
CreateTerminal {
name,
target,
command,
mode,
recreate,
}: CreateTerminal,
periphery: &PeripheryClient,
container: String,
) -> anyhow::Result<()> {
match mode.unwrap_or_default() {
ContainerTerminalMode::Exec => periphery
.request(periphery_client::api::terminal::CreateContainerExecTerminal {
name,
target,
container,
command,
recreate,
})
.await
.context(
"Failed to create Container Exec Terminal on Periphery",
)?,
ContainerTerminalMode::Attach => periphery
.request(periphery_client::api::terminal::CreateContainerAttachTerminal {
name,
target,
container,
recreate,
})
.await
.context(
"Failed to create Container Attach Terminal on Periphery",
)?,
};
Ok(())
}
/// Resolves a Stack service to its backing container, checking the
/// user's Terminal permission on the Stack. Returns the id-based
/// Stack target, a Periphery client for the Stack's server, and the
/// container name for the matching service.
pub async fn get_stack_service_periphery_container(
  stack: &str,
  service: &str,
  user: &User,
) -> anyhow::Result<(TerminalTarget, PeripheryClient, String)> {
  let stack = get_check_permissions::<Stack>(
    stack,
    user,
    PermissionLevel::Read.terminal(),
  )
  .await?;
  let server =
    resource::get::<Server>(&stack.config.server_id).await?;
  // The container name comes from the cached Stack status.
  let status = stack_status_cache()
    .get(&stack.id)
    .await
    .context("Could not get Stack status")?;
  let service_status = status
    .curr
    .services
    .iter()
    .find(|s| s.service.as_str() == service)
    .with_context(|| {
      format!("Did not find Stack service matching {service}")
    })?;
  let container = service_status
    .container
    .as_ref()
    .with_context(|| {
      format!("Did not find container for Stack service {service}")
    })?
    .name
    .clone();
  let periphery = periphery_client(&server).await?;
  let target = TerminalTarget::Stack {
    stack: stack.id,
    service: Some(service.to_string()),
  };
  Ok((target, periphery, container))
}
/// Resolves a Deployment to its container, checking the user's
/// Terminal permission on the Deployment. Returns the id-based
/// Deployment target, a Periphery client for the Deployment's server,
/// and the container name (Deployment containers are named after the
/// Deployment itself).
pub async fn get_deployment_periphery_container(
  deployment: &str,
  user: &User,
) -> anyhow::Result<(TerminalTarget, PeripheryClient, String)> {
  let deployment = get_check_permissions::<Deployment>(
    deployment,
    user,
    PermissionLevel::Read.terminal(),
  )
  .await?;
  let server =
    resource::get::<Server>(&deployment.config.server_id).await?;
  let periphery = periphery_client(&server).await?;
  // Move the name out directly instead of cloning — `id` is moved
  // separately below (partial moves of distinct fields are fine).
  let container = deployment.name;
  Ok((
    TerminalTarget::Deployment {
      deployment: deployment.id,
    },
    periphery,
    container,
  ))
}

View File

@@ -40,7 +40,6 @@ pub fn make_update(
}
}
#[instrument(level = "debug")]
pub async fn add_update(
mut update: Update,
) -> anyhow::Result<String> {
@@ -59,7 +58,6 @@ pub async fn add_update(
Ok(id)
}
#[instrument(level = "debug")]
pub async fn add_update_without_send(
update: &Update,
) -> anyhow::Result<String> {
@@ -75,7 +73,6 @@ pub async fn add_update_without_send(
Ok(id)
}
#[instrument(level = "debug")]
pub async fn update_update(update: Update) -> anyhow::Result<()> {
update_one_by_id(&db_client().updates, &update.id, database::mungos::update::Update::Set(to_document(&update)?), None)
.await
@@ -85,7 +82,6 @@ pub async fn update_update(update: Update) -> anyhow::Result<()> {
Ok(())
}
#[instrument(level = "debug")]
async fn update_list_item(
update: Update,
) -> anyhow::Result<UpdateListItem> {
@@ -115,7 +111,6 @@ async fn update_list_item(
Ok(update)
}
#[instrument(level = "debug")]
async fn send_update(update: UpdateListItem) -> anyhow::Result<()> {
update_channel().sender.lock().await.send(update)?;
Ok(())

View File

@@ -14,6 +14,7 @@ use komodo_client::{
use resolver_api::Resolve;
use serde::Deserialize;
use serde_json::json;
use uuid::Uuid;
use crate::{
api::{
@@ -21,6 +22,7 @@ use crate::{
write::WriteArgs,
},
helpers::update::init_execution_update,
resource,
};
use super::{ANY_BRANCH, ListenerLockCache};
@@ -54,7 +56,18 @@ pub async fn handle_build_webhook<B: super::ExtractBranch>(
let lock = build_locks().get_or_insert_default(&build.id).await;
let _lock = lock.lock().await;
B::verify_branch(&body, &build.config.branch)?;
// Use the correct target branch when using linked repo.
let branch = if build.config.linked_repo.is_empty() {
build.config.branch
} else {
resource::get::<Repo>(&build.config.linked_repo)
.await
.context("Failed to find 'linked_repo'")?
.config
.branch
};
B::verify_branch(&body, &branch)?;
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::RunBuild(RunBuild { build: build.id });
@@ -63,7 +76,11 @@ pub async fn handle_build_webhook<B: super::ExtractBranch>(
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
Ok(())
@@ -101,7 +118,11 @@ impl RepoExecution for CloneRepo {
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
Ok(())
@@ -121,7 +142,11 @@ impl RepoExecution for PullRepo {
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
Ok(())
@@ -141,7 +166,11 @@ impl RepoExecution for BuildRepo {
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
Ok(())
@@ -240,7 +269,11 @@ impl StackExecution for DeployStack {
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
} else {
@@ -254,7 +287,11 @@ impl StackExecution for DeployStack {
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
}
@@ -303,7 +340,18 @@ pub async fn handle_stack_webhook_inner<
let lock = stack_locks().get_or_insert_default(&stack.id).await;
let _lock = lock.lock().await;
B::verify_branch(&body, &stack.config.branch)?;
// Use the correct target branch when using linked repo.
let branch = if stack.config.linked_repo.is_empty() {
stack.config.branch.clone()
} else {
resource::get::<Repo>(&stack.config.linked_repo)
.await
.context("Failed to find 'linked_repo'")?
.config
.branch
};
B::verify_branch(&body, &branch)?;
E::resolve(stack).await.map_err(|e| e.error)
}
@@ -352,7 +400,11 @@ impl SyncExecution for RunSync {
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
Ok(())
@@ -401,7 +453,18 @@ async fn handle_sync_webhook_inner<
let lock = sync_locks().get_or_insert_default(&sync.id).await;
let _lock = lock.lock().await;
B::verify_branch(&body, &sync.config.branch)?;
// Use the correct target branch when using linked repo.
let branch = if sync.config.linked_repo.is_empty() {
sync.config.branch.clone()
} else {
resource::get::<Repo>(&sync.config.linked_repo)
.await
.context("Failed to find 'linked_repo'")?
.config
.branch
};
B::verify_branch(&body, &branch)?;
E::resolve(sync).await
}
@@ -451,7 +514,11 @@ pub async fn handle_procedure_webhook<B: super::ExtractBranch>(
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
Ok(())
@@ -513,7 +580,11 @@ pub async fn handle_action_webhook<B: super::ExtractBranch>(
unreachable!()
};
req
.resolve(&ExecuteArgs { user, update })
.resolve(&ExecuteArgs {
user,
update,
id: Uuid::new_v4(),
})
.await
.map_err(|e| e.error)?;
Ok(())

View File

@@ -6,12 +6,13 @@ extern crate tracing;
use std::{net::SocketAddr, str::FromStr};
use anyhow::Context;
use axum::Router;
use axum::{Router, routing::get};
use axum_server::{Handle, tls_rustls::RustlsConfig};
use tower_http::{
cors::{Any, CorsLayer},
services::{ServeDir, ServeFile},
};
use tracing::Instrument;
use crate::config::{core_config, core_keys};
@@ -41,46 +42,52 @@ async fn app() -> anyhow::Result<()> {
let config = core_config();
logger::init(&config.logging)?;
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));
let startup_span = info_span!("CoreStartup");
match (
config.pretty_startup_config,
config.unsafe_unsanitized_startup_config,
) {
(true, true) => info!("{:#?}", config),
(true, false) => info!("{:#?}", config.sanitized()),
(false, true) => info!("{:?}", config),
(false, false) => info!("{:?}", config.sanitized()),
async {
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));
match (
config.pretty_startup_config,
config.unsafe_unsanitized_startup_config,
) {
(true, true) => info!("{:#?}", config),
(true, false) => info!("{:#?}", config.sanitized()),
(false, true) => info!("{:?}", config),
(false, false) => info!("{:?}", config.sanitized()),
}
// Init + log public key. Will crash if invalid private key here.
info!("Public Key: {}", core_keys().load().public);
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.expect("Failed to install default crypto provider");
// Init jwt client to crash on failure
state::jwt_client();
tokio::join!(
// Init db_client check to crash on db init failure
state::init_db_client(),
// Manage OIDC client (defined in config / env vars / compose secret file)
auth::oidc::client::spawn_oidc_client_management()
);
// Run after db connection.
startup::on_startup().await;
// Spawn background tasks
monitor::spawn_monitor_loop();
resource::spawn_resource_refresh_loop();
resource::spawn_all_resources_cache_refresh_loop();
resource::spawn_build_state_refresh_loop();
resource::spawn_repo_state_refresh_loop();
resource::spawn_procedure_state_refresh_loop();
resource::spawn_action_state_refresh_loop();
schedule::spawn_schedule_executor();
helpers::prune::spawn_prune_loop();
}
// Init + log public key. Will crash if invalid private key here.
info!("Public Key: {}", core_keys().load().public);
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.expect("Failed to install default crypto provider");
// Init jwt client to crash on failure
state::jwt_client();
tokio::join!(
// Init db_client check to crash on db init failure
state::init_db_client(),
// Manage OIDC client (defined in config / env vars / compose secret file)
auth::oidc::client::spawn_oidc_client_management()
);
// Run after db connection.
startup::on_startup().await;
// Spawn background tasks
monitor::spawn_monitor_loop();
resource::spawn_resource_refresh_loop();
resource::spawn_all_resources_cache_refresh_loop();
resource::spawn_build_state_refresh_loop();
resource::spawn_repo_state_refresh_loop();
resource::spawn_procedure_state_refresh_loop();
resource::spawn_action_state_refresh_loop();
schedule::spawn_schedule_executor();
helpers::prune::spawn_prune_loop();
.instrument(startup_span)
.await;
// Setup static frontend services
let frontend_path = &config.frontend_path;
@@ -90,6 +97,7 @@ async fn app() -> anyhow::Result<()> {
.not_found_service(frontend_index.clone());
let app = Router::new()
.route("/version", get(|| async { env!("CARGO_PKG_VERSION") }))
.nest("/auth", api::auth::router())
.nest("/user", api::user::router())
.nest("/read", api::read::router())

View File

@@ -13,7 +13,6 @@ use crate::{
state::{action_states, db_client},
};
#[instrument(level = "debug")]
pub async fn alert_deployments(
ts: i64,
server_names: &HashMap<String, String>,

View File

@@ -3,7 +3,7 @@ use std::collections::HashMap;
use anyhow::Context;
use komodo_client::entities::{
permission::PermissionLevel, resource::ResourceQuery,
server::Server, user::User,
server::Server, user::system_user,
};
use crate::resource;
@@ -13,7 +13,6 @@ mod server;
mod stack;
// called after cache update
#[instrument(level = "debug")]
pub async fn check_alerts(ts: i64) {
let (servers, server_names) = match get_all_servers_map().await {
Ok(res) => res,
@@ -30,16 +29,12 @@ pub async fn check_alerts(ts: i64) {
);
}
#[instrument(level = "debug")]
async fn get_all_servers_map()
-> anyhow::Result<(HashMap<String, Server>, HashMap<String, String>)>
{
let servers = resource::list_full_for_user::<Server>(
ResourceQuery::default(),
&User {
admin: true,
..Default::default()
},
system_user(),
PermissionLevel::Read.into(),
&[],
)

View File

@@ -73,7 +73,6 @@ fn alert_buffer() -> &'static AlertBuffer {
BUFFER.get_or_init(AlertBuffer::new)
}
#[instrument(level = "debug")]
pub async fn alert_servers(
ts: i64,
mut servers: HashMap<String, Server>,
@@ -555,7 +554,6 @@ pub async fn alert_servers(
);
}
#[instrument(level = "debug")]
async fn open_new_alerts(alerts: &[(Alert, SendAlerts)]) {
if alerts.is_empty() {
return;
@@ -603,7 +601,6 @@ async fn open_new_alerts(alerts: &[(Alert, SendAlerts)]) {
send_alerts(&alerts).await
}
#[instrument(level = "debug")]
async fn update_alerts(alerts: &[(Alert, SendAlerts)]) {
if alerts.is_empty() {
return;
@@ -651,7 +648,6 @@ async fn update_alerts(alerts: &[(Alert, SendAlerts)]) {
}
}
#[instrument(level = "debug")]
async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) {
if alerts.is_empty() {
return;
@@ -708,7 +704,6 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) {
}
}
#[instrument(level = "debug")]
async fn get_open_alerts()
-> anyhow::Result<(OpenAlertMap, OpenDiskAlertMap)> {
let alerts = find_collect(

View File

@@ -12,7 +12,6 @@ use crate::{
state::{action_states, db_client, stack_status_cache},
};
#[instrument(level = "debug")]
pub async fn alert_stacks(
ts: i64,
server_names: &HashMap<String, String>,

View File

@@ -25,7 +25,6 @@ use super::{
CachedStackStatus, History,
};
#[instrument(level = "debug", skip_all)]
pub async fn insert_deployments_status_unknown(
deployments: Vec<Deployment>,
) {
@@ -51,7 +50,6 @@ pub async fn insert_deployments_status_unknown(
}
}
#[instrument(level = "debug", skip_all)]
pub async fn insert_repos_status_unknown(repos: Vec<Repo>) {
let status_cache = repo_status_cache();
for repo in repos {
@@ -68,7 +66,6 @@ pub async fn insert_repos_status_unknown(repos: Vec<Repo>) {
}
}
#[instrument(level = "debug", skip_all)]
pub async fn insert_stacks_status_unknown(stacks: Vec<Stack>) {
let status_cache = stack_status_cache();
for stack in stacks {
@@ -99,7 +96,6 @@ type DockerLists = (
Option<Vec<ComposeProject>>,
);
#[instrument(level = "debug", skip_all)]
pub async fn insert_server_status(
server: &Server,
state: ServerState,

View File

@@ -3,7 +3,7 @@ use std::sync::{Arc, OnceLock};
use async_timing_util::wait_until_timelength;
use cache::CloneCache;
use database::mungos::{find::find_collect, mongodb::bson::doc};
use futures::future::join_all;
use futures_util::future::join_all;
use helpers::insert_stacks_status_unknown;
use komodo_client::entities::{
build::Build,
@@ -140,7 +140,6 @@ fn update_cache_for_server_controller()
/// which exits early if the lock is busy or it was completed too recently.
/// If force is true, it will wait on simultaneous calls, and will
/// ignore the restriction on being completed too recently.
#[instrument(level = "debug")]
pub async fn update_cache_for_server(server: &Server, force: bool) {
// Concurrency controller to ensure it isn't done too often
// when it happens in other contexts.

View File

@@ -4,7 +4,6 @@ use komodo_client::entities::stats::{
use crate::state::{db_client, server_status_cache};
#[instrument(level = "debug")]
pub async fn record_server_stats(ts: i64) {
let status = server_status_cache().get_values().await;
let records = status

View File

@@ -21,6 +21,7 @@ pub mod terminal;
#[derive(Debug)]
pub struct PeripheryClient {
/// Usually the server id
pub id: String,
pub responses: Arc<ResponseChannels>,
pub terminals: Arc<TerminalChannels>,
@@ -79,17 +80,11 @@ impl PeripheryClient {
periphery_connections().remove(&self.id).await
}
#[tracing::instrument(level = "debug", skip(self))]
pub async fn health_check(&self) -> anyhow::Result<()> {
self.request(api::GetHealth {}).await?;
Ok(())
}
#[tracing::instrument(
name = "PeripheryRequest",
skip(self),
level = "debug"
)]
pub async fn request<T>(
&self,
request: T,
@@ -139,7 +134,7 @@ impl PeripheryClient {
continue;
};
return message.decode_into();
return message.decode();
}
}
}

View File

@@ -5,114 +5,37 @@ use std::{
};
use anyhow::Context;
use bytes::Bytes;
use cache::CloneCache;
use futures::Stream;
use komodo_client::api::write::TerminalRecreateMode;
use futures_util::Stream;
use komodo_client::entities::terminal::{
TerminalStdinMessageVariant, TerminalTarget,
};
use periphery_client::{
api::terminal::{
ConnectContainerAttach, ConnectContainerExec, ConnectTerminal,
END_OF_OUTPUT, ExecuteContainerExec, ExecuteTerminal,
},
api::terminal::{ConnectTerminal, END_OF_OUTPUT, ExecuteTerminal},
transport::EncodedTransportMessage,
};
use transport::channel::{Receiver, Sender, channel};
use transport::channel::{Receiver, Sender};
use uuid::Uuid;
use crate::{
periphery::PeripheryClient, state::periphery_connections,
};
pub struct ConnectTerminalResponse {
pub channel: Uuid,
pub sender: Sender<EncodedTransportMessage>,
pub receiver: Receiver<anyhow::Result<Vec<u8>>>,
}
impl PeripheryClient {
#[instrument("ConnectTerminal", skip(self), fields(server_id = self.id))]
pub async fn connect_terminal(
&self,
terminal: String,
) -> anyhow::Result<(
Uuid,
Sender<EncodedTransportMessage>,
Receiver<Vec<u8>>,
)> {
target: TerminalTarget,
) -> anyhow::Result<ConnectTerminalResponse> {
tracing::trace!(
"request | type: ConnectTerminal | terminal name: {terminal}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let channel_id = self
.request(ConnectTerminal { terminal })
.await
.context("Failed to create terminal connection")?;
let (sender, receiever) = channel();
connection.terminals.insert(channel_id, sender).await;
connection
.sender
.send_terminal(channel_id, Bytes::new())
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
)?;
Ok((channel_id, connection.sender.clone(), receiever))
}
pub async fn connect_container_exec(
&self,
container: String,
shell: String,
recreate: TerminalRecreateMode,
) -> anyhow::Result<(
Uuid,
Sender<EncodedTransportMessage>,
Receiver<Vec<u8>>,
)> {
tracing::trace!(
"request | type: ConnectContainerExec | container name: {container} | shell: {shell}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let channel_id = self
.request(ConnectContainerExec {
container,
shell,
recreate,
})
.await
.context("Failed to create container exec connection")?;
let (sender, receiever) = channel();
connection.terminals.insert(channel_id, sender).await;
connection
.sender
.send_terminal(channel_id, Bytes::new())
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
)?;
Ok((channel_id, connection.sender.clone(), receiever))
}
pub async fn connect_container_attach(
&self,
container: String,
recreate: TerminalRecreateMode,
) -> anyhow::Result<(
Uuid,
Sender<EncodedTransportMessage>,
Receiver<Vec<u8>>,
)> {
tracing::trace!(
"request | type: ConnectContainerAttach | container name: {container}",
"request | type: ConnectTerminal | Terminal: {terminal} | Target: {target:?}",
);
let connection =
@@ -121,25 +44,29 @@ impl PeripheryClient {
)?;
let channel = self
.request(ConnectContainerAttach {
container,
recreate,
})
.request(ConnectTerminal { terminal, target })
.await
.context("Failed to create container attach connection")?;
.context("Failed to create terminal connection")?;
let (sender, receiever) = transport::channel::channel();
let (sender, receiver) = transport::channel::channel();
connection.terminals.insert(channel, sender).await;
connection
.sender
.send_terminal(channel, Bytes::new())
.send_terminal(
channel,
Ok(vec![TerminalStdinMessageVariant::Begin.as_byte()]),
)
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
"Failed to send TerminalMessage Begin byte to begin forwarding.",
)?;
Ok((channel, connection.sender.clone(), receiever))
Ok(ConnectTerminalResponse {
channel,
sender: connection.sender.clone(),
receiver,
})
}
/// Executes command on specified terminal,
@@ -155,16 +82,17 @@ impl PeripheryClient {
///
/// If this value is NOT the final item before stream closes, it means
/// the terminal exited mid command, before giving status. Example: running `exit`.
#[tracing::instrument(level = "debug", skip(self))]
#[instrument("ExecuteTerminal", skip(self), fields(server_id = self.id))]
pub async fn execute_terminal(
&self,
target: TerminalTarget,
terminal: String,
command: String,
) -> anyhow::Result<
impl Stream<Item = anyhow::Result<Vec<u8>>> + 'static,
> {
tracing::trace!(
"sending request | type: ExecuteTerminal | terminal name: {terminal} | command: {command}",
trace!(
"sending request | type: ExecuteTerminal | {target:?} | terminal name: {terminal} | command: {command}",
);
let connection =
@@ -172,87 +100,32 @@ impl PeripheryClient {
|| format!("No connection found for server {}", self.id),
)?;
let channel_id = self
.request(ExecuteTerminal { terminal, command })
let channel = self
.request(ExecuteTerminal {
terminal,
target,
command,
})
.await
.context("Failed to create execute terminal connection")?;
let (terminal_sender, terminal_receiver) = channel();
connection
.terminals
.insert(channel_id, terminal_sender)
.await;
let (terminal_sender, terminal_receiver) =
transport::channel::channel();
connection.terminals.insert(channel, terminal_sender).await;
connection
.sender
.send_terminal(channel_id, Bytes::new())
.send_terminal(
channel,
Ok(vec![TerminalStdinMessageVariant::Begin.as_byte()]),
)
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
)?;
Ok(ReceiverStream {
channel_id,
receiver: terminal_receiver,
channels: connection.terminals.clone(),
})
}
/// Executes command on specified container,
/// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
/// sentinal value as the expected final line of the stream.
///
/// Example final line:
/// ```text
/// __KOMODO_EXIT_CODE:0
/// ```
///
/// This means the command exited with code 0 (success).
///
/// If this value is NOT the final item before stream closes, it means
/// the container shell exited mid command, before giving status. Example: running `exit`.
#[tracing::instrument(level = "debug", skip(self))]
pub async fn execute_container_exec(
&self,
container: String,
shell: String,
command: String,
recreate: TerminalRecreateMode,
) -> anyhow::Result<ReceiverStream> {
tracing::trace!(
"sending request | type: ExecuteContainerExec | container: {container} | shell: {shell} | command: {command}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let channel_id = self
.request(ExecuteContainerExec {
container,
shell,
command,
recreate,
})
.await
.context("Failed to create execute terminal connection")?;
let (terminal_sender, terminal_receiver) = channel();
connection
.terminals
.insert(channel_id, terminal_sender)
.await;
// Trigger forwarding to begin now that forwarding channel is ready.
// This is required to not miss messages.
connection
.sender
.send_terminal(channel_id, Bytes::new())
.await?;
Ok(ReceiverStream {
channel_id,
channel,
receiver: terminal_receiver,
channels: connection.terminals.clone(),
})
@@ -260,9 +133,9 @@ impl PeripheryClient {
}
pub struct ReceiverStream {
channel_id: Uuid,
channels: Arc<CloneCache<Uuid, Sender<Vec<u8>>>>,
receiver: Receiver<Vec<u8>>,
channel: Uuid,
channels: Arc<CloneCache<Uuid, Sender<anyhow::Result<Vec<u8>>>>>,
receiver: Receiver<anyhow::Result<Vec<u8>>>,
}
impl Stream for ReceiverStream {
@@ -272,14 +145,14 @@ impl Stream for ReceiverStream {
cx: &mut task::Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.receiver.poll_recv(cx) {
Poll::Ready(Some(bytes))
Poll::Ready(Some(Ok(bytes)))
if bytes == END_OF_OUTPUT.as_bytes() =>
{
self.cleanup();
Poll::Ready(None)
}
Poll::Ready(Some(bytes)) => Poll::Ready(Some(Ok(bytes))),
// Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
Poll::Ready(Some(Ok(bytes))) => Poll::Ready(Some(Ok(bytes))),
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
Poll::Ready(None) => {
self.cleanup();
Poll::Ready(None)
@@ -293,9 +166,9 @@ impl ReceiverStream {
fn cleanup(&self) {
// Not the prettiest but it should be fine
let channels = self.channels.clone();
let id = self.channel_id;
let channel = self.channel;
tokio::spawn(async move {
channels.remove(&id).await;
channels.remove(&channel).await;
});
}
}

View File

@@ -1,10 +1,12 @@
use std::collections::HashSet;
use std::collections::HashMap;
use anyhow::{Context, anyhow};
use database::mongo_indexed::doc;
use database::mungos::find::find_collect;
use futures::{FutureExt, future::BoxFuture};
use database::{bson::Document, mongo_indexed::doc};
use futures_util::{FutureExt, future::BoxFuture};
use indexmap::IndexSet;
use komodo_client::entities::ResourceTarget;
use komodo_client::entities::permission::SpecificPermission;
use komodo_client::{
api::read::GetPermission,
entities::{
@@ -15,6 +17,7 @@ use komodo_client::{
};
use resolver_api::Resolve;
use crate::resource::list_all_resources;
use crate::{
api::read::ReadArgs,
config::core_config,
@@ -68,12 +71,11 @@ pub async fn get_check_permissions<T: KomodoResource>(
}
}
#[instrument(level = "debug")]
pub fn get_user_permission_on_resource<'a, T: KomodoResource>(
user: &'a User,
resource_id: &'a str,
) -> BoxFuture<'a, anyhow::Result<PermissionLevelAndSpecifics>> {
Box::pin(async {
Box::pin(async move {
// Admin returns early with max permissions
if user.admin {
return Ok(PermissionLevel::Write.all());
@@ -163,69 +165,186 @@ pub fn get_user_permission_on_resource<'a, T: KomodoResource>(
})
}
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_user<T: KomodoResource>(
pub async fn list_resources_for_user<T: KomodoResource>(
filters: impl Into<Option<Document>>,
user: &User,
) -> anyhow::Result<Option<Vec<String>>> {
// Check admin or transparent mode
if user.admin || core_config().transparent_mode {
return Ok(None);
permission: PermissionLevelAndSpecifics,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
// Check admin
if user.admin {
return list_all_resources::<T>(filters).await;
}
let mut base = PermissionLevelAndSpecifics {
level: if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
},
specific: Default::default(),
};
// 'transparent_mode' early return.
if base.fulfills(&permission) {
return list_all_resources::<T>(filters).await;
}
let resource_type = T::resource_type();
// Check user 'all' on variant
if let Some(permission) = user.all.get(&resource_type).cloned()
&& permission.level > PermissionLevel::None
{
return Ok(None);
if let Some(all_permission) = user.all.get(&resource_type) {
base.elevate(all_permission);
// 'user.all' early return.
if base.fulfills(&permission) {
return list_all_resources::<T>(filters).await;
}
}
// Check user groups 'all' on variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(permission) = group.all.get(&resource_type).cloned()
&& permission.level > PermissionLevel::None
{
return Ok(None);
if let Some(all_permission) = group.all.get(&resource_type) {
base.elevate(all_permission);
// 'group.all' early return.
if base.fulfills(&permission) {
return list_all_resources::<T>(filters).await;
}
}
}
let (base, perms) = tokio::try_join!(
// Get any resources with non-none base permission,
find_collect(
T::coll(),
doc! { "$or": [
{ "base_permission": { "$in": ["Read", "Execute", "Write"] } },
{ "base_permission.level": { "$in": ["Read", "Execute", "Write"] } }
] },
None,
)
.map(|res| res.with_context(|| format!(
"failed to query {resource_type} on db"
))),
let (all, permissions) = tokio::try_join!(
list_all_resources::<T>(filters),
// And any ids using the permissions table
find_collect(
&db_client().permissions,
doc! {
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"level": { "$in": ["Read", "Execute", "Write"] }
},
None,
)
.map(|res| res.context("failed to query permissions on db"))
)?;
// Add specific ids
let ids = perms
let permission_by_resource_id = permissions
.into_iter()
.map(|p| p.resource_target.extract_variant_id().1.to_string())
// Chain in the ones with non-None base permissions
.chain(base.into_iter().map(|res| res.id))
// collect into hashset first to remove any duplicates
.collect::<HashSet<_>>();
.map(|perm| {
(
perm.resource_target.extract_variant_id().1.to_string(),
perm,
)
})
.collect::<HashMap<_, _>>();
Ok(Some(ids.into_iter().collect()))
let mut resources = Vec::new();
let mut additional_specific_cache =
HashMap::<ResourceTarget, IndexSet<SpecificPermission>>::new();
for resource in all {
let mut perm = if let Some(perm) =
permission_by_resource_id.get(&resource.id)
{
base.join(perm)
} else {
base.clone()
};
// Check if already fulfils
if perm.fulfills(&permission) {
resources.push(resource);
continue;
}
// Also check if fulfills with inherited specific
let additional_target = if let Some(additional_target) =
T::inherit_specific_permissions_from(&resource)
&& !additional_target.is_empty()
{
additional_target
} else {
continue;
};
let additional_specific = match additional_specific_cache
.get(&additional_target)
.cloned()
{
Some(specific) => specific,
None => {
let specific = GetPermission {
target: additional_target.clone(),
}
.resolve(&ReadArgs { user: user.clone() })
.await
.map_err(|e| e.error)
.context(
"failed to get user permission on additional target",
)?
.specific;
additional_specific_cache
.insert(additional_target, specific.clone());
specific
}
};
perm.specific.extend(additional_specific);
if perm.fulfills(&permission) {
resources.push(resource);
}
}
Ok(resources)
}
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
pub async fn list_resource_ids_for_user<T: KomodoResource>(
  filters: Option<Document>,
  user: &User,
  permission: PermissionLevelAndSpecifics,
) -> anyhow::Result<Option<Vec<String>>> {
  // Admins never need id filtering.
  if user.admin {
    return Ok(None);
  }
  // Baseline grant: transparent mode gives everyone Read.
  let baseline = if core_config().transparent_mode {
    PermissionLevel::Read
  } else {
    PermissionLevel::None
  };
  let mut granted = PermissionLevelAndSpecifics {
    level: baseline,
    specific: Default::default(),
  };
  // 'transparent_mode' early return.
  if granted.fulfills(&permission) {
    return Ok(None);
  }
  let resource_type = T::resource_type();
  // Fold in the user's blanket grant for this resource variant.
  if let Some(all) = user.all.get(&resource_type) {
    granted.elevate(all);
    // 'user.all' early return.
    if granted.fulfills(&permission) {
      return Ok(None);
    }
  }
  // Fold in each user group's blanket grant for this variant.
  let groups = get_user_user_groups(&user.id).await?;
  for group in groups.iter() {
    let Some(all) = group.all.get(&resource_type) else {
      continue;
    };
    granted.elevate(all);
    // 'group.all' early return.
    if granted.fulfills(&permission) {
      return Ok(None);
    }
  }
  // No blanket grant is enough: resolve per-resource permissions and
  // return just the ids of the accessible resources.
  let resources =
    list_resources_for_user::<T>(filters, user, permission).await?;
  let ids = resources
    .into_iter()
    .map(|resource| resource.id)
    .collect();
  Ok(Some(ids))
}

View File

@@ -253,7 +253,7 @@ pub async fn refresh_build_state_cache() {
});
}
#[instrument(skip(user))]
#[instrument("ValidateBuildConfig", skip_all)]
async fn validate_config(
config: &mut PartialBuildConfig,
user: &User,

View File

@@ -179,7 +179,7 @@ impl super::KomodoResource for Builder {
}
}
#[instrument(skip(user))]
#[instrument("ValidateBuilderConfig", skip_all)]
async fn validate_config(
config: &mut PartialBuilderConfig,
user: &User,

View File

@@ -309,7 +309,7 @@ impl super::KomodoResource for Deployment {
}
}
#[instrument(skip(user))]
#[instrument("ValidateDeploymentConfig", skip_all)]
async fn validate_config(
config: &mut PartialDeploymentConfig,
user: &User,

View File

@@ -14,7 +14,7 @@ use database::mungos::{
},
};
use formatting::format_serror;
use futures::future::join_all;
use futures_util::future::join_all;
use indexmap::IndexSet;
use komodo_client::{
api::{read::ExportResourcesToToml, write::CreateTag},
@@ -46,7 +46,7 @@ use crate::{
query::{get_tag, id_or_name_filter},
update::{add_update, make_update},
},
permission::{get_check_permissions, get_resource_ids_for_user},
permission::{get_check_permissions, list_resources_for_user},
state::db_client,
};
@@ -254,36 +254,34 @@ pub async fn get<T: KomodoResource>(
// LIST
// ======
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
#[instrument(level = "debug")]
pub async fn get_resource_object_ids_for_user<T: KomodoResource>(
user: &User,
) -> anyhow::Result<Option<Vec<ObjectId>>> {
get_resource_ids_for_user::<T>(user).await.map(|ids| {
ids.map(|ids| {
ids
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect()
})
/// Get full resource list with no permissions check.
pub async fn list_all_resources<T: KomodoResource>(
filters: impl Into<Option<Document>>,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
find_collect(
T::coll(),
filters,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.with_context(|| {
format!("Failed to pull {}s from mongo", T::resource_type())
})
}
#[instrument(level = "debug")]
pub async fn list_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permissions: PermissionLevelAndSpecifics,
permission: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_for_user_using_document::<T>(filters, user, permissions).await
list_for_user_using_document::<T>(filters, user, permission).await
}
// #[instrument(level = "debug")]
// pub async fn list_for_user_using_pattern<T: KomodoResource>(
// // pub async fn list_for_user_using_pattern<T: KomodoResource>(
// pattern: &str,
// query: ResourceQuery<T::QuerySpecifics>,
// user: &User,
@@ -303,13 +301,12 @@ pub async fn list_for_user<T: KomodoResource>(
// Ok(join_all(list).await)
// }
#[instrument(level = "debug")]
pub async fn list_for_user_using_document<T: KomodoResource>(
filters: Document,
user: &User,
permissions: PermissionLevelAndSpecifics,
permission: PermissionLevelAndSpecifics,
) -> anyhow::Result<Vec<T::ListItem>> {
let list = list_full_for_user_using_document::<T>(filters, user)
let list = list_resources_for_user::<T>(filters, user, permission)
.await?
.into_iter()
.map(|resource| T::to_list_item(resource));
@@ -324,16 +321,15 @@ pub async fn list_for_user_using_document<T: KomodoResource>(
/// let items = list_full_for_user_using_match_string::<Build>("foo-*", Default::default(), user, all_tags).await?;
/// let items = list_full_for_user_using_match_string::<Build>("\\^foo-.*$\\", Default::default(), user, all_tags).await?;
/// ```
#[instrument(level = "debug")]
pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
pattern: &str,
query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permissions: PermissionLevelAndSpecifics,
permission: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
let resources =
list_full_for_user::<T>(query, user, permissions, all_tags)
list_full_for_user::<T>(query, user, permission, all_tags)
.await?;
let patterns = parse_string_list(pattern);
@@ -367,7 +363,6 @@ pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
)
}
#[instrument(level = "debug")]
pub async fn list_full_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
@@ -377,28 +372,7 @@ pub async fn list_full_for_user<T: KomodoResource>(
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_full_for_user_using_document::<T>(filters, user).await
}
#[instrument(level = "debug")]
pub async fn list_full_for_user_using_document<T: KomodoResource>(
mut filters: Document,
user: &User,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
if let Some(ids) =
get_resource_object_ids_for_user::<T>(user).await?
{
filters.insert("_id", doc! { "$in": ids });
}
find_collect(
T::coll(),
filters,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.with_context(|| {
format!("Failed to pull {}s from mongo", T::resource_type())
})
list_resources_for_user::<T>(filters, user, permissions).await
}
pub type IdResourceMap<T> = HashMap<
@@ -409,7 +383,6 @@ pub type IdResourceMap<T> = HashMap<
>,
>;
#[instrument(level = "debug")]
pub async fn get_id_to_resource_map<T: KomodoResource>(
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
@@ -491,16 +464,13 @@ pub async fn create<T: KomodoResource>(
// Ensure an existing resource with same name doesn't already exist
// The database indexing also ensures this but doesn't give a good error message.
if list_full_for_user::<T>(
Default::default(),
system_user(),
PermissionLevel::Read.into(),
&[],
)
.await
.context("Failed to list all resources for duplicate name check")?
.into_iter()
.any(|r| r.name == name)
if T::coll()
.find_one(doc! { "name": &name })
.await
.context(
"Failed to check existing resources for duplicate name check",
)?
.is_some()
{
return Err(
anyhow!("Resource with name '{}' already exists", name)
@@ -828,11 +798,11 @@ pub async fn rename<T: KomodoResource>(
pub async fn delete<T: KomodoResource>(
id_or_name: &str,
args: &WriteArgs,
user: &User,
) -> anyhow::Result<Resource<T::Config, T::Info>> {
let resource = get_check_permissions::<T>(
id_or_name,
&args.user,
user,
PermissionLevel::Write.into(),
)
.await?;
@@ -846,15 +816,13 @@ pub async fn delete<T: KomodoResource>(
targets: vec![target.clone()],
..Default::default()
}
.resolve(&ReadArgs {
user: args.user.clone(),
})
.resolve(&ReadArgs { user: user.clone() })
.await
.map_err(|e| e.error)?
.toml;
let mut update =
make_update(target.clone(), T::delete_operation(), &args.user);
make_update(target.clone(), T::delete_operation(), user);
T::pre_delete(&resource, &mut update).await?;
@@ -913,7 +881,6 @@ async fn delete_from_alerters<T: KomodoResource>(id: &str) {
// =======
#[instrument(level = "debug")]
pub fn validate_resource_query_tags<T: Default + std::fmt::Debug>(
query: &mut ResourceQuery<T>,
all_tags: &[Tag],
@@ -934,7 +901,7 @@ pub fn validate_resource_query_tags<T: Default + std::fmt::Debug>(
Ok(())
}
#[instrument]
#[instrument("DeleteAllPermissionsOnResource")]
pub async fn delete_all_permissions_on_resource<T>(target: T)
where
T: Into<ResourceTarget> + std::fmt::Debug,
@@ -955,7 +922,7 @@ where
}
}
#[instrument]
#[instrument("RemoveFromRecentlyViewed")]
pub async fn remove_from_recently_viewed<T>(resource: T)
where
T: Into<ResourceTarget> + std::fmt::Debug,

Some files were not shown because too many files have changed in this diff Show More