Compare commits

...

40 Commits

Author SHA1 Message Date
mbecker20
c5401de1c5 tweak user level tab view 2025-10-28 11:42:29 -07:00
mbecker20
7a3d9e0ef6 tweak description 2025-10-28 00:32:39 -07:00
mbecker20
595e3ece42 deploy 2.0.0-dev-86 2025-10-27 21:05:13 -07:00
mbecker20
a3bc895755 fix terminal disconnect 2025-10-27 21:04:46 -07:00
mbecker20
3e3def03ec terminal init properly lexes init command 2025-10-27 21:01:15 -07:00
mbecker20
bc672d9649 deploy 2.0.0-dev-85 2025-10-27 20:01:18 -07:00
mbecker20
ea6dee4d51 clippy lint 2025-10-27 19:13:43 -07:00
mbecker20
b985f18c74 deploy 2.0.0-dev-84 2025-10-27 19:12:54 -07:00
mbecker20
45909b2f04 pid1 reaper doesn't work, init: true should be required in compose 2025-10-27 19:06:50 -07:00
mbecker20
2b5a54ce89 deploy 2.0.0-dev-83 2025-10-27 18:31:56 -07:00
mbecker20
a18f33b95e formalize the terminal message variants 2025-10-27 18:31:30 -07:00
mbecker20
f35b00ea95 bump clap dependency 2025-10-27 16:18:30 -07:00
mbecker20
70fab08520 clean up terminal modules 2025-10-27 16:17:20 -07:00
mbecker20
0331780a5f rename variables shell -> command 2025-10-27 11:08:57 -07:00
mbecker20
06cdfd2bbc Terminal -> Terminals tabs 2025-10-27 02:53:06 -07:00
mbecker20
1555202569 Create Terminal don't auto set request after changed 2025-10-27 02:42:06 -07:00
mbecker20
5139622aad deploy 2.0.0-dev-82 2025-10-27 02:28:48 -07:00
mbecker20
61ce2ee3db improve new terminal 2025-10-27 02:04:15 -07:00
mbecker20
3171c14f2b comment on spawn process reaper 2025-10-27 01:41:06 -07:00
mbecker20
521db748d8 deploy 2.0.0-dev-81 2025-10-27 01:27:42 -07:00
mbecker20
35bf224080 deploy 2.0.0-dev-80 2025-10-27 01:21:44 -07:00
mbecker20
e0b31cfe51 CreateTerminal only shows resources which are actually available to connect to 2025-10-27 00:44:56 -07:00
mbecker20
0a890078b0 deploy 2.0.0-dev-79 2025-10-27 00:38:08 -07:00
mbecker20
df97ced7a4 deploy 2.0.0-dev-78 2025-10-27 00:03:26 -07:00
mbecker20
d4e5e2e6d8 add execute_<>_terminal convenience methods 2025-10-26 23:35:17 -07:00
mbecker20
19aa60dcb5 deploy 2.0.0-dev-77 2025-10-26 23:21:15 -07:00
mbecker20
fc19c53e6f deploy 2.0.0-dev-76 2025-10-26 23:00:59 -07:00
mbecker20
4f0af960db Big Terminal refactor + most commands run directly / bypass 'sh -c "..."' 2025-10-26 23:00:35 -07:00
mbecker20
e2ec5258fb add "New" kb shortcut 2025-10-23 23:55:24 -07:00
mbecker20
49b6545a02 reorder cli command list 2025-10-23 23:53:10 -07:00
mbecker20
0aabaa9e62 deploy 2.0.0-dev-75 2025-10-23 12:23:10 -07:00
mbecker20
dc65986eab binaries still built with bullseye for compat, but final images use trixie 2025-10-23 12:22:50 -07:00
mbecker20
1d8f28437d km attach <CONTAINER> 2025-10-23 12:22:02 -07:00
mbecker20
c1502e89c2 deploy 2.0.0-dev-74 2025-10-23 11:51:40 -07:00
mbecker20
0bd15fc442 ResourceQuery.names supports names or ids 2025-10-23 11:23:37 -07:00
mbecker20
5a3621b02e km exec 2025-10-23 01:55:50 -07:00
mbecker20
38192e2dac deploy 2.0.0-dev-73 2025-10-23 00:56:15 -07:00
mbecker20
5d271d5547 use Ping timeout to handle reconnect if for some reason network cuts but ws doesn't receive Close 2025-10-23 00:55:51 -07:00
mbecker20
11fb67a35b ssh use cancel token so stdout.write_all isn't cancelled mid-write, which leads to undefined behavior 2025-10-23 00:14:17 -07:00
mbecker20
a80499dcc4 improve stack config files responsive 2025-10-22 19:02:30 -07:00
144 changed files with 6470 additions and 5106 deletions

107
Cargo.lock generated
View File

@@ -278,9 +278,9 @@ dependencies = [
[[package]]
name = "aws-sdk-ec2"
version = "1.175.1"
version = "1.176.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1515b09ab70c0991c0b002b0e9114a3ee73b7c5d3d65dfdb047278b208f21b0"
checksum = "171b12a84d9c7b43b75bf2ae99e86ce40ff0b5ecc8194a67d547e55c1ad2438e"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -440,7 +440,7 @@ dependencies = [
"hyper-util",
"pin-project-lite",
"rustls 0.21.12",
"rustls 0.23.33",
"rustls 0.23.34",
"rustls-native-certs 0.8.1",
"rustls-pki-types",
"tokio",
@@ -671,7 +671,7 @@ dependencies = [
"hyper 1.7.0",
"hyper-util",
"pin-project-lite",
"rustls 0.23.33",
"rustls 0.23.34",
"rustls-pemfile 2.2.0",
"rustls-pki-types",
"tokio",
@@ -902,7 +902,7 @@ dependencies = [
[[package]]
name = "cache"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"tokio",
@@ -1019,9 +1019,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.49"
version = "4.5.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4512b90fa68d3a9932cea5184017c5d200f5921df706d45e853537dea51508f"
checksum = "0c2cfd7bf8a6017ddaa4e32ffe7403d547790db06bd171c1c53926faab501623"
dependencies = [
"clap_builder",
"clap_derive",
@@ -1029,9 +1029,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.49"
version = "4.5.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0025e98baa12e766c67ba13ff4695a887a1eba19569aad00a472546795bd6730"
checksum = "0a4c05b9e80c5ccd3a7ef080ad7b6ba7d6fc00a985b8b157197075677c82c7a0"
dependencies = [
"anstream",
"anstyle",
@@ -1094,11 +1094,12 @@ dependencies = [
[[package]]
name = "command"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"komodo_client",
"run_command",
"shlex",
"svi",
"tokio",
]
[[package]]
@@ -1120,7 +1121,7 @@ checksum = "e47641d3deaf41fb1538ac1f54735925e275eaf3bf4d55c81b137fba797e5cbb"
[[package]]
name = "config"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"colored",
"indexmap 2.12.0",
@@ -1442,7 +1443,7 @@ checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
[[package]]
name = "database"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"async-compression",
@@ -1741,7 +1742,7 @@ dependencies = [
[[package]]
name = "encoding"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"bytes",
@@ -1783,7 +1784,7 @@ dependencies = [
[[package]]
name = "environment"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"formatting",
@@ -1793,7 +1794,7 @@ dependencies = [
[[package]]
name = "environment_file"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"thiserror 2.0.17",
]
@@ -1889,7 +1890,7 @@ dependencies = [
[[package]]
name = "formatting"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"serror",
]
@@ -2055,14 +2056,13 @@ dependencies = [
[[package]]
name = "git"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"cache",
"command",
"formatting",
"komodo_client",
"run_command",
"tokio",
]
@@ -2454,7 +2454,7 @@ dependencies = [
"http 1.3.1",
"hyper 1.7.0",
"hyper-util",
"rustls 0.23.33",
"rustls 0.23.34",
"rustls-native-certs 0.8.1",
"rustls-pki-types",
"tokio",
@@ -2688,7 +2688,7 @@ dependencies = [
[[package]]
name = "interpolate"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"komodo_client",
@@ -2793,9 +2793,9 @@ dependencies = [
[[package]]
name = "jsonwebtoken"
version = "10.0.0"
version = "10.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1417155a38e99d7704ddb3ea7445fe57fdbd5d756d727740a9ed8b9ebaed6e1"
checksum = "3d119c6924272d16f0ab9ce41f7aa0bfef9340c00b0bb7ca3dd3b263d4a9150b"
dependencies = [
"aws-lc-rs",
"base64 0.22.1",
@@ -2810,10 +2810,9 @@ dependencies = [
[[package]]
name = "komodo_cli"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"bytes",
"chrono",
"clap",
"colored",
@@ -2832,18 +2831,19 @@ dependencies = [
"serde_json",
"serde_qs",
"tokio",
"tokio-tungstenite",
"tokio-util",
"tracing",
"wildcard",
]
[[package]]
name = "komodo_client"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"async_timing_util",
"bson",
"bytes",
"clap",
"derive_builder",
"derive_default_builder",
@@ -2874,7 +2874,7 @@ dependencies = [
[[package]]
name = "komodo_core"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"arc-swap",
@@ -2921,10 +2921,11 @@ dependencies = [
"reqwest",
"resolver_api",
"response",
"rustls 0.23.33",
"rustls 0.23.34",
"secret_file",
"serde",
"serde_json",
"serde_qs",
"serde_yaml_ng",
"serror",
"sha2",
@@ -2947,7 +2948,7 @@ dependencies = [
[[package]]
name = "komodo_periphery"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"arc-swap",
@@ -2978,14 +2979,14 @@ dependencies = [
"periphery_client",
"portable-pty",
"resolver_api",
"run_command",
"rustls 0.23.33",
"rustls 0.23.34",
"secret_file",
"serde",
"serde_json",
"serde_yaml_ng",
"serror",
"shell-escape",
"shlex",
"sysinfo",
"tokio",
"tokio-stream",
@@ -3068,7 +3069,7 @@ checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
[[package]]
name = "logger"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"komodo_client",
@@ -3299,7 +3300,7 @@ dependencies = [
"percent-encoding",
"rand 0.8.5",
"rustc_version_runtime",
"rustls 0.23.33",
"rustls 0.23.34",
"rustversion",
"serde",
"serde_bytes",
@@ -3360,7 +3361,7 @@ dependencies = [
[[package]]
name = "noise"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"arc-swap",
@@ -3775,7 +3776,7 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "periphery_client"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"derive_variants",
@@ -3995,7 +3996,7 @@ dependencies = [
"quinn-proto",
"quinn-udp",
"rustc-hash",
"rustls 0.23.33",
"rustls 0.23.34",
"socket2 0.6.1",
"thiserror 2.0.17",
"tokio",
@@ -4015,7 +4016,7 @@ dependencies = [
"rand 0.9.2",
"ring",
"rustc-hash",
"rustls 0.23.33",
"rustls 0.23.34",
"rustls-pki-types",
"slab",
"thiserror 2.0.17",
@@ -4207,7 +4208,7 @@ dependencies = [
"percent-encoding",
"pin-project-lite",
"quinn",
"rustls 0.23.33",
"rustls 0.23.34",
"rustls-native-certs 0.8.1",
"rustls-pki-types",
"serde",
@@ -4256,7 +4257,7 @@ dependencies = [
[[package]]
name = "response"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"axum",
@@ -4309,15 +4310,6 @@ dependencies = [
"zeroize",
]
[[package]]
name = "run_command"
version = "0.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388e1106aa4bd809ba57afecb8b3efc60b6599cd06a774d5798a7c5a29675307"
dependencies = [
"tokio",
]
[[package]]
name = "rustc-hash"
version = "2.1.1"
@@ -4370,9 +4362,9 @@ dependencies = [
[[package]]
name = "rustls"
version = "0.23.33"
version = "0.23.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "751e04a496ca00bb97a5e043158d23d66b5aabf2e1d5aa2a0aaebb1aafe6f82c"
checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7"
dependencies = [
"aws-lc-rs",
"log",
@@ -4535,7 +4527,7 @@ dependencies = [
[[package]]
name = "secret_file"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"tokio",
]
@@ -5281,7 +5273,7 @@ version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
dependencies = [
"rustls 0.23.33",
"rustls 0.23.34",
"tokio",
]
@@ -5305,7 +5297,7 @@ checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857"
dependencies = [
"futures-util",
"log",
"rustls 0.23.33",
"rustls 0.23.34",
"rustls-native-certs 0.8.1",
"rustls-pki-types",
"tokio",
@@ -5568,19 +5560,20 @@ dependencies = [
[[package]]
name = "transport"
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
dependencies = [
"anyhow",
"axum",
"base64 0.22.1",
"bytes",
"colored",
"encoding",
"futures-util",
"noise",
"periphery_client",
"pin-project-lite",
"rand 0.9.2",
"rustls 0.23.33",
"rustls 0.23.34",
"serde",
"serror",
"sha1",
@@ -5611,7 +5604,7 @@ dependencies = [
"httparse",
"log",
"rand 0.9.2",
"rustls 0.23.33",
"rustls 0.23.34",
"rustls-pki-types",
"sha1",
"thiserror 2.0.17",

View File

@@ -8,7 +8,7 @@ members = [
]
[workspace.package]
version = "2.0.0-dev-72"
version = "2.0.0-dev-86"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -39,7 +39,6 @@ noise = { path = "lib/noise" }
git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.5.3", default-features = false }
slack = { version = "2.0.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
derive_default_builder = "0.1.8"
@@ -95,14 +94,14 @@ opentelemetry = "0.31.0"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.49", features = ["derive"] }
clap = { version = "4.5.50", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "10.0.0", features = ["aws_lc_rs"] } # locked back with octorust
rustls = { version = "0.23.33", features = ["aws-lc-rs"] }
jsonwebtoken = { version = "10.1.0", features = ["aws_lc_rs"] } # locked back with octorust
rustls = { version = "0.23.34", features = ["aws-lc-rs"] }
pem-rfc7468 = { version = "0.7.0", features = ["alloc"] }
openidconnect = "4.0.1"
urlencoding = "2.1.3"
@@ -125,10 +124,11 @@ shell-escape = "0.1.5"
crossterm = "0.29.0"
bollard = "0.19.3"
sysinfo = "0.37.1"
shlex = "1.3.0"
# CLOUD
aws-config = "1.8.8"
aws-sdk-ec2 = "1.175.1"
aws-sdk-ec2 = "1.176.0"
aws-credential-types = "1.2.8"
## CRON
@@ -145,5 +145,5 @@ typeshare = "1.0.4"
dashmap = "6.1.0"
wildcard = "0.3.0"
colored = "3.0.0"
bytes = "1.10.1"
regex = "1.12.2"
bytes = "1.10.1"

View File

@@ -21,9 +21,9 @@ config.workspace = true
logger.workspace = true
noise.workspace = true
# external
tokio-tungstenite.workspace = true
futures-util.workspace = true
comfy-table.workspace = true
tokio-util.workspace = true
serde_json.workspace = true
crossterm.workspace = true
serde_qs.workspace = true
@@ -33,7 +33,6 @@ colored.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
chrono.workspace = true
bytes.workspace = true
tokio.workspace = true
serde.workspace = true
clap.workspace = true

View File

@@ -61,7 +61,8 @@ async fn list_containers(
.map(|s| (s.id.clone(), s))
.collect::<HashMap<_, _>>())),
client.read(ListAllDockerContainers {
servers: Default::default()
servers: Default::default(),
containers: Default::default(),
}),
)?;
@@ -145,7 +146,8 @@ pub async fn inspect_container(
.map(|s| (s.id.clone(), s))
.collect::<HashMap<_, _>>())),
client.read(ListAllDockerContainers {
servers: Default::default()
servers: Default::default(),
containers: Default::default()
}),
)?;

View File

@@ -5,10 +5,9 @@ use futures_util::{FutureExt, try_join};
use komodo_client::{
KomodoClient,
api::read::{
ListActions, ListAlerters, ListAllTerminals, ListBuilders,
ListBuilds, ListDeployments, ListProcedures, ListRepos,
ListResourceSyncs, ListSchedules, ListServers, ListStacks,
ListTags,
ListActions, ListAlerters, ListBuilders, ListBuilds,
ListDeployments, ListProcedures, ListRepos, ListResourceSyncs,
ListSchedules, ListServers, ListStacks, ListTags, ListTerminals,
},
entities::{
ResourceTargetVariant,
@@ -27,20 +26,16 @@ use komodo_client::{
ProcedureListItem, ProcedureListItemInfo, ProcedureState,
},
repo::{RepoListItem, RepoListItemInfo, RepoState},
resource::{
ResourceListItem, ResourceQuery, TemplatesQueryBehavior,
},
resource::{ResourceListItem, ResourceQuery},
resource_link,
schedule::Schedule,
server::{
ServerListItem, ServerListItemInfo, ServerState,
TerminalInfoWithServer,
},
server::{ServerListItem, ServerListItemInfo, ServerState},
stack::{StackListItem, StackListItemInfo, StackState},
sync::{
ResourceSyncListItem, ResourceSyncListItemInfo,
ResourceSyncState,
},
terminal::Terminal,
},
};
use serde::Serialize;
@@ -202,30 +197,16 @@ async fn list_terminals(
filters: &ResourceFilters,
) -> anyhow::Result<()> {
let client = crate::command::komodo_client().await?;
let query = ResourceQuery::builder()
.tags(filters.tags.clone())
.templates(TemplatesQueryBehavior::Exclude)
.build();
let (mut terminals, servers) = tokio::try_join!(
client.read(ListAllTerminals {
query: query.clone(),
fresh: true,
}),
client
.read(ListServers { query })
.map(|res| res.map(|res| res
.into_iter()
.map(|t| (t.id, t.name))
.collect::<HashMap<_, _>>()))
)?;
// Fix server ids -> names
terminals.iter_mut().for_each(|terminal| {
let Some(name) = servers.get(&terminal.server_id) else {
terminal.server_id = String::new();
return;
};
terminal.server_id.clone_from(name);
});
// let query = ResourceQuery::builder()
// .tags(filters.tags.clone())
// .templates(TemplatesQueryBehavior::Exclude)
// .build();
let terminals = client
.read(ListTerminals {
target: None,
use_names: true,
})
.await?;
if !terminals.is_empty() {
print_items(terminals, filters.format, filters.links)?;
}
@@ -1177,14 +1158,14 @@ impl PrintTable for ResourceListItem<AlerterListItemInfo> {
}
}
impl PrintTable for TerminalInfoWithServer {
impl PrintTable for Terminal {
fn header(_links: bool) -> &'static [&'static str] {
&["Terminal", "Server", "Command", "Size", "Created"]
&["Terminal", "Target", "Command", "Size", "Created"]
}
fn row(self, _links: bool) -> Vec<comfy_table::Cell> {
vec![
Cell::new(self.name).add_attribute(Attribute::Bold),
Cell::new(self.server_id),
Cell::new(format!("{:?}", self.target)),
Cell::new(self.command),
Cell::new(if self.stored_size_kb < 1.0 {
format!("{:.1} KiB", self.stored_size_kb)

View File

@@ -18,7 +18,7 @@ pub mod container;
pub mod database;
pub mod execute;
pub mod list;
pub mod ssh;
pub mod terminal;
pub mod update;
async fn komodo_client() -> anyhow::Result<&'static KomodoClient> {

View File

@@ -1,193 +0,0 @@
use anyhow::Context;
use bytes::Bytes;
use colored::Colorize;
use futures_util::{SinkExt, StreamExt};
use komodo_client::{
api::write::{CreateTerminal, TerminalRecreateMode},
entities::config::cli::args::ssh::Ssh,
};
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
use tokio_tungstenite::tungstenite;
pub async fn handle(
Ssh {
server,
name,
command,
recreate,
}: &Ssh,
) -> anyhow::Result<()> {
// Need to forward multiple sources into ws write
let (write_tx, mut write_rx) =
tokio::sync::mpsc::channel::<Bytes>(1024);
// ================
// SETUP RESIZING
// ================
// Subscribe to SIGWINCH for resize messages
let mut sigwinch = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::window_change(),
)
.context("failed to register SIGWINCH handler")?;
// Send first resize messsage, bailing if it fails to get the size.
write_tx.send(resize_message()?).await?;
let forward_resize = async {
while sigwinch.recv().await.is_some() {
if let Ok(resize_message) = resize_message()
&& write_tx.send(resize_message).await.is_err()
{
break;
}
}
};
let forward_stdin = async {
let mut stdin = tokio::io::stdin();
let mut buf = [0u8; 8192];
loop {
// Read into buffer starting from index 1,
// leaving first byte to represent 'data' message.
let n = match stdin.read(&mut buf[1..]).await {
Ok(0) => break, // EOF
Ok(n) => n,
Err(_) => break,
};
// Check for disconnect sequence (alt + q)
if buf[1..(n + 1)] == [197, 147] {
break;
}
let bytes = Bytes::copy_from_slice(&buf[..(n + 1)]);
if write_tx.send(bytes).await.is_err() {
break;
};
}
};
// =====================
// CONNECT AND FORWARD
// =====================
let client = super::komodo_client().await?;
// Init the terminal if it doesn't exist already.
client
.write(CreateTerminal {
server: server.to_string(),
name: name.to_string(),
command: command.clone(),
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
})
.await?;
let (mut ws_write, mut ws_read) = client
.connect_terminal_websocket(server, name)
.await?
.split();
let forward_write = async {
while let Some(bytes) = write_rx.recv().await {
if let Err(e) =
ws_write.send(tungstenite::Message::Binary(bytes)).await
{
return Some(e);
};
}
None
};
let forward_read = async {
let mut stdout = tokio::io::stdout();
loop {
match ws_read.next().await {
Some(Ok(tungstenite::Message::Binary(bytes))) => {
if let Err(e) =
tokio::io::copy(&mut bytes.as_ref(), &mut stdout)
.await
.context("Failed to copy bytes to stdout")
{
return Some(e);
}
let _ = stdout.flush().await;
}
Some(Ok(tungstenite::Message::Text(text))) => {
if let Err(e) =
tokio::io::copy(&mut text.as_ref(), &mut stdout)
.await
.context("Failed to copy text to stdout")
{
return Some(e);
}
let _ = stdout.flush().await;
}
Some(Ok(tungstenite::Message::Close(_))) => break,
Some(Err(e)) => {
return Some(
anyhow::Error::from(e).context("Websocket read error"),
);
}
None => break,
_ => {}
}
}
None
};
let guard = RawModeGuard::enable_raw_mode()?;
tokio::select! {
_ = forward_resize => drop(guard),
_ = forward_stdin => drop(guard),
e = forward_write => {
drop(guard);
if let Some(e) = e {
eprintln!("\nFailed to forward stdin | {e:#}");
}
},
e = forward_read => {
drop(guard);
if let Some(e) = e {
eprintln!("\nFailed to forward stdout | {e:#}");
}
},
};
println!("\n\n{} {}", "connection".bold(), "closed".red().bold());
// It doesn't seem to exit by itself after the raw mode stuff.
std::process::exit(0)
}
fn resize_message() -> anyhow::Result<Bytes> {
let (cols, rows) = crossterm::terminal::size()
.context("Failed to get terminal size")?;
let bytes: Vec<u8> =
format!(r#"{{"rows":{rows},"cols":{cols}}}"#).into();
let mut msg = Vec::with_capacity(bytes.len() + 1);
msg.push(0xff); // resize prefix
msg.extend(bytes);
Ok(msg.into())
}
struct RawModeGuard;
impl RawModeGuard {
fn enable_raw_mode() -> anyhow::Result<Self> {
crossterm::terminal::enable_raw_mode()
.context("Failed to enable terminal raw mode")?;
Ok(Self)
}
}
impl Drop for RawModeGuard {
fn drop(&mut self) {
if let Err(e) = crossterm::terminal::disable_raw_mode() {
eprintln!("Failed to disable terminal raw mode | {e:?}");
}
}
}

View File

@@ -0,0 +1,334 @@
use anyhow::{Context, anyhow};
use colored::Colorize;
use komodo_client::{
api::{
read::{ListAllDockerContainers, ListServers},
terminal::InitTerminal,
},
entities::{
config::cli::args::terminal::{Attach, Connect, Exec},
server::ServerQuery,
terminal::{
ContainerTerminalMode, TerminalRecreateMode,
TerminalResizeMessage, TerminalStdinMessage,
},
},
ws::terminal::TerminalWebsocket,
};
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
use tokio_util::sync::CancellationToken;
pub async fn handle_connect(
Connect {
server,
name,
command,
recreate,
}: &Connect,
) -> anyhow::Result<()> {
handle_terminal_forwarding(async {
super::komodo_client()
.await?
.connect_server_terminal(
server.to_string(),
Some(name.to_string()),
Some(InitTerminal {
command: command.clone(),
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
mode: None,
}),
)
.await
})
.await
}
pub async fn handle_exec(
Exec {
server,
container,
shell,
recreate,
}: &Exec,
) -> anyhow::Result<()> {
let server = get_server(server.clone(), container).await?;
handle_terminal_forwarding(async {
super::komodo_client()
.await?
.connect_container_terminal(
server,
container.to_string(),
None,
Some(InitTerminal {
command: Some(shell.to_string()),
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
mode: Some(ContainerTerminalMode::Exec),
}),
)
.await
})
.await
}
pub async fn handle_attach(
Attach {
server,
container,
recreate,
}: &Attach,
) -> anyhow::Result<()> {
let server = get_server(server.clone(), container).await?;
handle_terminal_forwarding(async {
super::komodo_client()
.await?
.connect_container_terminal(
server,
container.to_string(),
None,
Some(InitTerminal {
command: None,
recreate: if *recreate {
TerminalRecreateMode::Always
} else {
TerminalRecreateMode::DifferentCommand
},
mode: Some(ContainerTerminalMode::Attach),
}),
)
.await
})
.await
}
async fn get_server(
server: Option<String>,
container: &str,
) -> anyhow::Result<String> {
if let Some(server) = server {
return Ok(server);
}
let client = super::komodo_client().await?;
let mut containers = client
.read(ListAllDockerContainers {
servers: Default::default(),
containers: vec![container.to_string()],
})
.await?;
if containers.is_empty() {
return Err(anyhow!(
"Did not find any container matching {container}"
));
}
if containers.len() == 1 {
return containers
.pop()
.context("Shouldn't happen")?
.server_id
.context("Container doesn't have server_id");
}
let servers = containers
.into_iter()
.flat_map(|container| container.server_id)
.collect::<Vec<_>>();
let servers = client
.read(ListServers {
query: ServerQuery::builder().names(servers).build(),
})
.await?
.into_iter()
.map(|server| format!("\t- {}", server.name.bold()))
.collect::<Vec<_>>()
.join("\n");
Err(anyhow!(
"Multiple containers matching '{}' on Servers:\n{servers}",
container.bold(),
))
}
async fn handle_terminal_forwarding<
C: Future<Output = anyhow::Result<TerminalWebsocket>>,
>(
connect: C,
) -> anyhow::Result<()> {
// Need to forward multiple sources into ws write
let (write_tx, mut write_rx) =
tokio::sync::mpsc::channel::<TerminalStdinMessage>(1024);
// ================
// SETUP RESIZING
// ================
// Subscribe to SIGWINCH for resize messages
let mut sigwinch = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::window_change(),
)
.context("failed to register SIGWINCH handler")?;
// Send first resize messsage, bailing if it fails to get the size.
write_tx.send(resize_message()?).await?;
let cancel = CancellationToken::new();
let forward_resize = async {
while future_or_cancel(sigwinch.recv(), &cancel)
.await
.flatten()
.is_some()
{
if let Ok(resize_message) = resize_message()
&& write_tx.send(resize_message).await.is_err()
{
break;
}
}
cancel.cancel();
};
let forward_stdin = async {
let mut stdin = tokio::io::stdin();
let mut buf = [0u8; 8192];
while let Some(Ok(n)) =
future_or_cancel(stdin.read(&mut buf), &cancel).await
{
// EOF
if n == 0 {
break;
}
let bytes = &buf[..n];
// Check for disconnect sequence (alt + q)
if bytes == [197, 147] {
break;
}
// Forward bytes
if write_tx
.send(TerminalStdinMessage::Forward(bytes.to_vec()))
.await
.is_err()
{
break;
};
}
cancel.cancel();
};
// =====================
// CONNECT AND FORWARD
// =====================
let (mut ws_write, mut ws_read) = connect.await?.split();
let forward_write = async {
while let Some(message) =
future_or_cancel(write_rx.recv(), &cancel).await.flatten()
{
if let Err(e) = ws_write.send_stdin_message(message).await {
cancel.cancel();
return Some(e);
};
}
cancel.cancel();
None
};
let forward_read = async {
let mut stdout = tokio::io::stdout();
while let Some(msg) =
future_or_cancel(ws_read.receive_stdout(), &cancel).await
{
let bytes = match msg {
Ok(Some(bytes)) => bytes,
Ok(None) => break,
Err(e) => {
cancel.cancel();
return Some(e.context("Websocket read error"));
}
};
if let Err(e) = stdout
.write_all(&bytes)
.await
.context("Failed to write text to stdout")
{
cancel.cancel();
return Some(e);
}
let _ = stdout.flush().await;
}
cancel.cancel();
None
};
let guard = RawModeGuard::enable_raw_mode()?;
let (_, _, write_error, read_error) = tokio::join!(
forward_resize,
forward_stdin,
forward_write,
forward_read
);
drop(guard);
if let Some(e) = write_error {
eprintln!("\nFailed to forward stdin | {e:#}");
}
if let Some(e) = read_error {
eprintln!("\nFailed to forward stdout | {e:#}");
}
println!("\n\n{} {}", "connection".bold(), "closed".red().bold());
// It doesn't seem to exit by itself after the raw mode stuff.
std::process::exit(0)
}
fn resize_message() -> anyhow::Result<TerminalStdinMessage> {
let (cols, rows) = crossterm::terminal::size()
.context("Failed to get terminal size")?;
Ok(TerminalStdinMessage::Resize(TerminalResizeMessage {
rows,
cols,
}))
}
struct RawModeGuard;
impl RawModeGuard {
fn enable_raw_mode() -> anyhow::Result<Self> {
crossterm::terminal::enable_raw_mode()
.context("Failed to enable terminal raw mode")?;
Ok(Self)
}
}
impl Drop for RawModeGuard {
fn drop(&mut self) {
if let Err(e) = crossterm::terminal::disable_raw_mode() {
eprintln!("Failed to disable terminal raw mode | {e:?}");
}
}
}
async fn future_or_cancel<T, F: Future<Output = T>>(
fut: F,
cancel: &CancellationToken,
) -> Option<T> {
tokio::select! {
res = fut => Some(res),
_ = cancel.cancelled() => None
}
}

View File

@@ -2,6 +2,7 @@
extern crate tracing;
use anyhow::Context;
use colored::Colorize;
use komodo_client::entities::config::cli::args;
use crate::config::cli_config;
@@ -54,7 +55,15 @@ async fn app() -> anyhow::Result<()> {
args::Command::Update { command } => {
command::update::handle(command).await
}
args::Command::Ssh(ssh) => command::ssh::handle(ssh).await,
args::Command::Connect(connect) => {
command::terminal::handle_connect(connect).await
}
args::Command::Exec(exec) => {
command::terminal::handle_exec(exec).await
}
args::Command::Attach(attach) => {
command::terminal::handle_attach(attach).await
}
args::Command::Key { command } => {
noise::key::command::handle(command).await
}
@@ -70,7 +79,18 @@ async fn main() -> anyhow::Result<()> {
tokio::signal::unix::SignalKind::terminate(),
)?;
tokio::select! {
res = tokio::spawn(app()) => res?,
_ = term_signal.recv() => Ok(()),
res = tokio::spawn(app()) => match res {
Ok(Err(e)) => {
eprintln!("{}: {e}", "ERROR".red());
std::process::exit(1)
}
Err(e) => {
eprintln!("{}: {e}", "ERROR".red());
std::process::exit(1)
},
Ok(_) => {}
},
_ = term_signal.recv() => {},
}
Ok(())
}

View File

@@ -55,6 +55,7 @@ axum-extra.workspace = true
tower-http.workspace = true
serde_json.workspace = true
serde_yaml_ng.workspace = true
serde_qs.workspace = true
typeshare.workspace = true
chrono-tz.workspace = true
indexmap.workspace = true

View File

@@ -1,7 +1,7 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.90.0-bullseye AS core-builder
FROM rust:1.90.0-trixie AS core-builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -26,7 +26,7 @@ RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .

View File

@@ -13,7 +13,7 @@ FROM ${AARCH64_BINARIES} AS aarch64
FROM ${FRONTEND_IMAGE} AS frontend
# Final Image
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .

View File

@@ -14,7 +14,7 @@ COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link komodo_client && yarn && yarn build
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/core/starship.toml /starship.toml
COPY ./bin/core/debian-deps.sh .

View File

@@ -5,7 +5,7 @@ use std::{
};
use anyhow::Context;
use command::run_komodo_command;
use command::run_komodo_standard_command;
use config::merge_objects;
use database::mungos::{
by_id::update_one_by_id, mongodb::bson::to_document,
@@ -178,7 +178,7 @@ impl Resolve<ExecuteArgs> for RunAction {
""
};
let mut res = run_komodo_command(
let mut res = run_komodo_standard_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,

View File

@@ -104,22 +104,23 @@ impl Resolve<ExecuteArgs> for SendAlert {
self,
ExecuteArgs { user, update, id }: &ExecuteArgs,
) -> Result<Self::Response, Self::Error> {
let alerters =
list_full_for_user::<Alerter>(Default::default(), user, &[])
.await?
.into_iter()
.filter(|a| {
a.config.enabled
&& (self.alerters.is_empty()
|| self.alerters.contains(&a.name)
|| self.alerters.contains(&a.id))
&& (a.config.alert_types.is_empty()
|| a
.config
.alert_types
.contains(&AlertDataVariant::Custom))
})
.collect::<Vec<_>>();
let alerters = list_full_for_user::<Alerter>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?
.into_iter()
.filter(|a| {
a.config.enabled
&& (self.alerters.is_empty()
|| self.alerters.contains(&a.name)
|| self.alerters.contains(&a.id))
&& (a.config.alert_types.is_empty()
|| a.config.alert_types.contains(&AlertDataVariant::Custom))
})
.collect::<Vec<_>>();
let alerters = if user.admin {
alerters

View File

@@ -1,7 +1,7 @@
use std::{fmt::Write as _, sync::OnceLock};
use anyhow::{Context, anyhow};
use command::run_komodo_command;
use command::run_komodo_standard_command;
use database::{
bson::{Document, doc},
mungos::find::find_collect,
@@ -151,7 +151,7 @@ impl Resolve<ExecuteArgs> for BackupCoreDatabase {
update_update(update.clone()).await?;
let res = run_komodo_command(
let res = run_komodo_standard_command(
"Backup Core Database",
None,
"km database backup --yes",

View File

@@ -8,9 +8,7 @@ use axum_extra::{TypedHeader, headers::ContentType};
use database::mungos::by_id::find_one_by_id;
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures_util::{
StreamExt as _, future::join_all, stream::FuturesUnordered,
};
use futures_util::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
@@ -32,7 +30,6 @@ use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
permission::get_check_permissions,
resource::{KomodoResource, list_full_for_user_using_pattern},
state::db_client,
};
@@ -324,32 +321,11 @@ async fn batch_execute<E: BatchExecute>(
pattern,
Default::default(),
user,
PermissionLevel::Execute.into(),
&[],
)
.await?;
let resources = if user.admin {
resources
} else {
// Only keep resources with execute permissions
resources
.into_iter()
.map(|resource| async move {
get_check_permissions::<E::Resource>(
&resource.id,
user,
PermissionLevel::Execute.into(),
)
.await
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.flatten()
.collect()
};
let futures = resources.into_iter().map(|resource| {
let user = user.clone();
async move {

View File

@@ -46,8 +46,13 @@ impl Resolve<ReadArgs> for ListActions {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Action>(self.query, user, &all_tags)
.await?,
resource::list_for_user::<Action>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
@@ -64,7 +69,10 @@ impl Resolve<ReadArgs> for ListFullActions {
};
Ok(
resource::list_full_for_user::<Action>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -100,6 +108,7 @@ impl Resolve<ReadArgs> for GetActionsSummary {
let actions = resource::list_full_for_user::<Action>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -9,14 +9,14 @@ use komodo_client::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
entities::{
deployment::Deployment, server::Server, stack::Stack,
sync::ResourceSync,
deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, sync::ResourceSync,
},
};
use resolver_api::Resolve;
use crate::{
config::core_config, permission::get_resource_ids_for_user,
config::core_config, permission::list_resource_ids_for_user,
state::db_client,
};
@@ -31,14 +31,29 @@ impl Resolve<ReadArgs> for ListAlerts {
) -> serror::Result<ListAlertsResponse> {
let mut query = self.query.unwrap_or_default();
if !user.admin && !core_config().transparent_mode {
let server_ids =
get_resource_ids_for_user::<Server>(user).await?;
let stack_ids =
get_resource_ids_for_user::<Stack>(user).await?;
let deployment_ids =
get_resource_ids_for_user::<Deployment>(user).await?;
let sync_ids =
get_resource_ids_for_user::<ResourceSync>(user).await?;
let (server_ids, stack_ids, deployment_ids, sync_ids) = tokio::try_join!(
list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
),
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
)?;
// All of the vecs will be non-none if !admin and !transparent mode.
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },

View File

@@ -11,8 +11,10 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags,
permission::{get_check_permissions, list_resource_ids_for_user},
resource,
state::db_client,
};
use super::ReadArgs;
@@ -44,8 +46,13 @@ impl Resolve<ReadArgs> for ListAlerters {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Alerter>(self.query, user, &all_tags)
.await?,
resource::list_for_user::<Alerter>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
@@ -62,7 +69,10 @@ impl Resolve<ReadArgs> for ListFullAlerters {
};
Ok(
resource::list_full_for_user::<Alerter>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -74,9 +84,11 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetAlertersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Alerter,
>(user)
let query = match list_resource_ids_for_user::<Alerter>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
{
Some(ids) => doc! {

View File

@@ -54,8 +54,13 @@ impl Resolve<ReadArgs> for ListBuilds {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Build>(self.query, user, &all_tags)
.await?,
resource::list_for_user::<Build>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
@@ -72,7 +77,10 @@ impl Resolve<ReadArgs> for ListFullBuilds {
};
Ok(
resource::list_full_for_user::<Build>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -108,6 +116,7 @@ impl Resolve<ReadArgs> for GetBuildsSummary {
let builds = resource::list_full_for_user::<Build>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -271,7 +280,10 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
get_all_tags(None).await?
};
let builds = resource::list_full_for_user::<Build>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await
.context("failed to get resources matching query")?;

View File

@@ -11,8 +11,10 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags,
permission::{get_check_permissions, list_resource_ids_for_user},
resource,
state::db_client,
};
use super::ReadArgs;
@@ -44,8 +46,13 @@ impl Resolve<ReadArgs> for ListBuilders {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Builder>(self.query, user, &all_tags)
.await?,
resource::list_for_user::<Builder>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
@@ -62,7 +69,10 @@ impl Resolve<ReadArgs> for ListFullBuilders {
};
Ok(
resource::list_full_for_user::<Builder>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -74,9 +84,11 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetBuildersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Builder,
>(user)
let query = match list_resource_ids_for_user::<Builder>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
{
Some(ids) => doc! {

View File

@@ -56,7 +56,10 @@ impl Resolve<ReadArgs> for ListDeployments {
};
let only_update_available = self.query.specific.update_available;
let deployments = resource::list_for_user::<Deployment>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?;
let deployments = if only_update_available {
@@ -83,7 +86,10 @@ impl Resolve<ReadArgs> for ListFullDeployments {
};
Ok(
resource::list_full_for_user::<Deployment>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -297,6 +303,7 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
let deployments = resource::list_full_for_user::<Deployment>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -342,7 +349,10 @@ impl Resolve<ReadArgs> for ListCommonDeploymentExtraArgs {
get_all_tags(None).await?
};
let deployments = resource::list_full_for_user::<Deployment>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await
.context("failed to get resources matching query")?;

View File

@@ -11,6 +11,7 @@ use komodo_client::{
build::Build,
builder::{Builder, BuilderConfig},
config::{DockerRegistry, GitProvider},
permission::PermissionLevel,
repo::Repo,
server::Server,
sync::ResourceSync,
@@ -50,6 +51,7 @@ mod server;
mod stack;
mod sync;
mod tag;
mod terminal;
mod toml;
mod update;
mod user;
@@ -113,8 +115,9 @@ enum ReadRequest {
GetHistoricalServerStats(GetHistoricalServerStats),
ListServers(ListServers),
ListFullServers(ListFullServers),
// ==== TERMINAL ====
ListTerminals(ListTerminals),
ListAllTerminals(ListAllTerminals),
// ==== DOCKER ====
GetDockerContainersSummary(GetDockerContainersSummary),
@@ -398,16 +401,19 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
resource::list_full_for_user::<Build>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[]
),
resource::list_full_for_user::<Repo>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[]
),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[]
),
)?;

View File

@@ -45,7 +45,10 @@ impl Resolve<ReadArgs> for ListProcedures {
};
Ok(
resource::list_for_user::<Procedure>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -64,7 +67,10 @@ impl Resolve<ReadArgs> for ListFullProcedures {
};
Ok(
resource::list_full_for_user::<Procedure>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -79,6 +85,7 @@ impl Resolve<ReadArgs> for GetProceduresSummary {
let procedures = resource::list_full_for_user::<Procedure>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -44,8 +44,13 @@ impl Resolve<ReadArgs> for ListRepos {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Repo>(self.query, user, &all_tags)
.await?,
resource::list_for_user::<Repo>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
@@ -62,7 +67,10 @@ impl Resolve<ReadArgs> for ListFullRepos {
};
Ok(
resource::list_full_for_user::<Repo>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -98,6 +106,7 @@ impl Resolve<ReadArgs> for GetReposSummary {
let repos = resource::list_full_for_user::<Repo>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -4,6 +4,7 @@ use komodo_client::{
entities::{
ResourceTarget,
action::Action,
permission::PermissionLevel,
procedure::Procedure,
resource::{ResourceQuery, TemplatesQueryBehavior},
schedule::Schedule,
@@ -35,6 +36,7 @@ impl Resolve<ReadArgs> for ListSchedules {
specific: Default::default(),
},
&args.user,
PermissionLevel::Read.into(),
&all_tags,
),
list_full_for_user::<Procedure>(
@@ -46,6 +48,7 @@ impl Resolve<ReadArgs> for ListSchedules {
specific: Default::default(),
},
&args.user,
PermissionLevel::Read.into(),
&all_tags,
)
)?;

View File

@@ -12,7 +12,6 @@ use database::mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use futures_util::{StreamExt as _, stream::FuturesUnordered};
use komodo_client::{
api::read::*,
entities::{
@@ -26,11 +25,10 @@ use komodo_client::{
network::Network,
volume::Volume,
},
komodo_timestamp,
permission::PermissionLevel,
server::{
Server, ServerActionState, ServerListItem, ServerState,
TerminalInfo, TerminalInfoWithServer,
Server, ServerActionState, ServerListItem, ServerQuery,
ServerState,
},
stack::{Stack, StackServiceNames},
stats::{SystemInformation, SystemProcess},
@@ -51,7 +49,7 @@ use tokio::sync::Mutex;
use crate::{
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
permission::{get_check_permissions, list_resources_for_user},
resource,
stack::compose_container_match_regex,
state::{action_states, db_client, server_status_cache},
@@ -67,6 +65,7 @@ impl Resolve<ReadArgs> for GetServersSummary {
let servers = resource::list_for_user::<Server>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?;
@@ -127,8 +126,13 @@ impl Resolve<ReadArgs> for ListServers {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Server>(self.query, user, &all_tags)
.await?,
resource::list_for_user::<Server>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
}
}
@@ -145,7 +149,10 @@ impl Resolve<ReadArgs> for ListFullServers {
};
Ok(
resource::list_full_for_user::<Server>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -390,17 +397,12 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListAllDockerContainersResponse> {
let servers = resource::list_for_user::<Server>(
Default::default(),
ServerQuery::builder().names(self.servers.clone()).build(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?
.into_iter()
.filter(|server| {
self.servers.is_empty()
|| self.servers.contains(&server.id)
|| self.servers.contains(&server.name)
});
.await?;
let mut containers = Vec::<ContainerListItem>::new();
@@ -408,9 +410,17 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(more_containers) = &cache.containers {
containers.extend(more_containers.clone());
}
let Some(more) = &cache.containers else {
continue;
};
let more = more
.iter()
.filter(|container| {
self.containers.is_empty()
|| self.containers.contains(&container.name)
})
.cloned();
containers.extend(more);
}
Ok(containers)
@@ -425,6 +435,7 @@ impl Resolve<ReadArgs> for GetDockerContainersSummary {
let servers = resource::list_full_for_user::<Server>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -576,12 +587,12 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
}
// then check stacks
let stacks =
resource::list_full_for_user_using_document::<Stack>(
doc! { "config.server_id": &server.id },
user,
)
.await?;
let stacks = list_resources_for_user::<Stack>(
doc! { "config.server_id": &server.id },
user,
PermissionLevel::Read.into(),
)
.await?;
// check matching stack
for stack in stacks {
@@ -822,126 +833,46 @@ impl Resolve<ReadArgs> for ListComposeProjects {
}
}
/// Per-server cache entry holding the last fetched terminal list
/// together with its expiry timestamp.
#[derive(Default)]
struct TerminalCacheItem {
  // Most recently fetched terminal list for this server.
  list: Vec<TerminalInfo>,
  // Expiry timestamp (epoch ms); the list is refetched after this time.
  ttl: i64,
}
// impl Resolve<ReadArgs> for ListAllTerminals {
// async fn resolve(
// self,
// args: &ReadArgs,
// ) -> Result<Self::Response, Self::Error> {
// // match self.tar
// let mut terminals = resource::list_full_for_user::<Server>(
// self.query, &args.user, &all_tags,
// )
// .await?
// .into_iter()
// .map(|server| async move {
// (
// list_terminals_inner(&server, self.fresh).await,
// (server.id, server.name),
// )
// })
// .collect::<FuturesUnordered<_>>()
// .collect::<Vec<_>>()
// .await
// .into_iter()
// .flat_map(|(terminals, server)| {
// let terminals = terminals.ok()?;
// Some((terminals, server))
// })
// .flat_map(|(terminals, (server_id, server_name))| {
// terminals.into_iter().map(move |info| {
// TerminalInfoWithServer::from_terminal_info(
// &server_id,
// &server_name,
// info,
// )
// })
// })
// .collect::<Vec<_>>();
/// How long (in ms) a cached terminal list stays valid before a refetch.
const TERMINAL_CACHE_TIMEOUT: i64 = 30_000;
// terminals.sort_by(|a, b| {
// a.server_name.cmp(&b.server_name).then(a.name.cmp(&b.name))
// });
/// Map of server id -> per-server terminal cache entry.
/// The outer std Mutex guards only the map itself (short critical
/// sections); the inner tokio Mutex serializes refreshes for a single
/// server without holding the map lock across awaits.
#[derive(Default)]
struct TerminalCache(
  std::sync::Mutex<
    HashMap<String, Arc<tokio::sync::Mutex<TerminalCacheItem>>>,
  >,
);
impl TerminalCache {
  /// Returns the cache entry for `server_id`, creating a default entry
  /// if one is not present yet.
  ///
  /// Uses the map's entry API so the lock is acquired exactly once.
  /// The previous get-then-insert pattern locked twice, so two
  /// concurrent callers could both miss and both insert: the second
  /// insert replaced the first entry, leaving the first caller holding
  /// an `Arc` to an orphaned cache item.
  fn get_or_insert(
    &self,
    server_id: String,
  ) -> Arc<tokio::sync::Mutex<TerminalCacheItem>> {
    self.0.lock().unwrap().entry(server_id).or_default().clone()
  }
}
fn terminals_cache() -> &'static TerminalCache {
static TERMINALS: OnceLock<TerminalCache> = OnceLock::new();
TERMINALS.get_or_init(Default::default)
}
/// Lists the terminals on a single Server.
impl Resolve<ReadArgs> for ListTerminals {
  async fn resolve(
    self,
    ReadArgs { user }: &ReadArgs,
  ) -> serror::Result<ListTerminalsResponse> {
    // Requires terminal-level Read permission on the target Server;
    // fails the request if the user lacks it.
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Read.terminal(),
    )
    .await?;
    // `fresh` forces a bypass of the terminal list cache.
    list_terminals_inner(&server, self.fresh)
      .await
      .map_err(Into::into)
  }
}
/// Aggregates terminals across every Server visible to the user,
/// sorted by server name and then terminal name.
impl Resolve<ReadArgs> for ListAllTerminals {
  async fn resolve(
    self,
    args: &ReadArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Resolve tag names only when the query actually filters by tag.
    let all_tags = if self.query.tags.is_empty() {
      vec![]
    } else {
      get_all_tags(None).await?
    };
    // Fan out one terminal-list request per visible Server and await
    // them concurrently via FuturesUnordered.
    let mut terminals = resource::list_full_for_user::<Server>(
      self.query, &args.user, &all_tags,
    )
    .await?
    .into_iter()
    .map(|server| async move {
      (
        list_terminals_inner(&server, self.fresh).await,
        (server.id, server.name),
      )
    })
    .collect::<FuturesUnordered<_>>()
    .collect::<Vec<_>>()
    .await
    .into_iter()
    // Best-effort aggregation: servers whose terminal list failed
    // are silently dropped from the response.
    .flat_map(|(terminals, server)| {
      let terminals = terminals.ok()?;
      Some((terminals, server))
    })
    // Attach the owning server's id/name to each terminal info.
    .flat_map(|(terminals, (server_id, server_name))| {
      terminals.into_iter().map(move |info| {
        TerminalInfoWithServer::from_terminal_info(
          &server_id,
          &server_name,
          info,
        )
      })
    })
    .collect::<Vec<_>>();
    // Stable, user-friendly ordering: server name, then terminal name.
    terminals.sort_by(|a, b| {
      a.server_name.cmp(&b.server_name).then(a.name.cmp(&b.name))
    });
    Ok(terminals)
  }
}
/// Returns the terminal list for `server`, served from the per-server
/// cache when possible.
///
/// Refetches from the Periphery agent when `fresh` is requested or the
/// cache entry's TTL (TERMINAL_CACHE_TIMEOUT) has expired; otherwise
/// the cached list is returned as-is. Both branches previously ended
/// in an identical `Ok(cache.list.clone())`, now hoisted to a single
/// return.
async fn list_terminals_inner(
  server: &Server,
  fresh: bool,
) -> anyhow::Result<Vec<TerminalInfo>> {
  let cache = terminals_cache().get_or_insert(server.id.clone());
  // Inner tokio Mutex serializes refreshes for this one server.
  let mut cache = cache.lock().await;
  if fresh || komodo_timestamp() > cache.ttl {
    cache.list = periphery_client(server)
      .await?
      .request(periphery_client::api::terminal::ListTerminals {
        container: None,
      })
      .await
      .context("Failed to get fresh terminal list")?;
    cache.ttl = komodo_timestamp() + TERMINAL_CACHE_TIMEOUT;
  }
  Ok(cache.list.clone())
}
// Ok(terminals)
// }
// }

View File

@@ -200,7 +200,10 @@ impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -231,7 +234,10 @@ impl Resolve<ReadArgs> for ListCommonStackBuildExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -262,9 +268,13 @@ impl Resolve<ReadArgs> for ListStacks {
get_all_tags(None).await?
};
let only_update_available = self.query.specific.update_available;
let stacks =
resource::list_for_user::<Stack>(self.query, user, &all_tags)
.await?;
let stacks = resource::list_for_user::<Stack>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?;
let stacks = if only_update_available {
stacks
.into_iter()
@@ -295,7 +305,10 @@ impl Resolve<ReadArgs> for ListFullStacks {
};
Ok(
resource::list_full_for_user::<Stack>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -331,6 +344,7 @@ impl Resolve<ReadArgs> for GetStacksSummary {
let stacks = resource::list_full_for_user::<Stack>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -45,7 +45,10 @@ impl Resolve<ReadArgs> for ListResourceSyncs {
};
Ok(
resource::list_for_user::<ResourceSync>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -64,7 +67,10 @@ impl Resolve<ReadArgs> for ListFullResourceSyncs {
};
Ok(
resource::list_full_for_user::<ResourceSync>(
self.query, user, &all_tags,
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
)
@@ -101,6 +107,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
resource::list_full_for_user::<ResourceSync>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -0,0 +1,247 @@
use anyhow::Context as _;
use futures_util::{
FutureExt, StreamExt as _, stream::FuturesUnordered,
};
use komodo_client::{
api::read::{ListTerminals, ListTerminalsResponse},
entities::{
deployment::Deployment,
permission::PermissionLevel,
server::Server,
stack::Stack,
terminal::{Terminal, TerminalTarget},
user::User,
},
};
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::periphery_client, permission::get_check_permissions,
resource,
};
use super::ReadArgs;
//
/// Lists terminals, either for one specific target (Server, Container,
/// Stack, or Deployment) or — when no target is given — across every
/// resource the user holds terminal permission on.
impl Resolve<ReadArgs> for ListTerminals {
  async fn resolve(
    self,
    ReadArgs { user }: &ReadArgs,
  ) -> serror::Result<ListTerminalsResponse> {
    // No target: aggregate terminals across all permitted resources.
    let Some(target) = self.target else {
      return list_all_terminals_for_user(user, self.use_names).await;
    };
    match &target {
      TerminalTarget::Server { server } => {
        // The server field is optional in the API type but required
        // for this variant; reject with 400 when missing.
        let server = server
          .as_ref()
          .context("Must provide 'target.params.server'")
          .status_code(StatusCode::BAD_REQUEST)?;
        let server = get_check_permissions::<Server>(
          server,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?;
        list_terminals_on_server(&server, Some(target)).await
      }
      TerminalTarget::Container { server, .. } => {
        // Container terminals are gated by terminal permission on the
        // host Server.
        let server = get_check_permissions::<Server>(
          server,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?;
        list_terminals_on_server(&server, Some(target)).await
      }
      TerminalTarget::Stack { stack, .. } => {
        // Permission is checked on the Stack; the host Server is then
        // looked up without its own permission check — terminal
        // permission on the Stack is what authorizes access here.
        let server = get_check_permissions::<Stack>(
          stack,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
        .config
        .server_id;
        let server = resource::get::<Server>(&server).await?;
        list_terminals_on_server(&server, Some(target)).await
      }
      TerminalTarget::Deployment { deployment } => {
        // Same pattern as Stack: the Deployment's permission gates
        // access, and its configured Server is fetched directly.
        let server = get_check_permissions::<Deployment>(
          deployment,
          user,
          PermissionLevel::Read.terminal(),
        )
        .await?
        .config
        .server_id;
        let server = resource::get::<Server>(&server).await?;
        list_terminals_on_server(&server, Some(target)).await
      }
    }
  }
}
/// Collects all terminals the user may access across Servers, Stacks,
/// and Deployments, rewriting each terminal's target to use either
/// resource names or ids (per `use_names`) and dropping terminals the
/// user lacks permission for.
///
/// Servers are tagged with whether the user has direct terminal
/// permission on them; servers only reachable through a permitted
/// Stack/Deployment are queried too, but their server-scoped and
/// container-scoped terminals are filtered out below.
async fn list_all_terminals_for_user(
  user: &User,
  use_names: bool,
) -> serror::Result<Vec<Terminal>> {
  // Fetch the three permitted resource lists concurrently.
  let (mut servers, stacks, deployments) = tokio::try_join!(
    resource::list_full_for_user::<Server>(
      Default::default(),
      user,
      PermissionLevel::Read.terminal(),
      &[]
    )
    .map(|res| res.map(|servers| servers
      .into_iter()
      // true denotes user actually has permission on this Server.
      .map(|server| (server, true))
      .collect::<Vec<_>>())),
    resource::list_full_for_user::<Stack>(
      Default::default(),
      user,
      PermissionLevel::Read.terminal(),
      &[]
    ),
    resource::list_full_for_user::<Deployment>(
      Default::default(),
      user,
      PermissionLevel::Read.terminal(),
      &[]
    ),
  )?;
  // Ensure any missing servers are present to query
  // (false marks servers reached only via a Stack/Deployment).
  for stack in &stacks {
    if !stack.config.server_id.is_empty()
      && !servers
        .iter()
        .any(|(server, _)| server.id == stack.config.server_id)
    {
      let server =
        resource::get::<Server>(&stack.config.server_id).await?;
      servers.push((server, false));
    }
  }
  for deployment in &deployments {
    if !deployment.config.server_id.is_empty()
      && !servers
        .iter()
        .any(|(server, _)| server.id == deployment.config.server_id)
    {
      let server =
        resource::get::<Server>(&deployment.config.server_id).await?;
      servers.push((server, false));
    }
  }
  // Query every server's terminals concurrently, keeping the server's
  // id/name and direct-permission flag alongside each result.
  let mut terminals = servers
    .into_iter()
    .map(|(server, server_permission)| async move {
      (
        list_terminals_on_server(&server, None).await,
        (server.id, server.name, server_permission),
      )
    })
    .collect::<FuturesUnordered<_>>()
    .collect::<Vec<_>>()
    .await
    .into_iter()
    .flat_map(
      |(terminals, (server_id, server_name, server_permission))| {
        // Servers whose terminal list failed are dropped (best-effort).
        let terminals = terminals
          .ok()?
          .into_iter()
          .filter_map(|mut terminal| {
            // Only keep terminals with appropriate perms.
            match terminal.target.clone() {
              TerminalTarget::Server { .. } => server_permission
                .then(|| {
                  terminal.target = TerminalTarget::Server {
                    server: Some(if use_names {
                      server_name.clone()
                    } else {
                      server_id.clone()
                    }),
                  };
                  terminal
                }),
              TerminalTarget::Container { container, .. } => {
                server_permission.then(|| {
                  terminal.target = TerminalTarget::Container {
                    server: if use_names {
                      server_name.clone()
                    } else {
                      server_id.clone()
                    },
                    container,
                  };
                  terminal
                })
              }
              TerminalTarget::Stack { stack, service } => {
                // Kept only if the Stack is in the permitted list.
                stacks.iter().find(|s| s.id == stack).map(|s| {
                  terminal.target = TerminalTarget::Stack {
                    stack: if use_names {
                      s.name.clone()
                    } else {
                      s.id.clone()
                    },
                    service,
                  };
                  terminal
                })
              }
              TerminalTarget::Deployment { deployment } => {
                // Kept only if the Deployment is in the permitted list.
                deployments.iter().find(|d| d.id == deployment).map(
                  |d| {
                    terminal.target = TerminalTarget::Deployment {
                      deployment: if use_names {
                        d.name.clone()
                      } else {
                        d.id.clone()
                      },
                    };
                    terminal
                  },
                )
              }
            }
          })
          .collect::<Vec<_>>();
        Some(terminals)
      },
    )
    .flatten()
    .collect::<Vec<_>>();
  // Stable ordering: by target, then terminal name.
  terminals.sort_by(|a, b| {
    a.target.cmp(&b.target).then(a.name.cmp(&b.name))
  });
  Ok(terminals)
}
/// Fetches the terminal list from the given Server's Periphery agent,
/// optionally filtered to one terminal target.
///
/// Errors are annotated with the server's name and id so aggregated
/// failures are attributable.
async fn list_terminals_on_server(
  server: &Server,
  target: Option<TerminalTarget>,
) -> serror::Result<Vec<Terminal>> {
  let periphery = periphery_client(server).await?;
  let request =
    periphery_client::api::terminal::ListTerminals { target };
  let terminals = periphery.request(request).await.with_context(|| {
    format!(
      "Failed to get Terminal list from Server {} ({})",
      server.name, server.id
    )
  })?;
  Ok(terminals)
}

View File

@@ -46,6 +46,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Alerter>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -56,6 +57,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Builder>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -66,6 +68,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Server>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -76,6 +79,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Stack>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -86,6 +90,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Deployment>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -96,6 +101,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Build>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -106,6 +112,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Repo>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -116,6 +123,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Procedure>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -126,6 +134,7 @@ async fn get_all_targets(
resource::list_full_for_user::<Action>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -136,6 +145,7 @@ async fn get_all_targets(
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?

View File

@@ -29,7 +29,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
permission::{get_check_permissions, get_resource_ids_for_user},
permission::{get_check_permissions, list_resource_ids_for_user},
state::db_client,
};
@@ -45,99 +45,137 @@ impl Resolve<ReadArgs> for ListUpdates {
let query = if user.admin || core_config().transparent_mode {
self.query
} else {
let server_query = get_resource_ids_for_user::<Server>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
get_resource_ids_for_user::<Deployment>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = get_resource_ids_for_user::<Stack>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = get_resource_ids_for_user::<Build>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = get_resource_ids_for_user::<Repo>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
get_resource_ids_for_user::<Procedure>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = get_resource_ids_for_user::<Action>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = get_resource_ids_for_user::<Builder>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = get_resource_ids_for_user::<Alerter>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query = get_resource_ids_for_user::<
ResourceSync,
>(user)
let server_query = list_resource_ids_for_user::<Server>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
list_resource_ids_for_user::<Deployment>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Deployment", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = list_resource_ids_for_user::<Stack>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = list_resource_ids_for_user::<Build>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = list_resource_ids_for_user::<Repo>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query = list_resource_ids_for_user::<Procedure>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Procedure", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = list_resource_ids_for_user::<Action>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = list_resource_ids_for_user::<Builder>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = list_resource_ids_for_user::<Alerter>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query =
list_resource_ids_for_user::<ResourceSync>(
None,
user,
PermissionLevel::Read.into(),
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = self.query.unwrap_or_default();
query.extend(doc! {

View File

@@ -1,27 +1,15 @@
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{
api::{terminal::*, write::TerminalRecreateMode},
entities::{
deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, user::User,
},
};
use periphery_client::api::terminal::CreateTerminal;
use komodo_client::{api::terminal::*, entities::user::User};
use serror::Json;
use crate::{
auth::auth_request, helpers::periphery_client,
permission::get_check_permissions, resource::get,
state::stack_status_cache,
auth::auth_request, helpers::terminal::setup_target_for_user,
};
pub fn router() -> Router {
Router::new()
.route("/execute", post(execute_terminal))
.route("/execute/container", post(execute_container_exec))
.route("/execute/deployment", post(execute_deployment_exec))
.route("/execute/stack", post(execute_stack_exec))
.layer(middleware::from_fn(auth_request))
}
@@ -34,14 +22,15 @@ pub fn router() -> Router {
skip_all,
fields(
operator = user.id,
server,
target,
terminal,
init = format!("{init:?}")
)
)]
async fn execute_terminal(
Extension(user): Extension<User>,
Json(ExecuteTerminalBody {
server,
target,
terminal,
command,
init,
@@ -49,200 +38,13 @@ async fn execute_terminal(
) -> serror::Result<axum::body::Body> {
info!("/terminal/execute request | user: {}", user.username);
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
// Maybe init terminal.
if let Some(InitTerminal { command, recreate }) = init {
periphery
.request(CreateTerminal {
name: terminal.clone(),
command,
recreate,
})
.await?;
}
let (target, terminal, periphery) =
setup_target_for_user(target, terminal, init, &user).await?;
let stream = periphery
.execute_terminal(terminal, command)
.execute_terminal(target, terminal, command)
.await
.context("Failed to execute command on periphery")?;
Ok(axum::body::Body::from_stream(stream))
}
// ======================
// ExecuteContainerExec
// ======================
#[instrument(
name = "ExecuteContainerExec",
skip_all,
fields(
operator = user.id,
server,
container,
shell,
recreate = format!("{recreate:?}"),
)
)]
async fn execute_container_exec(
Extension(user): Extension<User>,
Json(ExecuteContainerExecBody {
server,
container,
shell,
command,
recreate,
}): Json<ExecuteContainerExecBody>,
) -> serror::Result<axum::body::Body> {
info!("ExecuteContainerExec request | user: {}", user.username);
let server = get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(
container,
shell,
command,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
Ok(axum::body::Body::from_stream(stream))
}
// =======================
// ExecuteDeploymentExec
// =======================
#[instrument(
name = "ExecuteDeploymentExec",
skip_all,
fields(
operator = user.id,
deployment,
shell,
recreate = format!("{recreate:?}"),
)
)]
async fn execute_deployment_exec(
Extension(user): Extension<User>,
Json(ExecuteDeploymentExecBody {
deployment,
shell,
command,
recreate,
}): Json<ExecuteDeploymentExecBody>,
) -> serror::Result<axum::body::Body> {
info!("ExecuteDeploymentExec request | user: {}", user.username);
let deployment = get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get::<Server>(&deployment.config.server_id).await?;
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(
deployment.name,
shell,
command,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
Ok(axum::body::Body::from_stream(stream))
}
// ==================
// ExecuteStackExec
// ==================
#[instrument(
name = "ExecuteStackExec",
skip_all,
fields(
operator = user.id,
stack,
service,
shell,
recreate = format!("{recreate:?}"),
)
)]
async fn execute_stack_exec(
Extension(user): Extension<User>,
Json(ExecuteStackExecBody {
stack,
service,
shell,
command,
recreate,
}): Json<ExecuteStackExecBody>,
) -> serror::Result<axum::body::Body> {
info!("ExecuteStackExec request | user: {}", user.username);
let stack = get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Read.terminal(),
)
.await?;
let server = get::<Server>(&stack.config.server_id).await?;
let container = stack_status_cache()
.get(&stack.id)
.await
.context("could not get stack status")?
.curr
.services
.iter()
.find(|s| s.service == service)
.context("could not find service")?
.container
.as_ref()
.context("could not find service container")?
.name
.clone();
let periphery = periphery_client(&server).await?;
let stream = periphery
.execute_container_exec(
container,
shell,
command,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
.context(
"Failed to execute container exec command on periphery",
)?;
.context("Failed to execute command on Terminal")?;
Ok(axum::body::Body::from_stream(stream))
}

View File

@@ -34,6 +34,7 @@ mod service_user;
mod stack;
mod sync;
mod tag;
mod terminal;
mod user;
mod user_group;
mod variable;
@@ -89,10 +90,6 @@ pub enum WriteRequest {
UpdateServer(UpdateServer),
RenameServer(RenameServer),
CreateNetwork(CreateNetwork),
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
UpdateServerPublicKey(UpdateServerPublicKey),
RotateServerKeys(RotateServerKeys),
@@ -168,6 +165,12 @@ pub enum WriteRequest {
CommitSync(CommitSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
// ==== TERMINAL ====
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
// ==== TAG ====
CreateTag(CreateTag),
DeleteTag(DeleteTag),

View File

@@ -1,10 +1,9 @@
use anyhow::Context;
use formatting::{bold, format_serror};
use futures_util::{StreamExt, stream::FuturesUnordered};
use komodo_client::{
api::write::*,
entities::{
NoData, Operation,
Operation,
permission::PermissionLevel,
server::{Server, ServerInfo},
to_docker_compatible_name,
@@ -17,7 +16,6 @@ use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client,
query::get_all_tags,
update::{add_update, make_update, update_update},
},
permission::get_check_permissions,
@@ -189,162 +187,6 @@ impl Resolve<WriteArgs> for CreateNetwork {
}
}
impl Resolve<WriteArgs> for CreateTerminal {
#[instrument(
"CreateTerminal",
skip_all,
fields(
operator = user.id,
server = self.server,
terminal = self.name,
command = self.command,
recreate = format!("{:?}", self.recreate),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Write.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::CreateTerminal {
name: self.name,
command: self.command,
recreate: self.recreate,
})
.await
.context("Failed to create terminal on Periphery")?;
Ok(NoData {})
}
}
impl Resolve<WriteArgs> for DeleteTerminal {
#[instrument(
"DeleteTerminal",
skip_all,
fields(
operator = user.id,
server = self.server,
terminal = self.terminal,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Write.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteTerminal {
terminal: self.terminal,
})
.await
.context("Failed to delete terminal on Periphery")?;
Ok(NoData {})
}
}
impl Resolve<WriteArgs> for DeleteAllTerminals {
#[instrument(
"DeleteAllTerminals",
skip_all,
fields(
operator = user.id,
server = self.server,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Write.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteAllTerminals {})
.await
.context("Failed to delete all terminals on Periphery")?;
Ok(NoData {})
}
}
//
impl Resolve<WriteArgs> for BatchDeleteAllTerminals {
#[instrument(
"BatchDeleteAllTerminals",
skip_all,
fields(
operator = user.id,
query = format!("{:?}", self.query),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> Result<Self::Response, Self::Error> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Server>(
self.query, user, &all_tags,
)
.await?
.into_iter()
.map(|server| async move {
let res = async {
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteAllTerminals {})
.await
.context("Failed to delete all terminals on Periphery")?;
anyhow::Ok(())
}
.await;
if let Err(e) = res {
warn!(
"Failed to delete all terminals on {} ({}) | {e:#}",
server.name, server.id
)
}
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await;
Ok(NoData {})
}
}
//
impl Resolve<WriteArgs> for UpdateServerPublicKey {

View File

@@ -0,0 +1,309 @@
use anyhow::Context as _;
use futures_util::{StreamExt as _, stream::FuturesUnordered};
use komodo_client::{
api::write::*,
entities::{
NoData, deployment::Deployment, permission::PermissionLevel,
server::Server, stack::Stack, terminal::TerminalTarget,
user::User,
},
};
use periphery_client::api;
use reqwest::StatusCode;
use resolver_api::Resolve;
use serror::AddStatusCode;
use crate::{
helpers::{
periphery_client,
query::get_all_tags,
terminal::{
create_container_terminal_inner,
get_deployment_periphery_container,
get_stack_service_periphery_container,
},
},
permission::get_check_permissions,
resource,
};
use super::WriteArgs;
//
impl Resolve<WriteArgs> for CreateTerminal {
#[instrument(
"CreateTerminal",
skip_all,
fields(
operator = user.id,
terminal = self.name,
target = format!("{:?}", self.target),
command = self.command,
mode = format!("{:?}", self.mode),
recreate = format!("{:?}", self.recreate),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
match self.target.clone() {
TerminalTarget::Server { server } => {
let server = server
.context("Must provide 'target.params.server'")
.status_code(StatusCode::BAD_REQUEST)?;
create_server_terminal(self, server, user).await?;
}
TerminalTarget::Container { server, container } => {
create_container_terminal(self, server, container, user)
.await?;
}
TerminalTarget::Stack { stack, service } => {
let service = service
.context("Must provide 'target.params.service'")
.status_code(StatusCode::BAD_REQUEST)?;
create_stack_service_terminal(self, stack, service, user)
.await?;
}
TerminalTarget::Deployment { deployment } => {
create_deployment_terminal(self, deployment, user).await?;
}
};
Ok(NoData {})
}
}
async fn create_server_terminal(
CreateTerminal {
name,
command,
recreate,
target: _,
mode: _,
}: CreateTerminal,
server: String,
user: &User,
) -> anyhow::Result<()> {
let server = get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::CreateServerTerminal {
name,
command,
recreate,
})
.await
.context("Failed to create Server Terminal on Periphery")?;
Ok(())
}
async fn create_container_terminal(
req: CreateTerminal,
server: String,
container: String,
user: &User,
) -> anyhow::Result<()> {
let server = get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
create_container_terminal_inner(req, &periphery, container).await
}
async fn create_stack_service_terminal(
req: CreateTerminal,
stack: String,
service: String,
user: &User,
) -> anyhow::Result<()> {
let (_, periphery, container) =
get_stack_service_periphery_container(&stack, &service, user)
.await?;
create_container_terminal_inner(req, &periphery, container).await
}
async fn create_deployment_terminal(
req: CreateTerminal,
deployment: String,
user: &User,
) -> anyhow::Result<()> {
let (_, periphery, container) =
get_deployment_periphery_container(&deployment, user).await?;
create_container_terminal_inner(req, &periphery, container).await
}
//
impl Resolve<WriteArgs> for DeleteTerminal {
#[instrument(
"DeleteTerminal",
skip_all,
fields(
operator = user.id,
target = format!("{:?}", self.target),
terminal = self.terminal,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
let server = match &self.target {
TerminalTarget::Server { server } => {
let server = server
.as_ref()
.context("Must provide 'target.params.server'")
.status_code(StatusCode::BAD_REQUEST)?;
get_check_permissions::<Server>(
server,
user,
PermissionLevel::Read.terminal(),
)
.await?
}
TerminalTarget::Container { server, .. } => {
get_check_permissions::<Server>(
server,
user,
PermissionLevel::Read.terminal(),
)
.await?
}
TerminalTarget::Stack { stack, .. } => {
let server = get_check_permissions::<Stack>(
stack,
user,
PermissionLevel::Read.terminal(),
)
.await?
.config
.server_id;
resource::get::<Server>(&server).await?
}
TerminalTarget::Deployment { deployment } => {
let server = get_check_permissions::<Deployment>(
deployment,
user,
PermissionLevel::Read.terminal(),
)
.await?
.config
.server_id;
resource::get::<Server>(&server).await?
}
};
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteTerminal {
target: self.target,
terminal: self.terminal,
})
.await
.context("Failed to delete terminal on Periphery")?;
Ok(NoData {})
}
}
//
impl Resolve<WriteArgs> for DeleteAllTerminals {
#[instrument(
"DeleteAllTerminals",
skip_all,
fields(
operator = user.id,
server = self.server,
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<NoData> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteAllTerminals {})
.await
.context("Failed to delete all terminals on Periphery")?;
Ok(NoData {})
}
}
//
impl Resolve<WriteArgs> for BatchDeleteAllTerminals {
#[instrument(
"BatchDeleteAllTerminals",
skip_all,
fields(
operator = user.id,
query = format!("{:?}", self.query),
)
)]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> Result<Self::Response, Self::Error> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Server>(
self.query,
user,
PermissionLevel::Read.terminal(),
&all_tags,
)
.await?
.into_iter()
.map(|server| async move {
let res = async {
let periphery = periphery_client(&server).await?;
periphery
.request(api::terminal::DeleteAllTerminals {})
.await
.context("Failed to delete all terminals on Periphery")?;
anyhow::Ok(())
}
.await;
if let Err(e) = res {
warn!(
"Failed to delete all terminals on {} ({}) | {e:#}",
server.name, server.id
)
}
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await;
Ok(NoData {})
}
}

View File

@@ -1,7 +1,7 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use jsonwebtoken::{DecodingKey, Validation, decode};
use jsonwebtoken::dangerous::insecure_decode;
use komodo_client::entities::{
config::core::{CoreConfig, OauthCredentials},
random_string,
@@ -138,15 +138,8 @@ impl GoogleOauthClient {
&self,
id_token: &str,
) -> anyhow::Result<GoogleUser> {
let mut v = Validation::new(Default::default());
v.insecure_disable_signature_validation();
v.validate_aud = false;
let res = decode::<GoogleUser>(
id_token,
&DecodingKey::from_secret(b""),
&v,
)
.context("failed to decode google id token")?;
let res = insecure_decode::<GoogleUser>(id_token)
.context("failed to decode google id token")?;
Ok(res.claims)
}

View File

@@ -31,7 +31,7 @@ use transport::{
},
channel::{BufferedReceiver, Sender, buffered_channel},
websocket::{
Websocket, WebsocketMessage, WebsocketReceiver as _,
Websocket, WebsocketReceiver as _, WebsocketReceiverExt,
WebsocketSender as _,
},
};
@@ -367,8 +367,22 @@ impl PeripheryConnection {
let forward_writes = async {
loop {
let Ok(message) = receiver.recv().await else {
break;
let message = match tokio::time::timeout(
Duration::from_secs(5),
receiver.recv(),
)
.await
{
Ok(Ok(message)) => message,
Ok(Err(_)) => break,
// Handle sending Ping
Err(_) => {
if let Err(e) = ws_write.ping().await {
self.set_error(e).await;
break;
}
continue;
}
};
match ws_write.send(message.into_bytes()).await {
Ok(_) => receiver.clear_buffer(),
@@ -385,19 +399,13 @@ impl PeripheryConnection {
let handle_reads = async {
loop {
match ws_read.recv().await {
Ok(WebsocketMessage::Message(message)) => {
self.handle_incoming_message(message).await
}
Ok(WebsocketMessage::Close(_))
| Ok(WebsocketMessage::Closed) => {
self.set_error(anyhow!("Connection closed")).await;
break;
}
match ws_read.recv_message().await {
Ok(message) => self.handle_incoming_message(message).await,
Err(e) => {
self.set_error(e).await;
break;
}
};
}
}
// Cancel again if not already
cancel.cancel();
@@ -410,15 +418,8 @@ impl PeripheryConnection {
pub async fn handle_incoming_message(
&self,
message: EncodedTransportMessage,
message: TransportMessage,
) {
let message: TransportMessage = match message.decode() {
Ok(res) => res,
Err(e) => {
warn!("Failed to parse Message bytes | {e:#}");
return;
}
};
match message {
TransportMessage::Response(data) => {
match data.decode().map(ResponseMessage::into_inner) {

View File

@@ -30,6 +30,7 @@ pub mod matcher;
pub mod procedure;
pub mod prune;
pub mod query;
pub mod terminal;
pub mod update;
// pub mod resource;

View File

@@ -10,6 +10,7 @@ use komodo_client::{
action::Action,
build::Build,
deployment::Deployment,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
stack::Stack,
@@ -1311,6 +1312,7 @@ async fn extend_batch_exection<E: ExtendBatch>(
pattern,
Default::default(),
procedure_user(),
PermissionLevel::Read.into(),
&[],
)
.await?

View File

@@ -0,0 +1,345 @@
use anyhow::{Context as _, anyhow};
use komodo_client::{
api::{terminal::InitTerminal, write::CreateTerminal},
entities::{
deployment::Deployment,
permission::PermissionLevel,
server::Server,
stack::Stack,
terminal::{ContainerTerminalMode, TerminalTarget},
user::User,
},
};
use periphery_client::api;
use crate::{
helpers::periphery_client, periphery::PeripheryClient,
permission::get_check_permissions, resource,
state::stack_status_cache,
};
pub async fn setup_target_for_user(
target: TerminalTarget,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
match target {
TerminalTarget::Server { server } => {
setup_server_target_for_user(
server.context("Missing 'target.params.server'")?,
terminal,
init,
user,
)
.await
}
TerminalTarget::Container { server, container } => {
setup_container_target_for_user(
server, container, terminal, init, user,
)
.await
}
TerminalTarget::Stack { stack, service } => {
setup_stack_service_target_for_user(
stack,
service.context("Missing 'target.params.service'")?,
terminal,
init,
user,
)
.await
}
TerminalTarget::Deployment { deployment } => {
setup_deployment_target_for_user(
deployment, terminal, init, user,
)
.await
}
}
}
async fn setup_server_target_for_user(
server: String,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
let server = get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let terminal = terminal.unwrap_or_else(|| {
init
.as_ref()
.and_then(|init| init.command.clone())
.unwrap_or_else(|| String::from("term"))
});
let periphery = periphery_client(&server).await?;
if let Some(init) = init {
periphery
.request(api::terminal::CreateServerTerminal {
name: terminal.clone(),
command: init.command,
recreate: init.recreate,
})
.await
.context("Failed to create Server Terminal on Periphery")?;
}
Ok((
TerminalTarget::Server {
server: Some(server.id),
},
terminal,
periphery,
))
}
async fn setup_container_target_for_user(
server: String,
container: String,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
let server = get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let terminal = default_container_terminal_name(
terminal,
&container,
init.as_ref(),
);
let periphery = periphery_client(&server).await?;
let target = TerminalTarget::Container {
server: server.id,
container: container.clone(),
};
if let Some(init) = init {
create_container_terminal_inner(
CreateTerminal {
name: terminal.clone(),
target: target.clone(),
command: init.command,
mode: init.mode,
recreate: init.recreate,
},
&periphery,
container,
)
.await?;
}
Ok((target, terminal, periphery))
}
async fn setup_stack_service_target_for_user(
stack: String,
service: String,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
let (target, periphery, container) =
get_stack_service_periphery_container(&stack, &service, user)
.await?;
let terminal = default_container_terminal_name(
terminal,
&container,
init.as_ref(),
);
if let Some(init) = init {
create_container_terminal_inner(
CreateTerminal {
name: terminal.clone(),
target: target.clone(),
command: init.command,
mode: init.mode,
recreate: init.recreate,
},
&periphery,
container,
)
.await?;
}
Ok((target, terminal, periphery))
}
async fn setup_deployment_target_for_user(
deployment: String,
terminal: Option<String>,
init: Option<InitTerminal>,
user: &User,
) -> anyhow::Result<(TerminalTarget, String, PeripheryClient)> {
let (target, periphery, container) =
get_deployment_periphery_container(&deployment, user).await?;
let terminal = default_container_terminal_name(
terminal,
&container,
init.as_ref(),
);
if let Some(init) = init {
create_container_terminal_inner(
CreateTerminal {
name: terminal.clone(),
target: target.clone(),
command: init.command,
mode: init.mode,
recreate: init.recreate,
},
&periphery,
container,
)
.await?;
}
Ok((target, terminal, periphery))
}
fn default_container_terminal_name(
terminal: Option<String>,
container: &str,
init: Option<&InitTerminal>,
) -> String {
terminal.unwrap_or_else(|| {
init
.as_ref()
.map(|init| {
init.command.clone().unwrap_or_else(|| {
init.mode.unwrap_or_default().as_ref().to_string()
})
})
.unwrap_or_else(|| container.to_string())
})
}
pub async fn create_container_terminal_inner(
CreateTerminal {
name,
target,
command,
mode,
recreate,
}: CreateTerminal,
periphery: &PeripheryClient,
container: String,
) -> anyhow::Result<()> {
match mode.unwrap_or_default() {
ContainerTerminalMode::Exec => periphery
.request(periphery_client::api::terminal::CreateContainerExecTerminal {
name,
target,
container,
command,
recreate,
})
.await
.context(
"Failed to create Container Exec Terminal on Periphery",
)?,
ContainerTerminalMode::Attach => periphery
.request(periphery_client::api::terminal::CreateContainerAttachTerminal {
name,
target,
container,
recreate,
})
.await
.context(
"Failed to create Container Attach Terminal on Periphery",
)?,
};
Ok(())
}
pub async fn get_stack_service_periphery_container(
stack: &str,
service: &str,
user: &User,
) -> anyhow::Result<(TerminalTarget, PeripheryClient, String)> {
let stack = get_check_permissions::<Stack>(
stack,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let server =
resource::get::<Server>(&stack.config.server_id).await?;
let Some(status) = stack_status_cache().get(&stack.id).await else {
return Err(anyhow!("Could not get Stack status"));
};
let container = status
.curr
.services
.iter()
.find(|s| s.service.as_str() == service)
.with_context(|| {
format!("Did not find Stack service matching {service}")
})?
.container
.as_ref()
.with_context(|| {
format!("Did not find container for Stack service {service}")
})?
.name
.clone();
let periphery = periphery_client(&server).await?;
Ok((
TerminalTarget::Stack {
stack: stack.id,
service: Some(service.to_string()),
},
periphery,
container,
))
}
pub async fn get_deployment_periphery_container(
deployment: &str,
user: &User,
) -> anyhow::Result<(TerminalTarget, PeripheryClient, String)> {
let deployment = get_check_permissions::<Deployment>(
deployment,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
let periphery = periphery_client(&server).await?;
let container = deployment.name.clone();
Ok((
TerminalTarget::Deployment {
deployment: deployment.id,
},
periphery,
container,
))
}

View File

@@ -43,6 +43,7 @@ async fn app() -> anyhow::Result<()> {
logger::init(&config.logging)?;
let startup_span = info_span!("CoreStartup");
async {
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));

View File

@@ -2,7 +2,8 @@ use std::collections::HashMap;
use anyhow::Context;
use komodo_client::entities::{
resource::ResourceQuery, server::Server, user::User,
permission::PermissionLevel, resource::ResourceQuery,
server::Server, user::system_user,
};
use crate::resource;
@@ -33,10 +34,8 @@ async fn get_all_servers_map()
{
let servers = resource::list_full_for_user::<Server>(
ResourceQuery::default(),
&User {
admin: true,
..Default::default()
},
system_user(),
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -21,6 +21,7 @@ pub mod terminal;
#[derive(Debug)]
pub struct PeripheryClient {
/// Usually the server id
pub id: String,
pub responses: Arc<ResponseChannels>,
pub terminals: Arc<TerminalChannels>,

View File

@@ -7,12 +7,11 @@ use std::{
use anyhow::Context;
use cache::CloneCache;
use futures_util::Stream;
use komodo_client::api::write::TerminalRecreateMode;
use komodo_client::entities::terminal::{
TerminalStdinMessageVariant, TerminalTarget,
};
use periphery_client::{
api::terminal::{
ConnectContainerAttach, ConnectContainerExec, ConnectTerminal,
END_OF_OUTPUT, ExecuteContainerExec, ExecuteTerminal,
},
api::terminal::{ConnectTerminal, END_OF_OUTPUT, ExecuteTerminal},
transport::EncodedTransportMessage,
};
use transport::channel::{Receiver, Sender};
@@ -33,9 +32,10 @@ impl PeripheryClient {
pub async fn connect_terminal(
&self,
terminal: String,
target: TerminalTarget,
) -> anyhow::Result<ConnectTerminalResponse> {
tracing::trace!(
"request | type: ConnectTerminal | terminal name: {terminal}",
"request | type: ConnectTerminal | Terminal: {terminal} | Target: {target:?}",
);
let connection =
@@ -44,7 +44,7 @@ impl PeripheryClient {
)?;
let channel = self
.request(ConnectTerminal { terminal })
.request(ConnectTerminal { terminal, target })
.await
.context("Failed to create terminal connection")?;
@@ -53,94 +53,13 @@ impl PeripheryClient {
connection
.sender
.send_terminal(channel, Ok(Vec::with_capacity(17))) // 16 bytes uuid + 1 EncodedResponse
.send_terminal(
channel,
Ok(vec![TerminalStdinMessageVariant::Begin.as_byte()]),
)
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
)?;
Ok(ConnectTerminalResponse {
channel,
sender: connection.sender.clone(),
receiver,
})
}
#[instrument("ConnectContainerExec", skip(self), fields(server_id = self.id))]
pub async fn connect_container_exec(
&self,
container: String,
shell: String,
recreate: TerminalRecreateMode,
) -> anyhow::Result<ConnectTerminalResponse> {
tracing::trace!(
"request | type: ConnectContainerExec | container name: {container} | shell: {shell}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let channel = self
.request(ConnectContainerExec {
container,
shell,
recreate,
})
.await
.context("Failed to create container exec connection")?;
let (sender, receiver) = transport::channel::channel();
connection.terminals.insert(channel, sender).await;
connection
.sender
.send_terminal(channel, Ok(Vec::with_capacity(17)))
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
)?;
Ok(ConnectTerminalResponse {
channel,
sender: connection.sender.clone(),
receiver,
})
}
#[instrument("ConnectContainerAttach", skip(self), fields(server_id = self.id))]
pub async fn connect_container_attach(
&self,
container: String,
recreate: TerminalRecreateMode,
) -> anyhow::Result<ConnectTerminalResponse> {
tracing::trace!(
"request | type: ConnectContainerAttach | container name: {container}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let channel = self
.request(ConnectContainerAttach {
container,
recreate,
})
.await
.context("Failed to create container attach connection")?;
let (sender, receiver) = transport::channel::channel();
connection.terminals.insert(channel, sender).await;
connection
.sender
.send_terminal(channel, Ok(Vec::with_capacity(17)))
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
"Failed to send TerminalMessage Begin byte to begin forwarding.",
)?;
Ok(ConnectTerminalResponse {
@@ -166,13 +85,14 @@ impl PeripheryClient {
#[instrument("ExecuteTerminal", skip(self), fields(server_id = self.id))]
pub async fn execute_terminal(
&self,
target: TerminalTarget,
terminal: String,
command: String,
) -> anyhow::Result<
impl Stream<Item = anyhow::Result<Vec<u8>>> + 'static,
> {
trace!(
"sending request | type: ExecuteTerminal | terminal name: {terminal} | command: {command}",
"sending request | type: ExecuteTerminal | {target:?} | terminal name: {terminal} | command: {command}",
);
let connection =
@@ -181,65 +101,10 @@ impl PeripheryClient {
)?;
let channel = self
.request(ExecuteTerminal { terminal, command })
.await
.context("Failed to create execute terminal connection")?;
let (terminal_sender, terminal_receiver) =
transport::channel::channel();
connection.terminals.insert(channel, terminal_sender).await;
connection
.sender
.send_terminal(channel, Ok(Vec::with_capacity(17)))
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
)?;
Ok(ReceiverStream {
channel,
receiver: terminal_receiver,
channels: connection.terminals.clone(),
})
}
/// Executes command on specified container,
/// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
/// sentinal value as the expected final line of the stream.
///
/// Example final line:
/// ```text
/// __KOMODO_EXIT_CODE:0
/// ```
///
/// This means the command exited with code 0 (success).
///
/// If this value is NOT the final item before stream closes, it means
/// the container shell exited mid command, before giving status. Example: running `exit`.
#[instrument("ExecuteContainerExec", skip(self), fields(server_id = self.id))]
pub async fn execute_container_exec(
&self,
container: String,
shell: String,
command: String,
recreate: TerminalRecreateMode,
) -> anyhow::Result<ReceiverStream> {
tracing::trace!(
"sending request | type: ExecuteContainerExec | container: {container} | shell: {shell} | command: {command}",
);
let connection =
periphery_connections().get(&self.id).await.with_context(
|| format!("No connection found for server {}", self.id),
)?;
let channel = self
.request(ExecuteContainerExec {
container,
shell,
.request(ExecuteTerminal {
terminal,
target,
command,
recreate,
})
.await
.context("Failed to create execute terminal connection")?;
@@ -248,12 +113,16 @@ impl PeripheryClient {
transport::channel::channel();
connection.terminals.insert(channel, terminal_sender).await;
// Trigger forwarding to begin now that forwarding channel is ready.
// This is required to not miss messages.
connection
.sender
.send_terminal(channel, Ok(Vec::with_capacity(17)))
.await?;
.send_terminal(
channel,
Ok(vec![TerminalStdinMessageVariant::Begin.as_byte()]),
)
.await
.context(
"Failed to send TerminalTrigger to begin forwarding.",
)?;
Ok(ReceiverStream {
channel,

View File

@@ -1,10 +1,12 @@
use std::collections::HashSet;
use std::collections::HashMap;
use anyhow::{Context, anyhow};
use database::mongo_indexed::doc;
use database::mungos::find::find_collect;
use database::{bson::Document, mongo_indexed::doc};
use futures_util::{FutureExt, future::BoxFuture};
use indexmap::IndexSet;
use komodo_client::entities::ResourceTarget;
use komodo_client::entities::permission::SpecificPermission;
use komodo_client::{
api::read::GetPermission,
entities::{
@@ -15,6 +17,7 @@ use komodo_client::{
};
use resolver_api::Resolve;
use crate::resource::list_all_resources;
use crate::{
api::read::ReadArgs,
config::core_config,
@@ -162,68 +165,186 @@ pub fn get_user_permission_on_resource<'a, T: KomodoResource>(
})
}
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
pub async fn get_resource_ids_for_user<T: KomodoResource>(
pub async fn list_resources_for_user<T: KomodoResource>(
filters: impl Into<Option<Document>>,
user: &User,
) -> anyhow::Result<Option<Vec<String>>> {
// Check admin or transparent mode
if user.admin || core_config().transparent_mode {
return Ok(None);
permission: PermissionLevelAndSpecifics,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
// Check admin
if user.admin {
return list_all_resources::<T>(filters).await;
}
let mut base = PermissionLevelAndSpecifics {
level: if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
},
specific: Default::default(),
};
// 'transparent_mode' early return.
if base.fulfills(&permission) {
return list_all_resources::<T>(filters).await;
}
let resource_type = T::resource_type();
// Check user 'all' on variant
if let Some(permission) = user.all.get(&resource_type).cloned()
&& permission.level > PermissionLevel::None
{
return Ok(None);
if let Some(all_permission) = user.all.get(&resource_type) {
base.elevate(all_permission);
// 'user.all' early return.
if base.fulfills(&permission) {
return list_all_resources::<T>(filters).await;
}
}
// Check user groups 'all' on variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(permission) = group.all.get(&resource_type).cloned()
&& permission.level > PermissionLevel::None
{
return Ok(None);
if let Some(all_permission) = group.all.get(&resource_type) {
base.elevate(all_permission);
// 'group.all' early return.
if base.fulfills(&permission) {
return list_all_resources::<T>(filters).await;
}
}
}
let (base, perms) = tokio::try_join!(
// Get any resources with non-none base permission,
find_collect(
T::coll(),
doc! { "$or": [
{ "base_permission": { "$in": ["Read", "Execute", "Write"] } },
{ "base_permission.level": { "$in": ["Read", "Execute", "Write"] } }
] },
None,
)
.map(|res| res.with_context(|| format!(
"failed to query {resource_type} on db"
))),
let (all, permissions) = tokio::try_join!(
list_all_resources::<T>(filters),
// And any ids using the permissions table
find_collect(
&db_client().permissions,
doc! {
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"level": { "$in": ["Read", "Execute", "Write"] }
},
None,
)
.map(|res| res.context("failed to query permissions on db"))
)?;
// Add specific ids
let ids = perms
let permission_by_resource_id = permissions
.into_iter()
.map(|p| p.resource_target.extract_variant_id().1.to_string())
// Chain in the ones with non-None base permissions
.chain(base.into_iter().map(|res| res.id))
// collect into hashset first to remove any duplicates
.collect::<HashSet<_>>();
.map(|perm| {
(
perm.resource_target.extract_variant_id().1.to_string(),
perm,
)
})
.collect::<HashMap<_, _>>();
Ok(Some(ids.into_iter().collect()))
let mut resources = Vec::new();
let mut additional_specific_cache =
HashMap::<ResourceTarget, IndexSet<SpecificPermission>>::new();
for resource in all {
let mut perm = if let Some(perm) =
permission_by_resource_id.get(&resource.id)
{
base.join(perm)
} else {
base.clone()
};
// Check if already fulfils
if perm.fulfills(&permission) {
resources.push(resource);
continue;
}
// Also check if fulfills with inherited specific
let additional_target = if let Some(additional_target) =
T::inherit_specific_permissions_from(&resource)
&& !additional_target.is_empty()
{
additional_target
} else {
continue;
};
let additional_specific = match additional_specific_cache
.get(&additional_target)
.cloned()
{
Some(specific) => specific,
None => {
let specific = GetPermission {
target: additional_target.clone(),
}
.resolve(&ReadArgs { user: user.clone() })
.await
.map_err(|e| e.error)
.context(
"failed to get user permission on additional target",
)?
.specific;
additional_specific_cache
.insert(additional_target, specific.clone());
specific
}
};
perm.specific.extend(additional_specific);
if perm.fulfills(&permission) {
resources.push(resource);
}
}
Ok(resources)
}
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
pub async fn list_resource_ids_for_user<T: KomodoResource>(
filters: Option<Document>,
user: &User,
permission: PermissionLevelAndSpecifics,
) -> anyhow::Result<Option<Vec<String>>> {
// Check admin
if user.admin {
return Ok(None);
}
let mut base = PermissionLevelAndSpecifics {
level: if core_config().transparent_mode {
PermissionLevel::Read
} else {
PermissionLevel::None
},
specific: Default::default(),
};
// 'transparent_mode' early return.
if base.fulfills(&permission) {
return Ok(None);
}
let resource_type = T::resource_type();
if let Some(all) = user.all.get(&resource_type) {
base.elevate(all);
// 'user.all' early return.
if base.fulfills(&permission) {
return Ok(None);
}
}
// Check user groups 'all' on variant
let groups = get_user_user_groups(&user.id).await?;
for group in &groups {
if let Some(all) = group.all.get(&resource_type) {
base.elevate(all);
// 'group.all' early return.
if base.fulfills(&permission) {
return Ok(None);
}
}
}
let ids = list_resources_for_user::<T>(filters, user, permission)
.await?
.into_iter()
.map(|resource| resource.id)
.collect();
Ok(Some(ids))
}

View File

@@ -21,7 +21,10 @@ use komodo_client::{
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
komodo_timestamp,
permission::{PermissionLevel, SpecificPermission},
permission::{
PermissionLevel, PermissionLevelAndSpecifics,
SpecificPermission,
},
resource::{AddFilters, Resource, ResourceQuery},
tag::Tag,
to_general_name,
@@ -43,7 +46,7 @@ use crate::{
query::{get_tag, id_or_name_filter},
update::{add_update, make_update},
},
permission::{get_check_permissions, get_resource_ids_for_user},
permission::{get_check_permissions, list_resources_for_user},
state::db_client,
};
@@ -251,30 +254,31 @@ pub async fn get<T: KomodoResource>(
// LIST
// ======
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
pub async fn get_resource_object_ids_for_user<T: KomodoResource>(
user: &User,
) -> anyhow::Result<Option<Vec<ObjectId>>> {
get_resource_ids_for_user::<T>(user).await.map(|ids| {
ids.map(|ids| {
ids
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect()
})
/// Get full resource list with no permissions check.
pub async fn list_all_resources<T: KomodoResource>(
filters: impl Into<Option<Document>>,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
find_collect(
T::coll(),
filters,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.with_context(|| {
format!("Failed to pull {}s from mongo", T::resource_type())
})
}
pub async fn list_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
// permissions: PermissionLevelAndSpecifics,
permission: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_for_user_using_document::<T>(filters, user).await
list_for_user_using_document::<T>(filters, user, permission).await
}
// // pub async fn list_for_user_using_pattern<T: KomodoResource>(
@@ -300,9 +304,9 @@ pub async fn list_for_user<T: KomodoResource>(
pub async fn list_for_user_using_document<T: KomodoResource>(
filters: Document,
user: &User,
// permissions: PermissionLevelAndSpecifics,
permission: PermissionLevelAndSpecifics,
) -> anyhow::Result<Vec<T::ListItem>> {
let list = list_full_for_user_using_document::<T>(filters, user)
let list = list_resources_for_user::<T>(filters, user, permission)
.await?
.into_iter()
.map(|resource| T::to_list_item(resource));
@@ -321,10 +325,12 @@ pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
pattern: &str,
query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permission: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
let resources =
list_full_for_user::<T>(query, user, all_tags).await?;
list_full_for_user::<T>(query, user, permission, all_tags)
.await?;
let patterns = parse_string_list(pattern);
let mut names = HashSet::<String>::new();
@@ -360,32 +366,13 @@ pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
pub async fn list_full_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permissions: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_full_for_user_using_document::<T>(filters, user).await
}
pub async fn list_full_for_user_using_document<T: KomodoResource>(
mut filters: Document,
user: &User,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
if let Some(ids) =
get_resource_object_ids_for_user::<T>(user).await?
{
filters.insert("_id", doc! { "$in": ids });
}
find_collect(
T::coll(),
filters,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.with_context(|| {
format!("Failed to pull {}s from mongo", T::resource_type())
})
list_resources_for_user::<T>(filters, user, permissions).await
}
pub type IdResourceMap<T> = HashMap<
@@ -477,11 +464,13 @@ pub async fn create<T: KomodoResource>(
// Ensure an existing resource with same name doesn't already exist
// The database indexing also ensures this but doesn't give a good error message.
if list_full_for_user::<T>(Default::default(), system_user(), &[])
if T::coll()
.find_one(doc! { "name": &name })
.await
.context("Failed to list all resources for duplicate name check")?
.into_iter()
.any(|r| r.name == name)
.context(
"Failed to check existing resources for duplicate name check",
)?
.is_some()
{
return Err(
anyhow!("Resource with name '{}' already exists", name)

View File

@@ -1,106 +0,0 @@
use axum::{
extract::{Query, WebSocketUpgrade, ws::Message},
response::IntoResponse,
};
use futures_util::SinkExt;
use komodo_client::{
api::{
terminal::{
ConnectContainerAttachQuery, ConnectContainerExecQuery,
},
write::TerminalRecreateMode,
},
entities::{permission::PermissionLevel, server::Server},
};
use crate::permission::get_check_permissions;
#[instrument("ConnectContainerExec", skip(ws))]
pub async fn exec(
Query(ConnectContainerExecQuery {
server,
container,
shell,
recreate,
}): Query<ConnectContainerExecQuery>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(async move |socket| {
let Some((mut client_socket, user)) =
super::user_ws_login(socket).await
else {
return;
};
let server = match get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await
{
Ok(server) => server,
Err(e) => {
debug!("could not get server | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
super::handle_container_exec_terminal(
client_socket,
&server,
container,
shell,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
})
}
#[instrument("ConnectContainerAttach", skip(ws))]
pub async fn attach(
Query(ConnectContainerAttachQuery {
server,
container,
recreate,
}): Query<ConnectContainerAttachQuery>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(async move |socket| {
let Some((mut client_socket, user)) =
super::user_ws_login(socket).await
else {
return;
};
let server = match get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await
{
Ok(server) => server,
Err(e) => {
debug!("could not get server | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
super::handle_container_attach_terminal(
client_socket,
&server,
container,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
})
}

View File

@@ -1,133 +0,0 @@
use axum::{
extract::{Query, WebSocketUpgrade, ws::Message},
response::IntoResponse,
};
use futures_util::SinkExt;
use komodo_client::{
api::{
terminal::{
ConnectDeploymentAttachQuery, ConnectDeploymentExecQuery,
},
write::TerminalRecreateMode,
},
entities::{
deployment::Deployment, permission::PermissionLevel,
server::Server,
},
};
use crate::{permission::get_check_permissions, resource::get};
#[instrument("ConnectDeploymentExec", skip(ws))]
pub async fn exec(
Query(ConnectDeploymentExecQuery {
deployment,
shell,
recreate,
}): Query<ConnectDeploymentExecQuery>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(async move |socket| {
let Some((mut client_socket, user)) =
super::user_ws_login(socket).await
else {
return;
};
let deployment = match get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read.terminal(),
)
.await
{
Ok(deployment) => deployment,
Err(e) => {
debug!("could not get deployment | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
let server =
match get::<Server>(&deployment.config.server_id).await {
Ok(server) => server,
Err(e) => {
debug!("could not get server | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
super::handle_container_exec_terminal(
client_socket,
&server,
deployment.name,
shell,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
})
}
#[instrument("ConnectDeploymentAttach", skip(ws))]
pub async fn attach(
Query(ConnectDeploymentAttachQuery {
deployment,
recreate,
}): Query<ConnectDeploymentAttachQuery>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(async move |socket| {
let Some((mut client_socket, user)) =
super::user_ws_login(socket).await
else {
return;
};
let deployment = match get_check_permissions::<Deployment>(
&deployment,
&user,
PermissionLevel::Read.terminal(),
)
.await
{
Ok(deployment) => deployment,
Err(e) => {
debug!("could not get deployment | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
let server =
match get::<Server>(&deployment.config.server_id).await {
Ok(server) => server,
Err(e) => {
debug!("could not get server | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
super::handle_container_attach_terminal(
client_socket,
&server,
deployment.name,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
})
}

View File

@@ -1,8 +1,6 @@
use crate::{
auth::{auth_api_key_check_enabled, auth_jwt_check_enabled},
helpers::query::get_user,
periphery::{PeripheryClient, terminal::ConnectTerminalResponse},
state::periphery_connections,
};
use anyhow::anyhow;
use axum::{
@@ -10,19 +8,9 @@ use axum::{
extract::ws::{self, WebSocket},
routing::get,
};
use bytes::Bytes;
use futures_util::{SinkExt, StreamExt};
use komodo_client::{
api::write::TerminalRecreateMode,
entities::{server::Server, user::User},
ws::WsLoginMessage,
};
use periphery_client::api::terminal::DisconnectTerminal;
use tokio_util::sync::CancellationToken;
use futures_util::SinkExt;
use komodo_client::{entities::user::User, ws::WsLoginMessage};
mod container;
mod deployment;
mod stack;
mod terminal;
mod update;
@@ -33,12 +21,6 @@ pub fn router() -> Router {
// User facing
.route("/update", get(update::handler))
.route("/terminal", get(terminal::handler))
.route("/container/terminal", get(container::exec))
.route("/container/terminal/attach", get(container::attach))
.route("/deployment/terminal", get(deployment::exec))
.route("/deployment/terminal/attach", get(deployment::attach))
.route("/stack/terminal", get(stack::exec))
.route("/stack/terminal/attach", get(stack::attach))
}
async fn user_ws_login(
@@ -129,221 +111,3 @@ async fn check_user_valid(user_id: &str) -> anyhow::Result<User> {
}
Ok(user)
}
async fn handle_container_exec_terminal(
mut client_socket: WebSocket,
server: &Server,
container: String,
shell: String,
recreate: TerminalRecreateMode,
) {
let periphery = match crate::helpers::periphery_client(server).await
{
Ok(periphery) => periphery,
Err(e) => {
debug!("couldn't get periphery | {e:#}");
let _ = client_socket
.send(ws::Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connecting to periphery container exec websocket");
let response = match periphery
.connect_container_exec(container, shell, recreate)
.await
{
Ok(ws) => ws,
Err(e) => {
debug!(
"Failed connect to periphery container exec websocket | {e:#}"
);
let _ = client_socket
.send(ws::Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connected to periphery container exec websocket");
forward_ws_channel(periphery, client_socket, response).await
}
async fn handle_container_attach_terminal(
mut client_socket: WebSocket,
server: &Server,
container: String,
recreate: TerminalRecreateMode,
) {
let periphery = match crate::helpers::periphery_client(server).await
{
Ok(periphery) => periphery,
Err(e) => {
debug!("couldn't get periphery | {e:#}");
let _ = client_socket
.send(ws::Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connecting to periphery container exec websocket");
let response = match periphery
.connect_container_attach(container, recreate)
.await
{
Ok(ws) => ws,
Err(e) => {
debug!(
"Failed connect to periphery container attach websocket | {e:#}"
);
let _ = client_socket
.send(ws::Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connected to periphery container attach websocket");
forward_ws_channel(periphery, client_socket, response).await
}
async fn forward_ws_channel(
periphery: PeripheryClient,
client_socket: axum::extract::ws::WebSocket,
ConnectTerminalResponse {
channel,
sender: periphery_sender,
receiver: mut periphery_receiver,
}: ConnectTerminalResponse,
) {
let (mut client_send, mut client_receive) = client_socket.split();
let cancel = CancellationToken::new();
periphery_receiver.set_cancel(cancel.clone());
trace!("starting ws exchange");
let core_to_periphery = async {
loop {
let client_recv_res = tokio::select! {
res = client_receive.next() => res,
_ = cancel.cancelled() => {
trace!("core to periphery read: cancelled from inside");
break;
}
};
match client_recv_res {
Some(Ok(ws::Message::Binary(bytes))) => {
if let Err(e) = periphery_sender
.send_terminal(channel, Ok(bytes.into()))
.await
{
debug!("Failed to send terminal message | {e:?}",);
cancel.cancel();
break;
};
}
Some(Ok(ws::Message::Text(text))) => {
let bytes: Bytes = text.into();
if let Err(e) = periphery_sender
.send_terminal(channel, Ok(bytes.into()))
.await
{
debug!("Failed to send terminal message | {e:?}",);
cancel.cancel();
break;
};
}
Some(Ok(ws::Message::Close(_frame))) => {
let _ = periphery_sender
.send_terminal(
channel,
Err(anyhow!("Client disconnected")),
)
.await;
cancel.cancel();
break;
}
Some(Err(_e)) => {
let _ = periphery_sender
.send_terminal(
channel,
Err(anyhow!("Client disconnected")),
)
.await;
cancel.cancel();
break;
}
None => {
let _ = periphery_sender
.send_terminal(
channel,
Err(anyhow!("Client disconnected")),
)
.await;
cancel.cancel();
break;
}
// Ignore
Some(Ok(_)) => {}
}
}
};
let periphery_to_core = async {
loop {
// Already adheres to cancellation token
match periphery_receiver.recv().await {
Ok(Ok(bytes)) => {
if let Err(e) =
client_send.send(ws::Message::Binary(bytes.into())).await
{
debug!("{e:?}");
cancel.cancel();
break;
};
}
Ok(Err(e)) => {
let _ = client_send
.send(ws::Message::Text(format!("{e:#}").into()))
.await;
let _ = client_send.close().await;
cancel.cancel();
break;
}
Err(_) => {
let _ =
client_send.send(ws::Message::text("STREAM EOF")).await;
cancel.cancel();
break;
}
}
}
};
tokio::join!(core_to_periphery, periphery_to_core);
// Cleanup
if let Err(e) =
periphery.request(DisconnectTerminal { channel }).await
{
warn!(
"Failed to disconnect Periphery terminal forwarding | {e:#}",
)
}
if let Some(connection) =
periphery_connections().get(&periphery.id).await
{
connection.terminals.remove(&channel).await;
}
}

View File

@@ -1,153 +0,0 @@
use axum::{
extract::{Query, WebSocketUpgrade, ws::Message},
response::IntoResponse,
};
use futures_util::SinkExt;
use komodo_client::{
api::{
terminal::{ConnectStackAttachQuery, ConnectStackExecQuery},
write::TerminalRecreateMode,
},
entities::{
permission::PermissionLevel, server::Server, stack::Stack,
},
};
use crate::{
permission::get_check_permissions, resource::get,
state::stack_status_cache,
};
#[instrument("ConnectStackExec", skip(ws))]
pub async fn exec(
Query(ConnectStackExecQuery {
stack,
service,
shell,
recreate,
}): Query<ConnectStackExecQuery>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(async move |socket| {
let Some((client_socket, server, container)) =
login_get_server_container(socket, &stack, &service).await
else {
return;
};
super::handle_container_exec_terminal(
client_socket,
&server,
container,
shell,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
})
}
#[instrument("ConnectStackAttach", skip(ws))]
pub async fn attach(
Query(ConnectStackAttachQuery {
stack,
service,
recreate,
}): Query<ConnectStackAttachQuery>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(async move |socket| {
let Some((client_socket, server, container)) =
login_get_server_container(socket, &stack, &service).await
else {
return;
};
super::handle_container_attach_terminal(
client_socket,
&server,
container,
recreate.unwrap_or(TerminalRecreateMode::DifferentCommand),
)
.await
})
}
async fn login_get_server_container(
socket: axum::extract::ws::WebSocket,
stack: &str,
service: &str,
) -> Option<(axum::extract::ws::WebSocket, Server, String)> {
let (mut client_socket, user) =
super::user_ws_login(socket).await?;
let stack = match get_check_permissions::<Stack>(
stack,
&user,
PermissionLevel::Read.terminal(),
)
.await
{
Ok(stack) => stack,
Err(e) => {
debug!("could not get stack | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return None;
}
};
let server = match get::<Server>(&stack.config.server_id).await {
Ok(server) => server,
Err(e) => {
debug!("could not get server | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return None;
}
};
let Some(status) = stack_status_cache().get(&stack.id).await else {
debug!("could not get stack status");
let _ = client_socket
.send(Message::text(String::from(
"ERROR: could not get stack status",
)))
.await;
let _ = client_socket.close().await;
return None;
};
let container = match status
.curr
.services
.iter()
.find(|s| s.service == service)
.map(|s| s.container.as_ref())
{
Some(Some(container)) => container.name.clone(),
Some(None) => {
let _ = client_socket
.send(Message::text(format!(
"ERROR: Service {service} container could not be found"
)))
.await;
let _ = client_socket.close().await;
return None;
}
None => {
let _ = client_socket
.send(Message::text(format!(
"ERROR: Service {service} could not be found"
)))
.await;
let _ = client_socket.close().await;
return None;
}
};
Some((client_socket, server, container))
}

View File

@@ -1,23 +1,27 @@
use anyhow::anyhow;
use axum::{
extract::{Query, WebSocketUpgrade, ws::Message},
extract::{FromRequestParts, WebSocketUpgrade, ws},
http::request,
response::IntoResponse,
};
use futures_util::SinkExt;
use bytes::Bytes;
use futures_util::{SinkExt, StreamExt as _};
use komodo_client::{
api::terminal::ConnectTerminalQuery,
entities::{permission::PermissionLevel, server::Server},
api::terminal::ConnectTerminalQuery, entities::user::User,
};
use periphery_client::api::terminal::DisconnectTerminal;
use serde::de::DeserializeOwned;
use tokio_util::sync::CancellationToken;
use crate::{
helpers::periphery_client, permission::get_check_permissions,
ws::forward_ws_channel,
helpers::terminal::setup_target_for_user,
periphery::{PeripheryClient, terminal::ConnectTerminalResponse},
state::periphery_connections,
};
#[instrument("ConnectTerminal", skip(ws))]
pub async fn handler(
Query(ConnectTerminalQuery { server, terminal }): Query<
ConnectTerminalQuery,
>,
Qs(query): Qs<ConnectTerminalQuery>,
ws: WebSocketUpgrade,
) -> impl IntoResponse {
ws.on_upgrade(|socket| async move {
@@ -27,52 +31,153 @@ pub async fn handler(
return;
};
let server = match get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read.terminal(),
)
.await
{
Ok(server) => server,
Err(e) => {
debug!("could not get server | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
let periphery = match periphery_client(&server).await {
Ok(periphery) => periphery,
Err(e) => {
debug!("couldn't get periphery | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connecting to periphery terminal websocket");
let response = match periphery.connect_terminal(terminal).await {
Ok(ws) => ws,
Err(e) => {
debug!("Failed connect to periphery terminal | {e:#}");
let _ = client_socket
.send(Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
trace!("connected to periphery terminal websocket");
let (periphery, response) =
match setup_forwarding(query, &user).await {
Ok(response) => response,
Err(e) => {
let _ = client_socket
.send(ws::Message::text(format!("ERROR: {e:#}")))
.await;
let _ = client_socket.close().await;
return;
}
};
forward_ws_channel(periphery, client_socket, response).await
})
}
async fn setup_forwarding(
ConnectTerminalQuery {
target,
terminal,
init,
}: ConnectTerminalQuery,
user: &User,
) -> anyhow::Result<(PeripheryClient, ConnectTerminalResponse)> {
let (target, terminal, periphery) =
setup_target_for_user(target, terminal, init, user).await?;
let response = periphery.connect_terminal(terminal, target).await?;
Ok((periphery, response))
}
async fn forward_ws_channel(
periphery: PeripheryClient,
client_socket: axum::extract::ws::WebSocket,
ConnectTerminalResponse {
channel,
sender: periphery_sender,
receiver: mut periphery_receiver,
}: ConnectTerminalResponse,
) {
let (mut client_send, mut client_receive) = client_socket.split();
let cancel = CancellationToken::new();
periphery_receiver.set_cancel(cancel.clone());
trace!("starting ws exchange");
let core_to_periphery = async {
loop {
let client_recv_res = tokio::select! {
res = client_receive.next() => res,
_ = cancel.cancelled() => break,
};
let bytes = match client_recv_res {
Some(Ok(ws::Message::Binary(bytes))) => bytes.into(),
Some(Ok(ws::Message::Text(text))) => {
let bytes: Bytes = text.into();
bytes.into()
}
Some(Ok(ws::Message::Close(_frame))) => {
break;
}
Some(Err(_e)) => {
break;
}
None => {
break;
}
// Ignore
Some(Ok(_)) => continue,
};
if let Err(_e) =
periphery_sender.send_terminal(channel, Ok(bytes)).await
{
break;
};
}
cancel.cancel();
let _ = periphery_sender
.send_terminal(channel, Err(anyhow!("Client disconnected")))
.await;
};
let periphery_to_core = async {
loop {
// Already adheres to cancellation token
match periphery_receiver.recv().await {
Ok(Ok(bytes)) => {
if let Err(e) =
client_send.send(ws::Message::Binary(bytes.into())).await
{
debug!("{e:?}");
break;
};
}
Ok(Err(e)) => {
let _ = client_send
.send(ws::Message::text(format!("{e:#}")))
.await;
break;
}
Err(_) => {
let _ =
client_send.send(ws::Message::text("STREAM EOF")).await;
break;
}
}
}
let _ = client_send.close().await;
cancel.cancel();
};
tokio::join!(core_to_periphery, periphery_to_core);
// Cleanup
if let Err(e) =
periphery.request(DisconnectTerminal { channel }).await
{
warn!(
"Failed to disconnect Periphery terminal forwarding | {e:#}",
)
}
if let Some(connection) =
periphery_connections().get(&periphery.id).await
{
connection.terminals.remove(&channel).await;
}
}
pub struct Qs<T>(pub T);
impl<S, T> FromRequestParts<S> for Qs<T>
where
S: Send + Sync,
T: DeserializeOwned,
{
type Rejection = axum::response::Response;
async fn from_request_parts(
parts: &mut request::Parts,
_state: &S,
) -> Result<Self, Self::Rejection> {
let raw = parts.uri.query().unwrap_or_default();
serde_qs::from_str::<T>(raw).map(Qs).map_err(|e| {
axum::response::IntoResponse::into_response((
axum::http::StatusCode::BAD_REQUEST,
format!("Failed to parse request query: {e}"),
))
})
}
}

View File

@@ -35,7 +35,6 @@ serror = { workspace = true, features = ["axum"] }
async_timing_util.workspace = true
derive_variants.workspace = true
resolver_api.workspace = true
run_command.workspace = true
# external
hickory-resolver.workspace = true
serde_yaml_ng.workspace = true
@@ -55,9 +54,10 @@ sysinfo.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
rustls.workspace = true
tokio.workspace = true
serde.workspace = true
bytes.workspace = true
serde.workspace = true
shlex.workspace = true
tokio.workspace = true
axum.workspace = true
clap.workspace = true
envy.workspace = true

View File

@@ -1,6 +1,6 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
FROM rust:1.90.0-bullseye AS builder
FROM rust:1.90.0-trixie AS builder
RUN cargo install cargo-strip
WORKDIR /builder
@@ -14,7 +14,7 @@ COPY ./bin/periphery ./bin/periphery
RUN cargo build -p komodo_periphery --release && cargo strip
# Final Image
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/periphery/starship.toml /starship.toml
COPY ./bin/periphery/debian-deps.sh .

View File

@@ -10,7 +10,7 @@ ARG AARCH64_BINARIES=${BINARIES_IMAGE}-aarch64
FROM ${X86_64_BINARIES} AS x86_64
FROM ${AARCH64_BINARIES} AS aarch64
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/periphery/starship.toml /starship.toml
COPY ./bin/periphery/debian-deps.sh .

View File

@@ -6,7 +6,7 @@ ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
# This is required to work with COPY --from
FROM ${BINARIES_IMAGE} AS binaries
FROM debian:bullseye-slim
FROM debian:trixie-slim
COPY ./bin/periphery/starship.toml /starship.toml
COPY ./bin/periphery/debian-deps.sh .

View File

@@ -5,7 +5,8 @@ use std::{
use anyhow::{Context, anyhow};
use command::{
run_komodo_command, run_komodo_command_with_sanitization,
KomodoCommandMode, run_komodo_command_with_sanitization,
run_komodo_standard_command,
};
use formatting::format_serror;
use interpolate::Interpolator;
@@ -267,7 +268,7 @@ impl Resolve<super::Args> for build::Build {
"Pre Build",
pre_build_path.as_path(),
&pre_build.command,
true,
KomodoCommandMode::Multiline,
&replacers,
)
.instrument(span)
@@ -323,7 +324,7 @@ impl Resolve<super::Args> for build::Build {
"Docker Build",
build_path.as_ref(),
command,
false,
KomodoCommandMode::Shell,
&replacers,
)
.instrument(span)
@@ -349,7 +350,10 @@ impl Resolve<super::Args> for PruneBuilders {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = String::from("docker builder prune -a -f");
Ok(run_komodo_command("Prune Builders", None, command).await)
Ok(
run_komodo_standard_command("Prune Builders", None, command)
.await,
)
}
}
@@ -366,6 +370,9 @@ impl Resolve<super::Args> for PruneBuildx {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = String::from("docker buildx prune -a -f");
Ok(run_komodo_command("Prune Buildx", None, command).await)
Ok(
run_komodo_standard_command("Prune Buildx", None, command)
.await,
)
}
}

View File

@@ -4,7 +4,7 @@ use std::{
};
use anyhow::{Context, anyhow};
use command::run_komodo_command;
use command::run_komodo_standard_command;
use formatting::format_serror;
use komodo_client::entities::{
FileContents, RepoExecutionArgs,
@@ -99,7 +99,7 @@ pub async fn compose_down(
} else {
format!(" {}", services.join(" "))
};
let log = run_komodo_command(
let log = run_komodo_standard_command(
"Compose Down",
None,
format!("{docker_compose} -p {project} down{service_args}"),

View File

@@ -2,7 +2,8 @@ use std::{borrow::Cow, path::PathBuf};
use anyhow::{Context, anyhow};
use command::{
run_komodo_command, run_komodo_command_with_sanitization,
KomodoCommandMode, run_komodo_command_with_sanitization,
run_komodo_shell_command, run_komodo_standard_command,
};
use formatting::format_serror;
use git::write_commit_file;
@@ -43,7 +44,7 @@ fn docker_compose() -> &'static str {
pub async fn list_compose_projects()
-> anyhow::Result<Vec<ComposeProject>> {
let docker_compose = docker_compose();
let res = run_komodo_command(
let res = run_komodo_standard_command(
"List Projects",
None,
format!("{docker_compose} ls --all --format json"),
@@ -111,7 +112,10 @@ impl Resolve<super::Args> for GetComposeLog {
"{docker_compose} -p {project} logs --tail {tail}{timestamps} {}",
services.join(" ")
);
Ok(run_komodo_command("get stack log", None, command).await)
Ok(
run_komodo_standard_command("get stack log", None, command)
.await,
)
}
}
@@ -136,7 +140,10 @@ impl Resolve<super::Args> for GetComposeLogSearch {
"{docker_compose} -p {project} logs --tail 5000{timestamps} {} 2>&1 | {grep}",
services.join(" ")
);
Ok(run_komodo_command("Get stack log grep", None, command).await)
Ok(
run_komodo_shell_command("Get stack log grep", None, command)
.await,
)
}
}
@@ -395,7 +402,7 @@ impl Resolve<super::Args> for ComposePull {
let project_name = stack.project_name(false);
let span = info_span!("RunComposePull");
let log = run_komodo_command(
let log = run_komodo_standard_command(
"Compose Pull",
run_directory.as_ref(),
format!(
@@ -492,7 +499,7 @@ impl Resolve<super::Args> for ComposeUp {
"Pre Deploy",
pre_deploy_path.as_path(),
&stack.config.pre_deploy.command,
true,
KomodoCommandMode::Multiline,
&replacers,
)
.instrument(span)
@@ -536,7 +543,7 @@ impl Resolve<super::Args> for ComposeUp {
"Compose Config",
run_directory.as_path(),
command,
false,
KomodoCommandMode::Standard,
&replacers,
)
.instrument(span)
@@ -603,7 +610,7 @@ impl Resolve<super::Args> for ComposeUp {
"Compose Build",
run_directory.as_path(),
command,
false,
KomodoCommandMode::Shell,
&replacers,
)
.instrument(span)
@@ -625,7 +632,7 @@ impl Resolve<super::Args> for ComposeUp {
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} pull{service_args}",
);
let span = info_span!("RunComposePull");
let log = run_komodo_command(
let log = run_komodo_standard_command(
"Compose Pull",
run_directory.as_ref(),
command,
@@ -660,7 +667,7 @@ impl Resolve<super::Args> for ComposeUp {
"Compose Up",
run_directory.as_path(),
command,
false,
KomodoCommandMode::Shell,
&replacers,
)
.instrument(span)
@@ -680,7 +687,7 @@ impl Resolve<super::Args> for ComposeUp {
"Post Deploy",
post_deploy_path.as_path(),
&stack.config.post_deploy.command,
true,
KomodoCommandMode::Multiline,
&replacers,
)
.instrument(span)
@@ -710,7 +717,7 @@ impl Resolve<super::Args> for ComposeExecution {
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let ComposeExecution { project, command } = self;
let docker_compose = docker_compose();
let log = run_komodo_command(
let log = run_komodo_standard_command(
"Compose Command",
None,
format!("{docker_compose} -p {project} {command}"),
@@ -804,7 +811,7 @@ impl Resolve<super::Args> for ComposeRun {
let project_name = stack.project_name(true);
if pull.unwrap_or_default() {
let pull_log = run_komodo_command(
let pull_log = run_komodo_standard_command(
"Compose Pull",
run_directory.as_ref(),
format!(
@@ -867,7 +874,7 @@ impl Resolve<super::Args> for ComposeRun {
"Compose Run",
run_directory.as_path(),
command,
false,
KomodoCommandMode::Shell,
&replacers,
)
.instrument(span)

View File

@@ -1,5 +1,7 @@
use anyhow::Context;
use command::run_komodo_command;
use command::{
run_komodo_shell_command, run_komodo_standard_command,
};
use futures_util::future::join_all;
use komodo_client::entities::{
docker::{
@@ -53,7 +55,10 @@ impl Resolve<super::Args> for GetContainerLog {
};
let command =
format!("docker logs {name} --tail {tail}{timestamps}");
Ok(run_komodo_command("Get container log", None, command).await)
Ok(
run_komodo_standard_command("Get container log", None, command)
.await,
)
}
}
@@ -78,8 +83,12 @@ impl Resolve<super::Args> for GetContainerLogSearch {
"docker logs {name} --tail 5000{timestamps} 2>&1 | {grep}"
);
Ok(
run_komodo_command("Get container log grep", None, command)
.await,
run_komodo_shell_command(
"Get container log grep",
None,
command,
)
.await,
)
}
}
@@ -141,7 +150,7 @@ impl Resolve<super::Args> for StartContainer {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
Ok(
run_komodo_command(
run_komodo_standard_command(
"Docker Start",
None,
format!("docker start {}", self.name),
@@ -165,7 +174,7 @@ impl Resolve<super::Args> for RestartContainer {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
Ok(
run_komodo_command(
run_komodo_standard_command(
"Docker Restart",
None,
format!("docker restart {}", self.name),
@@ -189,7 +198,7 @@ impl Resolve<super::Args> for PauseContainer {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
Ok(
run_komodo_command(
run_komodo_standard_command(
"Docker Pause",
None,
format!("docker pause {}", self.name),
@@ -211,7 +220,7 @@ impl Resolve<super::Args> for UnpauseContainer {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
Ok(
run_komodo_command(
run_komodo_standard_command(
"Docker Unpause",
None,
format!("docker unpause {}", self.name),
@@ -236,11 +245,13 @@ impl Resolve<super::Args> for StopContainer {
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let StopContainer { name, signal, time } = self;
let command = stop_container_command(&name, signal, time);
let log = run_komodo_command("Docker Stop", None, command).await;
let log =
run_komodo_standard_command("Docker Stop", None, command).await;
if log.stderr.contains("unknown flag: --signal") {
let command = stop_container_command(&name, None, time);
let mut log =
run_komodo_command("Docker Stop", None, command).await;
run_komodo_standard_command("Docker Stop", None, command)
.await;
log.stderr = format!(
"old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
@@ -273,16 +284,22 @@ impl Resolve<super::Args> for RemoveContainer {
let stop_command = stop_container_command(&name, signal, time);
let command =
format!("{stop_command} && docker container rm {name}");
let log =
run_komodo_command("Docker Stop and Remove", None, command)
.await;
let log = run_komodo_shell_command(
"Docker Stop and Remove",
None,
command,
)
.await;
if log.stderr.contains("unknown flag: --signal") {
let stop_command = stop_container_command(&name, None, time);
let command =
format!("{stop_command} && docker container rm {name}");
let mut log =
run_komodo_command("Docker Stop and Remove", None, command)
.await;
let mut log = run_komodo_shell_command(
"Docker Stop and Remove",
None,
command,
)
.await;
log.stderr = format!(
"Old docker version: unable to use --signal flag{}",
if !log.stderr.is_empty() {
@@ -317,7 +334,10 @@ impl Resolve<super::Args> for RenameContainer {
new_name,
} = self;
let command = format!("docker rename {curr_name} {new_name}");
Ok(run_komodo_command("Docker Rename", None, command).await)
Ok(
run_komodo_standard_command("Docker Rename", None, command)
.await,
)
}
}
@@ -334,7 +354,10 @@ impl Resolve<super::Args> for PruneContainers {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = String::from("docker container prune -f");
Ok(run_komodo_command("Prune Containers", None, command).await)
Ok(
run_komodo_standard_command("Prune Containers", None, command)
.await,
)
}
}
@@ -371,7 +394,8 @@ impl Resolve<super::Args> for StartAllContainers {
}
let command = format!("docker start {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
run_komodo_standard_command(&command.clone(), None, command)
.await
})
},
);
@@ -412,7 +436,8 @@ impl Resolve<super::Args> for RestartAllContainers {
}
let command = format!("docker restart {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
run_komodo_standard_command(&command.clone(), None, command)
.await
})
},
);
@@ -453,7 +478,8 @@ impl Resolve<super::Args> for PauseAllContainers {
}
let command = format!("docker pause {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
run_komodo_standard_command(&command.clone(), None, command)
.await
})
},
);
@@ -494,7 +520,8 @@ impl Resolve<super::Args> for UnpauseAllContainers {
}
let command = format!("docker unpause {name}");
Some(async move {
run_komodo_command(&command.clone(), None, command).await
run_komodo_standard_command(&command.clone(), None, command)
.await
})
},
);
@@ -534,8 +561,8 @@ impl Resolve<super::Args> for StopAllContainers {
return None;
}
Some(async move {
run_komodo_command(
&format!("docker stop {name}"),
run_komodo_standard_command(
&format!("Docker stop {name}"),
None,
stop_container_command(name, None, None),
)

View File

@@ -1,5 +1,7 @@
use anyhow::Context;
use command::run_komodo_command_with_sanitization;
use command::{
KomodoCommandMode, run_komodo_command_with_sanitization,
};
use formatting::format_serror;
use interpolate::Interpolator;
use komodo_client::{
@@ -102,7 +104,7 @@ impl Resolve<super::Args> for Deploy {
"Docker Run",
None,
command,
false,
KomodoCommandMode::Shell,
&replacers,
)
.instrument(span)

View File

@@ -2,7 +2,7 @@ use std::sync::OnceLock;
use anyhow::Context;
use cache::TimeoutCache;
use command::run_komodo_command;
use command::run_komodo_standard_command;
use komodo_client::entities::{
deployment::extract_registry_domain,
docker::{
@@ -98,7 +98,7 @@ impl Resolve<super::Args> for PullImage {
)
.await?;
anyhow::Ok(
run_komodo_command(
run_komodo_standard_command(
"Docker Pull",
None,
format!("docker pull {name}"),
@@ -130,7 +130,10 @@ impl Resolve<super::Args> for DeleteImage {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = format!("docker image rm {}", self.name);
Ok(run_komodo_command("Delete Image", None, command).await)
Ok(
run_komodo_standard_command("Delete Image", None, command)
.await,
)
}
}
@@ -147,7 +150,10 @@ impl Resolve<super::Args> for PruneImages {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = String::from("docker image prune -a -f");
Ok(run_komodo_command("Prune Images", None, command).await)
Ok(
run_komodo_standard_command("Prune Images", None, command)
.await,
)
}
}
@@ -186,7 +192,10 @@ impl Resolve<super::Args> for CreateNetwork {
None => String::new(),
};
let command = format!("docker network create{driver} {name}");
Ok(run_komodo_command("Create Network", None, command).await)
Ok(
run_komodo_standard_command("Create Network", None, command)
.await,
)
}
}
@@ -204,7 +213,10 @@ impl Resolve<super::Args> for DeleteNetwork {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = format!("docker network rm {}", self.name);
Ok(run_komodo_command("Delete Network", None, command).await)
Ok(
run_komodo_standard_command("Delete Network", None, command)
.await,
)
}
}
@@ -221,7 +233,10 @@ impl Resolve<super::Args> for PruneNetworks {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = String::from("docker network prune -f");
Ok(run_komodo_command("Prune Networks", None, command).await)
Ok(
run_komodo_standard_command("Prune Networks", None, command)
.await,
)
}
}
@@ -254,7 +269,10 @@ impl Resolve<super::Args> for DeleteVolume {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = format!("docker volume rm {}", self.name);
Ok(run_komodo_command("Delete Volume", None, command).await)
Ok(
run_komodo_standard_command("Delete Volume", None, command)
.await,
)
}
}
@@ -271,6 +289,9 @@ impl Resolve<super::Args> for PruneVolumes {
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Log> {
let command = String::from("docker volume prune -a -f");
Ok(run_komodo_command("Prune Volumes", None, command).await)
Ok(
run_komodo_standard_command("Prune Volumes", None, command)
.await,
)
}
}

View File

@@ -1,4 +1,4 @@
use command::run_komodo_command;
use command::run_komodo_standard_command;
use derive_variants::EnumVariants;
use encoding::{EncodedJsonMessage, EncodedResponse};
use futures_util::FutureExt;
@@ -144,15 +144,14 @@ pub enum PeripheryRequest {
// Terminal
ListTerminals(ListTerminals),
CreateTerminal(CreateTerminal),
CreateServerTerminal(CreateServerTerminal),
CreateContainerExecTerminal(CreateContainerExecTerminal),
CreateContainerAttachTerminal(CreateContainerAttachTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
ConnectTerminal(ConnectTerminal),
ConnectContainerExec(ConnectContainerExec),
ConnectContainerAttach(ConnectContainerAttach),
DisconnectTerminal(DisconnectTerminal),
ExecuteTerminal(ExecuteTerminal),
ExecuteContainerExec(ExecuteContainerExec),
// Keys
RotatePrivateKey(RotatePrivateKey),
@@ -311,6 +310,9 @@ impl Resolve<Args> for PruneSystem {
)]
async fn resolve(self, args: &Args) -> anyhow::Result<Log> {
let command = String::from("docker system prune -a -f --volumes");
Ok(run_komodo_command("Prune System", None, command).await)
Ok(
run_komodo_standard_command("Prune System", None, command)
.await,
)
}
}

View File

@@ -1,11 +1,10 @@
use std::{sync::Arc, time::Duration};
use std::sync::Arc;
use anyhow::{Context, anyhow};
use colored::Colorize;
use futures_util::{Stream, StreamExt, TryStreamExt};
use komodo_client::entities::{
ContainerTerminalMode, KOMODO_EXIT_CODE, NoData,
server::TerminalInfo,
KOMODO_EXIT_CODE, NoData,
terminal::{Terminal, TerminalStdinMessage, TerminalTarget},
};
use periphery_client::{
api::terminal::*, transport::EncodedTransportMessage,
@@ -30,17 +29,17 @@ impl Resolve<super::Args> for ListTerminals {
async fn resolve(
self,
_: &super::Args,
) -> anyhow::Result<Vec<TerminalInfo>> {
) -> anyhow::Result<Vec<Terminal>> {
clean_up_terminals().await;
Ok(list_terminals(self.container.as_deref()).await)
Ok(list_terminals(self.target.as_ref()).await)
}
}
//
impl Resolve<super::Args> for CreateTerminal {
impl Resolve<super::Args> for CreateServerTerminal {
#[instrument(
"CreateTerminal",
"CreateServerTerminal",
skip_all,
fields(
id = args.id.to_string(),
@@ -56,12 +55,110 @@ impl Resolve<super::Args> for CreateTerminal {
) -> anyhow::Result<NoData> {
if periphery_config().disable_terminals {
return Err(anyhow!(
"Terminals are disabled in the periphery config"
"Terminals are disabled in the Periphery config"
));
}
create_terminal(self.name, self.command, self.recreate, None)
.await
.map(|_| NoData {})
create_terminal(
self.name,
TerminalTarget::Server { server: None },
self.command,
self.recreate,
)
.await
.map(|_| NoData {})
}
}
//
impl Resolve<super::Args> for CreateContainerExecTerminal {
#[instrument(
"CreateContainerExecTerminal",
skip_all,
fields(
id = args.id.to_string(),
core = args.core,
terminal = self.name,
target = format!("{:?}", self.target),
command = self.command,
recreate = format!("{:?}", self.recreate),
)
)]
async fn resolve(
self,
args: &super::Args,
) -> anyhow::Result<NoData> {
if periphery_config().disable_container_terminals {
return Err(anyhow!(
"Container Terminals are disabled in the Periphery config"
));
}
let CreateContainerExecTerminal {
name,
target,
container,
command,
recreate,
} = self;
let command = command.unwrap_or_else(|| String::from("sh"));
if container.contains("&&") || command.contains("&&") {
return Err(anyhow!(
"The use of '&&' is forbidden in the container name or command"
));
}
create_terminal(
name,
target,
Some(format!("docker exec -it {container} {command}")),
recreate,
)
.await
.map(|_| NoData {})
}
}
//
impl Resolve<super::Args> for CreateContainerAttachTerminal {
#[instrument(
"CreateContainerAttachTerminal",
skip_all,
fields(
id = args.id.to_string(),
core = args.core,
terminal = self.name,
target = format!("{:?}", self.target),
recreate = format!("{:?}", self.recreate),
)
)]
async fn resolve(
self,
args: &super::Args,
) -> anyhow::Result<NoData> {
if periphery_config().disable_container_terminals {
return Err(anyhow!(
"Container Terminals are disabled in the Periphery config"
));
}
let CreateContainerAttachTerminal {
name,
target,
container,
recreate,
} = self;
if container.contains("&&") {
return Err(anyhow!(
"The use of '&&' is forbidden in the container name"
));
}
create_terminal(
name,
target,
Some(format!("docker attach {container} --sig-proxy=false")),
recreate,
)
.await
.map(|_| NoData {})
}
}
@@ -81,7 +178,7 @@ impl Resolve<super::Args> for DeleteTerminal {
self,
args: &super::Args,
) -> anyhow::Result<NoData> {
delete_terminal(&self.terminal).await;
delete_terminal(&self.target, &self.terminal).await;
Ok(NoData {})
}
}
@@ -119,12 +216,6 @@ impl Resolve<super::Args> for ConnectTerminal {
)
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Uuid> {
if periphery_config().disable_terminals {
return Err(anyhow!(
"Terminals are disabled in the periphery config"
));
}
let connection =
core_connections().get(&args.core).await.with_context(
|| format!("Failed to find channel for {}", args.core),
@@ -132,115 +223,7 @@ impl Resolve<super::Args> for ConnectTerminal {
clean_up_terminals().await;
let terminal = get_terminal(&self.terminal).await?;
let channel =
spawn_terminal_forwarding(connection, terminal).await;
Ok(channel)
}
}
//
impl Resolve<super::Args> for ConnectContainerExec {
#[instrument(
"ConnectContainerExec",
skip_all,
fields(
id = args.id.to_string(),
core = args.core,
container = self.container,
shell = self.shell,
recreate = format!("{:?}", self.recreate),
)
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Uuid> {
if periphery_config().disable_container_terminals {
return Err(anyhow!(
"Container Terminals are disabled in the periphery config"
));
}
let connection =
core_connections().get(&args.core).await.with_context(
|| format!("Failed to find channel for {}", args.core),
)?;
let ConnectContainerExec {
container,
shell,
recreate,
} = self;
if container.contains("&&") || shell.contains("&&") {
return Err(anyhow!(
"The use of '&&' is forbidden in the container name or shell"
));
}
// Create (recreate if shell changed)
let terminal = create_terminal(
container.clone(),
Some(format!("docker exec -it {container} {shell}")),
recreate,
Some((container, ContainerTerminalMode::Exec)),
)
.await
.context("Failed to create terminal for container exec")?;
let channel =
spawn_terminal_forwarding(connection, terminal).await;
Ok(channel)
}
}
//
impl Resolve<super::Args> for ConnectContainerAttach {
#[instrument(
"ConnectContainerAttach",
skip_all,
fields(
id = args.id.to_string(),
core = args.core,
container = self.container,
recreate = format!("{:?}", self.recreate),
)
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Uuid> {
if periphery_config().disable_container_terminals {
return Err(anyhow!(
"Container Terminals are disabled in the periphery config"
));
}
let connection =
core_connections().get(&args.core).await.with_context(
|| format!("Failed to find channel for {}", args.core),
)?;
let ConnectContainerAttach {
container,
recreate,
} = self;
if container.contains("&&") {
return Err(anyhow!(
"The use of '&&' is forbidden in the container name"
));
}
// Create (recreate if shell changed)
let terminal = create_terminal(
container.clone(),
Some(format!("docker attach {container} --sig-proxy=false")),
recreate,
Some((container, ContainerTerminalMode::Attach)),
)
.await
.context("Failed to create terminal for container attach")?;
let terminal = get_terminal(&self.terminal, &self.target).await?;
let channel =
spawn_terminal_forwarding(connection, terminal).await;
@@ -265,11 +248,7 @@ impl Resolve<super::Args> for DisconnectTerminal {
self,
args: &super::Args,
) -> anyhow::Result<NoData> {
if let Some(channel) =
terminal_channels().remove(&self.channel).await
{
channel.cancel.cancel();
}
terminal_channels().remove(&self.channel).await;
Ok(NoData {})
}
}
@@ -288,18 +267,12 @@ impl Resolve<super::Args> for ExecuteTerminal {
)
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Uuid> {
if periphery_config().disable_terminals {
return Err(anyhow!(
"Terminals are disabled in the Periphery config"
));
}
let channel =
core_connections().get(&args.core).await.with_context(
|| format!("Failed to find channel for {}", args.core),
)?;
let terminal = get_terminal(&self.terminal).await?;
let terminal = get_terminal(&self.terminal, &self.target).await?;
let channel_id = Uuid::new_v4();
@@ -323,82 +296,10 @@ impl Resolve<super::Args> for ExecuteTerminal {
}
}
//
impl Resolve<super::Args> for ExecuteContainerExec {
#[instrument(
"ExecuteContainerExec",
skip_all,
fields(
id = args.id.to_string(),
core = args.core,
container = self.container,
shell = self.shell,
command = self.command,
recreate = format!("{:?}", self.recreate)
)
)]
async fn resolve(self, args: &super::Args) -> anyhow::Result<Uuid> {
if periphery_config().disable_container_terminals {
return Err(anyhow!(
"Container Terminals are disabled in the Periphery config"
));
}
let Self {
container,
shell,
command,
recreate,
} = self;
if container.contains("&&") || shell.contains("&&") {
return Err(anyhow!(
"The use of '&&' is forbidden in the container name or shell"
));
}
let channel =
core_connections().get(&args.core).await.with_context(
|| format!("Failed to find channel for {}", args.core),
)?;
let terminal = create_terminal(
container.clone(),
Some(format!("docker exec -it {container} {shell}")),
recreate,
Some((container, ContainerTerminalMode::Exec)),
)
.await
.context("Failed to create terminal for container exec")?;
// Wait a bit for terminal to initialize
tokio::time::sleep(Duration::from_millis(500)).await;
let channel_id = Uuid::new_v4();
let stdout = setup_execute_command_on_terminal(
channel_id, &terminal, &command,
)
.await?;
tokio::spawn(async move {
forward_execute_command_on_terminal_response(
&channel.sender,
channel_id,
stdout,
)
.await
});
Ok(channel_id)
}
}
#[instrument("SpawnTerminalForwarding", skip_all)]
async fn spawn_terminal_forwarding(
connection: Arc<BufferedChannel<EncodedTransportMessage>>,
terminal: Arc<Terminal>,
terminal: Arc<PeripheryTerminal>,
) -> Uuid {
let channel = Uuid::new_v4();
let cancel = CancellationToken::new();
@@ -430,7 +331,7 @@ async fn spawn_terminal_forwarding(
async fn handle_terminal_forwarding(
sender: &Sender<EncodedTransportMessage>,
channel: Uuid,
terminal: Arc<Terminal>,
terminal: Arc<PeripheryTerminal>,
cancel: CancellationToken,
) {
// This waits to begin forwarding until Core sends the None byte start trigger.
@@ -470,23 +371,15 @@ async fn handle_terminal_forwarding(
// Forward stdout -> WS
let mut stdout = terminal.stdout.resubscribe();
loop {
let res = tokio::select! {
res = stdout.recv() => res,
_ = terminal.cancel.cancelled() => {
let _ = sender.send_terminal(channel, Err(anyhow!(
"\n{} {}",
"pty".bold(),
"exited".red().bold()
))).await;
let _ = sender.send_terminal_exited(channel).await;
break
},
_ = cancel.cancelled() => {
let _ = sender.send_terminal(channel, Err(anyhow!(
"\n{} {}",
"websocket".bold(),
"disconnected".red().bold()
))).await;
break
}
};
@@ -494,17 +387,7 @@ async fn handle_terminal_forwarding(
let bytes = match res {
Ok(bytes) => bytes,
Err(_e) => {
terminal.cancel();
let _ = sender
.send_terminal(
channel,
Err(anyhow!(
"\n{} {}",
"pty".bold(),
"exited".red().bold()
)),
)
.await;
let _ = sender.send_terminal_exited(channel).await;
break;
}
};
@@ -519,12 +402,7 @@ async fn handle_terminal_forwarding(
}
// Clean up
if let Some(terminal_channel) =
terminal_channels().remove(&channel).await
{
trace!("Cancel called for {channel}");
terminal_channel.cancel.cancel();
}
terminal_channels().remove(&channel).await;
clean_up_terminals().await;
}
@@ -532,7 +410,7 @@ async fn handle_terminal_forwarding(
#[instrument("SetupExecuteTerminal", skip(terminal))]
async fn setup_execute_command_on_terminal(
channel_id: Uuid,
terminal: &Terminal,
terminal: &PeripheryTerminal,
command: &str,
) -> anyhow::Result<
impl Stream<Item = Result<String, LinesCodecError>> + 'static,
@@ -555,7 +433,7 @@ async fn setup_execute_command_on_terminal(
terminal
.stdin
.send(StdinMsg::Bytes(full_command.into()))
.send(TerminalStdinMessage::forward(full_command.into()))
.await
.context("Failed to send command to terminal stdin")?;
@@ -587,7 +465,7 @@ async fn forward_execute_command_on_terminal_response(
channel: Uuid,
mut stdout: impl Stream<Item = Result<String, LinesCodecError>> + Unpin,
) {
// This waits to begin forwarding until Core sends the None byte start trigger.
// This waits to begin forwarding until Core sends the Begin byte start trigger.
// This ensures no messages are lost before channels on both sides are set up.
if let Err(e) = terminal_triggers().recv(&channel).await {
warn!("{e:#}");

View File

@@ -81,11 +81,21 @@ async fn handle_socket<W: Websocket>(
let forward_writes = async {
loop {
let message = match receiver.recv().await {
Ok(message) => message,
Err(e) => {
warn!("{e:#}");
break;
let message = match tokio::time::timeout(
Duration::from_secs(5),
receiver.recv(),
)
.await
{
Ok(Ok(message)) => message,
Ok(Err(_)) => break,
// Handle sending Ping
Err(_) => {
if let Err(e) = ws_write.ping().await {
warn!("Failed to send ping | {e:?}");
break;
}
continue;
}
};
match ws_write.send(message.into_bytes()).await {
@@ -93,11 +103,11 @@ async fn handle_socket<W: Websocket>(
Ok(_) => receiver.clear_buffer(),
Err(e) => {
warn!("Failed to send response | {e:?}");
let _ = ws_write.close().await;
break;
}
}
}
let _ = ws_write.close().await;
};
let handle_reads = async {

View File

@@ -1,8 +1,7 @@
use anyhow::{Context, anyhow};
use bollard::Docker;
use command::run_komodo_command;
use command::{run_komodo_standard_command, run_shell_command};
use komodo_client::entities::{TerminationSignal, update::Log};
use run_command::async_run_command;
pub mod stats;
@@ -38,9 +37,9 @@ pub async fn docker_login(
Some(token) => token,
None => crate::helpers::registry_token(domain, account)?,
};
let log = async_run_command(&format!(
let log = run_shell_command(&format!(
"echo {registry_token} | docker login {domain} --username '{account}' --password-stdin",
))
), None)
.await;
if log.success() {
Ok(true)
@@ -63,7 +62,7 @@ pub async fn docker_login(
#[instrument("PullImage")]
pub async fn pull_image(image: &str) -> Log {
let command = format!("docker pull {image}");
run_komodo_command("Docker Pull", None, command).await
run_komodo_standard_command("Docker Pull", None, command).await
}
pub fn stop_container_command(

View File

@@ -3,6 +3,7 @@ use std::{collections::HashMap, sync::Arc};
use anyhow::{Context, anyhow};
use async_timing_util::wait_until_timelength;
use bollard::{models, query_parameters::StatsOptionsBuilder};
use command::run_standard_command;
use futures_util::StreamExt;
use komodo_client::entities::docker::{
container::ContainerStats,
@@ -13,7 +14,6 @@ use komodo_client::entities::docker::{
ContainerThrottlingData, FullContainerStats,
},
};
use run_command::async_run_command;
use crate::{
config::periphery_config, docker::DockerClient,
@@ -65,7 +65,7 @@ pub async fn get_container_stats(
};
let command =
format!("docker stats{container_name} --no-stream {format}");
let output = async_run_command(&command).await;
let output = run_standard_command(&command, None).await;
if output.success() {
output
.stdout

View File

@@ -3,7 +3,10 @@ use std::{
};
use anyhow::Context;
use command::run_komodo_command_with_sanitization;
use command::{
KomodoCommandMode, run_komodo_command_with_sanitization,
run_standard_command,
};
use environment::write_env_file;
use interpolate::Interpolator;
use komodo_client::{
@@ -123,7 +126,7 @@ pub async fn handle_post_repo_execution(
"On Clone",
path.as_path(),
on_clone.command,
true,
KomodoCommandMode::Multiline,
&replacers,
)
.await
@@ -148,7 +151,7 @@ pub async fn handle_post_repo_execution(
"On Pull",
path.as_path(),
on_pull.command,
true,
KomodoCommandMode::Multiline,
&replacers,
)
.await
@@ -293,7 +296,7 @@ async fn generate_self_signed_ssl_certs() {
let command = format!(
"openssl req -x509 -newkey rsa:4096 -keyout {key_path} -out {cert_path} -sha256 -days 3650 -nodes -subj \"/C=XX/CN=periphery\""
);
let log = run_command::async_run_command(&command).await;
let log = run_standard_command(&command, None).await;
if log.success() {
info!("✅ SSL Certs generated");

View File

@@ -20,6 +20,7 @@ mod stats;
mod terminal;
async fn app() -> anyhow::Result<()> {
dotenvy::dotenv().ok();
let config = config::periphery_config();
logger::init(&config.logging)?;
@@ -102,21 +103,15 @@ async fn main() -> anyhow::Result<()> {
return noise::key::command::handle(command).await;
}
dotenvy::dotenv().ok();
let mut term_signal = tokio::signal::unix::signal(
tokio::signal::unix::SignalKind::terminate(),
)?;
let app = tokio::spawn(app());
tokio::select! {
res = app => return res?,
res = tokio::spawn(app()) => return res?,
_ = term_signal.recv() => {
info!("Exiting all active Terminals for shutdown");
terminal::delete_all_terminals().await;
Ok(())
},
}
Ok(())
}

View File

@@ -6,8 +6,10 @@ use std::{
use anyhow::{Context, anyhow};
use arc_swap::ArcSwap;
use cache::CloneCache;
use komodo_client::entities::docker::container::ContainerStats;
use cache::{CloneCache, CloneVecCache};
use komodo_client::entities::{
docker::container::ContainerStats, terminal::TerminalStdinMessage,
};
use noise::key::{RotatableKeyPair, SpkiPublicKey};
use periphery_client::transport::EncodedTransportMessage;
use tokio::sync::{Mutex, OnceCell, RwLock, mpsc, oneshot};
@@ -16,11 +18,9 @@ use transport::channel::BufferedChannel;
use uuid::Uuid;
use crate::{
config::periphery_config,
docker::DockerClient,
helpers::resolve_host_public_ip,
stats::StatsClient,
terminal::{StdinMsg, Terminal},
config::periphery_config, docker::DockerClient,
helpers::resolve_host_public_ip, stats::StatsClient,
terminal::PeripheryTerminal,
};
/// Should call in startup to ensure Periphery errors without valid private key.
@@ -168,16 +168,38 @@ pub fn stats_client() -> &'static RwLock<StatsClient> {
STATS_CLIENT.get_or_init(|| RwLock::new(StatsClient::default()))
}
pub type PtyName = String;
pub type PtyMap =
tokio::sync::RwLock<HashMap<PtyName, Arc<Terminal>>>;
pub fn terminals() -> &'static PtyMap {
static TERMINALS: OnceLock<PtyMap> = OnceLock::new();
pub fn terminals() -> &'static CloneVecCache<Arc<PeripheryTerminal>> {
static TERMINALS: OnceLock<CloneVecCache<Arc<PeripheryTerminal>>> =
OnceLock::new();
TERMINALS.get_or_init(Default::default)
}
pub type TerminalChannels = CloneCache<Uuid, Arc<TerminalChannel>>;
#[derive(Default)]
pub struct TerminalChannels(CloneCache<Uuid, Arc<TerminalChannel>>);
impl TerminalChannels {
pub async fn get(
&self,
channel: &Uuid,
) -> Option<Arc<TerminalChannel>> {
self.0.get(channel).await
}
pub async fn insert(
&self,
channel: Uuid,
terminal: Arc<TerminalChannel>,
) -> Option<Arc<TerminalChannel>> {
self.0.insert(channel, terminal).await
}
pub async fn remove(&self, channel: &Uuid) {
let Some(channel) = self.0.remove(channel).await else {
return;
};
channel.cancel.cancel();
}
}
pub fn terminal_channels() -> &'static TerminalChannels {
static TERMINAL_CHANNELS: OnceLock<TerminalChannels> =
@@ -187,7 +209,7 @@ pub fn terminal_channels() -> &'static TerminalChannels {
#[derive(Debug)]
pub struct TerminalChannel {
pub sender: mpsc::Sender<StdinMsg>,
pub sender: mpsc::Sender<TerminalStdinMessage>,
pub cancel: CancellationToken,
}

View File

@@ -3,10 +3,11 @@ use std::{collections::VecDeque, sync::Arc, time::Duration};
use anyhow::{Context, anyhow};
use bytes::Bytes;
use encoding::{Decode as _, WithChannel};
use komodo_client::{
api::write::TerminalRecreateMode,
entities::{
ContainerTerminalMode, komodo_timestamp, server::TerminalInfo,
use komodo_client::entities::{
komodo_timestamp,
terminal::{
Terminal, TerminalMessage, TerminalRecreateMode,
TerminalStdinMessage, TerminalTarget,
},
};
use periphery_client::transport::EncodedTerminalMessage;
@@ -26,12 +27,14 @@ pub async fn handle_message(message: EncodedTerminalMessage) {
} = match message.decode() {
Ok(res) => res,
Err(e) => {
warn!("Received invalid Terminal bytes | {e:#}");
warn!(
"Received invalid Terminal bytes | Channel decode | {e:#}"
);
return;
}
};
let mut data = match data {
let data = match data {
Ok(data) => data,
Err(_) => {
// This means Core should disconnect.
@@ -40,25 +43,23 @@ pub async fn handle_message(message: EncodedTerminalMessage) {
}
};
let msg = match data.first() {
Some(&0x00) => StdinMsg::Bytes(data.drain(1..).collect()),
Some(&0xFF) => {
if let Ok(dimensions) =
serde_json::from_slice::<ResizeDimensions>(&data[1..])
{
StdinMsg::Resize(dimensions)
} else {
return;
}
let message = match TerminalMessage::from_raw(data)
.into_stdin_message()
{
Err(e) => {
warn!(
"Received invalid Terminal bytes | TerminalMessage decode | {e:#}"
);
return;
}
Some(_) => StdinMsg::Bytes(data),
// Empty bytes are the "begin" trigger for Terminal Executions
None => {
// Send 'begin' trigger for Terminal Executions
Ok(TerminalStdinMessage::Begin) => {
if let Err(e) = terminal_triggers().send(&channel_id).await {
warn!("{e:#}")
}
return;
}
Ok(message) => message,
};
let Some(channel) = terminal_channels().get(&channel_id).await
@@ -67,7 +68,7 @@ pub async fn handle_message(message: EncodedTerminalMessage) {
return;
};
if let Err(e) = channel.sender.send(msg).await {
if let Err(e) = channel.sender.send(message).await {
warn!("No receiver for {channel_id} | {e:?}");
};
}
@@ -75,20 +76,25 @@ pub async fn handle_message(message: EncodedTerminalMessage) {
#[instrument("CreateTerminalInner", skip_all, fields(name))]
pub async fn create_terminal(
name: String,
target: TerminalTarget,
command: Option<String>,
recreate: TerminalRecreateMode,
container: Option<(String, ContainerTerminalMode)>,
) -> anyhow::Result<Arc<Terminal>> {
) -> anyhow::Result<Arc<PeripheryTerminal>> {
let command = command.unwrap_or_else(|| {
periphery_config().default_terminal_command.clone()
});
trace!(
"CreateTerminal: {name} | command: {command} | recreate: {recreate:?}"
);
let mut terminals = terminals().write().await;
let terminals = terminals();
use TerminalRecreateMode::*;
if matches!(recreate, Never | DifferentCommand)
&& let Some(terminal) = terminals.get(&name)
&& let Some(terminal) = terminals
.find(|terminal| {
terminal.target.matches_on_server(&target)
&& terminal.name == name
})
.await
{
if terminal.command == command {
return Ok(terminal.clone());
@@ -100,44 +106,85 @@ pub async fn create_terminal(
}
}
let terminal = Arc::new(
Terminal::new(command, container)
PeripheryTerminal::new(name.clone(), target.clone(), command)
.await
.context("Failed to init terminal")?,
);
if let Some(prev) = terminals.insert(name, terminal.clone()) {
if let Some(prev) = terminals
.insert(
|terminal| {
terminal.target.matches_on_server(&target)
&& terminal.name == name
},
terminal.clone(),
)
.await
{
prev.cancel();
}
Ok(terminal)
}
#[instrument("DeleteTerminalInner")]
pub async fn delete_terminal(name: &str) {
if let Some(terminal) = terminals().write().await.remove(name) {
pub async fn delete_terminal(target: &TerminalTarget, name: &str) {
if let Some(terminal) = terminals()
.remove(|terminal| {
terminal.target.matches_on_server(target)
&& name == terminal.name.as_str()
})
.await
{
terminal.cancel.cancel();
}
}
pub async fn list_terminals(
container: Option<&str>,
) -> Vec<TerminalInfo> {
target: Option<&TerminalTarget>,
) -> Vec<Terminal> {
let mut terminals = terminals()
.read()
.list()
.await
.iter()
.filter(|(_, terminal)| {
// If no container passed, returns all
let Some(container) = container else {
.filter(|terminal| {
// If no target passed, returns all
let Some(target) = target else {
return true;
};
let Some(term_container) =
terminal.container.as_ref().map(|c| c.0.as_str())
else {
return false;
};
term_container == container
match (target, &terminal.target) {
(
TerminalTarget::Server { .. },
TerminalTarget::Server { .. },
) => true,
(
TerminalTarget::Container {
container: target_container,
..
},
TerminalTarget::Container { container, .. },
) => target_container == container,
(
TerminalTarget::Stack {
stack: target_stack,
service: target_service,
},
TerminalTarget::Stack { stack, service },
) => {
target_stack == stack
// If no service passed, only match on stack
&& (target_service.is_none() || target_service == service)
}
(
TerminalTarget::Deployment {
deployment: target_deployment,
},
TerminalTarget::Deployment { deployment },
) => target_deployment == deployment,
_ => false,
}
})
.map(|(name, terminal)| TerminalInfo {
name: name.to_string(),
.map(|terminal| Terminal {
name: terminal.name.clone(),
target: terminal.target.clone(),
command: terminal.command.clone(),
stored_size_kb: terminal.history.size_kb(),
created_at: terminal.created_at,
@@ -149,83 +196,75 @@ pub async fn list_terminals(
pub async fn get_terminal(
name: &str,
) -> anyhow::Result<Arc<Terminal>> {
target: &TerminalTarget,
) -> anyhow::Result<Arc<PeripheryTerminal>> {
terminals()
.read()
.find(|terminal| {
terminal.target.matches_on_server(target)
&& terminal.name.as_str() == name
})
.await
.get(name)
.cloned()
.with_context(|| format!("No terminal at {name}"))
.with_context(|| format!("No terminal for {target:?} at {name}"))
}
pub async fn clean_up_terminals() {
terminals()
.write()
.await
.retain(|_, terminal| !terminal.cancel.is_cancelled());
.retain(|terminal| !terminal.cancel.is_cancelled())
.await;
}
pub async fn delete_all_terminals() {
terminals()
.write()
.await
.drain()
.for_each(|(_, terminal)| terminal.cancel());
.retain(|terminal| {
terminal.cancel();
false
})
.await;
// The terminals poll cancel every 500 millis, need to wait for them
// to finish cancelling.
tokio::time::sleep(Duration::from_millis(100)).await;
tokio::time::sleep(Duration::from_millis(500)).await;
}
#[derive(Clone, serde::Deserialize)]
pub struct ResizeDimensions {
rows: u16,
cols: u16,
}
#[derive(Clone)]
pub enum StdinMsg {
Bytes(Vec<u8>),
Resize(ResizeDimensions),
}
pub type StdinSender = mpsc::Sender<StdinMsg>;
pub type StdinSender = mpsc::Sender<TerminalStdinMessage>;
pub type StdoutReceiver = broadcast::Receiver<Bytes>;
pub struct Terminal {
/// The command that was used as the root command, eg `shell`
command: String,
/// Created timestamp milliseconds
created_at: i64,
pub struct PeripheryTerminal {
/// The name of the terminal.
pub name: String,
/// The target resource of the Terminal.
pub target: TerminalTarget,
/// The command used to init the shell.
pub command: String,
/// When the Terminal was created.
pub created_at: i64,
pub cancel: CancellationToken,
pub stdin: StdinSender,
pub stdout: StdoutReceiver,
pub history: Arc<History>,
/// If terminal is for a container.
pub container: Option<(String, ContainerTerminalMode)>,
}
impl Terminal {
impl PeripheryTerminal {
async fn new(
name: String,
target: TerminalTarget,
command: String,
container: Option<(String, ContainerTerminalMode)>,
) -> anyhow::Result<Terminal> {
trace!("Creating terminal with command: {command}");
) -> anyhow::Result<PeripheryTerminal> {
trace!("Creating Terminal | Command: {command}");
let terminal = native_pty_system()
.openpty(PtySize::default())
.context("Failed to open terminal")?;
let mut command_split = command.split(' ').map(|arg| arg.trim());
let cmd =
command_split.next().context("Command cannot be empty")?;
let mut lexed = shlex::split(&command)
.context("Invalid command: empty")?
.into_iter();
let cmd = lexed.next().context("Command cannot be empty")?;
let mut cmd = CommandBuilder::new(cmd);
for arg in command_split {
for arg in lexed {
cmd.arg(arg);
}
@@ -272,27 +311,27 @@ impl Terminal {
break;
}
match child.try_wait() {
Ok(None) => {
// Continue
std::thread::sleep(Duration::from_millis(500));
}
Ok(Some(code)) => {
debug!("child exited with code {code}");
_cancel.cancel();
break;
}
Ok(None) => {
std::thread::sleep(Duration::from_millis(500));
}
Err(e) => {
debug!("failed to wait for child | {e:?}");
_cancel.cancel();
break;
}
}
}
// Cancel if loop broken
_cancel.cancel();
});
// WS (channel) -> STDIN TASK
// Theres only one consumer here, so use mpsc
let (stdin, mut channel_read) =
tokio::sync::mpsc::channel::<StdinMsg>(8192);
let (stdin, mut stdin_read) = tokio::sync::mpsc::channel(8192);
let _cancel = cancel.clone();
tokio::task::spawn_blocking(move || {
loop {
@@ -300,15 +339,16 @@ impl Terminal {
trace!("terminal write: cancelled from outside");
break;
}
match channel_read.blocking_recv() {
Some(StdinMsg::Bytes(bytes)) => {
match stdin_read.blocking_recv() {
// Handled in self::handle_message
Some(TerminalStdinMessage::Begin) => {}
Some(TerminalStdinMessage::Forward(bytes)) => {
if let Err(e) = terminal_write.write_all(&bytes) {
debug!("Failed to write to PTY: {e:?}");
_cancel.cancel();
break;
}
}
Some(StdinMsg::Resize(dimensions)) => {
Some(TerminalStdinMessage::Resize(dimensions)) => {
if let Err(e) = terminal.master.resize(PtySize {
cols: dimensions.cols,
rows: dimensions.rows,
@@ -316,24 +356,24 @@ impl Terminal {
pixel_height: 0,
}) {
debug!("Failed to resize | {e:?}");
_cancel.cancel();
break;
};
}
None => {
debug!("WS -> PTY channel read error: Disconnected");
_cancel.cancel();
break;
}
}
}
// Cancel if loop broken
_cancel.cancel();
});
let history = Arc::new(History::default());
// PTY -> WS (channel) TASK
// Uses broadcast to output to multiple client simultaneously
let (write, stdout) =
let (write_stdout, stdout) =
tokio::sync::broadcast::channel::<Bytes>(8192);
let _cancel = cancel.clone();
let _history = history.clone();
@@ -345,40 +385,37 @@ impl Terminal {
break;
}
match terminal_read.read(&mut buf) {
Ok(0) => {
// EOF
trace!("Got PTY read EOF");
_cancel.cancel();
break;
}
Ok(0) => break, // EOF
Ok(n) => {
_history.push(&buf[..n]);
let slice = &buf[..n];
_history.push(slice);
if let Err(e) =
write.send(Bytes::copy_from_slice(&buf[..n]))
write_stdout.send(Bytes::copy_from_slice(slice))
{
debug!("PTY -> WS channel send error: {e:?}");
_cancel.cancel();
break;
}
}
Err(e) => {
debug!("Failed to read for PTY: {e:?}");
_cancel.cancel();
break;
}
}
}
// Cancel if loop broken
_cancel.cancel();
});
trace!("terminal tasks spawned");
Ok(Terminal {
Ok(PeripheryTerminal {
name,
target,
command,
cancel,
stdin,
stdout,
history,
container,
created_at: komodo_timestamp(),
})
}

View File

@@ -40,6 +40,7 @@ serde_qs.workspace = true
reqwest.workspace = true
tracing.workspace = true
anyhow.workspace = true
bytes.workspace = true
serde.workspace = true
tokio.workspace = true
strum.workspace = true

View File

@@ -61,6 +61,10 @@ pub struct ListAllDockerContainers {
/// Filter by server id or name.
#[serde(default)]
pub servers: Vec<String>,
/// Filter by container name.
#[serde(default)]
pub containers: Vec<String>,
}
#[typeshare]

View File

@@ -20,6 +20,7 @@ mod server;
mod stack;
mod sync;
mod tag;
mod terminal;
mod toml;
mod update;
mod user;
@@ -43,6 +44,7 @@ pub use server::*;
pub use stack::*;
pub use sync::*;
pub use tag::*;
pub use terminal::*;
pub use toml::*;
pub use update::*;
pub use user::*;

View File

@@ -7,7 +7,7 @@ use crate::entities::{
I64, Timelength,
server::{
PeripheryInformation, Server, ServerActionState, ServerListItem,
ServerQuery, ServerState, TerminalInfo, TerminalInfoWithServer,
ServerQuery, ServerState,
},
stats::{
SystemInformation, SystemProcess, SystemStats, SystemStatsRecord,
@@ -265,51 +265,3 @@ pub struct GetServersSummaryResponse {
/// The number of disabled servers.
pub disabled: I64,
}
//
/// List the current terminals on specified server.
/// Response: [ListTerminalsResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Default, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoReadRequest)]
#[response(ListTerminalsResponse)]
#[error(serror::Error)]
pub struct ListTerminals {
/// Id or name
#[serde(alias = "id", alias = "name")]
pub server: String,
/// Force a fresh call to Periphery for the list.
/// Otherwise the response will be cached for 30s
#[serde(default)]
pub fresh: bool,
}
#[typeshare]
pub type ListTerminalsResponse = Vec<TerminalInfo>;
//
/// List the current terminals on specified server.
/// Response: [ListAllTerminalsResponse].
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Default, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoReadRequest)]
#[response(ListAllTerminalsResponse)]
#[error(serror::Error)]
pub struct ListAllTerminals {
/// optional structured query to filter servers.
#[serde(default)]
pub query: ServerQuery,
/// Force a fresh call to Periphery for the list.
/// Otherwise the response will be cached for 30s
#[serde(default)]
pub fresh: bool,
}
#[typeshare]
pub type ListAllTerminalsResponse = Vec<TerminalInfoWithServer>;

View File

@@ -0,0 +1,30 @@
use derive_empty_traits::EmptyTraits;
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use crate::entities::terminal::{Terminal, TerminalTarget};
use super::KomodoReadRequest;
//
/// List Terminals.
/// Response: [ListTerminalsResponse].
#[typeshare]
#[derive(
Debug, Clone, Default, Serialize, Deserialize, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoReadRequest)]
#[response(ListTerminalsResponse)]
#[error(serror::Error)]
pub struct ListTerminals {
/// Filter the Terminals returned by the Target.
pub target: Option<TerminalTarget>,
/// Return results with resource names instead of ids.
#[serde(default)]
pub use_names: bool,
}
#[typeshare]
pub type ListTerminalsResponse = Vec<Terminal>;

View File

@@ -1,181 +1,61 @@
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use crate::api::write::TerminalRecreateMode;
use crate::entities::terminal::{
ContainerTerminalMode, TerminalRecreateMode, TerminalTarget,
};
/// Query to connect to a terminal (interactive shell over websocket) on the given server.
/// Connect to a Terminal.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectTerminalQuery {
/// Server Id or name
pub server: String,
/// Each periphery can keep multiple terminals open.
/// If a terminals with the specified name does not exist,
/// the call will fail.
/// Create a terminal using [CreateTerminal][super::write::server::CreateTerminal]
pub terminal: String,
/// The target to create terminal for.
pub target: TerminalTarget,
/// Terminal name to connect to.
/// If it may not exist yet, also pass 'init' params
/// to include initialization.
/// Default: Depends on target.
pub terminal: Option<String>,
/// Pass to init the terminal session
/// for when the terminal doesn't already exist.
///
/// Example: ?...(query)&init[command]=bash&init[recreate]=DifferentCommand
pub init: Option<InitTerminal>,
}
/// Args to init the Terminal if needed.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct InitTerminal {
/// The shell command (eg `bash`) to init the shell.
///
/// Default:
/// - Server: Configured on each Periphery
/// - Container: `sh`
pub command: Option<String>,
/// Default: `Never`
#[serde(default)]
pub recreate: TerminalRecreateMode,
/// Only relevant for container-type terminals.
/// Specify the container terminal mode (`exec` or `attach`).
/// Default: `exec`
pub mode: Option<ContainerTerminalMode>,
}
/// Execute a terminal command on the given server.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteTerminalBody {
/// Server Id or name
pub server: String,
/// The name of the terminal on the server to use to execute.
pub terminal: String,
/// The target to create terminal for.
pub target: TerminalTarget,
/// Terminal name to connect to.
/// If it may not exist yet, also pass 'init' params
/// to include initialization.
/// Default: Depends on target.
pub terminal: Option<String>,
/// The command to execute.
pub command: String,
/// Pass to init the terminal session
/// for when the terminal doesn't already exist.
pub init: Option<InitTerminal>,
}
/// Init a terminal on the server.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct InitTerminal {
/// The shell command (eg `bash`) to init the shell.
///
/// This can also include args:
/// `docker exec -it container sh`
///
/// Default: Configured on each Periphery
pub command: Option<String>,
/// Default: `Never`
#[serde(default)]
pub recreate: TerminalRecreateMode,
}
/// Query to connect to a container exec session (interactive shell over websocket) on the given server.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectContainerExecQuery {
/// Server Id or name
pub server: String,
/// The container name
pub container: String,
/// The shell to use (eg. `sh` or `bash`)
pub shell: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Query to connect to a container attach session (interactive shell over websocket) on the given server.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectContainerAttachQuery {
/// Server Id or name
pub server: String,
/// The container name
pub container: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Execute a command in the given containers shell.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteContainerExecBody {
/// Server Id or name
pub server: String,
/// The container name
pub container: String,
/// The shell to use (eg. `sh` or `bash`)
pub shell: String,
/// The command to execute.
pub command: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Query to connect to a container exec session (interactive shell over websocket) on the given Deployment.
/// This call will use access to the Deployment Terminal to permission the call.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectDeploymentExecQuery {
/// Deployment Id or name
pub deployment: String,
/// The shell to use (eg. `sh` or `bash`)
pub shell: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Query to connect to a container attach session (interactive shell over websocket) on the given Deployment.
/// This call will use access to the Deployment Terminal to permission the call.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectDeploymentAttachQuery {
/// Deployment Id or name
pub deployment: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Execute a command in the given containers shell.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteDeploymentExecBody {
/// Deployment Id or name
pub deployment: String,
/// The shell to use (eg. `sh` or `bash`)
pub shell: String,
/// The command to execute.
pub command: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Query to connect to a container exec session (interactive shell over websocket) on the given Stack / service.
/// This call will use access to the Stack Terminal to permission the call.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectStackExecQuery {
/// Stack Id or name
pub stack: String,
/// The service name to connect to
pub service: String,
/// The shell to use (eg. `sh` or `bash`)
pub shell: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Query to connect to a container attach session (interactive shell over websocket) on the given Stack / service.
/// This call will use access to the Stack Terminal to permission the call.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectStackAttachQuery {
/// Stack Id or name
pub stack: String,
/// The service name to attach to
pub service: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}
/// Execute a command in the given containers shell.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteStackExecBody {
/// Stack Id or name
pub stack: String,
/// The service name to connect to
pub service: String,
/// The shell to use (eg. `sh` or `bash`)
pub shell: String,
/// The command to execute.
pub command: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
pub recreate: Option<TerminalRecreateMode>,
}

View File

@@ -15,6 +15,7 @@ mod server;
mod stack;
mod sync;
mod tags;
mod terminal;
mod user;
mod user_group;
mod variable;
@@ -36,6 +37,7 @@ pub use server::*;
pub use stack::*;
pub use sync::*;
pub use tags::*;
pub use terminal::*;
pub use user::*;
pub use user_group::*;
pub use variable::*;

View File

@@ -4,8 +4,7 @@ use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use crate::entities::{
NoData,
server::{_PartialServerConfig, Server, ServerQuery},
server::{_PartialServerConfig, Server},
update::Update,
};
@@ -131,100 +130,6 @@ pub struct CreateNetwork {
//
/// Configures the behavior of [CreateTerminal] if the
/// specified terminal name already exists.
#[typeshare]
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub enum TerminalRecreateMode {
/// Never kill the old terminal if it already exists.
/// If the command is different, returns error.
#[default]
Never,
/// Always kill the old terminal and create new one
Always,
/// Only kill and recreate if the command is different.
DifferentCommand,
}
/// Create a terminal on the server.
/// Response: [NoData]
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct CreateTerminal {
/// Server Id or name
pub server: String,
/// The name of the terminal on the server to create.
pub name: String,
/// The shell command (eg `bash`) to init the shell.
///
/// This can also include args:
/// `docker exec -it container sh`
///
/// Default: Configured on each Periphery
pub command: Option<String>,
/// Default: `Never`
#[serde(default)]
pub recreate: TerminalRecreateMode,
}
//
/// Delete a terminal on the server.
/// Response: [NoData]
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct DeleteTerminal {
/// Server Id or name
pub server: String,
/// The name of the terminal on the server to delete.
pub terminal: String,
}
//
/// Delete all terminals on the server.
/// Response: [NoData]
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct DeleteAllTerminals {
/// Server Id or name
pub server: String,
}
//
/// Delete all terminals on many or all Servers.
/// Response: [NoData]
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct BatchDeleteAllTerminals {
/// optional structured query to filter servers.
#[serde(default)]
pub query: ServerQuery,
}
//
/// Updates the Server with an explicit Public Key.
/// Response: [Update]
#[typeshare]

View File

@@ -0,0 +1,100 @@
use derive_empty_traits::EmptyTraits;
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use crate::entities::{
NoData,
server::ServerQuery,
terminal::{
ContainerTerminalMode, TerminalRecreateMode, TerminalTarget,
},
};
use super::KomodoWriteRequest;
//
/// Create a Terminal.
/// Requires minimum Read + Terminal permission on the target Resource.
/// Response: [NoData]
#[typeshare]
#[derive(
Debug, Clone, Serialize, Deserialize, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct CreateTerminal {
/// A name for the Terminal session.
pub name: String,
/// The target to create terminal for
pub target: TerminalTarget,
/// The shell command (eg `bash`) to init the shell.
///
/// Default:
/// - Server: Configured on each Periphery
/// - ContainerExec: `sh`
/// - Attach: unused
pub command: Option<String>,
/// For container terminals, choose 'exec' or 'attach'.
///
/// Default
/// - Server: ignored
/// - Container / Stack / Deployment: `exec`
pub mode: Option<ContainerTerminalMode>,
/// Default: `Never`
#[serde(default)]
pub recreate: TerminalRecreateMode,
}
//
/// Delete a terminal.
/// Response: [NoData]
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct DeleteTerminal {
/// Server / Container / Stack / Deployment
pub target: TerminalTarget,
/// The name of the Terminal to delete.
pub terminal: String,
}
//
/// Delete all Terminals on the Server.
/// Response: [NoData]
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct DeleteAllTerminals {
/// Server Id or name
pub server: String,
}
//
/// Delete all terminals on many or all Servers.
/// Response: [NoData]
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
)]
#[empty_traits(KomodoWriteRequest)]
#[response(NoData)]
#[error(serror::Error)]
pub struct BatchDeleteAllTerminals {
/// Optional structured query to filter servers.
#[serde(default)]
pub query: ServerQuery,
}

View File

@@ -7,7 +7,7 @@ use crate::api::execute::Execution;
pub mod container;
pub mod database;
pub mod list;
pub mod ssh;
pub mod terminal;
pub mod update;
#[derive(Debug, clap::Parser)]
@@ -52,14 +52,6 @@ pub enum Command {
unsanitized: bool,
},
/// Container info (aliases: `ps`, `cn`, `containers`)
#[clap(alias = "ps", alias = "cn", alias = "containers")]
Container(container::Container),
/// Inspect containers (alias: `i`)
#[clap(alias = "i")]
Inspect(container::InspectContainer),
/// List Komodo resources (aliases: `ls`, `resources`)
#[clap(alias = "ls", alias = "resources")]
List(list::List),
@@ -81,7 +73,23 @@ pub enum Command {
command: update::UpdateCommand,
},
Ssh(ssh::Ssh),
/// Container info (aliases: `ps`, `cn`, `containers`)
#[clap(alias = "ps", alias = "cn", alias = "containers")]
Container(container::Container),
/// Inspect containers (alias: `i`)
#[clap(alias = "i")]
Inspect(container::InspectContainer),
/// Connect to Server Terminals. (alias: `ssh`)
#[clap(alias = "ssh")]
Connect(terminal::Connect),
/// Connect to Container Terminals. `docker exec` analogue.
Exec(terminal::Exec),
/// Attach to Container Terminals. `docker attach` analogue.
Attach(terminal::Attach),
/// Private-Public key utilities. (alias: `k`)
#[clap(alias = "k")]

View File

@@ -1,17 +0,0 @@
#[derive(Debug, Clone, clap::Parser)]
pub struct Ssh {
/// The server to connect to.
pub server: String,
/// Custom command to use to start the shell, eg `bash`.
/// Defaults to Periphery default.
pub command: Option<String>,
/// The terminal name to connect to. Default: `ssh`
#[arg(long, short = 'n', default_value_t = String::from("ssh"))]
pub name: String,
/// Force fresh terminal to replace existing one.
#[arg(long, short = 'r', default_value_t = false)]
pub recreate: bool,
}

View File

@@ -0,0 +1,49 @@
#[derive(Debug, Clone, clap::Parser)]
pub struct Connect {
/// The server to connect to.
pub server: String,
/// Custom command to use to start the shell, eg `bash`.
/// Defaults to Periphery default.
pub command: Option<String>,
/// The terminal name to connect to. Default: `ssh`
#[arg(long, short = 'n', default_value_t = String::from("ssh"))]
pub name: String,
/// Force fresh terminal to replace existing one.
#[arg(long, short = 'r', default_value_t = false)]
pub recreate: bool,
}
#[derive(Debug, Clone, clap::Parser)]
pub struct Exec {
/// The container (name) to connect to.
/// Will error if matches multiple containers but no Server is defined.
pub container: String,
/// The shell, eg `bash`.
pub shell: String,
/// Specify Server.
/// Required if multiple servers have same container name.
/// (alias: `s`)
#[arg(long, short = 's')]
pub server: Option<String>,
/// Force fresh terminal to replace existing one.
#[arg(long, short = 'r', default_value_t = false)]
pub recreate: bool,
}
#[derive(Debug, Clone, clap::Parser)]
pub struct Attach {
/// The container (name) to attach to.
/// Will error if matches multiple containers but no Server is defined.
pub container: String,
/// Specify Server.
/// Required if multiple servers have same container name.
/// (alias: `s`)
#[arg(long, short = 's')]
pub server: Option<String>,
/// Force fresh terminal to replace existing one.
#[arg(long, short = 'r', default_value_t = false)]
pub recreate: bool,
}

View File

@@ -66,6 +66,8 @@ pub mod stats;
pub mod sync;
/// Subtypes of [Tag][tag::Tag].
pub mod tag;
pub mod terminal;
/// Subtypes of [ResourcesToml][toml::ResourcesToml].
pub mod toml;
/// Subtypes of [Update][update::Update].
@@ -605,28 +607,6 @@ pub struct RepoExecutionResponse {
pub commit_message: Option<String>,
}
#[typeshare]
#[derive(
Debug,
Clone,
Copy,
PartialEq,
Eq,
Hash,
Default,
Serialize,
Deserialize,
Display,
EnumString,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ContainerTerminalMode {
#[default]
Exec,
Attach,
}
#[typeshare]
#[derive(
Debug,

View File

@@ -246,6 +246,47 @@ impl PermissionLevel {
}
impl PermissionLevelAndSpecifics {
/// Elevates self by permissions in other:
/// - If other.level > self.level, set self.level = other.level
/// - If other includes more specifics, add them.
pub fn elevate(&mut self, other: &impl HasLevelAndSpecific) {
let other_level = other.level();
if other_level > self.level {
self.level = other_level;
}
self.specific.extend(other.specific().iter().cloned());
}
/// Joins permissions in self with other to produce a new PermissionsLevelAndSpecifics:
/// - If other.level > self.level, set self.level = other.level
/// - If other includes more specifics, add them.
pub fn join(
&self,
other: &impl HasLevelAndSpecific,
) -> PermissionLevelAndSpecifics {
let mut specific = self.specific.clone();
specific.extend(other.specific().iter().cloned());
PermissionLevelAndSpecifics {
level: std::cmp::max(self.level, other.level()),
specific,
}
}
/// Joins permissions in self with other to produce a new PermissionsLevelAndSpecifics:
/// - If other.level > self.level, set self.level = other.level
/// - If other includes more specifics, add them.
pub fn join_permission(
&self,
other: &Permission,
) -> PermissionLevelAndSpecifics {
let mut specific = self.specific.clone();
specific.extend(other.specific.iter().cloned());
PermissionLevelAndSpecifics {
level: std::cmp::max(self.level, other.level),
specific,
}
}
/// Returns true when self.level >= other.level,
/// and has all required specific permissions.
pub fn fulfills(
@@ -332,3 +373,26 @@ impl PermissionLevelAndSpecifics {
self.specific(SpecificPermission::Processes)
}
}
/// Abstraction over types that carry a permission level plus a set
/// of specific permissions, so the merge helpers on
/// [PermissionLevelAndSpecifics] can accept either form.
pub trait HasLevelAndSpecific {
  /// The base permission level.
  fn level(&self) -> PermissionLevel;
  /// The set of specific permissions.
  fn specific(&self) -> &IndexSet<SpecificPermission>;
}
// Simple field accessors over the stored Permission document.
impl HasLevelAndSpecific for Permission {
  fn level(&self) -> PermissionLevel {
    self.level
  }
  fn specific(&self) -> &IndexSet<SpecificPermission> {
    &self.specific
  }
}
// Simple field accessors; lets a PermissionLevelAndSpecifics be
// merged with another via elevate / join.
impl HasLevelAndSpecific for PermissionLevelAndSpecifics {
  fn level(&self) -> PermissionLevel {
    self.level
  }
  fn specific(&self) -> &IndexSet<SpecificPermission> {
    &self.specific
  }
}

View File

@@ -1,4 +1,6 @@
use bson::{Document, doc};
use std::str::FromStr;
use bson::{Document, doc, oid::ObjectId};
use clap::ValueEnum;
use derive_builder::Builder;
use derive_default_builder::DefaultBuilder;
@@ -166,8 +168,12 @@ impl AddFilters for () {}
impl<T: AddFilters + Default> AddFilters for ResourceQuery<T> {
fn add_filters(&self, filters: &mut Document) {
if !self.names.is_empty() {
filters.insert("name", doc! { "$in": &self.names });
let (ids, names) = split_names(&self.names);
if !ids.is_empty() {
filters.insert("_id", doc! { "$in": ids });
}
if !names.is_empty() {
filters.insert("name", doc! { "$in": names });
}
match self.templates {
TemplatesQueryBehavior::Exclude => {
@@ -198,3 +204,18 @@ impl<T: AddFilters + Default> AddFilters for ResourceQuery<T> {
self.specific.add_filters(filters);
}
}
/// Partitions a mixed list of names / ids into `(ids, names)`.
/// An entry counts as an id exactly when it parses as a
/// Mongo ObjectId; everything else is treated as a name.
fn split_names(
  names_or_ids: &[String],
) -> (Vec<ObjectId>, Vec<&String>) {
  let mut ids = Vec::new();
  let mut names = Vec::new();
  for candidate in names_or_ids {
    if let Ok(id) = ObjectId::from_str(candidate) {
      ids.push(id);
    } else {
      names.push(candidate);
    }
  }
  (ids, names)
}

View File

@@ -10,7 +10,7 @@ use crate::{
deserializers::{
option_string_list_deserializer, string_list_deserializer,
},
entities::{I64, MaintenanceWindow, Timelength},
entities::{MaintenanceWindow, Timelength},
};
use super::{
@@ -364,62 +364,6 @@ pub struct PeripheryInformation {
pub public_ip: Option<String>,
}
/// Info about an active terminal on a server.
/// Retrieve with [ListTerminals][crate::api::read::server::ListTerminals].
#[typeshare]
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
pub struct TerminalInfo {
/// The name of the terminal.
pub name: String,
/// The root program / args of the pty
pub command: String,
/// The size of the terminal history in memory.
pub stored_size_kb: f64,
/// When the Terminal was created.
pub created_at: I64,
}
/// Info about an active terminal on a server.
/// Retrieve with [ListAllTerminals][crate::api::read::server::ListAllTerminals].
#[typeshare]
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
pub struct TerminalInfoWithServer {
/// The server id.
pub server_id: String,
/// The server name.
pub server_name: String,
/// The name of the terminal.
pub name: String,
/// The root program / args of the pty
pub command: String,
/// The size of the terminal history in memory.
pub stored_size_kb: f64,
/// When the Terminal was created in unix milliseconds.
pub created_at: I64,
}
impl TerminalInfoWithServer {
pub fn from_terminal_info(
server_id: impl Into<String>,
server_name: impl Into<String>,
TerminalInfo {
name,
command,
stored_size_kb,
created_at,
}: TerminalInfo,
) -> Self {
Self {
server_id: server_id.into(),
server_name: server_name.into(),
name,
command,
stored_size_kb,
created_at,
}
}
}
/// Current pending actions on the server.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Default)]

View File

@@ -0,0 +1,219 @@
use anyhow::{Context as _, anyhow};
use derive_variants::EnumVariants;
use serde::{Deserialize, Serialize};
use strum::AsRefStr;
use tokio_tungstenite::tungstenite;
use typeshare::typeshare;
use crate::entities::I64;
/// Represents an active terminal on a server.
/// Retrieve with [ListTerminals][crate::api::read::server::ListTerminals].
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Terminal {
  /// The name of the terminal.
  pub name: String,
  /// The target resource of the Terminal.
  pub target: TerminalTarget,
  /// The command used to init the shell.
  pub command: String,
  /// The size of the terminal history in memory, in kilobytes.
  pub stored_size_kb: f64,
  /// When the Terminal was created.
  /// Unix timestamp milliseconds.
  pub created_at: I64,
}
/// The resource a terminal is attached to.
/// Serialized with adjacent tagging, eg.
/// `{ "type": "Server", "params": { "server": "..." } }`.
#[typeshare]
#[derive(
  Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize,
)]
#[serde(tag = "type", content = "params")]
pub enum TerminalTarget {
  /// A terminal on the server host itself.
  Server {
    /// The server id or name.
    // NOTE(review): optional — presumably omitted when the server
    // is already implied by context; confirm against callers.
    server: Option<String>,
  },
  /// A terminal inside a container on a specific server.
  Container {
    /// The server id or name.
    server: String,
    /// The container name.
    container: String,
  },
  /// A terminal inside a Stack service container.
  Stack {
    /// The stack id or name.
    stack: String,
    /// The service name within the stack.
    service: Option<String>,
  },
  /// A terminal inside a Deployment's container.
  Deployment {
    /// The deployment id or name.
    deployment: String,
  },
}
impl TerminalTarget {
  /// Checks for target match in a fixed server context.
  ///
  /// Only the variant-specific identifier is compared
  /// (container / stack / deployment). Two Server targets always
  /// match — the server itself is fixed by context, so the inner
  /// `server` fields are ignored. The Stack `service` field is
  /// also ignored. Mismatched variants never match.
  pub fn matches_on_server(&self, other: &TerminalTarget) -> bool {
    match (self, other) {
      (
        TerminalTarget::Server { .. },
        TerminalTarget::Server { .. },
      ) => true,
      (
        TerminalTarget::Container {
          container: target, ..
        },
        TerminalTarget::Container { container, .. },
      ) => target == container,
      (
        TerminalTarget::Stack { stack: target, .. },
        TerminalTarget::Stack { stack, .. },
      ) => target == stack,
      (
        TerminalTarget::Deployment { deployment: target },
        TerminalTarget::Deployment { deployment },
      ) => target == deployment,
      _ => false,
    }
  }
}
/// Specify the container terminal mode (exec or attach).
/// Serializes in lowercase ("exec" / "attach").
#[typeshare]
#[derive(
  Debug, Clone, Copy, Default, Serialize, Deserialize, AsRefStr,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ContainerTerminalMode {
  /// Run a new process in the container (default).
  #[default]
  Exec,
  /// Attach to the container's main process io.
  Attach,
}
/// Configures the behavior of CreateTerminal if the
/// specified terminal name already exists.
#[typeshare]
#[derive(
  Debug, Clone, Copy, Default, Serialize, Deserialize, AsRefStr,
)]
pub enum TerminalRecreateMode {
  /// Never kill the old terminal if it already exists.
  /// If the init command is different, returns an error.
  #[default]
  Never,
  /// Always kill the old terminal and create a new one.
  Always,
  /// Only kill and recreate if the init command is different.
  DifferentCommand,
}
/// JSON structure to send new terminal window dimensions.
/// Sent client -> PTY as the payload of a Resize stdin message.
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TerminalResizeMessage {
  /// New terminal height in rows.
  pub rows: u16,
  /// New terminal width in columns.
  pub cols: u16,
}
/// Raw bytes exchanged over a terminal websocket.
#[derive(Debug, Clone)]
pub struct TerminalMessage(Vec<u8>);

impl TerminalMessage {
  /// Wraps raw bytes without any framing.
  /// Suitable to use for PTY stdout -> client messages.
  pub fn from_raw(vec: Vec<u8>) -> Self {
    Self(vec)
  }

  /// Unwraps the raw bytes without any decoding.
  /// Suitable to use for PTY stdout -> client messages.
  pub fn into_raw(self) -> Vec<u8> {
    self.0
  }

  /// Converts into a binary websocket message.
  pub fn into_ws_message(self) -> tungstenite::Message {
    tungstenite::Message::Binary(self.0.into())
  }

  /// Message sent from client -> PTY stdin could be
  /// regular bytes, or resize message.
  ///
  /// Wire format: the LAST byte of the payload is the
  /// [TerminalStdinMessageVariant] discriminant; the preceding
  /// bytes are the variant payload (raw stdin bytes for Forward,
  /// JSON-encoded [TerminalResizeMessage] for Resize).
  /// Errors if the payload is empty or the discriminant /
  /// resize JSON is invalid.
  pub fn into_stdin_message(
    self,
  ) -> anyhow::Result<TerminalStdinMessage> {
    let mut bytes = self.0;
    // Trailing byte identifies the variant; pop leaves the payload.
    let variant_byte = bytes.pop().context(
      "Failed to decode Terminal message | bytes are empty",
    )?;
    use TerminalStdinMessageVariant::*;
    match TerminalStdinMessageVariant::from_byte(variant_byte)? {
      Begin => Ok(TerminalStdinMessage::Begin),
      Forward => Ok(TerminalStdinMessage::Forward(bytes)),
      Resize => {
        let message =
          serde_json::from_slice::<TerminalResizeMessage>(&bytes)
            .context("Invalid TerminalResizeMessage bytes")?;
        Ok(TerminalStdinMessage::Resize(message))
      }
    }
  }
}
/// Messages sent from clients -> PTY stdin.
#[derive(Debug, EnumVariants)]
#[variant_derive(Debug, Clone, Copy)]
pub enum TerminalStdinMessage {
  /// This triggers forwarding to begin.
  Begin,
  /// Forward these bytes as normal to PTY stdin.
  Forward(Vec<u8>),
  /// Resize the PTY dimensions based on client.
  /// Clients should send this whenever its window resizes.
  Resize(TerminalResizeMessage),
}
impl TerminalStdinMessage {
pub fn forward(bytes: Vec<u8>) -> Self {
Self::Forward(bytes)
}
pub fn into_terminal_message(
self,
) -> anyhow::Result<TerminalMessage> {
match self {
TerminalStdinMessage::Begin => Ok(TerminalMessage(vec![
TerminalStdinMessageVariant::Begin.as_byte(),
])),
TerminalStdinMessage::Forward(mut bytes) => {
bytes.push(TerminalStdinMessageVariant::Forward.as_byte());
Ok(TerminalMessage(bytes))
}
TerminalStdinMessage::Resize(message) => {
let mut bytes = serde_json::to_vec(&message).context(
"Failed to serialize TerminalResizeMessage to bytes",
)?;
bytes.push(TerminalStdinMessageVariant::Resize.as_byte());
Ok(TerminalMessage(bytes))
}
}
}
}
impl TerminalStdinMessageVariant {
pub fn from_byte(byte: u8) -> anyhow::Result<Self> {
use TerminalStdinMessageVariant::*;
let variant = match byte {
0x00 => Begin,
0x01 => Forward,
0xFF => Resize,
other => {
return Err(anyhow!(
"Got unrecognized TerminalStdinMessageVariant byte: {other}"
));
}
};
Ok(variant)
}
pub fn as_byte(self) -> u8 {
use TerminalStdinMessageVariant::*;
match self {
Begin => 0x00,
Forward => 0x01,
Resize => 0xFF,
}
}
}

View File

@@ -11,6 +11,7 @@ use typeshare::typeshare;
use crate::KomodoClient;
pub mod terminal;
pub mod update;
#[typeshare]
@@ -34,19 +35,6 @@ impl WsLoginMessage {
}
impl KomodoClient {
pub async fn connect_terminal_websocket(
&self,
server: &str,
terminal: &str,
) -> anyhow::Result<WebSocketStream<MaybeTlsStream<TcpStream>>> {
self
.connect_login_user_websocket(
"/terminal",
Some(&format!("server={server}&terminal={terminal}")),
)
.await
}
async fn connect_login_user_websocket(
&self,
path: &str,

View File

@@ -0,0 +1,238 @@
use anyhow::Context;
use bytes::Bytes;
use futures_util::{
SinkExt, StreamExt, TryStreamExt,
stream::{SplitSink, SplitStream},
};
use tokio::net::TcpStream;
use tokio_tungstenite::{
MaybeTlsStream, WebSocketStream, tungstenite,
};
use crate::{
KomodoClient,
api::terminal::{ConnectTerminalQuery, InitTerminal},
entities::terminal::{
TerminalResizeMessage, TerminalStdinMessage, TerminalTarget,
},
};
impl KomodoClient {
  /// Opens a terminal websocket for the target / terminal
  /// described by `query` (serialized into the url query string).
  pub async fn connect_terminal(
    &self,
    query: &ConnectTerminalQuery,
  ) -> anyhow::Result<TerminalWebsocket> {
    self
      .connect_login_user_websocket(
        "/terminal",
        Some(&serde_qs::to_string(query)?),
      )
      .await
      .map(TerminalWebsocket)
  }

  /// Convenience wrapper around [connect_terminal][Self::connect_terminal]
  /// for a Server target.
  pub async fn connect_server_terminal(
    &self,
    server: String,
    terminal: Option<String>,
    init: Option<InitTerminal>,
  ) -> anyhow::Result<TerminalWebsocket> {
    self
      .connect_terminal(&ConnectTerminalQuery {
        target: TerminalTarget::Server {
          server: Some(server),
        },
        terminal,
        init,
      })
      .await
  }

  /// Convenience wrapper around [connect_terminal][Self::connect_terminal]
  /// for a Container target (container on a specific server).
  pub async fn connect_container_terminal(
    &self,
    server: String,
    container: String,
    terminal: Option<String>,
    init: Option<InitTerminal>,
  ) -> anyhow::Result<TerminalWebsocket> {
    self
      .connect_terminal(&ConnectTerminalQuery {
        target: TerminalTarget::Container { server, container },
        terminal,
        init,
      })
      .await
  }

  /// Convenience wrapper around [connect_terminal][Self::connect_terminal]
  /// for a Stack service target.
  pub async fn connect_stack_service_terminal(
    &self,
    stack: String,
    service: String,
    terminal: Option<String>,
    init: Option<InitTerminal>,
  ) -> anyhow::Result<TerminalWebsocket> {
    self
      .connect_terminal(&ConnectTerminalQuery {
        target: TerminalTarget::Stack {
          stack,
          service: Some(service),
        },
        terminal,
        init,
      })
      .await
  }

  /// Convenience wrapper around [connect_terminal][Self::connect_terminal]
  /// for a Deployment target.
  pub async fn connect_deployment_terminal(
    &self,
    deployment: String,
    terminal: Option<String>,
    init: Option<InitTerminal>,
  ) -> anyhow::Result<TerminalWebsocket> {
    self
      .connect_terminal(&ConnectTerminalQuery {
        target: TerminalTarget::Deployment { deployment },
        terminal,
        init,
      })
      .await
  }
}
/// The underlying (possibly TLS) websocket stream type.
pub type TerminalWebsocketInner =
  WebSocketStream<MaybeTlsStream<TcpStream>>;

/// A connected terminal websocket, wrapping the raw stream with
/// terminal-protocol send / receive helpers.
pub struct TerminalWebsocket(TerminalWebsocketInner);

impl TerminalWebsocket {
  /// Unwraps the raw websocket stream.
  pub fn into_inner(self) -> TerminalWebsocketInner {
    self.0
  }

  /// Splits into independently usable write / read halves.
  pub fn split(
    self,
  ) -> (TerminalWebsocketSink, TerminalWebsocketStream) {
    let (write, read) = self.0.split();
    (TerminalWebsocketSink(write), TerminalWebsocketStream(read))
  }

  /// Encodes a stdin message (payload + variant byte) and sends it
  /// as a binary websocket message.
  pub async fn send_stdin_message(
    &mut self,
    message: TerminalStdinMessage,
  ) -> anyhow::Result<()> {
    let message = message.into_terminal_message()?.into_ws_message();
    self
      .0
      .send(message)
      .await
      .context("Failed to forward stdin message")
  }

  /// Forwards raw bytes to the PTY stdin.
  pub async fn send_stdin_bytes(
    &mut self,
    bytes: Vec<u8>,
  ) -> anyhow::Result<()> {
    self
      .send_stdin_message(TerminalStdinMessage::Forward(bytes))
      .await
  }

  /// Sends a PTY resize control message.
  // NOTE(review): despite the name, this takes a structured
  // TerminalResizeMessage, not raw bytes.
  pub async fn send_resize_bytes(
    &mut self,
    resize: TerminalResizeMessage,
  ) -> anyhow::Result<()> {
    self
      .send_stdin_message(TerminalStdinMessage::Resize(resize))
      .await
  }

  /// Receives the next chunk of terminal output.
  /// Returns Ok(None) once the websocket closes.
  /// Text frames are converted to bytes; Ping / Pong / Frame
  /// messages are skipped.
  pub async fn receive_stdout(
    &mut self,
  ) -> anyhow::Result<Option<Bytes>> {
    loop {
      match self.0.try_next().await.context("Websocket read error")? {
        Some(tungstenite::Message::Binary(bytes)) => {
          return Ok(Some(bytes));
        }
        Some(tungstenite::Message::Text(text)) => {
          return Ok(Some(text.into()));
        }
        Some(tungstenite::Message::Close(_)) | None => {
          return Ok(None);
        }
        // Can ignore these message types
        Some(tungstenite::Message::Ping(_))
        | Some(tungstenite::Message::Pong(_))
        | Some(tungstenite::Message::Frame(_)) => continue,
      }
    }
  }
}
/// The write half type produced by [TerminalWebsocket::split].
pub type TerminalWebsocketSinkInner = SplitSink<
  WebSocketStream<MaybeTlsStream<TcpStream>>,
  tungstenite::Message,
>;

/// Write half of a split [TerminalWebsocket].
/// Mirrors its send helpers.
pub struct TerminalWebsocketSink(TerminalWebsocketSinkInner);

impl TerminalWebsocketSink {
  /// Encodes a stdin message (payload + variant byte) and sends it
  /// as a binary websocket message.
  pub async fn send_stdin_message(
    &mut self,
    message: TerminalStdinMessage,
  ) -> anyhow::Result<()> {
    let message = message.into_terminal_message()?.into_ws_message();
    self
      .0
      .send(message)
      .await
      .context("Failed to forward stdin message")
  }

  /// Forwards raw bytes to the PTY stdin.
  pub async fn send_stdin_bytes(
    &mut self,
    bytes: Vec<u8>,
  ) -> anyhow::Result<()> {
    self
      .send_stdin_message(TerminalStdinMessage::Forward(bytes))
      .await
  }

  /// Sends a PTY resize control message.
  // NOTE(review): despite the name, this takes a structured
  // TerminalResizeMessage, not raw bytes.
  pub async fn send_resize_bytes(
    &mut self,
    resize: TerminalResizeMessage,
  ) -> anyhow::Result<()> {
    self
      .send_stdin_message(TerminalStdinMessage::Resize(resize))
      .await
  }
}
/// The read half type produced by [TerminalWebsocket::split].
pub type TerminalWebsocketStreamInner =
  SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>;

/// Read half of a split [TerminalWebsocket].
pub struct TerminalWebsocketStream(TerminalWebsocketStreamInner);

impl TerminalWebsocketStream {
  /// Receives the next chunk of terminal output.
  /// Returns Ok(None) once the websocket closes.
  /// Text frames are converted to bytes; Ping / Pong / Frame
  /// messages are skipped.
  pub async fn receive_stdout(
    &mut self,
  ) -> anyhow::Result<Option<Bytes>> {
    loop {
      match self.0.try_next().await.context("Websocket read error")? {
        Some(tungstenite::Message::Binary(bytes)) => {
          return Ok(Some(bytes));
        }
        Some(tungstenite::Message::Text(text)) => {
          return Ok(Some(text.into()));
        }
        Some(tungstenite::Message::Close(_)) | None => {
          return Ok(None);
        }
        // Can ignore these message types
        Some(tungstenite::Message::Ping(_))
        | Some(tungstenite::Message::Pong(_))
        | Some(tungstenite::Message::Frame(_)) => continue,
      }
    }
  }
}

View File

@@ -5,12 +5,7 @@ import {
UserResponses,
WriteResponses,
} from "./responses.js";
import {
terminal_methods,
ConnectExecQuery,
ExecuteExecBody,
TerminalCallbacks,
} from "./terminal.js";
import { terminal_methods, TerminalCallbacks } from "./terminal.js";
import {
AuthRequest,
BatchExecutionResponse,
@@ -28,7 +23,7 @@ import {
export * as Types from "./types.js";
export type { ConnectExecQuery, ExecuteExecBody, TerminalCallbacks };
export type { TerminalCallbacks };
export type InitOptions =
| { type: "jwt"; params: { jwt: string } }
@@ -331,20 +326,10 @@ export function KomodoClient(url: string, options: InitOptions) {
connect_terminal,
execute_terminal,
execute_terminal_stream,
connect_exec,
connect_attach,
connect_container_exec,
connect_container_attach,
execute_container_exec,
execute_container_exec_stream,
connect_deployment_exec,
connect_deployment_attach,
execute_deployment_exec,
execute_deployment_exec_stream,
connect_stack_exec,
connect_stack_attach,
execute_stack_exec,
execute_stack_exec_stream,
execute_server_terminal,
execute_container_terminal,
execute_stack_service_terminal,
execute_deployment_terminal,
} = terminal_methods(url, state);
return {
@@ -448,15 +433,24 @@ export function KomodoClient(url: string, options: InitOptions) {
*/
connect_terminal,
/**
* Executes a command on a given Server / terminal,
* and gives a callback to handle the output as it comes in.
* Executes a command on a given target / terminal,
* and gives callbacks to handle the output as it comes in.
*
* ```ts
* await komodo.execute_terminal(
* {
* server: "my-server",
* target: {
* type: "Server",
* params: {
* server: "my-server"
* }
* },
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* init: {
* command: "bash",
* recreate: Types.TerminalRecreateMode.Always
* }
* },
* {
* onLine: (line) => console.log(line),
@@ -467,7 +461,7 @@ export function KomodoClient(url: string, options: InitOptions) {
*/
execute_terminal,
/**
* Executes a command on a given Server / terminal,
* Executes a command on a given target / terminal,
* and returns a stream to process the output as it comes in.
*
* Note. The final line of the stream will usually be
@@ -479,9 +473,18 @@ export function KomodoClient(url: string, options: InitOptions) {
*
* ```ts
* const stream = await komodo.execute_terminal_stream({
* server: "my-server",
* target: {
* type: "Server",
* params: {
* server: "my-server"
* }
* },
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* init: {
* command: "bash",
* recreate: Types.TerminalRecreateMode.Always
* }
* });
*
* for await (const line of stream) {
@@ -491,42 +494,19 @@ export function KomodoClient(url: string, options: InitOptions) {
*/
execute_terminal_stream,
/**
* Subscribes to container exec io over websocket message,
* for use with xtermjs. Can connect to container on a Server,
* or associated with a Deployment or Stack.
* Terminal permission on connecting resource required.
*/
connect_exec,
/**
* Subscribes to container attach io over websocket message,
* for use with xtermjs. Can attach to container on a Server,
* or associated with a Deployment or Stack.
* Terminal permission on connecting resource required.
*/
connect_attach,
/**
* Subscribes to container exec io over websocket message,
* for use with xtermjs. Can connect to Container on a Server.
* Server Terminal permission required.
*/
connect_container_exec,
/**
* Subscribes to container attach io over websocket message,
* for use with xtermjs. Can attach to Container on a Server.
* Server Terminal permission required.
*/
connect_container_attach,
/**
* Executes a command on a given container,
* and gives a callback to handle the output as it comes in.
* Executes a command on a given Server / terminal,
* and gives callbacks to handle the output as it comes in.
*
* ```ts
* await komodo.execute_container_exec(
* await komodo.execute_server_terminal(
* {
* server: "my-server",
* container: "name",
* shell: "bash",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* init: {
* command: "bash",
* recreate: Types.TerminalRecreateMode.Always
* }
* },
* {
* onLine: (line) => console.log(line),
@@ -535,54 +515,22 @@ export function KomodoClient(url: string, options: InitOptions) {
* );
* ```
*/
execute_container_exec,
execute_server_terminal,
/**
* Executes a command on a given container,
* and returns a stream to process the output as it comes in.
*
* Note. The final line of the stream will usually be
* `__KOMODO_EXIT_CODE__:0`. The number
* is the exit code of the command.
*
* If this line is NOT present, it means the stream
* was terminated early, ie like running `exit`.
* Executes a command on a given Server / Container / terminal,
* and gives callbacks to handle the output as it comes in.
*
* ```ts
* const stream = await komodo.execute_container_exec_stream({
* server: "my-server",
* container: "name",
* shell: "bash",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* });
*
* for await (const line of stream) {
* console.log(line);
* }
* ```
*/
execute_container_exec_stream,
/**
* Subscribes to deployment container exec io over websocket message,
* for use with xtermjs. Can connect to Deployment container.
* Deployment Terminal permission required.
*/
connect_deployment_exec,
/**
* Subscribes to deployment container attach io over websocket message,
* for use with xtermjs. Can attach to Deployment container.
* Deployment Terminal permission required.
*/
connect_deployment_attach,
/**
* Executes a command on a given deployment container,
* and gives a callback to handle the output as it comes in.
*
* ```ts
* await komodo.execute_deployment_exec(
* await komodo.execute_container_terminal(
* {
* deployment: "my-deployment",
* shell: "bash",
* server: "my-server",
* container: "my-container",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* init: {
* command: "bash",
* recreate: Types.TerminalRecreateMode.Always
* }
* },
* {
* onLine: (line) => console.log(line),
@@ -591,54 +539,22 @@ export function KomodoClient(url: string, options: InitOptions) {
* );
* ```
*/
execute_deployment_exec,
execute_container_terminal,
/**
* Executes a command on a given deployment container,
* and returns a stream to process the output as it comes in.
*
* Note. The final line of the stream will usually be
* `__KOMODO_EXIT_CODE__:0`. The number
* is the exit code of the command.
*
* If this line is NOT present, it means the stream
* was terminated early, ie like running `exit`.
* Executes a command on a given Stack / service / terminal,
* and gives callbacks to handle the output as it comes in.
*
* ```ts
* const stream = await komodo.execute_deployment_exec_stream({
* deployment: "my-deployment",
* shell: "bash",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* });
*
* for await (const line of stream) {
* console.log(line);
* }
* ```
*/
execute_deployment_exec_stream,
/**
* Subscribes to container exec io over websocket message,
* for use with xtermjs. Can connect to Stack service container.
* Stack Terminal permission required.
*/
connect_stack_exec,
/**
* Subscribes to container attach io over websocket message,
* for use with xtermjs. Can attach to Stack service container.
* Stack Terminal permission required.
*/
connect_stack_attach,
/**
* Executes a command on a given stack service container,
* and gives a callback to handle the output as it comes in.
*
* ```ts
* await komodo.execute_stack_exec(
* await komodo.execute_stack_service_terminal(
* {
* stack: "my-stack",
* service: "database"
* shell: "bash",
* service: "my-service",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* init: {
* command: "bash",
* recreate: Types.TerminalRecreateMode.Always
* }
* },
* {
* onLine: (line) => console.log(line),
@@ -647,31 +563,29 @@ export function KomodoClient(url: string, options: InitOptions) {
* );
* ```
*/
execute_stack_exec,
execute_stack_service_terminal,
/**
* Executes a command on a given stack service container,
* and returns a stream to process the output as it comes in.
*
* Note. The final line of the stream will usually be
* `__KOMODO_EXIT_CODE__:0`. The number
* is the exit code of the command.
*
* If this line is NOT present, it means the stream
* was terminated early, ie like running `exit`.
* Executes a command on a given Deployment / terminal,
* and gives callbacks to handle the output as it comes in.
*
* ```ts
* const stream = await komodo.execute_stack_exec_stream({
* stack: "my-stack",
* service: "service1",
* shell: "bash",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* });
*
* for await (const line of stream) {
* console.log(line);
* }
* await komodo.execute_deployment_terminal(
* {
 * deployment: "my-deployment",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* init: {
* command: "bash",
* recreate: Types.TerminalRecreateMode.Always
* }
* },
* {
* onLine: (line) => console.log(line),
* onFinish: (code) => console.log("Finished:", code),
* }
* );
* ```
*/
execute_stack_exec_stream,
execute_deployment_terminal,
};
}

View File

@@ -63,7 +63,6 @@ export type ReadResponses = {
ListServers: Types.ListServersResponse;
ListFullServers: Types.ListFullServersResponse;
ListTerminals: Types.ListTerminalsResponse;
ListAllTerminals: Types.ListAllTerminalsResponse;
// ==== DOCKER ====
GetDockerContainersSummary: Types.GetDockerContainersSummaryResponse;
@@ -217,10 +216,6 @@ export type WriteResponses = {
UpdateServer: Types.Server;
RenameServer: Types.Update;
CreateNetwork: Types.Update;
CreateTerminal: Types.NoData;
DeleteTerminal: Types.NoData;
DeleteAllTerminals: Types.NoData;
BatchDeleteAllTerminals: Types.NoData;
UpdateServerPublicKey: Types.Update;
RotateServerKeys: Types.Update;
@@ -296,6 +291,12 @@ export type WriteResponses = {
WriteSyncFileContents: Types.Update;
RefreshResourceSyncPending: Types.ResourceSync;
// ==== TERMINAL ====
CreateTerminal: Types.NoData;
DeleteTerminal: Types.NoData;
DeleteAllTerminals: Types.NoData;
BatchDeleteAllTerminals: Types.NoData;
// ==== TAG ====
CreateTag: Types.Tag;
DeleteTag: Types.Tag;

View File

@@ -1,16 +1,9 @@
import { ClientState } from "./lib";
import { ClientState, Types } from "./lib";
import {
ConnectContainerAttachQuery,
ConnectContainerExecQuery,
ConnectDeploymentAttachQuery,
ConnectDeploymentExecQuery,
ConnectStackAttachQuery,
ConnectStackExecQuery,
ConnectTerminalQuery,
ExecuteContainerExecBody,
ExecuteDeploymentExecBody,
ExecuteStackExecBody,
ExecuteTerminalBody,
InitTerminal,
TerminalTarget,
WsLoginMessage,
} from "./types";
@@ -21,48 +14,6 @@ export type TerminalCallbacks = {
on_close?: () => void;
};
export type ConnectExecQuery =
| {
type: "container";
query: ConnectContainerExecQuery;
}
| {
type: "deployment";
query: ConnectDeploymentExecQuery;
}
| {
type: "stack";
query: ConnectStackExecQuery;
};
export type ConnectAttachQuery =
| {
type: "container";
query: ConnectContainerAttachQuery;
}
| {
type: "deployment";
query: ConnectDeploymentAttachQuery;
}
| {
type: "stack";
query: ConnectStackAttachQuery;
};
export type ExecuteExecBody =
| {
type: "container";
body: ExecuteContainerExecBody;
}
| {
type: "deployment";
body: ExecuteDeploymentExecBody;
}
| {
type: "stack";
body: ExecuteStackExecBody;
};
export type ExecuteCallbacks = {
onLine?: (line: string) => void | Promise<void>;
onFinish?: (code: string) => void | Promise<void>;
@@ -70,7 +21,7 @@ export type ExecuteCallbacks = {
export const terminal_methods = (url: string, state: ClientState) => {
const connect_terminal = ({
query,
query: { target, terminal, init },
on_message,
on_login,
on_open,
@@ -78,9 +29,19 @@ export const terminal_methods = (url: string, state: ClientState) => {
}: {
query: ConnectTerminalQuery;
} & TerminalCallbacks) => {
const url_query = new URLSearchParams(
query as any as Record<string, string>
).toString();
let url_query = connect_terminal_target_query(target);
if (terminal) {
url_query += `&terminal=${terminal}`;
}
if (init?.command) {
url_query += `&init[command]=${init.command}`;
}
if (init?.recreate) {
url_query += `&init[recreate]=${init.recreate}`;
}
if (init?.mode) {
url_query += `&init[mode]=${init.mode}`;
}
const ws = new WebSocket(
url.replace("http", "ws") + "/ws/terminal?" + url_query
);
@@ -140,200 +101,6 @@ export const terminal_methods = (url: string, state: ClientState) => {
const execute_terminal_stream = (request: ExecuteTerminalBody) =>
execute_stream("/terminal/execute", request);
const connect_container_exec = ({
query,
...callbacks
}: {
query: ConnectContainerExecQuery;
} & TerminalCallbacks) =>
connect_exec({ query: { type: "container", query }, ...callbacks });
const connect_deployment_exec = ({
query,
...callbacks
}: {
query: ConnectDeploymentExecQuery;
} & TerminalCallbacks) =>
connect_exec({ query: { type: "deployment", query }, ...callbacks });
const connect_stack_exec = ({
query,
...callbacks
}: {
query: ConnectStackExecQuery;
} & TerminalCallbacks) =>
connect_exec({ query: { type: "stack", query }, ...callbacks });
const connect_exec = ({
query: { type, query },
on_message,
on_login,
on_open,
on_close,
}: {
query: ConnectExecQuery;
} & TerminalCallbacks) => {
const url_query = new URLSearchParams(
query as any as Record<string, string>
).toString();
const ws = new WebSocket(
url.replace("http", "ws") + `/ws/${type}/terminal?` + url_query
);
// Handle login on websocket open
ws.onopen = () => {
const login_msg: WsLoginMessage = state.jwt
? {
type: "Jwt",
params: {
jwt: state.jwt,
},
}
: {
type: "ApiKeys",
params: {
key: state.key!,
secret: state.secret!,
},
};
ws.send(JSON.stringify(login_msg));
on_open?.();
};
ws.onmessage = (e) => {
if (e.data == "LOGGED_IN") {
ws.binaryType = "arraybuffer";
ws.onmessage = (e) => on_message?.(e);
on_login?.();
return;
} else {
on_message?.(e);
}
};
ws.onclose = () => on_close?.();
return ws;
};
const connect_container_attach = ({
query,
...callbacks
}: {
query: ConnectContainerAttachQuery;
} & TerminalCallbacks) =>
connect_attach({ query: { type: "container", query }, ...callbacks });
const connect_deployment_attach = ({
query,
...callbacks
}: {
query: ConnectDeploymentAttachQuery;
} & TerminalCallbacks) =>
connect_attach({ query: { type: "deployment", query }, ...callbacks });
const connect_stack_attach = ({
query,
...callbacks
}: {
query: ConnectStackAttachQuery;
} & TerminalCallbacks) =>
connect_attach({ query: { type: "stack", query }, ...callbacks });
const connect_attach = ({
query: { type, query },
on_message,
on_login,
on_open,
on_close,
}: {
query: ConnectAttachQuery;
} & TerminalCallbacks) => {
const url_query = new URLSearchParams(
query as any as Record<string, string>
).toString();
const ws = new WebSocket(
url.replace("http", "ws") + `/ws/${type}/terminal/attach?` + url_query
);
// Handle login on websocket open
ws.onopen = () => {
const login_msg: WsLoginMessage = state.jwt
? {
type: "Jwt",
params: {
jwt: state.jwt,
},
}
: {
type: "ApiKeys",
params: {
key: state.key!,
secret: state.secret!,
},
};
ws.send(JSON.stringify(login_msg));
on_open?.();
};
ws.onmessage = (e) => {
if (e.data == "LOGGED_IN") {
ws.binaryType = "arraybuffer";
ws.onmessage = (e) => on_message?.(e);
on_login?.();
return;
} else {
on_message?.(e);
}
};
ws.onclose = () => on_close?.();
return ws;
};
const execute_container_exec = (
body: ExecuteContainerExecBody,
callbacks?: ExecuteCallbacks
) => execute_exec({ type: "container", body }, callbacks);
const execute_deployment_exec = (
body: ExecuteDeploymentExecBody,
callbacks?: ExecuteCallbacks
) => execute_exec({ type: "deployment", body }, callbacks);
const execute_stack_exec = (
body: ExecuteStackExecBody,
callbacks?: ExecuteCallbacks
) => execute_exec({ type: "stack", body }, callbacks);
const execute_exec = async (
request: ExecuteExecBody,
callbacks?: ExecuteCallbacks
) => {
const stream = await execute_exec_stream(request);
for await (const line of stream) {
if (line.startsWith("__KOMODO_EXIT_CODE")) {
await callbacks?.onFinish?.(line.split(":")[1]);
return;
} else {
await callbacks?.onLine?.(line);
}
}
// This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
await callbacks?.onFinish?.("Early exit without code");
};
const execute_container_exec_stream = (body: ExecuteContainerExecBody) =>
execute_exec_stream({ type: "container", body });
const execute_deployment_exec_stream = (body: ExecuteDeploymentExecBody) =>
execute_exec_stream({ type: "deployment", body });
const execute_stack_exec_stream = (body: ExecuteStackExecBody) =>
execute_exec_stream({ type: "stack", body });
const execute_exec_stream = (request: ExecuteExecBody) =>
execute_stream(`/terminal/execute/${request.type}`, request.body);
const execute_stream = (path: string, request: any) =>
new Promise<AsyncIterable<string>>(async (res, rej) => {
try {
@@ -408,23 +175,134 @@ export const terminal_methods = (url: string, state: ClientState) => {
}
});
const execute_server_terminal = (
{
server,
terminal,
command,
init,
}: {
server: string;
terminal?: string;
command: string;
init?: InitTerminal;
},
callbacks?: ExecuteCallbacks
) =>
execute_terminal(
{
target: { type: "Server", params: { server } },
terminal,
command,
init,
},
callbacks
);
const execute_container_terminal = async (
{
server,
container,
terminal,
command,
init,
}: {
server: string;
container: string;
terminal?: string;
command: string;
init?: InitTerminal;
},
callbacks?: ExecuteCallbacks
) =>
execute_terminal(
{
target: { type: "Container", params: { server, container } },
terminal,
command,
init,
},
callbacks
);
const execute_stack_service_terminal = async (
  {
    stack,
    service,
    terminal,
    command,
    init,
  }: {
    stack: string;
    service: string;
    terminal?: string;
    command: string;
    init?: InitTerminal;
  },
  callbacks?: ExecuteCallbacks
) => {
  // Convenience wrapper around execute_terminal for a Stack service target.
  const request = {
    target: { type: "Stack" as const, params: { stack, service } },
    terminal,
    command,
    init,
  };
  return execute_terminal(request, callbacks);
};
const execute_deployment_terminal = async (
  {
    deployment,
    terminal,
    command,
    init,
  }: {
    deployment: string;
    terminal?: string;
    command: string;
    init?: InitTerminal;
  },
  callbacks?: ExecuteCallbacks
) => {
  // Convenience wrapper around execute_terminal for a Deployment target.
  const request = {
    target: { type: "Deployment" as const, params: { deployment } },
    terminal,
    command,
    init,
  };
  return execute_terminal(request, callbacks);
};
return {
connect_terminal,
execute_terminal,
execute_terminal_stream,
connect_exec,
connect_attach,
connect_container_exec,
connect_container_attach,
execute_container_exec,
execute_container_exec_stream,
connect_deployment_exec,
connect_deployment_attach,
execute_deployment_exec,
execute_deployment_exec_stream,
connect_stack_exec,
connect_stack_attach,
execute_stack_exec,
execute_stack_exec_stream,
// Convenience methods
execute_server_terminal,
execute_container_terminal,
execute_stack_service_terminal,
execute_deployment_terminal,
};
};
/**
 * Serialize a TerminalTarget into websocket query parameters, eg.
 * `target[type]=Server&target[params][server]=my-server`.
 *
 * Values are URI-encoded so resource names containing `&`, `=`, `#`,
 * spaces, etc. cannot corrupt the query string. `String()` coercion
 * preserves the original template-literal behavior for optional params
 * (an absent value still serializes as the text "undefined").
 */
const connect_terminal_target_query = (target: TerminalTarget) => {
  const enc = (value: unknown) => encodeURIComponent(String(value));
  const base = `target[type]=${target.type}&`;
  switch (target.type) {
    case "Server":
      return base + `target[params][server]=${enc(target.params.server)}`;
    case "Container":
      return (
        base +
        `target[params][server]=${enc(target.params.server)}&target[params][container]=${enc(target.params.container)}`
      );
    case "Stack":
      return (
        base +
        `target[params][stack]=${enc(target.params.stack)}&target[params][service]=${enc(target.params.service)}`
      );
    case "Deployment":
      return base + `target[params][deployment]=${enc(target.params.deployment)}`;
  }
};

View File

@@ -3554,27 +3554,6 @@ export interface ContainerListItem {
export type ListAllDockerContainersResponse = ContainerListItem[];
/**
* Info about an active terminal on a server.
* Retrieve with [ListAllTerminals][crate::api::read::server::ListAllTerminals].
*/
export interface TerminalInfoWithServer {
/** The server id. */
server_id: string;
/** The server name. */
server_name: string;
/** The name of the terminal. */
name: string;
/** The root program / args of the pty */
command: string;
/** The size of the terminal history in memory. */
stored_size_kb: number;
/** When the Terminal was created in unix milliseconds. */
created_at: I64;
}
export type ListAllTerminalsResponse = TerminalInfoWithServer[];
/** An api key used to authenticate requests via request headers. */
export interface ApiKey {
/** Unique key associated with secret */
@@ -4110,22 +4089,43 @@ export type ListSystemProcessesResponse = SystemProcess[];
export type ListTagsResponse = Tag[];
export type TerminalTarget =
| { type: "Server", params: {
server?: string;
}}
| { type: "Container", params: {
server: string;
container: string;
}}
| { type: "Stack", params: {
stack: string;
service?: string;
}}
| { type: "Deployment", params: {
deployment: string;
}};
/**
* Info about an active terminal on a server.
* Represents an active terminal on a server.
* Retrieve with [ListTerminals][crate::api::read::server::ListTerminals].
*/
export interface TerminalInfo {
export interface Terminal {
/** The name of the terminal. */
name: string;
/** The root program / args of the pty */
/** The target resource of the Terminal. */
target: TerminalTarget;
/** The command used to init the shell. */
command: string;
/** The size of the terminal history in memory. */
stored_size_kb: number;
/** When the Terminal was created. */
/**
* When the Terminal was created.
* Unix timestamp milliseconds.
*/
created_at: I64;
}
export type ListTerminalsResponse = TerminalInfo[];
export type ListTerminalsResponse = Terminal[];
export type ListUserGroupsResponse = UserGroup[];
@@ -4383,7 +4383,7 @@ export interface BatchCloneRepo {
* Response: [NoData]
*/
export interface BatchDeleteAllTerminals {
/** optional structured query to filter servers. */
/** Optional structured query to filter servers. */
query?: ServerQuery;
}
@@ -4655,7 +4655,7 @@ export interface CommitSync {
export enum TerminalRecreateMode {
/**
* Never kill the old terminal if it already exists.
* If the command is different, returns error.
* If the init command is different, returns error.
*/
Never = "Never",
/** Always kill the old terminal and create new one */
@@ -4664,109 +4664,50 @@ export enum TerminalRecreateMode {
DifferentCommand = "DifferentCommand",
}
/** Query to connect to a container attach session (interactive shell over websocket) on the given server. */
export interface ConnectContainerAttachQuery {
/** Server Id or name */
server: string;
/** The container name */
container: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
/** Specify the container terminal mode (exec or attach) */
export enum ContainerTerminalMode {
Exec = "exec",
Attach = "attach",
}
/** Query to connect to a container exec session (interactive shell over websocket) on the given server. */
export interface ConnectContainerExecQuery {
/** Server Id or name */
server: string;
/** The container name */
container: string;
/** The shell to use (eg. `sh` or `bash`) */
shell: string;
/** Args to init the Terminal if needed. */
export interface InitTerminal {
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
* The shell command (eg `bash`) to init the shell.
*
* Default:
* - Server: Configured on each Periphery
* - Container: `sh`
*/
command?: string;
/** Default: `Never` */
recreate?: TerminalRecreateMode;
/**
* Only relevant for container-type terminals.
* Specify the container terminal mode (`exec` or `attach`).
* Default: `exec`
*/
mode?: ContainerTerminalMode;
}
/**
* Query to connect to a container attach session (interactive shell over websocket) on the given Deployment.
* This call will use access to the Deployment Terminal to permission the call.
*/
export interface ConnectDeploymentAttachQuery {
/** Deployment Id or name */
deployment: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
}
/**
* Query to connect to a container exec session (interactive shell over websocket) on the given Deployment.
* This call will use access to the Deployment Terminal to permission the call.
*/
export interface ConnectDeploymentExecQuery {
/** Deployment Id or name */
deployment: string;
/** The shell to use (eg. `sh` or `bash`) */
shell: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
}
/**
* Query to connect to a container attach session (interactive shell over websocket) on the given Stack / service.
* This call will use access to the Stack Terminal to permission the call.
*/
export interface ConnectStackAttachQuery {
/** Stack Id or name */
stack: string;
/** The service name to attach to */
service: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
}
/**
* Query to connect to a container exec session (interactive shell over websocket) on the given Stack / service.
* This call will use access to the Stack Terminal to permission the call.
*/
export interface ConnectStackExecQuery {
/** Stack Id or name */
stack: string;
/** The service name to connect to */
service: string;
/** The shell to use (eg. `sh` or `bash`) */
shell: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
}
/** Query to connect to a terminal (interactive shell over websocket) on the given server. */
/** Connect to a Terminal. */
export interface ConnectTerminalQuery {
/** Server Id or name */
server: string;
/** The target to create terminal for. */
target: TerminalTarget;
/**
* Each periphery can keep multiple terminals open.
* If a terminals with the specified name does not exist,
* the call will fail.
* Create a terminal using [CreateTerminal][super::write::server::CreateTerminal]
* Terminal name to connect to.
* If it may not exist yet, also pass 'init' params
* to include initialization.
* Default: Depends on target.
*/
terminal: string;
terminal?: string;
/**
* Pass to init the terminal session
* for when the terminal doesn't already exist.
*
* Example: ?...(query)&init[command]=bash&init[recreate]=DifferentCommand
*/
init?: InitTerminal;
}
/** Blkio stats entry. This type is Linux-specific and omitted for Windows containers. */
@@ -5330,23 +5271,32 @@ export interface CreateTag {
}
/**
* Create a terminal on the server.
* Create a Terminal.
* Requires minimum Read + Terminal permission on the target Resource.
* Response: [NoData]
*/
export interface CreateTerminal {
/** Server Id or name */
server: string;
/** The name of the terminal on the server to create. */
/** A name for the Terminal session. */
name: string;
/** The target to create terminal for */
target: TerminalTarget;
/**
* The shell command (eg `bash`) to init the shell.
*
* This can also include args:
* `docker exec -it container sh`
*
* Default: Configured on each Periphery
* Default:
* - Server: Configured on each Periphery
* - ContainerExec: `sh`
* - Attach: unused
*/
command?: string;
/**
* For container terminals, choose 'exec' or 'attach'.
*
* Default
* - Server: ignored
* - Container / Stack / Deployment: `exec`
*/
mode?: ContainerTerminalMode;
/** Default: `Never` */
recreate?: TerminalRecreateMode;
}
@@ -5394,7 +5344,7 @@ export interface DeleteAlerter {
}
/**
* Delete all terminals on the server.
* Delete all Terminals on the Server.
* Response: [NoData]
*/
export interface DeleteAllTerminals {
@@ -5553,13 +5503,13 @@ export interface DeleteTag {
}
/**
* Delete a terminal on the server.
* Delete a terminal.
* Response: [NoData]
*/
export interface DeleteTerminal {
/** Server Id or name */
server: string;
/** The name of the terminal on the server to delete. */
/** Server / Container / Stack / Deployment */
target: TerminalTarget;
/** The name of the Terminal to delete. */
terminal: string;
}
@@ -5721,76 +5671,17 @@ export interface ExchangeForJwt {
token: string;
}
/** Execute a command in the given containers shell. */
export interface ExecuteContainerExecBody {
/** Server Id or name */
server: string;
/** The container name */
container: string;
/** The shell to use (eg. `sh` or `bash`) */
shell: string;
/** The command to execute. */
command: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
}
/** Execute a command in the given containers shell. */
export interface ExecuteDeploymentExecBody {
/** Deployment Id or name */
deployment: string;
/** The shell to use (eg. `sh` or `bash`) */
shell: string;
/** The command to execute. */
command: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
}
/** Execute a command in the given containers shell. */
export interface ExecuteStackExecBody {
/** Stack Id or name */
stack: string;
/** The service name to connect to */
service: string;
/** The shell to use (eg. `sh` or `bash`) */
shell: string;
/** The command to execute. */
command: string;
/**
* Specify the recreate behavior.
* Default is 'DifferentCommand'
*/
recreate?: TerminalRecreateMode;
}
/** Init a terminal on the server. */
export interface InitTerminal {
/**
* The shell command (eg `bash`) to init the shell.
*
* This can also include args:
* `docker exec -it container sh`
*
* Default: Configured on each Periphery
*/
command?: string;
/** Default: `Never` */
recreate?: TerminalRecreateMode;
}
/** Execute a terminal command on the given server. */
export interface ExecuteTerminalBody {
/** Server Id or name */
server: string;
/** The name of the terminal on the server to use to execute. */
terminal: string;
/** The target to create terminal for. */
target: TerminalTarget;
/**
* Terminal name to connect to.
* If it may not exist yet, also pass 'init' params
* to include initialization.
* Default: Depends on target.
*/
terminal?: string;
/** The command to execute. */
command: string;
/**
@@ -6747,20 +6638,8 @@ export interface ListAlertsResponse {
export interface ListAllDockerContainers {
/** Filter by server id or name. */
servers?: string[];
}
/**
* List the current terminals on specified server.
* Response: [ListAllTerminalsResponse].
*/
export interface ListAllTerminals {
/** optional structured query to filter servers. */
query?: ServerQuery;
/**
* Force a fresh call to Periphery for the list.
* Otherwise the response will be cached for 30s
*/
fresh?: boolean;
/** Filter by container name. */
containers?: string[];
}
/**
@@ -7119,17 +6998,14 @@ export interface ListTags {
}
/**
* List the current terminals on specified server.
* List Terminals.
* Response: [ListTerminalsResponse].
*/
export interface ListTerminals {
/** Id or name */
server: string;
/**
* Force a fresh call to Periphery for the list.
* Otherwise the response will be cached for 30s
*/
fresh?: boolean;
/** Filter the Terminals returned by the Target. */
target?: TerminalTarget;
/** Return results with resource names instead of ids. */
use_names?: boolean;
}
/**
@@ -8136,6 +8012,12 @@ export interface StopStack {
services?: string[];
}
/** JSON structure to send new terminal window dimensions */
export interface TerminalResizeMessage {
rows: number;
cols: number;
}
export interface TerminationSignalLabel {
signal: TerminationSignal;
label: string;
@@ -8634,11 +8516,6 @@ export type AuthRequest =
| { type: "ExchangeForJwt", params: ExchangeForJwt }
| { type: "GetUser", params: GetUser };
export enum ContainerTerminalMode {
Exec = "exec",
Attach = "attach",
}
/** Days of the week */
export enum DayOfWeek {
Monday = "Monday",
@@ -8841,7 +8718,6 @@ export type ReadRequest =
| { type: "ListServers", params: ListServers }
| { type: "ListFullServers", params: ListFullServers }
| { type: "ListTerminals", params: ListTerminals }
| { type: "ListAllTerminals", params: ListAllTerminals }
| { type: "GetDockerContainersSummary", params: GetDockerContainersSummary }
| { type: "ListAllDockerContainers", params: ListAllDockerContainers }
| { type: "ListDockerContainers", params: ListDockerContainers }
@@ -9012,10 +8888,6 @@ export type WriteRequest =
| { type: "UpdateServer", params: UpdateServer }
| { type: "RenameServer", params: RenameServer }
| { type: "CreateNetwork", params: CreateNetwork }
| { type: "CreateTerminal", params: CreateTerminal }
| { type: "DeleteTerminal", params: DeleteTerminal }
| { type: "DeleteAllTerminals", params: DeleteAllTerminals }
| { type: "BatchDeleteAllTerminals", params: BatchDeleteAllTerminals }
| { type: "UpdateServerPublicKey", params: UpdateServerPublicKey }
| { type: "RotateServerKeys", params: RotateServerKeys }
| { type: "CreateStack", params: CreateStack }
@@ -9072,6 +8944,10 @@ export type WriteRequest =
| { type: "WriteSyncFileContents", params: WriteSyncFileContents }
| { type: "CommitSync", params: CommitSync }
| { type: "RefreshResourceSyncPending", params: RefreshResourceSyncPending }
| { type: "CreateTerminal", params: CreateTerminal }
| { type: "DeleteTerminal", params: DeleteTerminal }
| { type: "DeleteAllTerminals", params: DeleteAllTerminals }
| { type: "BatchDeleteAllTerminals", params: BatchDeleteAllTerminals }
| { type: "CreateTag", params: CreateTag }
| { type: "DeleteTag", params: DeleteTag }
| { type: "RenameTag", params: RenameTag }

View File

@@ -1,6 +1,6 @@
use komodo_client::{
api::write::TerminalRecreateMode,
entities::{NoData, server::TerminalInfo},
use komodo_client::entities::{
NoData,
terminal::{Terminal, TerminalRecreateMode, TerminalTarget},
};
use resolver_api::Resolve;
use serde::{Deserialize, Serialize};
@@ -11,19 +11,18 @@ pub const START_OF_OUTPUT: &str = "__KOMODO_START_OF_OUTPUT__";
pub const END_OF_OUTPUT: &str = "__KOMODO_END_OF_OUTPUT__";
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(Vec<TerminalInfo>)]
#[response(Vec<Terminal>)]
#[error(anyhow::Error)]
pub struct ListTerminals {
/// If none, only includes non-container terminals.
/// if Some, only includes that containers terminals.
pub container: Option<String>,
/// Optionally restrict list to specific target.
pub target: Option<TerminalTarget>,
}
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(NoData)]
#[error(anyhow::Error)]
pub struct CreateTerminal {
/// The name of the terminal to create
pub struct CreateServerTerminal {
/// A name for the terminal session
pub name: String,
/// The shell command (eg `bash`) to init the shell.
///
@@ -32,6 +31,46 @@ pub struct CreateTerminal {
///
/// Default: Set in Periphery config.
pub command: Option<String>,
/// Specify the recreate behavior.
/// Default: `Never`
#[serde(default)]
pub recreate: TerminalRecreateMode,
}
//
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(NoData)]
#[error(anyhow::Error)]
pub struct CreateContainerExecTerminal {
/// A name for the terminal session
pub name: String,
/// The target for the terminal sessions (Container, Stack, Deployment).
pub target: TerminalTarget,
/// The name of the container to connect to
pub container: String,
/// The command to init shell inside container.
/// Default: `sh`
pub command: Option<String>,
/// Specify the recreate behavior.
/// Default: `Never`
#[serde(default)]
pub recreate: TerminalRecreateMode,
}
//
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(NoData)]
#[error(anyhow::Error)]
pub struct CreateContainerAttachTerminal {
/// A name for the terminal session
pub name: String,
/// The target for the terminal sessions (Container, Stack, Deployment).
pub target: TerminalTarget,
/// The name of the container to attach to
pub container: String,
/// Specify the recreate behavior.
/// Default: `Never`
#[serde(default)]
pub recreate: TerminalRecreateMode,
@@ -45,38 +84,8 @@ pub struct CreateTerminal {
pub struct ConnectTerminal {
/// The name of the terminal to connect to
pub terminal: String,
}
//
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(Uuid)]
#[error(anyhow::Error)]
pub struct ConnectContainerExec {
/// The name of the container to connect to.
pub container: String,
/// The shell to start inside container.
/// Default: `sh`
#[serde(default = "default_container_shell")]
pub shell: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
#[serde(default = "default_container_recreate_mode")]
pub recreate: TerminalRecreateMode,
}
//
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(Uuid)]
#[error(anyhow::Error)]
pub struct ConnectContainerAttach {
/// The name of the container to attach to.
pub container: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
#[serde(default = "default_container_recreate_mode")]
pub recreate: TerminalRecreateMode,
/// The target for the terminal session
pub target: TerminalTarget,
}
//
@@ -96,8 +105,10 @@ pub struct DisconnectTerminal {
#[response(NoData)]
#[error(anyhow::Error)]
pub struct DeleteTerminal {
/// The name of the terminal to delete
/// The name of the terminal to delete.
pub terminal: String,
/// The terminal target.
pub target: TerminalTarget,
}
//
@@ -116,34 +127,8 @@ pub struct DeleteAllTerminals {}
pub struct ExecuteTerminal {
/// Specify the terminal to execute the command on.
pub terminal: String,
/// The terminal target.
pub target: TerminalTarget,
/// The command to execute.
pub command: String,
}
//
#[derive(Serialize, Deserialize, Debug, Clone, Resolve)]
#[response(Uuid)]
#[error(anyhow::Error)]
pub struct ExecuteContainerExec {
/// The name of the container to execute command in.
pub container: String,
/// The shell to start inside container.
/// Default: `sh`
#[serde(default = "default_container_shell")]
pub shell: String,
/// The command to execute.
pub command: String,
/// Specify the recreate behavior.
/// Default is 'DifferentCommand'
#[serde(default = "default_container_recreate_mode")]
pub recreate: TerminalRecreateMode,
}
fn default_container_shell() -> String {
String::from("sh")
}
fn default_container_recreate_mode() -> TerminalRecreateMode {
TerminalRecreateMode::DifferentCommand
}

View File

@@ -85,7 +85,7 @@ impl TransportMessageVariant {
3 => Terminal,
other => {
return Err(anyhow!(
"Got unrecognized MessageVariant byte: {other}"
"Got unrecognized TransportMessageVariant byte: {other}"
));
}
};

View File

@@ -7,8 +7,8 @@
## Additionally, they are passed to both Komodo Core and Komodo Periphery with `env_file: ./compose.env`,
## so you can pass any additional environment variables to Core / Periphery directly in this file as well.
## Stick to a specific version, or use `latest`
COMPOSE_KOMODO_IMAGE_TAG=latest
## Follows "major.minor.patch" semver.
COMPOSE_KOMODO_IMAGE_TAG="2"
## Store dated database backups on the host - https://komo.do/docs/setup/backup
COMPOSE_KOMODO_BACKUPS_PATH=/etc/komodo/backups

View File

@@ -41,7 +41,8 @@ services:
FERRETDB_POSTGRESQL_URL: postgres://${KOMODO_DATABASE_USERNAME}:${KOMODO_DATABASE_PASSWORD}@postgres:5432/postgres
core:
image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-2}
init: true
restart: unless-stopped
depends_on:
- ferretdb
@@ -66,7 +67,8 @@ services:
## or deploy the Periphery binary with systemd using
## https://github.com/moghtech/komodo/tree/main/scripts
periphery:
image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-2}
init: true
restart: unless-stopped
depends_on:
- core

View File

@@ -24,7 +24,8 @@ services:
MONGO_INITDB_ROOT_PASSWORD: ${KOMODO_DATABASE_PASSWORD}
core:
image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
image: ghcr.io/moghtech/komodo-core:${COMPOSE_KOMODO_IMAGE_TAG:-2}
init: true
restart: unless-stopped
depends_on:
- mongo
@@ -49,7 +50,8 @@ services:
## or deploy the Periphery binary with systemd using
## https://github.com/moghtech/komodo/tree/main/scripts
periphery:
image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-latest}
image: ghcr.io/moghtech/komodo-periphery:${COMPOSE_KOMODO_IMAGE_TAG:-2}
init: true
restart: unless-stopped
depends_on:
- core

Some files were not shown because too many files have changed in this diff Show More