mirror of
https://github.com/moghtech/komodo.git
synced 2026-03-10 15:56:18 -05:00
Compare commits
19 Commits
v1.19.0-de
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
34a9f8eb9e | ||
|
|
494d01aeed | ||
|
|
084e2fec23 | ||
|
|
98d72fc908 | ||
|
|
20ac04fae5 | ||
|
|
a65fd4dca7 | ||
|
|
0873104b5a | ||
|
|
9a7b6ebd51 | ||
|
|
a4153fa28b | ||
|
|
e732da3b05 | ||
|
|
75ffbd559b | ||
|
|
cae80b43e5 | ||
|
|
d924a8ace4 | ||
|
|
dcfad5dc4e | ||
|
|
134d1697e9 | ||
|
|
3094d0036a | ||
|
|
ee5fd55cdb | ||
|
|
0ca126ff23 | ||
|
|
2fa9d9ecce |
1041
Cargo.lock
generated
1041
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
50
Cargo.toml
50
Cargo.toml
@@ -8,13 +8,16 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.19.0-dev-9"
|
||||
version = "1.19.5"
|
||||
edition = "2024"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
repository = "https://github.com/moghtech/komodo"
|
||||
homepage = "https://komo.do"
|
||||
|
||||
[profile.release]
|
||||
strip = "debuginfo"
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
komodo_client = { path = "client/core/rs" }
|
||||
@@ -33,7 +36,7 @@ git = { path = "lib/git" }
|
||||
|
||||
# MOGH
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.5.0", default-features = false }
|
||||
serror = { version = "0.5.1", default-features = false }
|
||||
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
@@ -42,12 +45,12 @@ partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "2.0.2"
|
||||
resolver_api = "3.0.0"
|
||||
toml_pretty = "1.1.2"
|
||||
mungos = "3.2.1"
|
||||
toml_pretty = "1.2.0"
|
||||
mungos = "3.2.2"
|
||||
svi = "1.2.0"
|
||||
|
||||
# ASYNC
|
||||
reqwest = { version = "0.12.22", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
reqwest = { version = "0.12.23", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.47.1", features = ["full"] }
|
||||
tokio-util = { version = "0.7.16", features = ["io", "codec"] }
|
||||
tokio-stream = { version = "0.1.17", features = ["sync"] }
|
||||
@@ -59,45 +62,46 @@ arc-swap = "1.7.1"
|
||||
# SERVER
|
||||
tokio-tungstenite = { version = "0.27.0", features = ["rustls-tls-native-roots"] }
|
||||
axum-extra = { version = "0.10.1", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.4", features = ["fs", "cors"] }
|
||||
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
|
||||
|
||||
# SER/DE
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
indexmap = { version = "2.10.0", features = ["serde"] }
|
||||
indexmap = { version = "2.11.1", features = ["serde"] }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
bson = { version = "2.15.0" } # must keep in sync with mongodb version
|
||||
serde_yaml_ng = "0.10.0"
|
||||
serde_json = "1.0.142"
|
||||
serde_json = "1.0.145"
|
||||
serde_qs = "0.15.0"
|
||||
toml = "0.9.5"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.98"
|
||||
thiserror = "2.0.12"
|
||||
anyhow = "1.0.99"
|
||||
thiserror = "2.0.16"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry-otlp = { version = "0.30.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.30.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.19", features = ["json"] }
|
||||
tracing-subscriber = { version = "0.3.20", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.30.0"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
opentelemetry = "0.30.0"
|
||||
tracing = "0.1.41"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.43", features = ["derive"] }
|
||||
clap = { version = "4.5.47", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO / AUTH
|
||||
uuid = { version = "1.17.0", features = ["v4", "fast-rng", "serde"] }
|
||||
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "9.3.1", default-features = false }
|
||||
openidconnect = "4.0.1"
|
||||
urlencoding = "2.1.3"
|
||||
nom_pem = "4.0.0"
|
||||
bcrypt = "0.17.0"
|
||||
bcrypt = "0.17.1"
|
||||
base64 = "0.22.1"
|
||||
rustls = "0.23.31"
|
||||
hmac = "0.12.1"
|
||||
@@ -108,28 +112,28 @@ hex = "0.4.3"
|
||||
# SYSTEM
|
||||
portable-pty = "0.9.0"
|
||||
bollard = "0.19.2"
|
||||
sysinfo = "0.36.1"
|
||||
sysinfo = "0.37.0"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.8.4"
|
||||
aws-sdk-ec2 = "1.156.0"
|
||||
aws-credential-types = "1.2.5"
|
||||
aws-config = "1.8.6"
|
||||
aws-sdk-ec2 = "1.167.0"
|
||||
aws-credential-types = "1.2.6"
|
||||
|
||||
## CRON
|
||||
english-to-cron = "0.1.6"
|
||||
chrono-tz = "0.10.4"
|
||||
chrono = "0.4.41"
|
||||
chrono = "0.4.42"
|
||||
croner = "3.0.0"
|
||||
|
||||
# MISC
|
||||
async-compression = { version = "0.4.27", features = ["tokio", "gzip"] }
|
||||
async-compression = { version = "0.4.30", features = ["tokio", "gzip"] }
|
||||
derive_builder = "0.20.2"
|
||||
comfy-table = "7.1.4"
|
||||
comfy-table = "7.2.1"
|
||||
typeshare = "1.0.4"
|
||||
octorust = "0.10.0"
|
||||
dashmap = "6.1.0"
|
||||
wildcard = "0.3.0"
|
||||
colored = "3.0.0"
|
||||
regex = "1.11.1"
|
||||
regex = "1.11.2"
|
||||
bytes = "1.10.1"
|
||||
bson = "2.15.0"
|
||||
shell-escape = "0.1.5"
|
||||
@@ -1,7 +1,8 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
FROM rust:1.88.0-bullseye AS builder
|
||||
FROM rust:1.89.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -16,7 +17,8 @@ COPY ./bin/cli ./bin/cli
|
||||
RUN \
|
||||
cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_periphery --release && \
|
||||
cargo build -p komodo_cli --release
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
|
||||
36
bin/chef.binaries.Dockerfile
Normal file
36
bin/chef.binaries.Dockerfile
Normal file
@@ -0,0 +1,36 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
## Uses chef for dependency caching to help speed up back-to-back builds.
|
||||
|
||||
FROM lukemathwalker/cargo-chef:latest-rust-1.89.0-bullseye AS chef
|
||||
WORKDIR /builder
|
||||
|
||||
# Plan just the RECIPE to see if things have changed
|
||||
FROM chef AS planner
|
||||
COPY . .
|
||||
RUN cargo chef prepare --recipe-path recipe.json
|
||||
|
||||
FROM chef AS builder
|
||||
RUN cargo install cargo-strip
|
||||
COPY --from=planner /builder/recipe.json recipe.json
|
||||
# Build JUST dependencies - cached layer
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
# NOW copy again (this time into builder) and build app
|
||||
COPY . .
|
||||
RUN \
|
||||
cargo build --release --bin core && \
|
||||
cargo build --release --bin periphery && \
|
||||
cargo build --release --bin km && \
|
||||
cargo strip
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
@@ -1,4 +1,5 @@
|
||||
FROM rust:1.88.0-bullseye AS builder
|
||||
FROM rust:1.89.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -8,7 +9,7 @@ COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN cargo build -p komodo_cli --release
|
||||
RUN cargo build -p komodo_cli --release && cargo strip
|
||||
|
||||
# Copy binaries to distroless base
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
@@ -7,13 +7,13 @@ Can be used to move between MongoDB / FerretDB, or upgrade from FerretDB v1 to v
|
||||
services:
|
||||
|
||||
copy_database:
|
||||
image: ghcr.io/moghtech/komodo-util
|
||||
image: ghcr.io/moghtech/komodo-cli
|
||||
command: km database copy -y
|
||||
environment:
|
||||
MODE: CopyDatabase
|
||||
SOURCE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@source:27017
|
||||
SOURCE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@target:27017
|
||||
TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@source:27017
|
||||
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@target:27017
|
||||
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
|
||||
```
|
||||
|
||||
@@ -90,13 +90,13 @@ services:
|
||||
...(new database)
|
||||
|
||||
copy_database:
|
||||
image: ghcr.io/moghtech/komodo-util
|
||||
image: ghcr.io/moghtech/komodo-cli
|
||||
command: km database copy -y
|
||||
environment:
|
||||
MODE: CopyDatabase
|
||||
SOURCE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb:27017/${KOMODO_DATABASE_DB_NAME:-komodo}?authMechanism=PLAIN
|
||||
SOURCE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb2:27017
|
||||
TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb:27017/${KOMODO_DATABASE_DB_NAME:-komodo}?authMechanism=PLAIN
|
||||
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb2:27017
|
||||
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
|
||||
...(unchanged)
|
||||
```
|
||||
|
||||
@@ -14,15 +14,17 @@ use komodo_client::{
|
||||
config::cli::args::container::{
|
||||
Container, ContainerCommand, InspectContainer,
|
||||
},
|
||||
docker::container::{
|
||||
ContainerListItem, ContainerStateStatusEnum,
|
||||
docker::{
|
||||
self,
|
||||
container::{ContainerListItem, ContainerStateStatusEnum},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
command::{
|
||||
PrintTable, matches_wildcards, parse_wildcards, print_items,
|
||||
PrintTable, clamp_sha, matches_wildcards, parse_wildcards,
|
||||
print_items,
|
||||
},
|
||||
config::cli_config,
|
||||
};
|
||||
@@ -40,6 +42,7 @@ async fn list_containers(
|
||||
Container {
|
||||
all,
|
||||
down,
|
||||
links,
|
||||
reverse,
|
||||
containers: names,
|
||||
images,
|
||||
@@ -126,7 +129,7 @@ async fn list_containers(
|
||||
if *reverse {
|
||||
containers.reverse();
|
||||
}
|
||||
print_items(containers, *format)?;
|
||||
print_items(containers, *format, *links)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -196,38 +199,60 @@ pub async fn inspect_container(
|
||||
);
|
||||
}
|
||||
1 => {
|
||||
println!(
|
||||
"{}",
|
||||
serde_json::to_string_pretty(&containers[0])
|
||||
.context("Failed to serialize items to JSON")?
|
||||
);
|
||||
println!("{}", serialize_container(inspect, &containers[0])?);
|
||||
}
|
||||
_ => {
|
||||
println!(
|
||||
"{}",
|
||||
serde_json::to_string_pretty(&containers)
|
||||
.context("Failed to serialize items to JSON")?
|
||||
);
|
||||
let containers = containers
|
||||
.iter()
|
||||
.map(|c| serialize_container(inspect, c))
|
||||
.collect::<anyhow::Result<Vec<_>>>()?
|
||||
.join("\n");
|
||||
println!("{containers}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn serialize_container(
|
||||
inspect: &InspectContainer,
|
||||
container: &docker::container::Container,
|
||||
) -> anyhow::Result<String> {
|
||||
let res = if inspect.state {
|
||||
serde_json::to_string_pretty(&container.state)
|
||||
} else if inspect.mounts {
|
||||
serde_json::to_string_pretty(&container.mounts)
|
||||
} else if inspect.host_config {
|
||||
serde_json::to_string_pretty(&container.host_config)
|
||||
} else if inspect.config {
|
||||
serde_json::to_string_pretty(&container.config)
|
||||
} else if inspect.network_settings {
|
||||
serde_json::to_string_pretty(&container.network_settings)
|
||||
} else {
|
||||
serde_json::to_string_pretty(container)
|
||||
}
|
||||
.context("Failed to serialize items to JSON")?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
// (Option<Server Name>, Container)
|
||||
impl PrintTable for (Option<&'_ str>, ContainerListItem) {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&[
|
||||
"Container",
|
||||
"State",
|
||||
"Server",
|
||||
"Ports",
|
||||
"Networks",
|
||||
"Image",
|
||||
"Link",
|
||||
]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&[
|
||||
"Container",
|
||||
"State",
|
||||
"Server",
|
||||
"Ports",
|
||||
"Networks",
|
||||
"Image",
|
||||
"Link",
|
||||
]
|
||||
} else {
|
||||
&["Container", "State", "Server", "Ports", "Networks", "Image"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<Cell> {
|
||||
fn row(self, links: bool) -> Vec<Cell> {
|
||||
let color = match self.1.state {
|
||||
ContainerStateStatusEnum::Running => Color::Green,
|
||||
ContainerStateStatusEnum::Paused => Color::DarkYellow,
|
||||
@@ -257,6 +282,21 @@ impl PrintTable for (Option<&'_ str>, ContainerListItem) {
|
||||
} else {
|
||||
Cell::new(format!(":{}", ports.join(", :")))
|
||||
};
|
||||
|
||||
let image = self.1.image.as_deref().unwrap_or("Unknown");
|
||||
let mut res = vec![
|
||||
Cell::new(self.1.name.clone()).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.1.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.0.unwrap_or("Unknown")),
|
||||
ports,
|
||||
Cell::new(networks.join(", ")),
|
||||
Cell::new(clamp_sha(image)),
|
||||
];
|
||||
if !links {
|
||||
return res;
|
||||
}
|
||||
let link = if let Some(server_id) = self.1.server_id {
|
||||
format!(
|
||||
"{}/servers/{server_id}/container/{}",
|
||||
@@ -266,16 +306,7 @@ impl PrintTable for (Option<&'_ str>, ContainerListItem) {
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.1.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.1.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.0.unwrap_or("Unknown")),
|
||||
ports,
|
||||
Cell::new(networks.join(", ")),
|
||||
Cell::new(self.1.image.as_deref().unwrap_or("Unknown")),
|
||||
Cell::new(link),
|
||||
]
|
||||
res.push(Cell::new(link));
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
@@ -212,12 +212,24 @@ pub async fn handle(
|
||||
Execution::BatchDestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunStackService(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::TestAlerter(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::SendAlert(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::ClearRepoCache(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BackupCoreDatabase(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::GlobalAutoUpdate(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
@@ -458,14 +470,30 @@ pub async fn handle(
|
||||
Execution::BatchDestroyStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunStackService(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::TestAlerter(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::SendAlert(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::ClearRepoCache(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BackupCoreDatabase(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::GlobalAutoUpdate(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
@@ -521,20 +549,20 @@ async fn poll_update_until_complete(
|
||||
} else {
|
||||
format!("{}/updates/{}", cli_config().host, update.id)
|
||||
};
|
||||
info!("Link: '{}'", link.bold());
|
||||
println!("Link: '{}'", link.bold());
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let timer = tokio::time::Instant::now();
|
||||
let update = client.poll_update_until_complete(&update.id).await?;
|
||||
if update.success {
|
||||
info!(
|
||||
println!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION SUCCESSFUL".green(),
|
||||
);
|
||||
} else {
|
||||
warn!(
|
||||
eprintln!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION FAILED".red(),
|
||||
|
||||
@@ -51,38 +51,38 @@ pub async fn handle(list: &args::list::List) -> anyhow::Result<()> {
|
||||
match &list.command {
|
||||
None => list_all(list).await,
|
||||
Some(ListCommand::Servers(filters)) => {
|
||||
list_resources::<ServerListItem>(filters).await
|
||||
list_resources::<ServerListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Stacks(filters)) => {
|
||||
list_resources::<StackListItem>(filters).await
|
||||
list_resources::<StackListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Deployments(filters)) => {
|
||||
list_resources::<DeploymentListItem>(filters).await
|
||||
list_resources::<DeploymentListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Builds(filters)) => {
|
||||
list_resources::<BuildListItem>(filters).await
|
||||
list_resources::<BuildListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Repos(filters)) => {
|
||||
list_resources::<RepoListItem>(filters).await
|
||||
list_resources::<RepoListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Procedures(filters)) => {
|
||||
list_resources::<ProcedureListItem>(filters).await
|
||||
list_resources::<ProcedureListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Actions(filters)) => {
|
||||
list_resources::<ActionListItem>(filters).await
|
||||
list_resources::<ActionListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Syncs(filters)) => {
|
||||
list_resources::<ResourceSyncListItem>(filters).await
|
||||
list_resources::<ResourceSyncListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Builders(filters)) => {
|
||||
list_resources::<BuilderListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Alerters(filters)) => {
|
||||
list_resources::<AlerterListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Schedules(filters)) => {
|
||||
list_schedules(filters).await
|
||||
}
|
||||
Some(ListCommand::Builders(filters)) => {
|
||||
list_resources::<BuilderListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Alerters(filters)) => {
|
||||
list_resources::<AlerterListItem>(filters).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -105,61 +105,61 @@ async fn list_all(list: &args::list::List) -> anyhow::Result<()> {
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
ServerListItem::list(client, &filters),
|
||||
StackListItem::list(client, &filters),
|
||||
DeploymentListItem::list(client, &filters),
|
||||
BuildListItem::list(client, &filters),
|
||||
RepoListItem::list(client, &filters),
|
||||
ProcedureListItem::list(client, &filters),
|
||||
ActionListItem::list(client, &filters),
|
||||
ResourceSyncListItem::list(client, &filters),
|
||||
ServerListItem::list(client, &filters, true),
|
||||
StackListItem::list(client, &filters, true),
|
||||
DeploymentListItem::list(client, &filters, true),
|
||||
BuildListItem::list(client, &filters, true),
|
||||
RepoListItem::list(client, &filters, true),
|
||||
ProcedureListItem::list(client, &filters, true),
|
||||
ActionListItem::list(client, &filters, true),
|
||||
ResourceSyncListItem::list(client, &filters, true),
|
||||
)?;
|
||||
|
||||
if !servers.is_empty() {
|
||||
fix_tags(&mut servers, &tags);
|
||||
print_items(servers, filters.format)?;
|
||||
print_items(servers, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !stacks.is_empty() {
|
||||
fix_tags(&mut stacks, &tags);
|
||||
print_items(stacks, filters.format)?;
|
||||
print_items(stacks, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !deployments.is_empty() {
|
||||
fix_tags(&mut deployments, &tags);
|
||||
print_items(deployments, filters.format)?;
|
||||
print_items(deployments, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !builds.is_empty() {
|
||||
fix_tags(&mut builds, &tags);
|
||||
print_items(builds, filters.format)?;
|
||||
print_items(builds, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !repos.is_empty() {
|
||||
fix_tags(&mut repos, &tags);
|
||||
print_items(repos, filters.format)?;
|
||||
print_items(repos, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !procedures.is_empty() {
|
||||
fix_tags(&mut procedures, &tags);
|
||||
print_items(procedures, filters.format)?;
|
||||
print_items(procedures, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !actions.is_empty() {
|
||||
fix_tags(&mut actions, &tags);
|
||||
print_items(actions, filters.format)?;
|
||||
print_items(actions, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !syncs.is_empty() {
|
||||
fix_tags(&mut syncs, &tags);
|
||||
print_items(syncs, filters.format)?;
|
||||
print_items(syncs, filters.format, list.links)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
@@ -168,6 +168,7 @@ async fn list_all(list: &args::list::List) -> anyhow::Result<()> {
|
||||
|
||||
async fn list_resources<T>(
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
T: ListResources,
|
||||
@@ -175,7 +176,7 @@ where
|
||||
{
|
||||
let client = crate::command::komodo_client().await?;
|
||||
let (mut resources, tags) = tokio::try_join!(
|
||||
T::list(client, filters),
|
||||
T::list(client, filters, minimal),
|
||||
client.read(ListTags::default()).map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
@@ -183,7 +184,7 @@ where
|
||||
)?;
|
||||
fix_tags(&mut resources, &tags);
|
||||
if !resources.is_empty() {
|
||||
print_items(resources, filters.format)?;
|
||||
print_items(resources, filters.format, filters.links)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -226,13 +227,13 @@ async fn list_schedules(
|
||||
a.name.cmp(&b.name).then(a.enabled.cmp(&b.enabled))
|
||||
});
|
||||
if !schedules.is_empty() {
|
||||
print_items(schedules, filters.format)?;
|
||||
print_items(schedules, filters.format, filters.links)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fix_tags<T>(
|
||||
resources: &mut Vec<ResourceListItem<T>>,
|
||||
resources: &mut [ResourceListItem<T>],
|
||||
tags: &HashMap<String, String>,
|
||||
) {
|
||||
resources.iter_mut().for_each(|resource| {
|
||||
@@ -254,6 +255,8 @@ where
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
// For use with root `km ls`
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<ResourceListItem<Self::Info>>>;
|
||||
}
|
||||
|
||||
@@ -264,12 +267,14 @@ impl ListResources for ServerListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
_minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let servers = client
|
||||
.read(ListServers {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
.await?;
|
||||
@@ -282,6 +287,8 @@ impl ListResources for ServerListItem {
|
||||
true
|
||||
} else if filters.down {
|
||||
!matches!(server.info.state, ServerState::Ok)
|
||||
} else if filters.in_progress {
|
||||
false
|
||||
} else {
|
||||
matches!(server.info.state, ServerState::Ok)
|
||||
};
|
||||
@@ -303,6 +310,7 @@ impl ListResources for StackListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
_minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let (servers, mut stacks) = tokio::try_join!(
|
||||
client
|
||||
@@ -317,6 +325,7 @@ impl ListResources for StackListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
)?;
|
||||
@@ -337,9 +346,17 @@ impl ListResources for StackListItem {
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
!matches!(stack.info.state, StackState::Running)
|
||||
!matches!(
|
||||
stack.info.state,
|
||||
StackState::Running | StackState::Deploying
|
||||
)
|
||||
} else if filters.in_progress {
|
||||
matches!(stack.info.state, StackState::Deploying)
|
||||
} else {
|
||||
matches!(stack.info.state, StackState::Running)
|
||||
matches!(
|
||||
stack.info.state,
|
||||
StackState::Running | StackState::Deploying
|
||||
)
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[stack.name.as_str()])
|
||||
@@ -365,6 +382,7 @@ impl ListResources for DeploymentListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
_minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let (servers, mut deployments) = tokio::try_join!(
|
||||
client
|
||||
@@ -379,6 +397,7 @@ impl ListResources for DeploymentListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
)?;
|
||||
@@ -400,9 +419,17 @@ impl ListResources for DeploymentListItem {
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
!matches!(deployment.info.state, DeploymentState::Running)
|
||||
!matches!(
|
||||
deployment.info.state,
|
||||
DeploymentState::Running | DeploymentState::Deploying
|
||||
)
|
||||
} else if filters.in_progress {
|
||||
matches!(deployment.info.state, DeploymentState::Deploying)
|
||||
} else {
|
||||
matches!(deployment.info.state, DeploymentState::Running)
|
||||
matches!(
|
||||
deployment.info.state,
|
||||
DeploymentState::Running | DeploymentState::Deploying
|
||||
)
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[deployment.name.as_str()])
|
||||
@@ -428,6 +455,7 @@ impl ListResources for BuildListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let (builders, mut builds) = tokio::try_join!(
|
||||
client
|
||||
@@ -442,6 +470,7 @@ impl ListResources for BuildListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
)?;
|
||||
@@ -459,7 +488,20 @@ impl ListResources for BuildListItem {
|
||||
let mut builds = builds
|
||||
.into_iter()
|
||||
.filter(|build| {
|
||||
matches_wildcards(&names, &[build.name.as_str()])
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
matches!(
|
||||
build.info.state,
|
||||
BuildState::Failed | BuildState::Unknown
|
||||
)
|
||||
} else if minimal || filters.in_progress {
|
||||
matches!(build.info.state, BuildState::Building)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[build.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&builders,
|
||||
&[build.info.builder_id.as_str()],
|
||||
@@ -481,6 +523,7 @@ impl ListResources for RepoListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut repos = client
|
||||
@@ -488,11 +531,30 @@ impl ListResources for RepoListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|repo| matches_wildcards(&names, &[repo.name.as_str()]))
|
||||
.filter(|repo| {
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
matches!(
|
||||
repo.info.state,
|
||||
RepoState::Failed | RepoState::Unknown
|
||||
)
|
||||
} else if minimal || filters.in_progress {
|
||||
matches!(
|
||||
repo.info.state,
|
||||
RepoState::Building | RepoState::Cloning
|
||||
)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[repo.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
repos.sort_by(|a, b| {
|
||||
a.name
|
||||
@@ -509,6 +571,7 @@ impl ListResources for ProcedureListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut procedures = client
|
||||
@@ -516,12 +579,26 @@ impl ListResources for ProcedureListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|procedure| {
|
||||
matches_wildcards(&names, &[procedure.name.as_str()])
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
matches!(
|
||||
procedure.info.state,
|
||||
ProcedureState::Failed | ProcedureState::Unknown
|
||||
)
|
||||
} else if minimal || filters.in_progress {
|
||||
matches!(procedure.info.state, ProcedureState::Running)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[procedure.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
procedures.sort_by(|a, b| {
|
||||
@@ -542,6 +619,7 @@ impl ListResources for ActionListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut actions = client
|
||||
@@ -549,12 +627,26 @@ impl ListResources for ActionListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|action| {
|
||||
matches_wildcards(&names, &[action.name.as_str()])
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
matches!(
|
||||
action.info.state,
|
||||
ActionState::Failed | ActionState::Unknown
|
||||
)
|
||||
} else if minimal || filters.in_progress {
|
||||
matches!(action.info.state, ActionState::Running)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[action.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
actions.sort_by(|a, b| {
|
||||
@@ -575,6 +667,7 @@ impl ListResources for ResourceSyncListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut syncs = client
|
||||
@@ -582,11 +675,30 @@ impl ListResources for ResourceSyncListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|sync| matches_wildcards(&names, &[sync.name.as_str()]))
|
||||
.filter(|sync| {
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
matches!(
|
||||
sync.info.state,
|
||||
ResourceSyncState::Failed | ResourceSyncState::Unknown
|
||||
)
|
||||
} else if minimal || filters.in_progress {
|
||||
matches!(
|
||||
sync.info.state,
|
||||
ResourceSyncState::Syncing | ResourceSyncState::Pending
|
||||
)
|
||||
} else {
|
||||
true
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[sync.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
syncs.sort_by(|a, b| {
|
||||
a.name.cmp(&b.name).then(a.info.state.cmp(&b.info.state))
|
||||
@@ -600,6 +712,7 @@ impl ListResources for BuilderListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut builders = client
|
||||
@@ -607,12 +720,14 @@ impl ListResources for BuilderListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|builder| {
|
||||
matches_wildcards(&names, &[builder.name.as_str()])
|
||||
(!minimal || filters.all)
|
||||
&& matches_wildcards(&names, &[builder.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
builders.sort_by(|a, b| {
|
||||
@@ -629,6 +744,7 @@ impl ListResources for AlerterListItem {
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
minimal: bool,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut syncs = client
|
||||
@@ -636,11 +752,15 @@ impl ListResources for AlerterListItem {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.templates(filters.templates)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|sync| matches_wildcards(&names, &[sync.name.as_str()]))
|
||||
.filter(|sync| {
|
||||
(!minimal || filters.all)
|
||||
&& matches_wildcards(&names, &[sync.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
syncs.sort_by(|a, b| {
|
||||
a.info
|
||||
@@ -656,39 +776,51 @@ impl ListResources for AlerterListItem {
|
||||
// TABLE
|
||||
|
||||
impl PrintTable for ResourceListItem<ServerListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Server", "State", "Address", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Server", "State", "Address", "Tags", "Link"]
|
||||
} else {
|
||||
&["Server", "State", "Address", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<Cell> {
|
||||
fn row(self, links: bool) -> Vec<Cell> {
|
||||
let color = match self.info.state {
|
||||
ServerState::Ok => Color::Green,
|
||||
ServerState::NotOk => Color::Red,
|
||||
ServerState::Disabled => Color::Blue,
|
||||
};
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.address),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Server,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)))
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<StackListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Stack", "State", "Server", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Stack", "State", "Server", "Tags", "Link"]
|
||||
} else {
|
||||
&["Stack", "State", "Server", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
StackState::Down => Color::Blue,
|
||||
StackState::Running => Color::Green,
|
||||
StackState::Deploying => Color::DarkYellow,
|
||||
StackState::Paused => Color::DarkYellow,
|
||||
StackState::Unknown => Color::Magenta,
|
||||
_ => Color::Red,
|
||||
@@ -700,7 +832,7 @@ impl PrintTable for ResourceListItem<StackListItemInfo> {
|
||||
// } else {
|
||||
// "UI Defined"
|
||||
// };
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
@@ -708,75 +840,97 @@ impl PrintTable for ResourceListItem<StackListItemInfo> {
|
||||
Cell::new(self.info.server_id),
|
||||
// Cell::new(source),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Stack,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)))
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<DeploymentListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Deployment", "State", "Server", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Deployment", "State", "Server", "Tags", "Link"]
|
||||
} else {
|
||||
&["Deployment", "State", "Server", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
DeploymentState::NotDeployed => Color::Blue,
|
||||
DeploymentState::Running => Color::Green,
|
||||
DeploymentState::Deploying => Color::DarkYellow,
|
||||
DeploymentState::Paused => Color::DarkYellow,
|
||||
DeploymentState::Unknown => Color::Magenta,
|
||||
_ => Color::Red,
|
||||
};
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.server_id),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Deployment,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)))
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<BuildListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Build", "State", "Builder", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Build", "State", "Builder", "Tags", "Link"]
|
||||
} else {
|
||||
&["Build", "State", "Builder", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
BuildState::Ok => Color::Green,
|
||||
BuildState::Building => Color::DarkYellow,
|
||||
BuildState::Unknown => Color::Magenta,
|
||||
BuildState::Failed => Color::Red,
|
||||
};
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.builder_id),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Build,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)));
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<RepoListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Repo", "State", "Link", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Repo", "State", "Link", "Tags", "Link"]
|
||||
} else {
|
||||
&["Repo", "State", "Link", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
RepoState::Ok => Color::Green,
|
||||
RepoState::Building
|
||||
@@ -785,27 +939,34 @@ impl PrintTable for ResourceListItem<RepoListItemInfo> {
|
||||
RepoState::Unknown => Color::Magenta,
|
||||
RepoState::Failed => Color::Red,
|
||||
};
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.repo_link),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Repo,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)))
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<ProcedureListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Procedure", "State", "Next Run", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Procedure", "State", "Next Run", "Tags", "Link"]
|
||||
} else {
|
||||
&["Procedure", "State", "Next Run", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
ProcedureState::Ok => Color::Green,
|
||||
ProcedureState::Running => Color::DarkYellow,
|
||||
@@ -821,27 +982,34 @@ impl PrintTable for ResourceListItem<ProcedureListItemInfo> {
|
||||
} else {
|
||||
Cell::new(String::from("None"))
|
||||
};
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
next_run,
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Procedure,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)))
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<ActionListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Action", "State", "Next Run", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Action", "State", "Next Run", "Tags", "Link"]
|
||||
} else {
|
||||
&["Action", "State", "Next Run", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
ActionState::Ok => Color::Green,
|
||||
ActionState::Running => Color::DarkYellow,
|
||||
@@ -857,27 +1025,34 @@ impl PrintTable for ResourceListItem<ActionListItemInfo> {
|
||||
} else {
|
||||
Cell::new(String::from("None"))
|
||||
};
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
next_run,
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Action,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)));
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<ResourceSyncListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Sync", "State", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Sync", "State", "Tags", "Link"]
|
||||
} else {
|
||||
&["Sync", "State", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
ResourceSyncState::Ok => Color::Green,
|
||||
ResourceSyncState::Pending | ResourceSyncState::Syncing => {
|
||||
@@ -886,45 +1061,59 @@ impl PrintTable for ResourceListItem<ResourceSyncListItemInfo> {
|
||||
ResourceSyncState::Unknown => Color::Magenta,
|
||||
ResourceSyncState::Failed => Color::Red,
|
||||
};
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::ResourceSync,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)))
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<BuilderListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Builder", "Type", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Builder", "Type", "Tags", "Link"]
|
||||
} else {
|
||||
&["Builder", "Type", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
vec![
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.builder_type),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Builder,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)));
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<AlerterListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Alerter", "Type", "Enabled", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Alerter", "Type", "Enabled", "Tags", "Link"]
|
||||
} else {
|
||||
&["Alerter", "Type", "Enabled", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
vec![
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let mut row = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.endpoint_type),
|
||||
if self.info.enabled {
|
||||
@@ -933,20 +1122,27 @@ impl PrintTable for ResourceListItem<AlerterListItemInfo> {
|
||||
Cell::new(self.info.enabled.to_string()).fg(Color::Red)
|
||||
},
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
];
|
||||
if links {
|
||||
row.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Alerter,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
)));
|
||||
}
|
||||
row
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for Schedule {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Name", "Type", "Next Run", "Tags", "Link"]
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&["Name", "Type", "Next Run", "Tags", "Link"]
|
||||
} else {
|
||||
&["Name", "Type", "Next Run", "Tags"]
|
||||
}
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
fn row(self, links: bool) -> Vec<comfy_table::Cell> {
|
||||
let next_run = if let Some(ts) = self.next_scheduled_run {
|
||||
Cell::new(
|
||||
format_timetamp(ts)
|
||||
@@ -957,12 +1153,19 @@ impl PrintTable for Schedule {
|
||||
Cell::new(String::from("None"))
|
||||
};
|
||||
let (resource_type, id) = self.target.extract_variant_id();
|
||||
vec![
|
||||
let mut res = vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.target.extract_variant_id().0),
|
||||
next_run,
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(&cli_config().host, resource_type, id)),
|
||||
]
|
||||
];
|
||||
if links {
|
||||
res.push(Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
resource_type,
|
||||
id,
|
||||
)));
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,7 +5,8 @@ use chrono::TimeZone;
|
||||
use colored::Colorize;
|
||||
use comfy_table::{Attribute, Cell, Table};
|
||||
use komodo_client::{
|
||||
KomodoClient, entities::config::cli::args::CliFormat,
|
||||
KomodoClient,
|
||||
entities::config::cli::{CliTableBorders, args::CliFormat},
|
||||
};
|
||||
use serde::Serialize;
|
||||
use tokio::sync::OnceCell;
|
||||
@@ -91,19 +92,30 @@ fn sanitize_uri(uri: &str) -> String {
|
||||
fn print_items<T: PrintTable + Serialize>(
|
||||
items: Vec<T>,
|
||||
format: CliFormat,
|
||||
links: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
match format {
|
||||
CliFormat::Table => {
|
||||
let mut table = Table::new();
|
||||
table
|
||||
.load_preset(comfy_table::presets::UTF8_FULL)
|
||||
.set_header(
|
||||
T::header()
|
||||
.into_iter()
|
||||
.map(|h| Cell::new(h).add_attribute(Attribute::Bold)),
|
||||
);
|
||||
let preset = {
|
||||
use comfy_table::presets::*;
|
||||
match cli_config().table_borders {
|
||||
None | Some(CliTableBorders::Horizontal) => {
|
||||
UTF8_HORIZONTAL_ONLY
|
||||
}
|
||||
Some(CliTableBorders::Vertical) => UTF8_FULL_CONDENSED,
|
||||
Some(CliTableBorders::Inside) => UTF8_NO_BORDERS,
|
||||
Some(CliTableBorders::Outside) => UTF8_BORDERS_ONLY,
|
||||
Some(CliTableBorders::All) => UTF8_FULL,
|
||||
}
|
||||
};
|
||||
table.load_preset(preset).set_header(
|
||||
T::header(links)
|
||||
.iter()
|
||||
.map(|h| Cell::new(h).add_attribute(Attribute::Bold)),
|
||||
);
|
||||
for item in items {
|
||||
table.add_row(item.row());
|
||||
table.add_row(item.row(links));
|
||||
}
|
||||
println!("{table}");
|
||||
}
|
||||
@@ -119,8 +131,8 @@ fn print_items<T: PrintTable + Serialize>(
|
||||
}
|
||||
|
||||
trait PrintTable {
|
||||
fn header() -> &'static [&'static str];
|
||||
fn row(self) -> Vec<Cell>;
|
||||
fn header(links: bool) -> &'static [&'static str];
|
||||
fn row(self, links: bool) -> Vec<Cell>;
|
||||
}
|
||||
|
||||
fn parse_wildcards(items: &[String]) -> Vec<Wildcard<'_>> {
|
||||
@@ -156,6 +168,14 @@ fn format_timetamp(ts: i64) -> anyhow::Result<String> {
|
||||
Ok(ts)
|
||||
}
|
||||
|
||||
fn clamp_sha(maybe_sha: &str) -> String {
|
||||
if maybe_sha.starts_with("sha256:") {
|
||||
maybe_sha[0..20].to_string() + "..."
|
||||
} else {
|
||||
maybe_sha.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
// fn text_link(link: &str, text: &str) -> String {
|
||||
// format!("\x1b]8;;{link}\x07{text}\x1b]8;;\x07")
|
||||
// }
|
||||
|
||||
@@ -191,6 +191,9 @@ pub fn cli_config() -> &'static CliConfig {
|
||||
config_profile,
|
||||
config_aliases: config.config_aliases,
|
||||
default_profile: config.default_profile,
|
||||
table_borders: env
|
||||
.komodo_cli_table_borders
|
||||
.or(config.table_borders),
|
||||
host: host
|
||||
.or(env.komodo_cli_host)
|
||||
.or(env.komodo_host)
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
## All in one, multi stage compile + runtime Docker build for your architecture.
|
||||
|
||||
# Build Core
|
||||
FROM rust:1.88.0-bullseye AS core-builder
|
||||
FROM rust:1.89.0-bullseye AS core-builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -13,7 +14,8 @@ COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile app
|
||||
RUN cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_cli --release
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine AS frontend-builder
|
||||
|
||||
@@ -17,6 +17,28 @@ pub async fn send_alert(
|
||||
"{level} | If you see this message, then Alerter **{name}** is **working**\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Version mismatch detected ⚠️\nPeriphery: **{server_version}** | Core: **{core_version}**\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -207,6 +229,16 @@ pub async fn send_alert(
|
||||
"{level} | **{name}** ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !content.is_empty() {
|
||||
|
||||
@@ -48,8 +48,9 @@ pub async fn send_alerts(alerts: &[Alert]) {
|
||||
return;
|
||||
};
|
||||
|
||||
let handles =
|
||||
alerts.iter().map(|alert| send_alert(&alerters, alert));
|
||||
let handles = alerts
|
||||
.iter()
|
||||
.map(|alert| send_alert_to_alerters(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
}
|
||||
@@ -58,7 +59,7 @@ pub async fn send_alerts(alerts: &[Alert]) {
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
|
||||
async fn send_alert_to_alerters(alerters: &[Alerter], alert: &Alert) {
|
||||
if alerters.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -250,3 +251,238 @@ fn resource_link(
|
||||
id,
|
||||
)
|
||||
}
|
||||
|
||||
/// Standard message content format
|
||||
/// used by Ntfy, Pushover.
|
||||
fn standard_alert_content(alert: &Alert) -> String {
|
||||
let level = fmt_level(alert.level);
|
||||
match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,205 +8,7 @@ pub async fn send_alert(
|
||||
email: Option<&str>,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let content = match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
|
||||
let content = standard_alert_content(alert);
|
||||
if !content.is_empty() {
|
||||
send_message(url, email, content).await?;
|
||||
}
|
||||
|
||||
@@ -7,205 +7,7 @@ pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let content = match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
|
||||
let content = standard_alert_content(alert);
|
||||
if !content.is_empty() {
|
||||
send_message(url, content).await?;
|
||||
}
|
||||
|
||||
@@ -23,6 +23,35 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let text = match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Periphery version now matches Core version ✅"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}"
|
||||
)
|
||||
}
|
||||
};
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Server,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -429,6 +458,12 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
let text = format!("{level} | {message}");
|
||||
let blocks =
|
||||
vec![Block::header(text.clone()), Block::section(details)];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !text.is_empty() {
|
||||
|
||||
@@ -3,11 +3,12 @@ use std::{sync::OnceLock, time::Instant};
|
||||
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::auth::*, entities::user::User};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use serror::{AddStatusCode, Json};
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -25,6 +26,7 @@ use crate::{
|
||||
|
||||
use super::Variant;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AuthArgs {
|
||||
pub headers: HeaderMap,
|
||||
}
|
||||
@@ -41,7 +43,7 @@ pub struct AuthArgs {
|
||||
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
|
||||
pub enum AuthRequest {
|
||||
GetLoginOptions(GetLoginOptions),
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
SignUpLocalUser(SignUpLocalUser),
|
||||
LoginLocalUser(LoginLocalUser),
|
||||
ExchangeForJwt(ExchangeForJwt),
|
||||
GetUser(GetUser),
|
||||
@@ -138,8 +140,10 @@ impl Resolve<AuthArgs> for ExchangeForJwt {
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
) -> serror::Result<ExchangeForJwtResponse> {
|
||||
let jwt = jwt_client().redeem_exchange_token(&self.token).await?;
|
||||
Ok(ExchangeForJwtResponse { jwt })
|
||||
jwt_client()
|
||||
.redeem_exchange_token(&self.token)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -149,7 +153,11 @@ impl Resolve<AuthArgs> for GetUser {
|
||||
self,
|
||||
AuthArgs { headers }: &AuthArgs,
|
||||
) -> serror::Result<User> {
|
||||
let user_id = get_user_id_from_headers(headers).await?;
|
||||
Ok(get_user(&user_id).await?)
|
||||
let user_id = get_user_id_from_headers(headers)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
get_user(&user_id)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,8 +92,11 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure action not already busy before updating.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.running = true)?;
|
||||
let _action_guard = action_state.update_custom(
|
||||
|state| state.running += 1,
|
||||
|state| state.running -= 1,
|
||||
false,
|
||||
)?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
|
||||
@@ -1,18 +1,22 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use futures::{TryStreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::TestAlerter,
|
||||
api::execute::{SendAlert, TestAlerter},
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
|
||||
alerter::Alerter,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
alert::send_alert_to_alerter, helpers::update::update_update,
|
||||
permission::get_check_permissions,
|
||||
permission::get_check_permissions, resource::list_full_for_user,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
@@ -71,3 +75,75 @@ impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ExecuteArgs> for SendAlert {
|
||||
#[instrument(name = "SendAlert", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerters = list_full_for_user::<Alerter>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&[],
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|a| {
|
||||
a.config.enabled
|
||||
&& (self.alerters.is_empty()
|
||||
|| self.alerters.contains(&a.name)
|
||||
|| self.alerters.contains(&a.id))
|
||||
&& (a.config.alert_types.is_empty()
|
||||
|| a.config.alert_types.contains(&AlertDataVariant::Custom))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if alerters.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Could not find any valid alerters to send to, this required Execute permissions on the Alerter"
|
||||
).status_code(StatusCode::BAD_REQUEST));
|
||||
}
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let ts = komodo_timestamp();
|
||||
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts,
|
||||
resolved: true,
|
||||
level: self.level,
|
||||
target: update.target.clone(),
|
||||
data: AlertData::Custom {
|
||||
message: self.message,
|
||||
details: self.details,
|
||||
},
|
||||
resolved_ts: Some(ts),
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"Send alert",
|
||||
serde_json::to_string_pretty(&alert)
|
||||
.context("Failed to serialize alert to JSON")?,
|
||||
);
|
||||
|
||||
if let Err(e) = alerters
|
||||
.iter()
|
||||
.map(|alerter| send_alert_to_alerter(alerter, &alert))
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
{
|
||||
update.push_error_log("Send Error", format_serror(&e.into()));
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
use std::{future::IntoFuture, time::Duration};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
future::IntoFuture,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
@@ -20,10 +24,10 @@ use komodo_client::{
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
all_logs_success,
|
||||
build::{Build, BuildConfig, ImageRegistryConfig},
|
||||
build::{Build, BuildConfig},
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
komodo_timestamp,
|
||||
komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
update::{Log, Update},
|
||||
@@ -133,8 +137,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
let git_token =
|
||||
build_git_token(&mut build, repo.as_mut()).await?;
|
||||
|
||||
let registry_token =
|
||||
validate_account_extract_registry_token(&build).await?;
|
||||
let registry_tokens =
|
||||
validate_account_extract_registry_tokens(&build).await?;
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
@@ -284,14 +288,12 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
.request(api::build::Build {
|
||||
build: build.clone(),
|
||||
repo,
|
||||
registry_token,
|
||||
registry_tokens,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
// Push a commit hash tagged image
|
||||
additional_tags: if update.commit_hash.is_empty() {
|
||||
Default::default()
|
||||
} else {
|
||||
vec![update.commit_hash.clone()]
|
||||
},
|
||||
// To push a commit hash tagged image
|
||||
commit_hash: optional_string(&update.commit_hash),
|
||||
// Unused for now
|
||||
additional_tags: Default::default(),
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
@@ -608,34 +610,48 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
/// This will make sure that a build with non-none image registry has an account attached,
|
||||
/// and will check the core config for a token matching requirements.
|
||||
/// Otherwise it is left to periphery.
|
||||
async fn validate_account_extract_registry_token(
|
||||
async fn validate_account_extract_registry_tokens(
|
||||
Build {
|
||||
config:
|
||||
BuildConfig {
|
||||
image_registry:
|
||||
ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
},
|
||||
..
|
||||
},
|
||||
config: BuildConfig { image_registry, .. },
|
||||
..
|
||||
}: &Build,
|
||||
) -> serror::Result<Option<String>> {
|
||||
if domain.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
// Maps (domain, account) -> token
|
||||
) -> serror::Result<Vec<(String, String, String)>> {
|
||||
let mut res = HashMap::with_capacity(image_registry.capacity());
|
||||
|
||||
for (domain, account) in image_registry
|
||||
.iter()
|
||||
.map(|r| (r.domain.as_str(), r.account.as_str()))
|
||||
// This ensures uniqueness / prevents redundant logins
|
||||
.collect::<HashSet<_>>()
|
||||
{
|
||||
if domain.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let Some(registry_token) = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)? else {
|
||||
continue;
|
||||
};
|
||||
|
||||
res.insert(
|
||||
(domain.to_string(), account.to_string()),
|
||||
registry_token,
|
||||
);
|
||||
}
|
||||
|
||||
let registry_token = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?;
|
||||
|
||||
Ok(registry_token)
|
||||
Ok(
|
||||
res
|
||||
.into_iter()
|
||||
.map(|((domain, account), token)| (domain, account, token))
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ use komodo_client::{
|
||||
deployment::{
|
||||
Deployment, DeploymentImage, extract_registry_domain,
|
||||
},
|
||||
get_image_name, komodo_timestamp, optional_string,
|
||||
komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Log, Update},
|
||||
@@ -115,8 +115,11 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
let (version, registry_token) = match &deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(build_id).await?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version
|
||||
} else {
|
||||
@@ -133,21 +136,27 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
deployment.config.image = DeploymentImage::Image {
|
||||
image: format!("{image_name}:{version_str}"),
|
||||
};
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
(version, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = build.config.image_registry;
|
||||
} = first_registry;
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
deployment.config.image_registry_account = account
|
||||
deployment.config.image_registry_account =
|
||||
account.to_string();
|
||||
}
|
||||
let token = if !deployment
|
||||
.config
|
||||
.image_registry_account
|
||||
.is_empty()
|
||||
{
|
||||
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|
||||
registry_token(domain, &deployment.config.image_registry_account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
|
||||
)?
|
||||
} else {
|
||||
@@ -213,7 +222,7 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
}
|
||||
};
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -240,8 +249,11 @@ pub async fn pull_deployment_inner(
|
||||
let (image, account, token) = match deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version.to_string()
|
||||
} else {
|
||||
@@ -255,26 +267,31 @@ pub async fn pull_deployment_inner(
|
||||
};
|
||||
// replace image with corresponding build image.
|
||||
let image = format!("{image_name}:{version}");
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
(image, None, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = build.config.image_registry;
|
||||
} = first_registry;
|
||||
let account =
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
account
|
||||
} else {
|
||||
deployment.config.image_registry_account
|
||||
&deployment.config.image_registry_account
|
||||
};
|
||||
let token = if !account.is_empty() {
|
||||
registry_token(&domain, &account).await.with_context(
|
||||
registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(image, optional_string(&account), token)
|
||||
(image, optional_string(account), token)
|
||||
}
|
||||
}
|
||||
DeploymentImage::Image { image } => {
|
||||
@@ -326,7 +343,7 @@ pub async fn pull_deployment_inner(
|
||||
Err(e) => Log::error("Pull image", format_serror(&e.into())),
|
||||
};
|
||||
|
||||
update_cache_for_server(server).await;
|
||||
update_cache_for_server(server, true).await;
|
||||
anyhow::Ok(log)
|
||||
}
|
||||
.await;
|
||||
@@ -411,7 +428,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -460,7 +477,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -507,7 +524,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -556,7 +573,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -611,7 +628,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -694,7 +711,7 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
|
||||
319
bin/core/src/api/execute/maintenance.rs
Normal file
319
bin/core/src/api/execute/maintenance.rs
Normal file
@@ -0,0 +1,319 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::run_komodo_command;
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use formatting::{bold, format_serror};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
|
||||
},
|
||||
entities::{
|
||||
deployment::DeploymentState, server::ServerState,
|
||||
stack::StackState,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
api::execute::{
|
||||
ExecuteArgs, pull_deployment_inner, pull_stack_inner,
|
||||
},
|
||||
config::core_config,
|
||||
helpers::update::update_update,
|
||||
state::{
|
||||
db_client, deployment_status_cache, server_status_cache,
|
||||
stack_status_cache,
|
||||
},
|
||||
};
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn clear_repo_cache_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for ClearRepoCache {
|
||||
#[instrument(
|
||||
name = "ClearRepoCache",
|
||||
skip(user, update),
|
||||
fields(user_id = user.id, update_id = update.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = clear_repo_cache_lock()
|
||||
.try_lock()
|
||||
.context("Clear already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let mut contents =
|
||||
tokio::fs::read_dir(&core_config().repo_directory)
|
||||
.await
|
||||
.context("Failed to read repo cache directory")?;
|
||||
|
||||
loop {
|
||||
let path = match contents
|
||||
.next_entry()
|
||||
.await
|
||||
.context("Failed to read contents at path")
|
||||
{
|
||||
Ok(Some(contents)) => contents.path(),
|
||||
Ok(None) => break,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Read Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if path.is_dir() {
|
||||
match tokio::fs::remove_dir_all(&path)
|
||||
.await
|
||||
.context("Failed to clear contents at path")
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Clear Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Serializes [BackupCoreDatabase] executions: only one database
/// backup may run at any given time.
fn backup_database_lock() -> &'static Mutex<()> {
  static BACKUP_DATABASE: OnceLock<Mutex<()>> = OnceLock::new();
  BACKUP_DATABASE.get_or_init(|| Mutex::new(()))
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BackupCoreDatabase {
  /// Admin-only execution which shells out to the `km` CLI to back up
  /// the Core database (`km database backup --yes`), attaching the
  /// command output to the Update.
  #[instrument(
    name = "BackupCoreDatabase",
    skip(user, update),
    fields(user_id = user.id, update_id = update.id)
  )]
  async fn resolve(
    self,
    ExecuteArgs { user, update }: &ExecuteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Hard admin gate: non-admins get 403 before anything runs.
    if !user.admin {
      return Err(
        anyhow!("This method is admin only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }

    // try_lock (not lock) so a concurrent request fails fast
    // instead of queueing a second backup behind the first.
    // Guard is held until this fn returns.
    let _lock = backup_database_lock()
      .try_lock()
      .context("Backup already in progress...")?;

    let mut update = update.clone();

    // Persist the in-progress update before the (potentially long)
    // backup command runs.
    update_update(update.clone()).await?;

    // `--yes` skips the CLI's interactive confirmation prompt.
    // Failures surface in the returned log, not as an Err.
    let res = run_komodo_command(
      "Backup Core Database",
      None,
      "km database backup --yes",
    )
    .await;

    update.logs.push(res);
    update.finalize();

    update_update(update.clone()).await?;

    Ok(update)
  }
}
|
||||
|
||||
//
|
||||
|
||||
/// Serializes [GlobalAutoUpdate] executions: only one global
/// auto-update pass may run at any given time.
fn global_update_lock() -> &'static Mutex<()> {
  static GLOBAL_UPDATE: OnceLock<Mutex<()>> = OnceLock::new();
  GLOBAL_UPDATE.get_or_init(|| Mutex::new(()))
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
  /// Admin-only execution that pulls the latest images / contents for
  /// every Running Stack and Deployment whose config has
  /// `poll_for_updates` or `auto_update` enabled, on servers that are
  /// currently reachable (`ServerState::Ok`).
  ///
  /// Successes are appended to `update.logs[0]` ("Auto Pull");
  /// per-resource failures get their own error logs. A single failure
  /// never aborts the pass.
  #[instrument(
    name = "GlobalAutoUpdate",
    skip(user, update),
    fields(user_id = user.id, update_id = update.id)
  )]
  async fn resolve(
    self,
    ExecuteArgs { user, update }: &ExecuteArgs,
  ) -> Result<Self::Response, Self::Error> {
    // Hard admin gate: non-admins get 403 before anything runs.
    if !user.admin {
      return Err(
        anyhow!("This method is admin only.")
          .status_code(StatusCode::FORBIDDEN),
      );
    }

    // try_lock (not lock) so a concurrent trigger fails fast
    // instead of stacking passes. Guard held until return.
    let _lock = global_update_lock()
      .try_lock()
      .context("Global update already in progress...")?;

    let mut update = update.clone();

    // Persist the in-progress update before the long pull sequence.
    update_update(update.clone()).await?;

    // This is all done in sequence because there is no rush,
    // the pulls / deploys happen spaced out to ease the load on system.
    let servers = find_collect(&db_client().servers, None, None)
      .await
      .context("Failed to query for servers from database")?;

    // Matches resources opted into polling or auto-update.
    let query = doc! {
      "$or": [
        { "config.poll_for_updates": true },
        { "config.auto_update": true }
      ]
    };

    // Repos are fetched unfiltered: they are only used to resolve a
    // stack's `linked_repo` reference, which exists regardless of the
    // repo's own update flags.
    let (stacks, repos) = tokio::try_join!(
      find_collect(&db_client().stacks, query.clone(), None),
      find_collect(&db_client().repos, None, None)
    )
    .context("Failed to query for resources from database")?;

    let server_status_cache = server_status_cache();
    let stack_status_cache = stack_status_cache();

    // Will be edited later at update.logs[0]
    update.push_simple_log("Auto Pull", String::new());

    for stack in stacks {
      // No cached status -> state unknown, skip.
      let Some(status) = stack_status_cache.get(&stack.id).await
      else {
        continue;
      };
      // Only pull running stacks.
      if !matches!(status.curr.state, StackState::Running) {
        continue;
      }
      if let Some(server) =
        servers.iter().find(|s| s.id == stack.config.server_id)
        // This check is probably redundant along with running check
        // but shouldn't hurt
        && server_status_cache
          .get(&server.id)
          .await
          .map(|s| matches!(s.state, ServerState::Ok))
          .unwrap_or_default()
      {
        // Name cloned up front since `stack` is moved into the pull.
        let name = stack.name.clone();
        // Resolve the linked Repo (if configured) from the prefetched
        // list; a dangling reference is logged and the stack skipped.
        let repo = if stack.config.linked_repo.is_empty() {
          None
        } else {
          let Some(repo) =
            repos.iter().find(|r| r.id == stack.config.linked_repo)
          else {
            update.push_error_log(
              &format!("Pull Stack {name}"),
              format!(
                "Did not find any Repo matching {}",
                stack.config.linked_repo
              ),
            );
            continue;
          };
          Some(repo.clone())
        };
        if let Err(e) =
          pull_stack_inner(stack, Vec::new(), server, repo, None)
            .await
        {
          update.push_error_log(
            &format!("Pull Stack {name}"),
            format_serror(&e.into()),
          );
        } else {
          // Append success line to the shared "Auto Pull" log,
          // newline-separated after the first entry.
          if !update.logs[0].stdout.is_empty() {
            update.logs[0].stdout.push('\n');
          }
          update.logs[0]
            .stdout
            .push_str(&format!("Pulled Stack {} ✅", bold(name)));
        }
      }
    }

    // Same pass for Deployments, reusing the opt-in query.
    let deployment_status_cache = deployment_status_cache();
    let deployments =
      find_collect(&db_client().deployments, query, None)
        .await
        .context("Failed to query for deployments from database")?;
    for deployment in deployments {
      // No cached status -> state unknown, skip.
      let Some(status) =
        deployment_status_cache.get(&deployment.id).await
      else {
        continue;
      };
      // Only pull running deployments.
      if !matches!(status.curr.state, DeploymentState::Running) {
        continue;
      }
      if let Some(server) =
        servers.iter().find(|s| s.id == deployment.config.server_id)
        // This check is probably redundant along with running check
        // but shouldn't hurt
        && server_status_cache
          .get(&server.id)
          .await
          .map(|s| matches!(s.state, ServerState::Ok))
          .unwrap_or_default()
      {
        let name = deployment.name.clone();
        if let Err(e) =
          pull_deployment_inner(deployment, server).await
        {
          update.push_error_log(
            &format!("Pull Deployment {name}"),
            format_serror(&e.into()),
          );
        } else {
          if !update.logs[0].stdout.is_empty() {
            update.logs[0].stdout.push('\n');
          }
          update.logs[0].stdout.push_str(&format!(
            "Pulled Deployment {} ✅",
            bold(name)
          ));
        }
      }
    }

    // Mark success/failure based on accumulated logs, then persist.
    update.finalize();
    update_update(update.clone()).await?;

    Ok(update)
  }
}
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::{pin::Pin, sync::OnceLock, time::Instant};
|
||||
use std::{pin::Pin, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
@@ -18,19 +18,16 @@ use komodo_client::{
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use response::JsonString;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::{AddStatusCodeError, Json};
|
||||
use tokio::sync::Mutex;
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
config::core_config,
|
||||
helpers::update::{init_execution_update, update_update},
|
||||
resource::{KomodoResource, list_full_for_user_using_pattern},
|
||||
state::db_client,
|
||||
@@ -40,6 +37,7 @@ mod action;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod deployment;
|
||||
mod maintenance;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
@@ -104,6 +102,7 @@ pub enum ExecuteRequest {
|
||||
UnpauseStack(UnpauseStack),
|
||||
DestroyStack(DestroyStack),
|
||||
BatchDestroyStack(BatchDestroyStack),
|
||||
RunStackService(RunStackService),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
Deploy(Deploy),
|
||||
@@ -141,12 +140,15 @@ pub enum ExecuteRequest {
|
||||
|
||||
// ==== ALERTER ====
|
||||
TestAlerter(TestAlerter),
|
||||
SendAlert(SendAlert),
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
|
||||
// ==== MAINTENANCE ====
|
||||
ClearRepoCache(ClearRepoCache),
|
||||
BackupCoreDatabase(BackupCoreDatabase),
|
||||
GlobalAutoUpdate(GlobalAutoUpdate),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -219,24 +221,33 @@ pub fn inner_handler(
|
||||
));
|
||||
}
|
||||
|
||||
// Spawn a task for the execution which continues
|
||||
// running after this method returns.
|
||||
let handle =
|
||||
tokio::spawn(task(req_id, request, user, update.clone()));
|
||||
|
||||
// Spawns another task to monitor the first for failures,
|
||||
// and add the log to Update about it (which primary task can't do because it errored out)
|
||||
tokio::spawn({
|
||||
let update_id = update.id.clone();
|
||||
async move {
|
||||
let log = match handle.await {
|
||||
Ok(Err(e)) => {
|
||||
warn!("/execute request {req_id} task error: {e:#}",);
|
||||
Log::error("task error", format_serror(&e.into()))
|
||||
Log::error("Task Error", format_serror(&e.into()))
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("/execute request {req_id} spawn error: {e:?}",);
|
||||
Log::error("spawn error", format!("{e:#?}"))
|
||||
Log::error("Spawn Error", format!("{e:#?}"))
|
||||
}
|
||||
_ => return,
|
||||
};
|
||||
let res = async {
|
||||
// Nothing to do if update was never actually created,
|
||||
// which is the case when the id is empty.
|
||||
if update_id.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let mut update =
|
||||
find_one_by_id(&db_client().updates, &update_id)
|
||||
.await
|
||||
@@ -334,71 +345,3 @@ async fn batch_execute<E: BatchExecute>(
|
||||
});
|
||||
Ok(join_all(futures).await)
|
||||
}
|
||||
|
||||
fn clear_repo_cache_lock() -> &'static Mutex<()> {
|
||||
static CLEAR_REPO_CACHE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
CLEAR_REPO_CACHE_LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for ClearRepoCache {
|
||||
#[instrument(name = "ClearRepoCache", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = clear_repo_cache_lock()
|
||||
.try_lock()
|
||||
.context("Clear already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let mut contents =
|
||||
tokio::fs::read_dir(&core_config().repo_directory)
|
||||
.await
|
||||
.context("Failed to read repo cache directory")?;
|
||||
|
||||
loop {
|
||||
let path = match contents
|
||||
.next_entry()
|
||||
.await
|
||||
.context("Failed to read contents at path")
|
||||
{
|
||||
Ok(Some(contents)) => contents.path(),
|
||||
Ok(None) => break,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Read Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if path.is_dir() {
|
||||
match tokio::fs::remove_dir_all(&path)
|
||||
.await
|
||||
.context("Failed to clear contents at path")
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Clear Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,7 +66,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -122,7 +122,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -176,7 +176,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -232,7 +232,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -288,7 +288,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -350,7 +350,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -401,7 +401,7 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -453,7 +453,7 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -503,7 +503,7 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -555,7 +555,7 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -605,7 +605,7 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -660,7 +660,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -711,7 +711,7 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -765,7 +765,7 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -813,7 +813,7 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -865,7 +865,7 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -916,7 +916,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -968,7 +968,7 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1020,7 +1020,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1072,7 +1072,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1123,7 +1123,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -1,15 +1,23 @@
|
||||
use std::{collections::HashSet, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use database::mungos::mongodb::bson::{doc, to_document};
|
||||
use database::mungos::mongodb::bson::{
|
||||
doc, oid::ObjectId, to_bson, to_document,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::{execute::*, write::RefreshStackCache},
|
||||
entities::{
|
||||
FileContents,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
stack::{Stack, StackInfo},
|
||||
stack::{
|
||||
Stack, StackFileRequires, StackInfo, StackRemoteFileContents,
|
||||
},
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::compose::*;
|
||||
@@ -21,7 +29,9 @@ use crate::{
|
||||
periphery_client,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
stack_git_token,
|
||||
update::{add_update_without_send, update_update},
|
||||
update::{
|
||||
add_update_without_send, init_execution_update, update_update,
|
||||
},
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
permission::get_check_permissions,
|
||||
@@ -179,7 +189,15 @@ impl Resolve<ExecuteArgs> for DeployStack {
|
||||
) = if deployed {
|
||||
(
|
||||
Some(latest_services.clone()),
|
||||
Some(file_contents.clone()),
|
||||
Some(
|
||||
file_contents
|
||||
.iter()
|
||||
.map(|f| FileContents {
|
||||
path: f.path.clone(),
|
||||
contents: f.contents.clone(),
|
||||
})
|
||||
.collect(),
|
||||
),
|
||||
compose_config,
|
||||
commit_hash.clone(),
|
||||
commit_message.clone(),
|
||||
@@ -242,7 +260,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
|
||||
}
|
||||
|
||||
// Ensure cached stack state up to date by updating server cache
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -289,62 +307,347 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
RefreshStackCache {
|
||||
stack: stack.id.clone(),
|
||||
}
|
||||
.resolve(&WriteArgs { user: user.clone() })
|
||||
.await?;
|
||||
|
||||
let stack = resource::get::<Stack>(&stack.id).await?;
|
||||
let changed = match (
|
||||
|
||||
let action = match (
|
||||
&stack.info.deployed_contents,
|
||||
&stack.info.remote_contents,
|
||||
) {
|
||||
(Some(deployed_contents), Some(latest_contents)) => {
|
||||
let changed = || {
|
||||
for latest in latest_contents {
|
||||
let Some(deployed) = deployed_contents
|
||||
.iter()
|
||||
.find(|c| c.path == latest.path)
|
||||
else {
|
||||
return true;
|
||||
};
|
||||
if latest.contents != deployed.contents {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
};
|
||||
changed()
|
||||
let services = stack
|
||||
.info
|
||||
.latest_services
|
||||
.iter()
|
||||
.map(|s| s.service_name.clone())
|
||||
.collect::<Vec<_>>();
|
||||
resolve_deploy_if_changed_action(
|
||||
deployed_contents,
|
||||
latest_contents,
|
||||
&services,
|
||||
)
|
||||
}
|
||||
(None, _) => true,
|
||||
_ => false,
|
||||
(None, _) => DeployIfChangedAction::FullDeploy,
|
||||
_ => DeployIfChangedAction::Services {
|
||||
deploy: Vec::new(),
|
||||
restart: Vec::new(),
|
||||
},
|
||||
};
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
if !changed {
|
||||
update.push_simple_log(
|
||||
"Diff compose files",
|
||||
String::from("Deploy cancelled after no changes detected."),
|
||||
);
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
match action {
|
||||
// Existing path pre 1.19.1
|
||||
DeployIfChangedAction::FullDeploy => {
|
||||
// Don't actually send it here, let the handler send it after it can set action state.
|
||||
// This is usually done in crate::helpers::update::init_execution_update.
|
||||
update.id = add_update_without_send(&update).await?;
|
||||
|
||||
// Don't actually send it here, let the handler send it after it can set action state.
|
||||
// This is usually done in crate::helpers::update::init_execution_update.
|
||||
update.id = add_update_without_send(&update).await?;
|
||||
DeployStack {
|
||||
stack: stack.name,
|
||||
services: Vec::new(),
|
||||
stop_time: self.stop_time,
|
||||
}
|
||||
.resolve(&ExecuteArgs {
|
||||
user: user.clone(),
|
||||
update,
|
||||
})
|
||||
.await
|
||||
}
|
||||
DeployIfChangedAction::FullRestart => {
|
||||
// For git repo based stacks, need to do a
|
||||
// PullStack in order to ensure latest repo contents on the
|
||||
// host before restart.
|
||||
maybe_pull_stack(&stack, Some(&mut update)).await?;
|
||||
|
||||
DeployStack {
|
||||
stack: stack.name,
|
||||
services: Vec::new(),
|
||||
stop_time: self.stop_time,
|
||||
let mut update =
|
||||
restart_services(stack.name, Vec::new(), user).await?;
|
||||
|
||||
if update.success {
|
||||
// Need to update 'info.deployed_contents' with the
|
||||
// latest contents so next check doesn't read the same diff.
|
||||
update_deployed_contents_with_latest(
|
||||
&stack.id,
|
||||
stack.info.remote_contents,
|
||||
&mut update,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
DeployIfChangedAction::Services { deploy, restart } => {
|
||||
match (deploy.is_empty(), restart.is_empty()) {
|
||||
// Both empty, nothing to do
|
||||
(true, true) => {
|
||||
update.push_simple_log(
|
||||
"Diff compose files",
|
||||
String::from(
|
||||
"Deploy cancelled after no changes detected.",
|
||||
),
|
||||
);
|
||||
update.finalize();
|
||||
Ok(update)
|
||||
}
|
||||
// Only restart
|
||||
(true, false) => {
|
||||
// For git repo based stacks, need to do a
|
||||
// PullStack in order to ensure latest repo contents on the
|
||||
// host before restart. Only necessary if no "deploys" (deploy already pulls stack).
|
||||
maybe_pull_stack(&stack, Some(&mut update)).await?;
|
||||
|
||||
let mut update =
|
||||
restart_services(stack.name, restart, user).await?;
|
||||
|
||||
if update.success {
|
||||
// Need to update 'info.deployed_contents' with the
|
||||
// latest contents so next check doesn't read the same diff.
|
||||
update_deployed_contents_with_latest(
|
||||
&stack.id,
|
||||
stack.info.remote_contents,
|
||||
&mut update,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
// Only deploy
|
||||
(false, true) => {
|
||||
deploy_services(stack.name, deploy, user).await
|
||||
}
|
||||
// Deploy then restart, returning non-db update with executed services.
|
||||
(false, false) => {
|
||||
update.push_simple_log(
|
||||
"Execute Deploys",
|
||||
format!("Deploying: {}", deploy.join(", "),),
|
||||
);
|
||||
// This already updates 'stack.info.deployed_services',
|
||||
// restart doesn't require this again.
|
||||
let deploy_update =
|
||||
deploy_services(stack.name.clone(), deploy, user)
|
||||
.await?;
|
||||
if !deploy_update.success {
|
||||
update.push_error_log(
|
||||
"Execute Deploys",
|
||||
String::from("There was a failure in service deploy"),
|
||||
);
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
update.push_simple_log(
|
||||
"Execute Restarts",
|
||||
format!("Restarting: {}", restart.join(", "),),
|
||||
);
|
||||
let restart_update =
|
||||
restart_services(stack.name, restart, user).await?;
|
||||
if !restart_update.success {
|
||||
update.push_error_log(
|
||||
"Execute Restarts",
|
||||
String::from(
|
||||
"There was a failure in a service restart",
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs a [DeployStack] execution for the given services
/// (empty `services` = all) under a freshly created database Update,
/// returning the finalized Update.
async fn deploy_services(
  stack: String,
  services: Vec<String>,
  user: &User,
) -> serror::Result<Update> {
  // The existing update is initialized to DeployStack,
  // but also has not been created on database.
  // Setup a new update here.
  let req = ExecuteRequest::DeployStack(DeployStack {
    stack,
    services,
    // None -> periphery uses its default stop timeout.
    stop_time: None,
  });
  // Creates + persists the Update and sets action state.
  let update = init_execution_update(&req, user).await?;
  // Unwrap the variant we just constructed; no other variant possible.
  let ExecuteRequest::DeployStack(req) = req else {
    unreachable!()
  };
  req
    .resolve(&ExecuteArgs {
      user: user.clone(),
      update,
    })
    .await
}
|
||||
|
||||
/// Runs a [RestartStack] execution for the given services
/// (empty `services` = all) under a freshly created database Update,
/// returning the finalized Update. Mirrors [deploy_services].
async fn restart_services(
  stack: String,
  services: Vec<String>,
  user: &User,
) -> serror::Result<Update> {
  // The existing update is initialized to DeployStack,
  // but also has not been created on database.
  // Setup a new update here.
  let req =
    ExecuteRequest::RestartStack(RestartStack { stack, services });
  // Creates + persists the Update and sets action state.
  let update = init_execution_update(&req, user).await?;
  // Unwrap the variant we just constructed; no other variant possible.
  let ExecuteRequest::RestartStack(req) = req else {
    unreachable!()
  };
  req
    .resolve(&ExecuteArgs {
      user: user.clone(),
      update,
    })
    .await
}
|
||||
|
||||
/// This can safely be called in [DeployStackIfChanged]
/// when there are ONLY changes to config files requiring restart,
/// AFTER the restart has been successfully completed.
///
/// In the case the if changed action is not FullDeploy,
/// the only file diff possible is to config files.
/// Also note either full or service deploy will already update 'deployed_contents'
/// making this method unnecessary in those cases.
///
/// Changes to config files after restart is applied should
/// be taken as the deployed contents, otherwise next changed check
/// will restart service again for no reason.
///
/// Infallible by design: persistence errors are recorded on the
/// passed Update instead of being returned.
async fn update_deployed_contents_with_latest(
  id: &str,
  contents: Option<Vec<StackRemoteFileContents>>,
  update: &mut Update,
) {
  // Nothing cached remotely -> nothing to persist.
  let Some(contents) = contents else {
    return;
  };
  // Strip remote-only metadata down to the FileContents shape stored
  // in 'info.deployed_contents'.
  let contents = contents
    .into_iter()
    .map(|f| FileContents {
      path: f.path,
      contents: f.contents,
    })
    .collect::<Vec<_>>();
  // Inline async block collapses the fallible steps into one Result
  // so a single error path handles logging.
  if let Err(e) = (async {
    let contents = to_bson(&contents)
      .context("Failed to serialize contents to bson")?;
    let id =
      ObjectId::from_str(id).context("Id is not valid ObjectId")?;
    db_client()
      .stacks
      .update_one(
        doc! { "_id": id },
        doc! { "$set": { "info.deployed_contents": contents } },
      )
      .await
      .context("Failed to update stack 'deployed_contents'")?;
    anyhow::Ok(())
  })
  .await
  {
    // Record the failure on the update; persistence of the update
    // itself is best-effort here (error discarded).
    update.push_error_log(
      "Update content cache",
      format_serror(&e.into()),
    );
    update.finalize();
    let _ = update_update(update.clone()).await;
  }
}
|
||||
|
||||
// Outcome of diffing deployed vs latest stack file contents,
// consumed by the DeployStackIfChanged handler.
enum DeployIfChangedAction {
  /// Changes to any compose or env files
  /// always lead to this.
  FullDeploy,
  /// If the above is not met, then changes to
  /// any changed additional file with `requires = "Restart"`
  /// and empty services array will lead to this.
  FullRestart,
  /// If all changed additional files have specific services
  /// they depend on, collect the final necessary
  /// services to deploy / restart.
  /// If eg `deploy` is empty, no services will be redeployed, same for `restart`.
  /// If both are empty, nothing is to be done.
  Services {
    deploy: Vec<String>,
    restart: Vec<String>,
  },
}
|
||||
|
||||
fn resolve_deploy_if_changed_action(
|
||||
deployed_contents: &[FileContents],
|
||||
latest_contents: &[StackRemoteFileContents],
|
||||
all_services: &[String],
|
||||
) -> DeployIfChangedAction {
|
||||
let mut full_restart = false;
|
||||
let mut deploy = HashSet::<String>::new();
|
||||
let mut restart = HashSet::<String>::new();
|
||||
|
||||
for latest in latest_contents {
|
||||
let Some(deployed) =
|
||||
deployed_contents.iter().find(|c| c.path == latest.path)
|
||||
else {
|
||||
// If file doesn't exist in deployed contents, do full
|
||||
// deploy to align this.
|
||||
return DeployIfChangedAction::FullDeploy;
|
||||
};
|
||||
// Ignore unchanged files
|
||||
if latest.contents == deployed.contents {
|
||||
continue;
|
||||
}
|
||||
match (latest.requires, latest.services.is_empty()) {
|
||||
(StackFileRequires::Redeploy, true) => {
|
||||
// File has requires = "Redeploy" at global level.
|
||||
// Can do early return here.
|
||||
return DeployIfChangedAction::FullDeploy;
|
||||
}
|
||||
(StackFileRequires::Redeploy, false) => {
|
||||
// Requires redeploy on specific services
|
||||
deploy.extend(latest.services.clone());
|
||||
}
|
||||
(StackFileRequires::Restart, true) => {
|
||||
// Services empty -> Full restart
|
||||
full_restart = true;
|
||||
}
|
||||
(StackFileRequires::Restart, false) => {
|
||||
restart.extend(latest.services.clone());
|
||||
}
|
||||
(StackFileRequires::None, _) => {
|
||||
// File can be ignored even with changes.
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match (full_restart, deploy.is_empty()) {
|
||||
// Full restart required with NO deploys needed -> Full Restart
|
||||
(true, true) => DeployIfChangedAction::FullRestart,
|
||||
// Full restart required WITH deploys needed -> Deploy those, restart all others
|
||||
(true, false) => DeployIfChangedAction::Services {
|
||||
restart: all_services
|
||||
.iter()
|
||||
// Only keep ones that don't need deploy
|
||||
.filter(|&s| !deploy.contains(s))
|
||||
.cloned()
|
||||
.collect(),
|
||||
deploy: deploy.into_iter().collect(),
|
||||
},
|
||||
// No full restart needed -> Deploy / restart as. pickedup.
|
||||
(false, _) => DeployIfChangedAction::Services {
|
||||
deploy: deploy.into_iter().collect(),
|
||||
restart: restart.into_iter().collect(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -371,6 +674,31 @@ impl Resolve<ExecuteArgs> for BatchPullStack {
|
||||
}
|
||||
}
|
||||
|
||||
/// Pulls the stack's repo contents onto its server when (and only
/// when) the stack is git-repo based. No-op for files-on-host stacks
/// and stacks with neither an inline `repo` nor a `linked_repo`.
///
/// `update` (when Some) collects logs from the pull.
async fn maybe_pull_stack(
  stack: &Stack,
  update: Option<&mut Update>,
) -> anyhow::Result<()> {
  if stack.config.files_on_host
    || (stack.config.repo.is_empty()
      && stack.config.linked_repo.is_empty())
  {
    // Not repo based, no pull necessary
    return Ok(());
  }
  let server =
    resource::get::<Server>(&stack.config.server_id).await?;
  // A linked Repo resource is only fetched when the stack doesn't
  // define an inline repo itself; inline repo takes precedence.
  let repo = if stack.config.repo.is_empty()
    && !stack.config.linked_repo.is_empty()
  {
    Some(resource::get::<Repo>(&stack.config.linked_repo).await?)
  } else {
    None
  };
  // Empty services vec -> pull applies to the whole stack.
  pull_stack_inner(stack.clone(), Vec::new(), &server, repo, update)
    .await?;
  Ok(())
}
|
||||
|
||||
pub async fn pull_stack_inner(
|
||||
mut stack: Stack,
|
||||
services: Vec<String>,
|
||||
@@ -433,7 +761,7 @@ pub async fn pull_stack_inner(
|
||||
.await?;
|
||||
|
||||
// Ensure cached stack state up to date by updating server cache
|
||||
update_cache_for_server(server).await;
|
||||
update_cache_for_server(server, true).await;
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
@@ -630,3 +958,95 @@ impl Resolve<ExecuteArgs> for DestroyStack {
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunStackService {
|
||||
#[instrument(name = "RunStackService", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (mut stack, server) = get_stack_and_server(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut repo = if !stack.config.files_on_host
|
||||
&& !stack.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&stack.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let action_state =
|
||||
action_states().stack.get_or_insert_default(&stack.id).await;
|
||||
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.deploying = true)?;
|
||||
|
||||
let mut update = update.clone();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let git_token =
|
||||
stack_git_token(&mut stack, repo.as_mut()).await?;
|
||||
|
||||
let registry_token = crate::helpers::registry_token(
|
||||
&stack.config.registry_provider,
|
||||
&stack.config.registry_account,
|
||||
).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account),
|
||||
)?;
|
||||
|
||||
let secret_replacers = if !stack.config.skip_secret_interp {
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_stack(&mut stack)?;
|
||||
if let Some(repo) = repo.as_mut()
|
||||
&& !repo.config.skip_secret_interp
|
||||
{
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
interpolator.push_logs(&mut update.logs);
|
||||
|
||||
interpolator.secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let log = periphery_client(&server)?
|
||||
.request(ComposeRun {
|
||||
stack,
|
||||
repo,
|
||||
git_token,
|
||||
registry_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
service: self.service,
|
||||
command: self.command,
|
||||
no_tty: self.no_tty,
|
||||
no_deps: self.no_deps,
|
||||
detach: self.detach,
|
||||
service_ports: self.service_ports,
|
||||
env: self.env,
|
||||
workdir: self.workdir,
|
||||
user: self.user,
|
||||
entrypoint: self.entrypoint,
|
||||
pull: self.pull,
|
||||
})
|
||||
.await?;
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,10 +77,8 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
};
|
||||
|
||||
// get the action state for the sync (or insert default).
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.get_or_insert_default(&sync.id)
|
||||
.await;
|
||||
let action_state =
|
||||
action_states().sync.get_or_insert_default(&sync.id).await;
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure sync not already busy before updating.
|
||||
|
||||
@@ -131,8 +131,8 @@ impl Resolve<ReadArgs> for GetActionsSummary {
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.running => {
|
||||
res.running += 1;
|
||||
(_, action_states) if action_states.running > 0 => {
|
||||
res.running += action_states.running;
|
||||
}
|
||||
(ActionState::Ok, _) => res.ok += 1,
|
||||
(ActionState::Failed, _) => res.failed += 1,
|
||||
|
||||
@@ -71,12 +71,24 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let core_version = env!("CARGO_PKG_VERSION");
|
||||
let mut res = GetServersSummaryResponse::default();
|
||||
|
||||
for server in servers {
|
||||
res.total += 1;
|
||||
match server.info.state {
|
||||
ServerState::Ok => {
|
||||
res.healthy += 1;
|
||||
// Check for version mismatch
|
||||
let has_version_mismatch = !server.info.version.is_empty()
|
||||
&& server.info.version != "Unknown"
|
||||
&& server.info.version != core_version;
|
||||
|
||||
if has_version_mismatch {
|
||||
res.warning += 1;
|
||||
} else {
|
||||
res.healthy += 1;
|
||||
}
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
res.unhealthy += 1;
|
||||
|
||||
@@ -93,7 +93,7 @@ impl Resolve<ReadArgs> for GetResourceSyncActionState {
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(&sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
@@ -138,7 +138,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
continue;
|
||||
}
|
||||
if action_states
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(&resource_sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
|
||||
@@ -16,10 +16,7 @@ impl Resolve<WriteArgs> for CreateAction {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Action>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,10 +32,7 @@ impl Resolve<WriteArgs> for CopyAction {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Action>(&self.name, config.into(), user).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,10 +16,7 @@ impl Resolve<WriteArgs> for CreateAlerter {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Alerter>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,10 +32,7 @@ impl Resolve<WriteArgs> for CopyAlerter {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Alerter>(&self.name, config.into(), user).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -50,10 +50,7 @@ impl Resolve<WriteArgs> for CreateBuild {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Build>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,10 +68,7 @@ impl Resolve<WriteArgs> for CopyBuild {
|
||||
.await?;
|
||||
// reset version to 0.0.0
|
||||
config.version = Default::default();
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Build>(&self.name, config.into(), user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,7 +180,7 @@ async fn write_dockerfile_contents_git(
|
||||
) -> serror::Result<Update> {
|
||||
let WriteBuildFileContents { build: _, contents } = req;
|
||||
|
||||
let mut clone_args: RepoExecutionArgs = if !build
|
||||
let mut repo_args: RepoExecutionArgs = if !build
|
||||
.config
|
||||
.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
@@ -196,8 +190,8 @@ async fn write_dockerfile_contents_git(
|
||||
} else {
|
||||
(&build).into()
|
||||
};
|
||||
let root = clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(root.display().to_string());
|
||||
let root = repo_args.unique_path(&core_config().repo_directory)?;
|
||||
repo_args.destination = Some(root.display().to_string());
|
||||
|
||||
let build_path = build
|
||||
.config
|
||||
@@ -220,11 +214,11 @@ async fn write_dockerfile_contents_git(
|
||||
})?;
|
||||
}
|
||||
|
||||
let access_token = if let Some(account) = &clone_args.account {
|
||||
git_token(&clone_args.provider, account, |https| clone_args.https = https)
|
||||
let access_token = if let Some(account) = &repo_args.account {
|
||||
git_token(&repo_args.provider, account, |https| repo_args.https = https)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", repo_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
@@ -235,7 +229,7 @@ async fn write_dockerfile_contents_git(
|
||||
if !root.join(".git").exists() {
|
||||
git::init_folder_as_repo(
|
||||
&root,
|
||||
&clone_args,
|
||||
&repo_args,
|
||||
access_token.as_deref(),
|
||||
&mut update.logs,
|
||||
)
|
||||
@@ -249,9 +243,11 @@ async fn write_dockerfile_contents_git(
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
clone_args,
|
||||
repo_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
)
|
||||
@@ -298,7 +294,7 @@ async fn write_dockerfile_contents_git(
|
||||
&format!("{}: Commit Dockerfile", args.user.username),
|
||||
&root,
|
||||
&build_path.join(&dockerfile_path),
|
||||
&build.config.branch,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
|
||||
@@ -16,10 +16,7 @@ impl Resolve<WriteArgs> for CreateBuilder {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Builder>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,10 +32,7 @@ impl Resolve<WriteArgs> for CopyBuilder {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Builder>(&self.name, config.into(), user).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -38,10 +38,8 @@ impl Resolve<WriteArgs> for CreateDeployment {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Deployment>(&self.name, self.config, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,10 +56,8 @@ impl Resolve<WriteArgs> for CopyDeployment {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Deployment>(&self.name, config.into(), user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,10 +149,7 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
});
|
||||
}
|
||||
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Deployment>(&self.name, config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -52,6 +52,7 @@ pub struct WriteArgs {
|
||||
#[serde(tag = "type", content = "params")]
|
||||
pub enum WriteRequest {
|
||||
// ==== USER ====
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
UpdateUserUsername(UpdateUserUsername),
|
||||
UpdateUserPassword(UpdateUserPassword),
|
||||
DeleteUser(DeleteUser),
|
||||
|
||||
@@ -16,10 +16,7 @@ impl Resolve<WriteArgs> for CreateProcedure {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateProcedureResponse> {
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Procedure>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,10 +33,8 @@ impl Resolve<WriteArgs> for CopyProcedure {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Procedure>(&self.name, config.into(), user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ impl Resolve<WriteArgs> for CreateRepo {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(resource::create::<Repo>(&self.name, self.config, user).await?)
|
||||
resource::create::<Repo>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,10 +58,7 @@ impl Resolve<WriteArgs> for CopyRepo {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Repo>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Repo>(&self.name, config.into(), user).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -30,10 +30,7 @@ impl Resolve<WriteArgs> for CreateServer {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Server> {
|
||||
Ok(
|
||||
resource::create::<Server>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Server>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,10 +46,8 @@ impl Resolve<WriteArgs> for CopyServer {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Server>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
|
||||
resource::create::<Server>(&self.name, config.into(), user).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,6 @@ use periphery_client::api::compose::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::execute::pull_stack_inner,
|
||||
config::core_config,
|
||||
helpers::{
|
||||
periphery_client,
|
||||
@@ -52,10 +51,7 @@ impl Resolve<WriteArgs> for CreateStack {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
Ok(
|
||||
resource::create::<Stack>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Stack>(&self.name, self.config, user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,10 +67,8 @@ impl Resolve<WriteArgs> for CopyStack {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Stack>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
|
||||
resource::create::<Stack>(&self.name, config.into(), user).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -285,6 +279,8 @@ async fn write_stack_file_contents_git(
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
@@ -335,7 +331,7 @@ async fn write_stack_file_contents_git(
|
||||
&format!("{username}: Write Stack File"),
|
||||
&root,
|
||||
&file_path,
|
||||
&stack.config.branch,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -430,7 +426,7 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
let GetComposeContentsOnHostResponse { contents, errors } =
|
||||
match periphery_client(&server)?
|
||||
.request(GetComposeContentsOnHost {
|
||||
file_paths: stack.file_paths().to_vec(),
|
||||
file_paths: stack.all_file_dependencies(),
|
||||
name: stack.name.clone(),
|
||||
run_directory: stack.config.run_directory.clone(),
|
||||
})
|
||||
@@ -452,6 +448,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &contents {
|
||||
// Don't include additional files in service parsing
|
||||
if !stack.is_compose_file(&contents.path) {
|
||||
continue;
|
||||
}
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
@@ -490,6 +490,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &remote_contents {
|
||||
// Don't include additional files in service parsing
|
||||
if !stack.is_compose_file(&contents.path) {
|
||||
continue;
|
||||
}
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
@@ -556,24 +560,6 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
.await
|
||||
.context("failed to update stack info on db")?;
|
||||
|
||||
if (stack.config.poll_for_updates || stack.config.auto_update)
|
||||
&& !stack.config.server_id.is_empty()
|
||||
{
|
||||
let (server, state) =
|
||||
get_server_with_state(&stack.config.server_id).await?;
|
||||
if state == ServerState::Ok {
|
||||
let name = stack.name.clone();
|
||||
if let Err(e) =
|
||||
pull_stack_inner(stack, Vec::new(), &server, repo, None)
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to pull latest images for Stack {name} | {e:#}",
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,10 +68,8 @@ impl Resolve<WriteArgs> for CreateResourceSync {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ResourceSync> {
|
||||
Ok(
|
||||
resource::create::<ResourceSync>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<ResourceSync>(&self.name, self.config, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,14 +86,8 @@ impl Resolve<WriteArgs> for CopyResourceSync {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<ResourceSync>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
user,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<ResourceSync>(&self.name, config.into(), user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -323,6 +315,8 @@ async fn write_sync_file_contents_git(
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
@@ -373,7 +367,7 @@ async fn write_sync_file_contents_git(
|
||||
&format!("{}: Commit Resource File", args.user.username),
|
||||
&root,
|
||||
&resource_path.join(&file_path),
|
||||
&sync.config.branch,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
|
||||
@@ -8,22 +8,17 @@ use database::mungos::{
|
||||
use komodo_client::{
|
||||
api::write::{CreateTag, DeleteTag, RenameTag, UpdateTagColor},
|
||||
entities::{
|
||||
action::Action,
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::Deployment,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
tag::{Tag, TagColor},
|
||||
action::Action, alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, stack::Stack, sync::ResourceSync, tag::Tag,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::{get_tag, get_tag_check_owner},
|
||||
resource,
|
||||
state::db_client,
|
||||
@@ -37,16 +32,25 @@ impl Resolve<WriteArgs> for CreateTag {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Tag> {
|
||||
if core_config().disable_non_admin_create && !user.admin {
|
||||
return Err(
|
||||
anyhow!("Non admins cannot create tags")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
if ObjectId::from_str(&self.name).is_ok() {
|
||||
return Err(anyhow!("tag name cannot be ObjectId").into());
|
||||
return Err(
|
||||
anyhow!("Tag name cannot be ObjectId")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let mut tag = Tag {
|
||||
id: Default::default(),
|
||||
name: self.name,
|
||||
color: TagColor::Slate,
|
||||
color: self.color.unwrap_or_default(),
|
||||
owner: user.id.clone(),
|
||||
unused: false,
|
||||
};
|
||||
|
||||
tag.id = db_client()
|
||||
|
||||
@@ -1,16 +1,21 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
DeleteUser, DeleteUserResponse, UpdateUserPassword,
|
||||
UpdateUserPasswordResponse, UpdateUserUsername,
|
||||
UpdateUserUsernameResponse,
|
||||
},
|
||||
entities::NoData,
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use database::{
|
||||
hash_password,
|
||||
mungos::mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
NoData,
|
||||
user::{User, UserConfig},
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
@@ -18,7 +23,85 @@ use super::WriteArgs;
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for CreateLocalUser {
|
||||
#[instrument(name = "CreateLocalUser", skip(admin, self), fields(admin_id = admin.id, username = self.username))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<CreateLocalUserResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin-only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
if self.username.is_empty() {
|
||||
return Err(anyhow!("Username cannot be empty.").into());
|
||||
}
|
||||
|
||||
if ObjectId::from_str(&self.username).is_ok() {
|
||||
return Err(
|
||||
anyhow!("Username cannot be valid ObjectId").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if self.password.is_empty() {
|
||||
return Err(anyhow!("Password cannot be empty.").into());
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
|
||||
if db
|
||||
.users
|
||||
.find_one(doc! { "username": &self.username })
|
||||
.await
|
||||
.context("Failed to query for existing users")?
|
||||
.is_some()
|
||||
{
|
||||
return Err(anyhow!("Username already taken.").into());
|
||||
}
|
||||
|
||||
let ts = unix_timestamp_ms() as i64;
|
||||
let hashed_password = hash_password(self.password)?;
|
||||
|
||||
let mut user = User {
|
||||
id: Default::default(),
|
||||
username: self.username,
|
||||
enabled: true,
|
||||
admin: false,
|
||||
super_admin: false,
|
||||
create_server_permissions: false,
|
||||
create_build_permissions: false,
|
||||
updated_at: ts,
|
||||
last_update_view: 0,
|
||||
recents: Default::default(),
|
||||
all: Default::default(),
|
||||
config: UserConfig::Local {
|
||||
password: hashed_password,
|
||||
},
|
||||
};
|
||||
|
||||
user.id = db_client()
|
||||
.users
|
||||
.insert_one(&user)
|
||||
.await
|
||||
.context("failed to create user")?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted_id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
user.sanitize();
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserUsername {
|
||||
#[instrument(name = "UpdateUserUsername", skip(user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -36,6 +119,13 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
|
||||
if self.username.is_empty() {
|
||||
return Err(anyhow!("Username cannot be empty.").into());
|
||||
}
|
||||
|
||||
if ObjectId::from_str(&self.username).is_ok() {
|
||||
return Err(
|
||||
anyhow!("Username cannot be valid ObjectId").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
if db
|
||||
.users
|
||||
@@ -62,6 +152,7 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserPassword {
|
||||
#[instrument(name = "UpdateUserPassword", skip(user, self), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -84,12 +175,16 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteUser {
|
||||
#[instrument(name = "DeleteUser", skip(admin), fields(user = self.user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<DeleteUserResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("Calling user is not admin.").into());
|
||||
return Err(
|
||||
anyhow!("This method is admin-only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
if admin.username == self.user || admin.id == self.user {
|
||||
return Err(anyhow!("User cannot delete themselves.").into());
|
||||
@@ -125,6 +220,14 @@ impl Resolve<WriteArgs> for DeleteUser {
|
||||
.delete_one(query)
|
||||
.await
|
||||
.context("Failed to delete user from database")?;
|
||||
// Also remove user id from all user groups
|
||||
if let Err(e) = db
|
||||
.user_groups
|
||||
.update_many(doc! {}, doc! { "$pull": { "users": &user.id } })
|
||||
.await
|
||||
{
|
||||
warn!("Failed to remove deleted user from user groups | {e:?}");
|
||||
};
|
||||
Ok(user)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,9 @@ use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{komodo_timestamp, user_group::UserGroup},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -23,7 +25,10 @@ impl Resolve<WriteArgs> for CreateUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let user_group = UserGroup {
|
||||
name: self.name,
|
||||
@@ -58,7 +63,10 @@ impl Resolve<WriteArgs> for RenameUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
@@ -84,7 +92,10 @@ impl Resolve<WriteArgs> for DeleteUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -117,7 +128,10 @@ impl Resolve<WriteArgs> for AddUserToUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -161,7 +175,10 @@ impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -205,7 +222,10 @@ impl Resolve<WriteArgs> for SetUsersInUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -252,7 +272,10 @@ impl Resolve<WriteArgs> for SetEveryoneUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
|
||||
@@ -4,7 +4,9 @@ use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{Operation, ResourceTarget, variable::Variable},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
@@ -22,6 +24,13 @@ impl Resolve<WriteArgs> for CreateVariable {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateVariableResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("Only admins can create variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let CreateVariable {
|
||||
name,
|
||||
value,
|
||||
@@ -29,10 +38,6 @@ impl Resolve<WriteArgs> for CreateVariable {
|
||||
is_secret,
|
||||
} = self;
|
||||
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables").into());
|
||||
}
|
||||
|
||||
let variable = Variable {
|
||||
name,
|
||||
value,
|
||||
@@ -44,7 +49,7 @@ impl Resolve<WriteArgs> for CreateVariable {
|
||||
.variables
|
||||
.insert_one(&variable)
|
||||
.await
|
||||
.context("failed to create variable on db")?;
|
||||
.context("Failed to create variable on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
@@ -69,7 +74,10 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateVariableValueResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can update variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can update variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let UpdateVariableValue { name, value } = self;
|
||||
@@ -87,7 +95,7 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
doc! { "$set": { "value": &value } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable value on db")?;
|
||||
.context("Failed to update variable value on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
@@ -107,7 +115,7 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
)
|
||||
};
|
||||
|
||||
update.push_simple_log("update variable value", log);
|
||||
update.push_simple_log("Update Variable Value", log);
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
@@ -123,7 +131,10 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateVariableDescriptionResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can update variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can update variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
db_client()
|
||||
.variables
|
||||
@@ -132,7 +143,7 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
|
||||
doc! { "$set": { "description": &self.description } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable description on db")?;
|
||||
.context("Failed to update variable description on db")?;
|
||||
Ok(get_variable(&self.name).await?)
|
||||
}
|
||||
}
|
||||
@@ -144,7 +155,10 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateVariableIsSecretResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can update variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can update variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
db_client()
|
||||
.variables
|
||||
@@ -153,7 +167,7 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
|
||||
doc! { "$set": { "is_secret": self.is_secret } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable is secret on db")?;
|
||||
.context("Failed to update variable is secret on db")?;
|
||||
Ok(get_variable(&self.name).await?)
|
||||
}
|
||||
}
|
||||
@@ -164,14 +178,17 @@ impl Resolve<WriteArgs> for DeleteVariable {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteVariableResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can delete variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can delete variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let variable = get_variable(&self.name).await?;
|
||||
db_client()
|
||||
.variables
|
||||
.delete_one(doc! { "name": &self.name })
|
||||
.await
|
||||
.context("failed to delete variable on db")?;
|
||||
.context("Failed to delete variable on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
@@ -180,7 +197,7 @@ impl Resolve<WriteArgs> for DeleteVariable {
|
||||
);
|
||||
|
||||
update
|
||||
.push_simple_log("delete variable", format!("{variable:#?}"));
|
||||
.push_simple_log("Delete Variable", format!("{variable:#?}"));
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
|
||||
@@ -187,8 +187,8 @@ impl GoogleOauthClient {
|
||||
Ok(body)
|
||||
} else {
|
||||
let text = res.text().await.context(format!(
|
||||
"method: POST | status: {status} | failed to get response text"
|
||||
))?;
|
||||
"method: POST | status: {status} | failed to get response text"
|
||||
))?;
|
||||
Err(anyhow!("method: POST | status: {status} | text: {text}"))
|
||||
}
|
||||
}
|
||||
@@ -207,5 +207,6 @@ pub struct GoogleUser {
|
||||
#[serde(rename = "sub")]
|
||||
pub id: String,
|
||||
pub email: String,
|
||||
#[serde(default)]
|
||||
pub picture: String,
|
||||
}
|
||||
|
||||
@@ -8,13 +8,15 @@ use database::mungos::mongodb::bson::doc;
|
||||
use jsonwebtoken::{
|
||||
DecodingKey, EncodingKey, Header, Validation, decode, encode,
|
||||
};
|
||||
use komodo_client::entities::config::core::CoreConfig;
|
||||
use komodo_client::{
|
||||
api::auth::JwtResponse, entities::config::core::CoreConfig,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::helpers::random_string;
|
||||
|
||||
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
|
||||
type ExchangeTokenMap = Mutex<HashMap<String, (JwtResponse, u128)>>;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct JwtClaims {
|
||||
@@ -51,16 +53,20 @@ impl JwtClient {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn encode(&self, user_id: String) -> anyhow::Result<String> {
|
||||
pub fn encode(
|
||||
&self,
|
||||
user_id: String,
|
||||
) -> anyhow::Result<JwtResponse> {
|
||||
let iat = unix_timestamp_ms();
|
||||
let exp = iat + self.ttl_ms;
|
||||
let claims = JwtClaims {
|
||||
id: user_id,
|
||||
id: user_id.clone(),
|
||||
iat,
|
||||
exp,
|
||||
};
|
||||
encode(&self.header, &claims, &self.encoding_key)
|
||||
.context("failed at signing claim")
|
||||
let jwt = encode(&self.header, &claims, &self.encoding_key)
|
||||
.context("failed at signing claim")?;
|
||||
Ok(JwtResponse { user_id, jwt })
|
||||
}
|
||||
|
||||
pub fn decode(&self, jwt: &str) -> anyhow::Result<JwtClaims> {
|
||||
@@ -70,7 +76,10 @@ impl JwtClient {
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn create_exchange_token(&self, jwt: String) -> String {
|
||||
pub async fn create_exchange_token(
|
||||
&self,
|
||||
jwt: JwtResponse,
|
||||
) -> String {
|
||||
let exchange_token = random_string(40);
|
||||
self.exchange_tokens.lock().await.insert(
|
||||
exchange_token.clone(),
|
||||
@@ -86,7 +95,7 @@ impl JwtClient {
|
||||
pub async fn redeem_exchange_token(
|
||||
&self,
|
||||
exchange_token: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
) -> anyhow::Result<JwtResponse> {
|
||||
let (jwt, valid_until) = self
|
||||
.exchange_tokens
|
||||
.lock()
|
||||
|
||||
@@ -8,8 +8,8 @@ use database::{
|
||||
};
|
||||
use komodo_client::{
|
||||
api::auth::{
|
||||
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
|
||||
LoginLocalUserResponse,
|
||||
LoginLocalUser, LoginLocalUserResponse, SignUpLocalUser,
|
||||
SignUpLocalUserResponse,
|
||||
},
|
||||
entities::user::{User, UserConfig},
|
||||
};
|
||||
@@ -21,12 +21,12 @@ use crate::{
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
impl Resolve<AuthArgs> for CreateLocalUser {
|
||||
#[instrument(name = "CreateLocalUser", skip(self))]
|
||||
impl Resolve<AuthArgs> for SignUpLocalUser {
|
||||
#[instrument(name = "SignUpLocalUser", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
) -> serror::Result<CreateLocalUserResponse> {
|
||||
) -> serror::Result<SignUpLocalUserResponse> {
|
||||
let core_config = core_config();
|
||||
|
||||
if !core_config.local_auth {
|
||||
@@ -47,16 +47,27 @@ impl Resolve<AuthArgs> for CreateLocalUser {
|
||||
return Err(anyhow!("Password cannot be empty string").into());
|
||||
}
|
||||
|
||||
let hashed_password = hash_password(self.password)?;
|
||||
let db = db_client();
|
||||
|
||||
let no_users_exist =
|
||||
db_client().users.find_one(Document::new()).await?.is_none();
|
||||
db.users.find_one(Document::new()).await?.is_none();
|
||||
|
||||
if !no_users_exist && core_config.disable_user_registration {
|
||||
return Err(anyhow!("User registration is disabled").into());
|
||||
}
|
||||
|
||||
if db
|
||||
.users
|
||||
.find_one(doc! { "username": &self.username })
|
||||
.await
|
||||
.context("Failed to query for existing users")?
|
||||
.is_some()
|
||||
{
|
||||
return Err(anyhow!("Username already taken.").into());
|
||||
}
|
||||
|
||||
let ts = unix_timestamp_ms() as i64;
|
||||
let hashed_password = hash_password(self.password)?;
|
||||
|
||||
let user = User {
|
||||
id: Default::default(),
|
||||
@@ -85,11 +96,10 @@ impl Resolve<AuthArgs> for CreateLocalUser {
|
||||
.context("inserted_id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
let jwt = jwt_client()
|
||||
.encode(user_id)
|
||||
.context("failed to generate jwt for user")?;
|
||||
|
||||
Ok(CreateLocalUserResponse { jwt })
|
||||
jwt_client()
|
||||
.encode(user_id.clone())
|
||||
.context("failed to generate jwt for user")
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -131,10 +141,9 @@ impl Resolve<AuthArgs> for LoginLocalUser {
|
||||
return Err(anyhow!("invalid credentials").into());
|
||||
}
|
||||
|
||||
let jwt = jwt_client()
|
||||
.encode(user.id)
|
||||
.context("failed at generating jwt for user")?;
|
||||
|
||||
Ok(LoginLocalUserResponse { jwt })
|
||||
jwt_client()
|
||||
.encode(user.id.clone())
|
||||
.context("failed at generating jwt for user")
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,6 +109,14 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.komodo_database_db_name
|
||||
.unwrap_or(config.database.db_name),
|
||||
},
|
||||
init_admin_username: maybe_read_item_from_file(
|
||||
env.komodo_init_admin_username_file,
|
||||
env.komodo_init_admin_username
|
||||
).or(config.init_admin_username),
|
||||
init_admin_password: maybe_read_item_from_file(
|
||||
env.komodo_init_admin_password_file,
|
||||
env.komodo_init_admin_password
|
||||
).unwrap_or(config.init_admin_password),
|
||||
oidc_enabled: env.komodo_oidc_enabled.unwrap_or(config.oidc_enabled),
|
||||
oidc_provider: env.komodo_oidc_provider.unwrap_or(config.oidc_provider),
|
||||
oidc_redirect_host: env.komodo_oidc_redirect_host.unwrap_or(config.oidc_redirect_host),
|
||||
@@ -169,7 +177,8 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
port: env.komodo_port.unwrap_or(config.port),
|
||||
bind_ip: env.komodo_bind_ip.unwrap_or(config.bind_ip),
|
||||
timezone: env.komodo_timezone.unwrap_or(config.timezone),
|
||||
first_server: env.komodo_first_server.unwrap_or(config.first_server),
|
||||
first_server: env.komodo_first_server.or(config.first_server),
|
||||
first_server_name: env.komodo_first_server_name.unwrap_or(config.first_server_name),
|
||||
frontend_path: env.komodo_frontend_path.unwrap_or(config.frontend_path),
|
||||
jwt_ttl: env
|
||||
.komodo_jwt_ttl
|
||||
@@ -214,6 +223,8 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.unwrap_or(config.disable_user_registration),
|
||||
disable_non_admin_create: env.komodo_disable_non_admin_create
|
||||
.unwrap_or(config.disable_non_admin_create),
|
||||
disable_init_resources: env.komodo_disable_init_resources
|
||||
.unwrap_or(config.disable_init_resources),
|
||||
enable_fancy_toml: env.komodo_enable_fancy_toml
|
||||
.unwrap_or(config.enable_fancy_toml),
|
||||
lock_login_credentials_for: env.komodo_lock_login_credentials_for
|
||||
@@ -239,6 +250,7 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.unwrap_or(config.logging.opentelemetry_service_name),
|
||||
},
|
||||
pretty_startup_config: env.komodo_pretty_startup_config.unwrap_or(config.pretty_startup_config),
|
||||
unsafe_unsanitized_startup_config: env.komodo_unsafe_unsanitized_startup_config.unwrap_or(config.unsafe_unsanitized_startup_config),
|
||||
internet_interface: env.komodo_internet_interface.unwrap_or(config.internet_interface),
|
||||
ssl_enabled: env.komodo_ssl_enabled.unwrap_or(config.ssl_enabled),
|
||||
ssl_key_file: env.komodo_ssl_key_file.unwrap_or(config.ssl_key_file),
|
||||
|
||||
@@ -16,17 +16,16 @@ use super::cache::Cache;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ActionStates {
|
||||
pub build: Cache<String, Arc<ActionState<BuildActionState>>>,
|
||||
pub server: Cache<String, Arc<ActionState<ServerActionState>>>,
|
||||
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,
|
||||
pub deployment:
|
||||
Cache<String, Arc<ActionState<DeploymentActionState>>>,
|
||||
pub server: Cache<String, Arc<ActionState<ServerActionState>>>,
|
||||
pub build: Cache<String, Arc<ActionState<BuildActionState>>>,
|
||||
pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
|
||||
pub procedure:
|
||||
Cache<String, Arc<ActionState<ProcedureActionState>>>,
|
||||
pub action: Cache<String, Arc<ActionState<ActionActionState>>>,
|
||||
pub resource_sync:
|
||||
Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
|
||||
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,
|
||||
pub sync: Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
|
||||
}
|
||||
|
||||
/// Need to be able to check "busy" with write lock acquired.
|
||||
@@ -62,17 +61,33 @@ impl<States: Default + Busy + Copy + Send + 'static>
|
||||
/// Returns a guard that returns the states to default (not busy) when dropped.
|
||||
pub fn update(
|
||||
&self,
|
||||
handler: impl Fn(&mut States),
|
||||
update_fn: impl Fn(&mut States),
|
||||
) -> anyhow::Result<UpdateGuard<'_, States>> {
|
||||
self.update_custom(
|
||||
update_fn,
|
||||
|states| *states = Default::default(),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
/// Will acquire lock, optionally check busy, and if not will
|
||||
/// run the provided update function on the states.
|
||||
/// Returns a guard that calls the provided return_fn when dropped.
|
||||
pub fn update_custom(
|
||||
&self,
|
||||
update_fn: impl Fn(&mut States),
|
||||
return_fn: impl Fn(&mut States) + Send + 'static,
|
||||
busy_check: bool,
|
||||
) -> anyhow::Result<UpdateGuard<'_, States>> {
|
||||
let mut lock = self
|
||||
.0
|
||||
.lock()
|
||||
.map_err(|e| anyhow!("action state lock poisoned | {e:?}"))?;
|
||||
if lock.busy() {
|
||||
return Err(anyhow!("resource is busy"));
|
||||
.map_err(|e| anyhow!("Action state lock poisoned | {e:?}"))?;
|
||||
if busy_check && lock.busy() {
|
||||
return Err(anyhow!("Resource is busy"));
|
||||
}
|
||||
handler(&mut *lock);
|
||||
Ok(UpdateGuard(&self.0))
|
||||
update_fn(&mut *lock);
|
||||
Ok(UpdateGuard(&self.0, Box::new(return_fn)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,6 +97,7 @@ impl<States: Default + Busy + Copy + Send + 'static>
|
||||
/// user could drop UpdateGuard.
|
||||
pub struct UpdateGuard<'a, States: Default + Send + 'static>(
|
||||
&'a Mutex<States>,
|
||||
Box<dyn Fn(&mut States) + Send>,
|
||||
);
|
||||
|
||||
impl<States: Default + Send + 'static> Drop
|
||||
@@ -95,6 +111,6 @@ impl<States: Default + Send + 'static> Drop
|
||||
return;
|
||||
}
|
||||
};
|
||||
*lock = States::default();
|
||||
self.1(&mut *lock);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1101,6 +1101,23 @@ async fn execute_execution(
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::RunStackService(req) => {
|
||||
let req = ExecuteRequest::RunStackService(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::RunStackService(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at RunStackService"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::BatchDestroyStack(_) => {
|
||||
// All batch executions must be expanded in `execute_stage`
|
||||
return Err(anyhow!(
|
||||
@@ -1124,6 +1141,23 @@ async fn execute_execution(
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::SendAlert(req) => {
|
||||
let req = ExecuteRequest::SendAlert(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::SendAlert(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at SendAlert"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::ClearRepoCache(req) => {
|
||||
let req = ExecuteRequest::ClearRepoCache(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
@@ -1136,7 +1170,41 @@ async fn execute_execution(
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at TestAlerter"),
|
||||
.context("Failed at ClearRepoCache"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::BackupCoreDatabase(req) => {
|
||||
let req = ExecuteRequest::BackupCoreDatabase(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::BackupCoreDatabase(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at BackupCoreDatabase"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::GlobalAutoUpdate(req) => {
|
||||
let req = ExecuteRequest::GlobalAutoUpdate(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::GlobalAutoUpdate(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at GlobalAutoUpdate"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
|
||||
@@ -3,7 +3,7 @@ use async_timing_util::{
|
||||
ONE_DAY_MS, Timelength, unix_timestamp_ms, wait_until_timelength,
|
||||
};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use futures::future::join_all;
|
||||
use futures::{StreamExt, stream::FuturesUnordered};
|
||||
use periphery_client::api::image::PruneImages;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
@@ -30,24 +30,26 @@ pub fn spawn_prune_loop() {
|
||||
}
|
||||
|
||||
async fn prune_images() -> anyhow::Result<()> {
|
||||
let futures = find_collect(&db_client().servers, None, None)
|
||||
.await
|
||||
.context("failed to get servers from db")?
|
||||
.into_iter()
|
||||
.filter(|server| {
|
||||
server.config.enabled && server.config.auto_prune
|
||||
})
|
||||
.map(|server| async move {
|
||||
(
|
||||
async {
|
||||
periphery_client(&server)?.request(PruneImages {}).await
|
||||
}
|
||||
.await,
|
||||
server,
|
||||
)
|
||||
});
|
||||
let mut futures = find_collect(
|
||||
&db_client().servers,
|
||||
doc! { "config.enabled": true, "config.auto_prune": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to get servers from db")?
|
||||
.into_iter()
|
||||
.map(|server| async move {
|
||||
(
|
||||
async {
|
||||
periphery_client(&server)?.request(PruneImages {}).await
|
||||
}
|
||||
.await,
|
||||
server,
|
||||
)
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
|
||||
for (res, server) in join_all(futures).await {
|
||||
while let Some((res, server)) = futures.next().await {
|
||||
if let Err(e) = res {
|
||||
error!(
|
||||
"failed to prune images on server {} ({}) | {e:#}",
|
||||
|
||||
@@ -13,26 +13,31 @@ use database::mungos::{
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use komodo_client::entities::{
|
||||
Operation, ResourceTarget, ResourceTargetVariant,
|
||||
action::{Action, ActionState},
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::{Deployment, DeploymentState},
|
||||
docker::container::{ContainerListItem, ContainerStateStatusEnum},
|
||||
permission::{PermissionLevel, PermissionLevelAndSpecifics},
|
||||
procedure::{Procedure, ProcedureState},
|
||||
repo::Repo,
|
||||
server::{Server, ServerState},
|
||||
stack::{Stack, StackServiceNames, StackState},
|
||||
stats::SystemInformation,
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
update::Update,
|
||||
user::{User, admin_service_user},
|
||||
user_group::UserGroup,
|
||||
variable::Variable,
|
||||
use komodo_client::{
|
||||
busy::Busy,
|
||||
entities::{
|
||||
Operation, ResourceTarget, ResourceTargetVariant,
|
||||
action::{Action, ActionState},
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::{Deployment, DeploymentState},
|
||||
docker::container::{
|
||||
ContainerListItem, ContainerStateStatusEnum,
|
||||
},
|
||||
permission::{PermissionLevel, PermissionLevelAndSpecifics},
|
||||
procedure::{Procedure, ProcedureState},
|
||||
repo::Repo,
|
||||
server::{Server, ServerState},
|
||||
stack::{Stack, StackServiceNames, StackState},
|
||||
stats::SystemInformation,
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
update::Update,
|
||||
user::{User, admin_service_user},
|
||||
user_group::UserGroup,
|
||||
variable::Variable,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::stats;
|
||||
use tokio::sync::Mutex;
|
||||
@@ -467,7 +472,7 @@ pub async fn get_action_state(id: &String) -> ActionState {
|
||||
.action
|
||||
.get(id)
|
||||
.await
|
||||
.map(|s| s.get().map(|s| s.running))
|
||||
.map(|s| s.get().map(|s| s.busy()))
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
@@ -483,7 +488,7 @@ pub async fn get_procedure_state(id: &String) -> ProcedureState {
|
||||
.procedure
|
||||
.get(id)
|
||||
.await
|
||||
.map(|s| s.get().map(|s| s.running))
|
||||
.map(|s| s.get().map(|s| s.busy()))
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
|
||||
@@ -492,6 +492,13 @@ pub async fn init_execution_update(
|
||||
return Ok(Default::default());
|
||||
}
|
||||
|
||||
ExecuteRequest::RunStackService(data) => (
|
||||
Operation::RunStackService,
|
||||
ResourceTarget::Stack(
|
||||
resource::get::<Stack>(&data.stack).await?.id,
|
||||
),
|
||||
),
|
||||
|
||||
// Alerter
|
||||
ExecuteRequest::TestAlerter(data) => (
|
||||
Operation::TestAlerter,
|
||||
@@ -499,11 +506,20 @@ pub async fn init_execution_update(
|
||||
resource::get::<Alerter>(&data.alerter).await?.id,
|
||||
),
|
||||
),
|
||||
ExecuteRequest::SendAlert(_) => {
|
||||
(Operation::SendAlert, ResourceTarget::system())
|
||||
}
|
||||
|
||||
// Maintenance
|
||||
ExecuteRequest::ClearRepoCache(_data) => {
|
||||
(Operation::ClearRepoCache, ResourceTarget::system())
|
||||
}
|
||||
ExecuteRequest::BackupCoreDatabase(_data) => {
|
||||
(Operation::BackupCoreDatabase, ResourceTarget::system())
|
||||
}
|
||||
ExecuteRequest::GlobalAutoUpdate(_data) => {
|
||||
(Operation::GlobalAutoUpdate, ResourceTarget::system())
|
||||
}
|
||||
};
|
||||
|
||||
let mut update = make_update(target, operation, user);
|
||||
|
||||
@@ -45,10 +45,14 @@ async fn app() -> anyhow::Result<()> {
|
||||
|
||||
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
if core_config().pretty_startup_config {
|
||||
info!("{:#?}", config.sanitized());
|
||||
} else {
|
||||
info!("{:?}", config.sanitized());
|
||||
match (
|
||||
config.pretty_startup_config,
|
||||
config.unsafe_unsanitized_startup_config,
|
||||
) {
|
||||
(true, true) => info!("{:#?}", config),
|
||||
(true, false) => info!("{:#?}", config.sanitized()),
|
||||
(false, true) => info!("{:?}", config),
|
||||
(false, false) => info!("{:?}", config.sanitized()),
|
||||
}
|
||||
|
||||
// Init jwt client to crash on failure
|
||||
@@ -65,7 +69,7 @@ async fn app() -> anyhow::Result<()> {
|
||||
// Spawn background tasks
|
||||
monitor::spawn_monitor_loop();
|
||||
resource::spawn_resource_refresh_loop();
|
||||
resource::spawn_all_resources_refresh_loop();
|
||||
resource::spawn_all_resources_cache_refresh_loop();
|
||||
resource::spawn_build_state_refresh_loop();
|
||||
resource::spawn_repo_state_refresh_loop();
|
||||
resource::spawn_procedure_state_refresh_loop();
|
||||
|
||||
@@ -7,8 +7,10 @@ use komodo_client::entities::{
|
||||
};
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts, monitor::deployment_status_cache, resource,
|
||||
state::db_client,
|
||||
alert::send_alerts,
|
||||
monitor::deployment_status_cache,
|
||||
resource,
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
@@ -17,6 +19,7 @@ pub async fn alert_deployments(
|
||||
server_names: &HashMap<String, String>,
|
||||
) {
|
||||
let mut alerts = Vec::<Alert>::new();
|
||||
let action_states = action_states();
|
||||
for status in deployment_status_cache().get_list().await {
|
||||
// Don't alert if prev None
|
||||
let Some(prev) = status.prev else {
|
||||
@@ -31,6 +34,20 @@ pub async fn alert_deployments(
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't alert if deploying
|
||||
if action_states
|
||||
.deployment
|
||||
.get(&status.curr.id)
|
||||
.await
|
||||
.map(|s| s.get().map(|s| s.deploying))
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
.unwrap_or_default()
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if status.curr.state != prev {
|
||||
// send alert
|
||||
let Ok(deployment) =
|
||||
|
||||
@@ -178,6 +178,77 @@ pub async fn alert_servers(
|
||||
),
|
||||
}
|
||||
|
||||
// ===================
|
||||
// SERVER VERSION MISMATCH
|
||||
// ===================
|
||||
let core_version = env!("CARGO_PKG_VERSION");
|
||||
let has_version_mismatch = server_status.state == ServerState::Ok
|
||||
&& !server_status.version.is_empty()
|
||||
&& server_status.version != "Unknown"
|
||||
&& server_status.version != core_version;
|
||||
|
||||
let version_alert = server_alerts.as_ref().and_then(|alerts| {
|
||||
alerts.get(&AlertDataVariant::ServerVersionMismatch)
|
||||
});
|
||||
|
||||
match (has_version_mismatch, version_alert) {
|
||||
(true, None) => {
|
||||
// Only open version mismatch alert if not in maintenance and buffer is ready
|
||||
if !in_maintenance
|
||||
&& buffer.ready_to_open(
|
||||
server_status.id.clone(),
|
||||
AlertDataVariant::ServerVersionMismatch,
|
||||
)
|
||||
{
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts,
|
||||
resolved: false,
|
||||
resolved_ts: None,
|
||||
level: SeverityLevel::Warning,
|
||||
target: ResourceTarget::Server(server_status.id.clone()),
|
||||
data: AlertData::ServerVersionMismatch {
|
||||
id: server_status.id.clone(),
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
server_version: server_status.version.clone(),
|
||||
core_version: core_version.to_string(),
|
||||
},
|
||||
};
|
||||
// Use send_unreachable_alerts as a proxy for general server alerts
|
||||
alerts_to_open
|
||||
.push((alert, server.config.send_version_mismatch_alerts))
|
||||
}
|
||||
}
|
||||
(true, Some(alert)) => {
|
||||
// Update existing alert with current version info
|
||||
let mut alert = alert.clone();
|
||||
alert.data = AlertData::ServerVersionMismatch {
|
||||
id: server_status.id.clone(),
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
server_version: server_status.version.clone(),
|
||||
core_version: core_version.to_string(),
|
||||
};
|
||||
// Don't send notification for updates
|
||||
alerts_to_update.push((alert, false));
|
||||
}
|
||||
(false, Some(alert)) => {
|
||||
// Version is now correct, close the alert
|
||||
alert_ids_to_close.push((
|
||||
alert.clone(),
|
||||
server.config.send_version_mismatch_alerts,
|
||||
));
|
||||
}
|
||||
(false, None) => {
|
||||
// Reset buffer state when no mismatch and no alert
|
||||
buffer.reset(
|
||||
server_status.id.clone(),
|
||||
AlertDataVariant::ServerVersionMismatch,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
let Some(health) = &server_status.health else {
|
||||
continue;
|
||||
};
|
||||
|
||||
@@ -9,7 +9,7 @@ use komodo_client::entities::{
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
resource,
|
||||
state::{db_client, stack_status_cache},
|
||||
state::{action_states, db_client, stack_status_cache},
|
||||
};
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
@@ -17,6 +17,7 @@ pub async fn alert_stacks(
|
||||
ts: i64,
|
||||
server_names: &HashMap<String, String>,
|
||||
) {
|
||||
let action_states = action_states();
|
||||
let mut alerts = Vec::<Alert>::new();
|
||||
for status in stack_status_cache().get_list().await {
|
||||
// Don't alert if prev None
|
||||
@@ -32,6 +33,20 @@ pub async fn alert_stacks(
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't alert if deploying
|
||||
if action_states
|
||||
.stack
|
||||
.get(&status.curr.id)
|
||||
.await
|
||||
.map(|s| s.get().map(|s| s.deploying))
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
.unwrap_or_default()
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if status.curr.state != prev {
|
||||
// send alert
|
||||
let Ok(stack) =
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use std::sync::{Arc, OnceLock};
|
||||
|
||||
use async_timing_util::wait_until_timelength;
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use futures::future::join_all;
|
||||
@@ -15,10 +17,11 @@ use komodo_client::entities::{
|
||||
};
|
||||
use periphery_client::api::{self, git::GetLatestCommit};
|
||||
use serror::Serror;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::periphery_client,
|
||||
helpers::{cache::Cache, periphery_client},
|
||||
monitor::{alert::check_alerts, record::record_server_stats},
|
||||
state::{db_client, deployment_status_cache, repo_status_cache},
|
||||
};
|
||||
@@ -110,14 +113,47 @@ async fn refresh_server_cache(ts: i64) {
|
||||
}
|
||||
};
|
||||
let futures = servers.into_iter().map(|server| async move {
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, false).await;
|
||||
});
|
||||
join_all(futures).await;
|
||||
tokio::join!(check_alerts(ts), record_server_stats(ts));
|
||||
}
|
||||
|
||||
/// Makes sure cache for server doesn't update too frequently / simultaneously.
|
||||
/// If forced, will still block against simultaneous update.
|
||||
fn update_cache_for_server_controller()
|
||||
-> &'static Cache<String, Arc<Mutex<i64>>> {
|
||||
static CACHE: OnceLock<Cache<String, Arc<Mutex<i64>>>> =
|
||||
OnceLock::new();
|
||||
CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
/// The background loop will call this with force: false,
|
||||
/// which exits early if the lock is busy or it was completed too recently.
|
||||
/// If force is true, it will wait on simultaneous calls, and will
|
||||
/// ignore the restriction on being completed too recently.
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn update_cache_for_server(server: &Server) {
|
||||
pub async fn update_cache_for_server(server: &Server, force: bool) {
|
||||
// Concurrency controller to ensure it isn't done too often
|
||||
// when it happens in other contexts.
|
||||
let controller = update_cache_for_server_controller()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
let mut lock = match controller.try_lock() {
|
||||
Ok(lock) => lock,
|
||||
Err(_) if force => controller.lock().await,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let now = komodo_timestamp();
|
||||
|
||||
// early return if called again sooner than 1s.
|
||||
if !force && *lock > now - 1_000 {
|
||||
return;
|
||||
}
|
||||
|
||||
*lock = now;
|
||||
|
||||
let (deployments, builds, repos, stacks) = tokio::join!(
|
||||
find_collect(
|
||||
&db_client().deployments,
|
||||
|
||||
@@ -21,6 +21,7 @@ pub async fn record_server_stats(ts: i64) {
|
||||
ts,
|
||||
sid: status.id.clone(),
|
||||
cpu_perc: stats.cpu_perc,
|
||||
load_average: stats.load_average.clone(),
|
||||
mem_total_gb: stats.mem_total_gb,
|
||||
mem_used_gb: stats.mem_used_gb,
|
||||
disk_total_gb,
|
||||
|
||||
@@ -81,7 +81,7 @@ pub async fn update_deployment_cache(
|
||||
// If image already has tag, leave it,
|
||||
// otherwise default the tag to latest
|
||||
if image.contains(':') {
|
||||
image
|
||||
image.to_string()
|
||||
} else {
|
||||
format!("{image}:latest")
|
||||
}
|
||||
@@ -92,6 +92,9 @@ pub async fn update_deployment_cache(
|
||||
..
|
||||
}) = &container
|
||||
{
|
||||
// Docker will automatically strip `docker.io` from incoming image names re #468.
|
||||
// Need to strip it in order to match by image name and find available updates.
|
||||
let image = image.strip_prefix("docker.io/").unwrap_or(&image);
|
||||
images
|
||||
.iter()
|
||||
.find(|i| i.name == image)
|
||||
@@ -250,20 +253,21 @@ pub async fn update_stack_cache(
|
||||
}
|
||||
}.is_match(&container.name)
|
||||
}).cloned();
|
||||
// If image already has tag, leave it,
|
||||
// otherwise default the tag to latest
|
||||
let image = image.clone();
|
||||
let image = if image.contains(':') {
|
||||
image
|
||||
image.to_string()
|
||||
} else {
|
||||
image + ":latest"
|
||||
format!("{image}:latest")
|
||||
};
|
||||
let update_available = if let Some(ContainerListItem { image_id: Some(curr_image_id), .. }) = &container {
|
||||
// Docker will automatically strip `docker.io` from incoming image names re #468.
|
||||
// Need to strip it in order to match by image tag and find available update.
|
||||
let image =
|
||||
image.strip_prefix("docker.io/").unwrap_or(&image);
|
||||
images
|
||||
.iter()
|
||||
.find(|i| i.name == image)
|
||||
.map(|i| &i.id != curr_image_id)
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.find(|i| i.name == image)
|
||||
.map(|i| &i.id != curr_image_id)
|
||||
.unwrap_or_default()
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
@@ -36,10 +36,10 @@ fn is_container_environment() -> bool {
|
||||
}
|
||||
|
||||
// Check cgroup for container runtime indicators
|
||||
if let Ok(content) = std::fs::read_to_string(CGROUP_FILE) {
|
||||
if content.contains("docker") || content.contains("containerd") {
|
||||
return true;
|
||||
}
|
||||
if let Ok(content) = std::fs::read_to_string(CGROUP_FILE)
|
||||
&& (content.contains("docker") || content.contains("containerd"))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
@@ -142,7 +142,7 @@ async fn find_gateway(
|
||||
}
|
||||
|
||||
let ip_cidr = ip_cidr.ok_or_else(|| anyhow!(
|
||||
"Could not find IP address for interface '{}'. Ensure interface has a valid IPv4 address",
|
||||
"Could not find IP address for interface '{}'. Ensure interface has a valid IPv4 address",
|
||||
interface_name
|
||||
))?;
|
||||
|
||||
@@ -167,14 +167,13 @@ async fn find_gateway(
|
||||
if line.contains("via") {
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if let Some(via_idx) = parts.iter().position(|&x| x == "via")
|
||||
&& let Some(&gateway) = parts.get(via_idx + 1)
|
||||
{
|
||||
if let Some(&gateway) = parts.get(via_idx + 1) {
|
||||
trace!(
|
||||
"Found gateway {} for {} from routing table",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
trace!(
|
||||
"Found gateway {} for {} from routing table",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -206,14 +205,14 @@ async fn find_gateway(
|
||||
.output()
|
||||
.await;
|
||||
|
||||
if let Ok(output) = route_test {
|
||||
if output.status.success() {
|
||||
trace!(
|
||||
"Gateway {} is reachable via {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
if let Ok(output) = route_test
|
||||
&& output.status.success()
|
||||
{
|
||||
trace!(
|
||||
"Gateway {} is reachable via {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
|
||||
// Fallback: assume .1 is gateway (Docker standard)
|
||||
@@ -266,10 +265,10 @@ async fn set_default_gateway(
|
||||
.output()
|
||||
.await;
|
||||
|
||||
if let Ok(output) = remove_default {
|
||||
if output.status.success() {
|
||||
trace!("Removed existing default routes");
|
||||
}
|
||||
if let Ok(output) = remove_default
|
||||
&& output.status.success()
|
||||
{
|
||||
trace!("Removed existing default routes");
|
||||
}
|
||||
|
||||
// Add new default route
|
||||
|
||||
@@ -116,9 +116,11 @@ impl super::KomodoResource for Build {
|
||||
git_provider,
|
||||
repo,
|
||||
branch,
|
||||
image_registry_domain: optional_string(
|
||||
build.config.image_registry.domain,
|
||||
),
|
||||
image_registry_domain: build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.and_then(|r| optional_string(&r.domain)),
|
||||
built_hash: build.info.built_hash,
|
||||
latest_hash: build.info.latest_hash,
|
||||
state,
|
||||
|
||||
@@ -188,7 +188,7 @@ impl super::KomodoResource for Deployment {
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -34,8 +34,10 @@ use komodo_client::{
|
||||
parsers::parse_string_list,
|
||||
};
|
||||
use partial_derive2::{Diff, MaybeNone, PartialDiff};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
api::{read::ReadArgs, write::WriteArgs},
|
||||
@@ -70,7 +72,8 @@ pub use procedure::{
|
||||
refresh_procedure_state_cache, spawn_procedure_state_refresh_loop,
|
||||
};
|
||||
pub use refresh::{
|
||||
refresh_all_resources_cache, spawn_all_resources_refresh_loop,
|
||||
refresh_all_resources_cache,
|
||||
spawn_all_resources_cache_refresh_loop,
|
||||
spawn_resource_refresh_loop,
|
||||
};
|
||||
pub use repo::{
|
||||
@@ -228,6 +231,12 @@ pub trait KomodoResource {
|
||||
pub async fn get<T: KomodoResource>(
|
||||
id_or_name: &str,
|
||||
) -> anyhow::Result<Resource<T::Config, T::Info>> {
|
||||
if id_or_name.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Cannot find {} with empty name / id",
|
||||
T::resource_type()
|
||||
));
|
||||
}
|
||||
T::coll()
|
||||
.find_one(id_or_name_filter(id_or_name))
|
||||
.await
|
||||
@@ -451,22 +460,31 @@ pub async fn create<T: KomodoResource>(
|
||||
name: &str,
|
||||
mut config: T::PartialConfig,
|
||||
user: &User,
|
||||
) -> anyhow::Result<Resource<T::Config, T::Info>> {
|
||||
) -> serror::Result<Resource<T::Config, T::Info>> {
|
||||
if !T::user_can_create(user) {
|
||||
return Err(anyhow!(
|
||||
"User does not have permissions to create {}.",
|
||||
T::resource_type()
|
||||
));
|
||||
return Err(
|
||||
anyhow!(
|
||||
"User does not have permissions to create {}.",
|
||||
T::resource_type()
|
||||
)
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
if name.is_empty() {
|
||||
return Err(anyhow!("Must provide non-empty name for resource."));
|
||||
return Err(
|
||||
anyhow!("Must provide non-empty name for resource")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let name = T::validated_name(name);
|
||||
|
||||
if ObjectId::from_str(&name).is_ok() {
|
||||
return Err(anyhow!("valid ObjectIds cannot be used as names."));
|
||||
return Err(
|
||||
anyhow!("Valid ObjectIds cannot be used as names")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
// Ensure an existing resource with same name doesn't already exist
|
||||
@@ -482,7 +500,10 @@ pub async fn create<T: KomodoResource>(
|
||||
.into_iter()
|
||||
.any(|r| r.name == name)
|
||||
{
|
||||
return Err(anyhow!("Must provide unique name for resource."));
|
||||
return Err(
|
||||
anyhow!("Resource with name '{}' already exists", name)
|
||||
.status_code(StatusCode::CONFLICT),
|
||||
);
|
||||
}
|
||||
|
||||
let start_ts = komodo_timestamp();
|
||||
@@ -706,6 +727,7 @@ pub async fn update_meta<T: KomodoResource>(
|
||||
Ok(tag) => Ok(tag.id),
|
||||
Err(_) => CreateTag {
|
||||
name: tag.to_string(),
|
||||
color: None,
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
@@ -845,9 +867,15 @@ pub async fn delete<T: KomodoResource>(
|
||||
);
|
||||
update.push_simple_log("Deleted Toml", toml);
|
||||
|
||||
if let Err(e) = T::post_delete(&resource, &mut update).await {
|
||||
update.push_error_log("post delete", format_serror(&e.into()));
|
||||
}
|
||||
tokio::join!(
|
||||
async {
|
||||
if let Err(e) = T::post_delete(&resource, &mut update).await {
|
||||
update
|
||||
.push_error_log("post delete", format_serror(&e.into()));
|
||||
}
|
||||
},
|
||||
delete_from_alerters::<T>(&resource.id)
|
||||
);
|
||||
|
||||
refresh_all_resources_cache().await;
|
||||
|
||||
@@ -857,6 +885,26 @@ pub async fn delete<T: KomodoResource>(
|
||||
Ok(resource)
|
||||
}
|
||||
|
||||
async fn delete_from_alerters<T: KomodoResource>(id: &str) {
|
||||
let target_bson = doc! {
|
||||
"type": T::resource_type().as_ref(),
|
||||
"id": id,
|
||||
};
|
||||
if let Err(e) = db_client()
|
||||
.alerters
|
||||
.update_many(Document::new(), doc! {
|
||||
"$pull": {
|
||||
"config.resources": &target_bson,
|
||||
"config.except_resources": target_bson,
|
||||
}
|
||||
})
|
||||
.await
|
||||
.context("Failed to clear deleted resource from alerter whitelist / blacklist")
|
||||
{
|
||||
warn!("{e:#}");
|
||||
}
|
||||
}
|
||||
|
||||
// =======
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
|
||||
@@ -5,6 +5,7 @@ use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{Collection, bson::doc, options::FindOneOptions},
|
||||
};
|
||||
use futures::{TryStreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::Execution,
|
||||
entities::{
|
||||
@@ -709,6 +710,15 @@ async fn validate_config(
|
||||
.await?;
|
||||
params.stack = stack.id;
|
||||
}
|
||||
Execution::RunStackService(params) => {
|
||||
let stack = super::get_check_permissions::<Stack>(
|
||||
¶ms.stack,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
params.stack = stack.id;
|
||||
}
|
||||
Execution::BatchDestroyStack(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
@@ -725,6 +735,24 @@ async fn validate_config(
|
||||
.await?;
|
||||
params.alerter = alerter.id;
|
||||
}
|
||||
Execution::SendAlert(params) => {
|
||||
params.alerters = params
|
||||
.alerters
|
||||
.iter()
|
||||
.map(async |alerter| {
|
||||
let id = super::get_check_permissions::<Alerter>(
|
||||
alerter,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?
|
||||
.id;
|
||||
anyhow::Ok(id)
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
}
|
||||
Execution::ClearRepoCache(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
@@ -732,6 +760,20 @@ async fn validate_config(
|
||||
));
|
||||
}
|
||||
}
|
||||
Execution::BackupCoreDatabase(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Non admin user cannot trigger core database backup"
|
||||
));
|
||||
}
|
||||
}
|
||||
Execution::GlobalAutoUpdate(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Non admin user cannot trigger global auto update"
|
||||
));
|
||||
}
|
||||
}
|
||||
Execution::Sleep(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,13 +12,13 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::{execute::pull_deployment_inner, write::WriteArgs},
|
||||
api::write::WriteArgs,
|
||||
config::core_config,
|
||||
helpers::all_resources::AllResourcesById,
|
||||
state::{all_resources_cache, db_client},
|
||||
};
|
||||
|
||||
pub fn spawn_all_resources_refresh_loop() {
|
||||
pub fn spawn_all_resources_cache_refresh_loop() {
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(15));
|
||||
loop {
|
||||
@@ -57,7 +57,6 @@ pub fn spawn_resource_refresh_loop() {
|
||||
|
||||
async fn refresh_all() {
|
||||
refresh_stacks().await;
|
||||
refresh_deployments().await;
|
||||
refresh_builds().await;
|
||||
refresh_repos().await;
|
||||
refresh_syncs().await;
|
||||
@@ -87,42 +86,6 @@ async fn refresh_stacks() {
|
||||
}
|
||||
}
|
||||
|
||||
async fn refresh_deployments() {
|
||||
let servers = find_collect(&db_client().servers, None, None)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
warn!(
|
||||
"Failed to get Servers from database in refresh task | {e:#}"
|
||||
)
|
||||
})
|
||||
.unwrap_or_default();
|
||||
let Ok(deployments) = find_collect(&db_client().deployments, None, None)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
warn!(
|
||||
"Failed to get Deployments from database in refresh task | {e:#}"
|
||||
)
|
||||
})
|
||||
else {
|
||||
return;
|
||||
};
|
||||
for deployment in deployments {
|
||||
if (deployment.config.poll_for_updates
|
||||
|| deployment.config.auto_update)
|
||||
&& let Some(server) =
|
||||
servers.iter().find(|s| s.id == deployment.config.server_id)
|
||||
{
|
||||
let name = deployment.name.clone();
|
||||
if let Err(e) = pull_deployment_inner(deployment, server).await
|
||||
{
|
||||
warn!(
|
||||
"Failed to pull latest image for Deployment {name} | {e:#}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn refresh_builds() {
|
||||
let Ok(builds) = find_collect(&db_client().builds, None, None)
|
||||
.await
|
||||
|
||||
@@ -300,6 +300,7 @@ async fn get_repo_state_from_db(id: &str) -> RepoState {
|
||||
"$or": [
|
||||
{ "operation": "CloneRepo" },
|
||||
{ "operation": "PullRepo" },
|
||||
{ "operation": "BuildRepo" },
|
||||
],
|
||||
})
|
||||
.with_options(
|
||||
|
||||
@@ -75,12 +75,16 @@ impl super::KomodoResource for Server {
|
||||
.unwrap_or(String::from("Unknown")),
|
||||
region: server.config.region,
|
||||
address: server.config.address,
|
||||
external_address: server.config.external_address,
|
||||
send_unreachable_alerts: server
|
||||
.config
|
||||
.send_unreachable_alerts,
|
||||
send_cpu_alerts: server.config.send_cpu_alerts,
|
||||
send_mem_alerts: server.config.send_mem_alerts,
|
||||
send_disk_alerts: server.config.send_disk_alerts,
|
||||
send_version_mismatch_alerts: server
|
||||
.config
|
||||
.send_version_mismatch_alerts,
|
||||
terminals_disabled,
|
||||
container_exec_disabled,
|
||||
},
|
||||
@@ -119,7 +123,7 @@ impl super::KomodoResource for Server {
|
||||
created: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
update_cache_for_server(created).await;
|
||||
update_cache_for_server(created, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -141,7 +145,7 @@ impl super::KomodoResource for Server {
|
||||
updated: &Self,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
update_cache_for_server(updated).await;
|
||||
update_cache_for_server(updated, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -252,7 +252,7 @@ impl super::KomodoResource for Stack {
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -113,7 +113,7 @@ impl super::KomodoResource for ResourceSync {
|
||||
|
||||
async fn busy(id: &String) -> anyhow::Result<bool> {
|
||||
action_states()
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
@@ -242,7 +242,7 @@ async fn get_resource_sync_state(
|
||||
data: &ResourceSyncInfo,
|
||||
) -> ResourceSyncState {
|
||||
if let Some(state) = action_states()
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(id)
|
||||
.await
|
||||
.and_then(|s| {
|
||||
|
||||
@@ -72,7 +72,7 @@ pub async fn execute_compose<T: ExecuteCompose>(
|
||||
.push(T::execute(periphery, stack, services, extras).await?);
|
||||
|
||||
// Ensure cached stack state up to date by updating server cache
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -3,14 +3,16 @@ use std::{fs, path::PathBuf};
|
||||
use anyhow::Context;
|
||||
use formatting::format_serror;
|
||||
use komodo_client::entities::{
|
||||
FileContents, RepoExecutionArgs, repo::Repo, stack::Stack,
|
||||
FileContents, RepoExecutionArgs,
|
||||
repo::Repo,
|
||||
stack::{Stack, StackRemoteFileContents},
|
||||
update::Log,
|
||||
};
|
||||
|
||||
use crate::{config::core_config, helpers::git_token};
|
||||
|
||||
pub struct RemoteComposeContents {
|
||||
pub successful: Vec<FileContents>,
|
||||
pub successful: Vec<StackRemoteFileContents>,
|
||||
pub errored: Vec<FileContents>,
|
||||
pub hash: Option<String>,
|
||||
pub message: Option<String>,
|
||||
@@ -38,23 +40,25 @@ pub async fn get_repo_compose_contents(
|
||||
let mut successful = Vec::new();
|
||||
let mut errored = Vec::new();
|
||||
|
||||
for path in stack.file_paths() {
|
||||
let file_path = run_directory.join(path);
|
||||
for file in stack.all_file_dependencies() {
|
||||
let file_path = run_directory.join(&file.path);
|
||||
if !file_path.exists()
|
||||
&& let Some(missing_files) = &mut missing_files
|
||||
{
|
||||
missing_files.push(path.to_string());
|
||||
missing_files.push(file.path.clone());
|
||||
}
|
||||
// If file does not exist, will show up in err case so the log is handled
|
||||
match fs::read_to_string(&file_path).with_context(|| {
|
||||
format!("Failed to read file contents from {file_path:?}")
|
||||
}) {
|
||||
Ok(contents) => successful.push(FileContents {
|
||||
path: path.to_string(),
|
||||
Ok(contents) => successful.push(StackRemoteFileContents {
|
||||
path: file.path,
|
||||
contents,
|
||||
services: file.services,
|
||||
requires: file.requires,
|
||||
}),
|
||||
Err(e) => errored.push(FileContents {
|
||||
path: path.to_string(),
|
||||
path: file.path,
|
||||
contents: format_serror(&e.into()),
|
||||
}),
|
||||
}
|
||||
|
||||
@@ -1,20 +1,30 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use colored::Colorize;
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{Document, doc, oid::ObjectId, to_document},
|
||||
};
|
||||
use futures::future::join_all;
|
||||
use komodo_client::{
|
||||
api::execute::RunAction,
|
||||
api::write::{CreateBuilder, CreateServer},
|
||||
api::{
|
||||
auth::SignUpLocalUser,
|
||||
execute::{
|
||||
BackupCoreDatabase, Execution, GlobalAutoUpdate, RunAction,
|
||||
},
|
||||
write::{
|
||||
CreateBuilder, CreateProcedure, CreateServer, CreateTag,
|
||||
UpdateResourceMeta,
|
||||
},
|
||||
},
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
action::Action,
|
||||
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
|
||||
komodo_timestamp,
|
||||
procedure::{EnabledExecution, ProcedureConfig, ProcedureStage},
|
||||
server::{PartialServerConfig, Server},
|
||||
sync::ResourceSync,
|
||||
tag::TagColor,
|
||||
update::Log,
|
||||
user::{action_user, system_user},
|
||||
},
|
||||
@@ -22,44 +32,32 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::execute::{ExecuteArgs, ExecuteRequest},
|
||||
api::write::WriteArgs,
|
||||
api::{
|
||||
auth::AuthArgs,
|
||||
execute::{ExecuteArgs, ExecuteRequest},
|
||||
write::WriteArgs,
|
||||
},
|
||||
config::core_config,
|
||||
helpers::random_string,
|
||||
helpers::update::init_execution_update,
|
||||
network, resource,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
/// This function should be run on startup,
|
||||
/// after the db client has been initialized
|
||||
pub async fn on_startup() {
|
||||
// Configure manual network interface if specified
|
||||
network::configure_internet_gateway().await;
|
||||
|
||||
tokio::join!(
|
||||
in_progress_update_cleanup(),
|
||||
open_alert_cleanup(),
|
||||
ensure_first_server_and_builder(),
|
||||
clean_up_server_templates(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Runs the Actions with `run_at_startup: true`
|
||||
pub async fn run_startup_actions() {
|
||||
let startup_actions =
|
||||
match database::mungos::find::find_collect::<Action>(
|
||||
&db_client().actions,
|
||||
doc! { "config.run_at_startup": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(actions) => actions,
|
||||
Err(e) => {
|
||||
error!("Failed to fetch actions for startup | {e:#?}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
let startup_actions = match find_collect(
|
||||
&db_client().actions,
|
||||
doc! { "config.run_at_startup": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(actions) => actions,
|
||||
Err(e) => {
|
||||
error!("Failed to fetch actions for startup | {e:#?}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
for action in startup_actions {
|
||||
let name = action.name;
|
||||
@@ -99,6 +97,21 @@ pub async fn run_startup_actions() {
|
||||
}
|
||||
}
|
||||
|
||||
/// This function should be run on startup,
|
||||
/// after the db client has been initialized
|
||||
pub async fn on_startup() {
|
||||
// Configure manual network interface if specified
|
||||
network::configure_internet_gateway().await;
|
||||
|
||||
tokio::join!(
|
||||
in_progress_update_cleanup(),
|
||||
open_alert_cleanup(),
|
||||
clean_up_server_templates(),
|
||||
ensure_first_server_and_builder(),
|
||||
ensure_init_user_and_resources(),
|
||||
);
|
||||
}
|
||||
|
||||
async fn in_progress_update_cleanup() {
|
||||
let log = Log::error(
|
||||
"Komodo shutdown",
|
||||
@@ -184,10 +197,10 @@ async fn open_alert_cleanup() {
|
||||
|
||||
/// Ensures a default server / builder exists with the defined address
|
||||
async fn ensure_first_server_and_builder() {
|
||||
let first_server = &core_config().first_server;
|
||||
if first_server.is_empty() {
|
||||
let config = core_config();
|
||||
let Some(address) = config.first_server.clone() else {
|
||||
return;
|
||||
}
|
||||
};
|
||||
let db = db_client();
|
||||
let Ok(server) = db
|
||||
.servers
|
||||
@@ -201,9 +214,9 @@ async fn ensure_first_server_and_builder() {
|
||||
server
|
||||
} else {
|
||||
match (CreateServer {
|
||||
name: format!("server-{}", random_string(5)),
|
||||
name: config.first_server_name.clone(),
|
||||
config: PartialServerConfig {
|
||||
address: Some(first_server.to_string()),
|
||||
address: Some(address),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
@@ -229,7 +242,7 @@ async fn ensure_first_server_and_builder() {
|
||||
return;
|
||||
};
|
||||
if let Err(e) = (CreateBuilder {
|
||||
name: String::from("local"),
|
||||
name: config.first_server_name.clone(),
|
||||
config: PartialBuilderConfig::Server(
|
||||
PartialServerBuilderConfig {
|
||||
server_id: Some(server.id),
|
||||
@@ -248,6 +261,160 @@ async fn ensure_first_server_and_builder() {
|
||||
}
|
||||
}
|
||||
|
||||
async fn ensure_init_user_and_resources() {
|
||||
let db = db_client();
|
||||
|
||||
// Assumes if there are any existing users, procedures, or tags,
|
||||
// the default procedures do not need to be set up.
|
||||
let Ok((None, None, None)) = tokio::try_join!(
|
||||
db.users.find_one(Document::new()),
|
||||
db.procedures.find_one(Document::new()),
|
||||
db.tags.find_one(Document::new()),
|
||||
).inspect_err(|e| error!("Failed to initialize default procedures | Failed to query db | {e:?}")) else {
|
||||
return
|
||||
};
|
||||
|
||||
let config = core_config();
|
||||
|
||||
// Init admin user if set in config.
|
||||
if let Some(username) = &config.init_admin_username {
|
||||
info!("Creating init admin user...");
|
||||
SignUpLocalUser {
|
||||
username: username.clone(),
|
||||
password: config.init_admin_password.clone(),
|
||||
}
|
||||
.resolve(&AuthArgs::default())
|
||||
.await
|
||||
.expect("Failed to initialize default admin user.");
|
||||
db.users
|
||||
.find_one(doc! { "username": username })
|
||||
.await
|
||||
.expect("Failed to query database for initial user")
|
||||
.expect("Failed to find initial user after creation");
|
||||
};
|
||||
|
||||
if config.disable_init_resources {
|
||||
info!("System resources init {}", "DISABLED".red());
|
||||
return;
|
||||
}
|
||||
|
||||
info!("Creating init system resources...");
|
||||
|
||||
let write_args = WriteArgs {
|
||||
user: system_user().to_owned(),
|
||||
};
|
||||
|
||||
// Create default 'system' tag
|
||||
let default_tags = match (CreateTag {
|
||||
name: String::from("system"),
|
||||
color: Some(TagColor::Red),
|
||||
})
|
||||
.resolve(&write_args)
|
||||
.await
|
||||
{
|
||||
Ok(tag) => vec![tag.id],
|
||||
Err(e) => {
|
||||
warn!("Failed to create default tag | {:#}", e.error);
|
||||
Vec::new()
|
||||
}
|
||||
};
|
||||
|
||||
// Backup Core Database
|
||||
async {
|
||||
let Ok(config) = ProcedureConfig::builder()
|
||||
.stages(vec![ProcedureStage {
|
||||
name: String::from("Stage 1"),
|
||||
enabled: true,
|
||||
executions: vec![
|
||||
EnabledExecution {
|
||||
execution: Execution::BackupCoreDatabase(BackupCoreDatabase {}),
|
||||
enabled: true
|
||||
}
|
||||
]
|
||||
}])
|
||||
.schedule(String::from("Every day at 01:00"))
|
||||
.build()
|
||||
.inspect_err(|e| error!("Failed to initialize backup core database procedure | Failed to build Procedure | {e:?}")) else {
|
||||
return;
|
||||
};
|
||||
let procedure = match (CreateProcedure {
|
||||
name: String::from("Backup Core Database"),
|
||||
config: config.into()
|
||||
}).resolve(&write_args).await {
|
||||
Ok(procedure) => procedure,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to initialize default database backup Procedure | Failed to create Procedure | {:#}",
|
||||
e.error
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
if let Err(e) = (UpdateResourceMeta {
|
||||
target: ResourceTarget::Procedure(procedure.id),
|
||||
tags: Some(default_tags.clone()),
|
||||
description: Some(String::from(
|
||||
"Triggers the Core database backup at the scheduled time.",
|
||||
)),
|
||||
template: None,
|
||||
}).resolve(&write_args).await {
|
||||
warn!("Failed to update default database backup Procedure tags / description | {:#}", e.error);
|
||||
}
|
||||
}.await;
|
||||
|
||||
// GlobalAutoUpdate
|
||||
async {
|
||||
let Ok(config) = ProcedureConfig::builder()
|
||||
.stages(vec![ProcedureStage {
|
||||
name: String::from("Stage 1"),
|
||||
enabled: true,
|
||||
executions: vec![
|
||||
EnabledExecution {
|
||||
execution: Execution::GlobalAutoUpdate(GlobalAutoUpdate {}),
|
||||
enabled: true
|
||||
}
|
||||
]
|
||||
}])
|
||||
.schedule(String::from("Every day at 03:00"))
|
||||
.build()
|
||||
.inspect_err(|e| error!("Failed to initialize global auto update procedure | Failed to build Procedure | {e:?}")) else {
|
||||
return;
|
||||
};
|
||||
let procedure = match (CreateProcedure {
|
||||
name: String::from("Global Auto Update"),
|
||||
config: config.into(),
|
||||
})
|
||||
.resolve(&write_args)
|
||||
.await
|
||||
{
|
||||
Ok(procedure) => procedure,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to initialize global auto update Procedure | Failed to create Procedure | {:#}",
|
||||
e.error
|
||||
);
|
||||
return;
|
||||
}
|
||||
};
|
||||
if let Err(e) = (UpdateResourceMeta {
|
||||
target: ResourceTarget::Procedure(procedure.id),
|
||||
tags: Some(default_tags.clone()),
|
||||
description: Some(String::from(
|
||||
"Pulls and auto updates Stacks and Deployments using 'poll_for_updates' or 'auto_update'.",
|
||||
)),
|
||||
template: None,
|
||||
})
|
||||
.resolve(&write_args)
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to update global auto update Procedure tags / description | {:#}",
|
||||
e.error
|
||||
);
|
||||
}
|
||||
}.await;
|
||||
}
|
||||
|
||||
/// v1.17.5 removes the ServerTemplate resource.
|
||||
/// References to this resource type need to be cleaned up
|
||||
/// to avoid type errors reading from the database.
|
||||
|
||||
@@ -40,6 +40,7 @@ pub fn db_client() -> &'static Client {
|
||||
.expect("db_client accessed before initialized")
|
||||
}
|
||||
|
||||
/// Must be called in app startup sequence.
|
||||
pub async fn init_db_client() {
|
||||
let client = Client::new(&core_config().database)
|
||||
.await
|
||||
@@ -134,11 +135,13 @@ pub fn action_states() -> &'static ActionStates {
|
||||
ACTION_STATES.get_or_init(ActionStates::default)
|
||||
}
|
||||
|
||||
/// Cache of ids to status
|
||||
pub type DeploymentStatusCache = Cache<
|
||||
String,
|
||||
Arc<History<CachedDeploymentStatus, DeploymentState>>,
|
||||
>;
|
||||
|
||||
/// Cache of ids to status
|
||||
pub fn deployment_status_cache() -> &'static DeploymentStatusCache {
|
||||
static DEPLOYMENT_STATUS_CACHE: OnceLock<DeploymentStatusCache> =
|
||||
OnceLock::new();
|
||||
|
||||
@@ -9,12 +9,15 @@ use komodo_client::{
|
||||
read::ListBuildVersions,
|
||||
},
|
||||
entities::{
|
||||
FileContents, ResourceTarget,
|
||||
ResourceTarget,
|
||||
deployment::{
|
||||
Deployment, DeploymentConfig, DeploymentImage, DeploymentState,
|
||||
PartialDeploymentConfig,
|
||||
},
|
||||
stack::{PartialStackConfig, Stack, StackConfig, StackState},
|
||||
stack::{
|
||||
PartialStackConfig, Stack, StackConfig,
|
||||
StackRemoteFileContents, StackState,
|
||||
},
|
||||
sync::SyncDeployUpdate,
|
||||
toml::ResourceToml,
|
||||
update::Log,
|
||||
@@ -554,7 +557,13 @@ fn build_cache_for_stack<'a>(
|
||||
&original.info.remote_contents,
|
||||
) {
|
||||
(Some(deployed_contents), Some(remote_contents)) => {
|
||||
for FileContents { path, contents } in remote_contents {
|
||||
for StackRemoteFileContents {
|
||||
path,
|
||||
contents,
|
||||
services: _services,
|
||||
requires: _requires,
|
||||
} in remote_contents
|
||||
{
|
||||
if let Some(deployed) =
|
||||
deployed_contents.iter().find(|c| &c.path == path)
|
||||
{
|
||||
|
||||
@@ -147,6 +147,7 @@ pub trait ExecuteResourceSync: ResourceSyncTrait {
|
||||
sync_user(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
{
|
||||
Ok(resource) => resource.id,
|
||||
Err(e) => {
|
||||
|
||||
@@ -661,6 +661,13 @@ impl ResourceSyncTrait for Procedure {
|
||||
.map(|s| s.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RunStackService(config) => {
|
||||
config.stack = resources
|
||||
.stacks
|
||||
.get(&config.stack)
|
||||
.map(|s| s.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::BatchDestroyStack(_config) => {}
|
||||
Execution::TestAlerter(config) => {
|
||||
config.alerter = resources
|
||||
@@ -669,7 +676,22 @@ impl ResourceSyncTrait for Procedure {
|
||||
.map(|a| a.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::SendAlert(config) => {
|
||||
config.alerters = config
|
||||
.alerters
|
||||
.iter()
|
||||
.map(|alerter| {
|
||||
resources
|
||||
.alerters
|
||||
.get(alerter)
|
||||
.map(|a| a.name.clone())
|
||||
.unwrap_or_default()
|
||||
})
|
||||
.collect();
|
||||
}
|
||||
Execution::ClearRepoCache(_) => {}
|
||||
Execution::BackupCoreDatabase(_) => {}
|
||||
Execution::GlobalAutoUpdate(_) => {}
|
||||
Execution::Sleep(_) => {}
|
||||
}
|
||||
}
|
||||
@@ -803,6 +825,7 @@ impl ExecuteResourceSync for Procedure {
|
||||
sync_user(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
{
|
||||
Ok(resource) => resource.id,
|
||||
Err(e) => {
|
||||
|
||||
@@ -28,6 +28,8 @@ pub const TOML_PRETTY_OPTIONS: toml_pretty::Options =
|
||||
toml_pretty::Options {
|
||||
tab: " ",
|
||||
skip_empty_string: true,
|
||||
// Usually we do this, but has to be changed for some cases.
|
||||
skip_empty_object: true,
|
||||
max_inline_array_length: 30,
|
||||
inline_array: false,
|
||||
};
|
||||
@@ -750,6 +752,13 @@ impl ToToml for Procedure {
|
||||
.map(|r| &r.name)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RunStackService(exec) => exec.stack.clone_from(
|
||||
all
|
||||
.stacks
|
||||
.get(&exec.stack)
|
||||
.map(|r| &r.name)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PauseStack(exec) => exec.stack.clone_from(
|
||||
all
|
||||
.stacks
|
||||
@@ -786,9 +795,22 @@ impl ToToml for Procedure {
|
||||
.map(|a| &a.name)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::SendAlert(exec) => {
|
||||
exec.alerters.iter_mut().for_each(|a| {
|
||||
a.clone_from(
|
||||
all
|
||||
.alerters
|
||||
.get(a)
|
||||
.map(|a| &a.name)
|
||||
.unwrap_or(&String::new()),
|
||||
)
|
||||
})
|
||||
}
|
||||
Execution::None(_)
|
||||
| Execution::Sleep(_)
|
||||
| Execution::ClearRepoCache(_) => {}
|
||||
| Execution::ClearRepoCache(_)
|
||||
| Execution::BackupCoreDatabase(_)
|
||||
| Execution::GlobalAutoUpdate(_) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -823,8 +845,13 @@ impl ToToml for Procedure {
|
||||
for stage in stages {
|
||||
toml.push_str("\n\n[[procedure.config.stage]]\n");
|
||||
toml.push_str(
|
||||
&toml_pretty::to_string(stage, TOML_PRETTY_OPTIONS)
|
||||
.context("failed to serialize procedures to toml")?,
|
||||
&toml_pretty::to_string(
|
||||
stage,
|
||||
// If the execution.params are fully missing,
|
||||
// deserialization will fail.
|
||||
TOML_PRETTY_OPTIONS.skip_empty_object(false),
|
||||
)
|
||||
.context("failed to serialize procedures to toml")?,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,4 +57,5 @@ axum.workspace = true
|
||||
clap.workspace = true
|
||||
envy.workspace = true
|
||||
uuid.workspace = true
|
||||
rand.workspace = true
|
||||
rand.workspace = true
|
||||
shell-escape.workspace = true
|
||||
@@ -1,6 +1,7 @@
|
||||
## All in one, multi stage compile + runtime Docker build for your architecture.
|
||||
|
||||
FROM rust:1.88.0-bullseye AS builder
|
||||
FROM rust:1.89.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -10,7 +11,7 @@ COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/periphery ./bin/periphery
|
||||
|
||||
# Compile app
|
||||
RUN cargo build -p komodo_periphery --release
|
||||
RUN cargo build -p komodo_periphery --release && cargo strip
|
||||
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
use std::path::PathBuf;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::{
|
||||
@@ -9,7 +12,7 @@ use interpolate::Interpolator;
|
||||
use komodo_client::entities::{
|
||||
EnvironmentVar, all_logs_success,
|
||||
build::{Build, BuildConfig},
|
||||
environment_vars_from_str, get_image_name, optional_string,
|
||||
environment_vars_from_str, optional_string,
|
||||
to_path_compatible_name,
|
||||
update::Log,
|
||||
};
|
||||
@@ -22,9 +25,7 @@ use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
build::{
|
||||
image_tags, parse_build_args, parse_secret_args, write_dockerfile,
|
||||
},
|
||||
build::{parse_build_args, parse_secret_args, write_dockerfile},
|
||||
config::periphery_config,
|
||||
docker::docker_login,
|
||||
helpers::{parse_extra_args, parse_labels},
|
||||
@@ -122,9 +123,10 @@ impl Resolve<super::Args> for build::Build {
|
||||
let build::Build {
|
||||
mut build,
|
||||
repo: linked_repo,
|
||||
registry_token,
|
||||
additional_tags,
|
||||
registry_tokens,
|
||||
mut replacers,
|
||||
commit_hash,
|
||||
additional_tags,
|
||||
} = self;
|
||||
|
||||
let mut logs = Vec::new();
|
||||
@@ -142,8 +144,6 @@ impl Resolve<super::Args> for build::Build {
|
||||
name,
|
||||
config:
|
||||
BuildConfig {
|
||||
version,
|
||||
image_tag,
|
||||
build_path,
|
||||
dockerfile_path,
|
||||
build_args,
|
||||
@@ -169,25 +169,41 @@ impl Resolve<super::Args> for build::Build {
|
||||
return Err(anyhow!("Build must be files on host mode, have a repo attached, or have dockerfile contents set to build").into());
|
||||
}
|
||||
|
||||
let registry_tokens = registry_tokens
|
||||
.iter()
|
||||
.map(|(domain, account, token)| {
|
||||
((domain.as_str(), account.as_str()), token.as_str())
|
||||
})
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
// Maybe docker login
|
||||
let should_push = match docker_login(
|
||||
&image_registry.domain,
|
||||
&image_registry.account,
|
||||
registry_token.as_deref(),
|
||||
)
|
||||
.await
|
||||
let mut should_push = false;
|
||||
for (domain, account) in image_registry
|
||||
.iter()
|
||||
.map(|r| (r.domain.as_str(), r.account.as_str()))
|
||||
// This ensures uniqueness / prevents redundant logins
|
||||
.collect::<HashSet<_>>()
|
||||
{
|
||||
Ok(should_push) => should_push,
|
||||
Err(e) => {
|
||||
logs.push(Log::error(
|
||||
"Docker Login",
|
||||
format_serror(
|
||||
&e.context("failed to login to docker registry").into(),
|
||||
),
|
||||
));
|
||||
return Ok(logs);
|
||||
}
|
||||
};
|
||||
match docker_login(
|
||||
domain,
|
||||
account,
|
||||
registry_tokens.get(&(domain, account)).copied(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(logged_in) if logged_in => should_push = true,
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
logs.push(Log::error(
|
||||
"Docker Login",
|
||||
format_serror(
|
||||
&e.context("failed to login to docker registry").into(),
|
||||
),
|
||||
));
|
||||
return Ok(logs);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
let build_path = if let Some(repo) = &linked_repo {
|
||||
periphery_config()
|
||||
@@ -245,8 +261,6 @@ impl Resolve<super::Args> for build::Build {
|
||||
}
|
||||
|
||||
// Get command parts
|
||||
let image_name =
|
||||
get_image_name(&build).context("failed to make image name")?;
|
||||
|
||||
// Add VERSION to build args (if not already there)
|
||||
let mut build_args = environment_vars_from_str(build_args)
|
||||
@@ -267,10 +281,15 @@ impl Resolve<super::Args> for build::Build {
|
||||
let labels = parse_labels(
|
||||
&environment_vars_from_str(labels).context("Invalid labels")?,
|
||||
);
|
||||
|
||||
let extra_args = parse_extra_args(extra_args);
|
||||
|
||||
let buildx = if *use_buildx { " buildx" } else { "" };
|
||||
let image_tags =
|
||||
image_tags(&image_name, image_tag, version, &additional_tags);
|
||||
|
||||
let image_tags = build
|
||||
.get_image_tags_as_arg(commit_hash.as_deref(), &additional_tags)
|
||||
.context("Failed to parse image tags into command")?;
|
||||
|
||||
let maybe_push = if should_push { " --push" } else { "" };
|
||||
|
||||
// Construct command
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
use std::{fmt::Write, path::PathBuf};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::{
|
||||
run_komodo_command, run_komodo_command_with_sanitization,
|
||||
@@ -11,7 +9,7 @@ use komodo_client::entities::{
|
||||
FileContents, RepoExecutionResponse, all_logs_success,
|
||||
stack::{
|
||||
ComposeFile, ComposeProject, ComposeService,
|
||||
ComposeServiceDeploy, StackServiceNames,
|
||||
ComposeServiceDeploy, StackRemoteFileContents, StackServiceNames,
|
||||
},
|
||||
to_path_compatible_name,
|
||||
update::Log,
|
||||
@@ -19,11 +17,13 @@ use komodo_client::entities::{
|
||||
use periphery_client::api::compose::*;
|
||||
use resolver_api::Resolve;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use shell_escape::unix::escape;
|
||||
use std::{borrow::Cow, path::PathBuf};
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
compose::{
|
||||
docker_compose, pull_or_clone_stack,
|
||||
docker_compose, env_file_args, pull_or_clone_stack,
|
||||
up::{maybe_login_registry, validate_files},
|
||||
write::write_stack,
|
||||
},
|
||||
@@ -169,9 +169,11 @@ impl Resolve<super::Args> for GetComposeContentsOnHost {
|
||||
|
||||
let mut res = GetComposeContentsOnHostResponse::default();
|
||||
|
||||
for path in file_paths {
|
||||
let full_path =
|
||||
run_directory.join(&path).components().collect::<PathBuf>();
|
||||
for file in file_paths {
|
||||
let full_path = run_directory
|
||||
.join(&file.path)
|
||||
.components()
|
||||
.collect::<PathBuf>();
|
||||
match fs::read_to_string(&full_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to read compose file contents at {full_path:?}"
|
||||
@@ -180,11 +182,16 @@ impl Resolve<super::Args> for GetComposeContentsOnHost {
|
||||
Ok(contents) => {
|
||||
// The path we store here has to be the same as incoming file path in the array,
|
||||
// in order for WriteComposeContentsToHost to write to the correct path.
|
||||
res.contents.push(FileContents { path, contents });
|
||||
res.contents.push(StackRemoteFileContents {
|
||||
path: file.path,
|
||||
contents,
|
||||
services: file.services,
|
||||
requires: file.requires,
|
||||
});
|
||||
}
|
||||
Err(e) => {
|
||||
res.errors.push(FileContents {
|
||||
path,
|
||||
path: file.path,
|
||||
contents: format_serror(&e.into()),
|
||||
});
|
||||
}
|
||||
@@ -351,19 +358,19 @@ impl Resolve<super::Args> for ComposePull {
|
||||
)?;
|
||||
|
||||
let file_paths = stack
|
||||
.file_paths()
|
||||
.iter()
|
||||
.all_file_paths()
|
||||
.into_iter()
|
||||
.map(|path| {
|
||||
(
|
||||
path,
|
||||
// This will remove any intermediate uneeded '/./' in the path
|
||||
run_directory.join(path).components().collect::<PathBuf>(),
|
||||
run_directory.join(&path).components().collect::<PathBuf>(),
|
||||
path,
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Validate files
|
||||
for (path, full_path) in &file_paths {
|
||||
for (full_path, path) in &file_paths {
|
||||
if !full_path.exists() {
|
||||
return Err(anyhow!("Missing compose file at {path}").into());
|
||||
}
|
||||
@@ -382,24 +389,12 @@ impl Resolve<super::Args> for ComposePull {
|
||||
format!(" {}", services.join(" "))
|
||||
};
|
||||
|
||||
let file_args = if stack.config.file_paths.is_empty() {
|
||||
String::from("compose.yaml")
|
||||
} else {
|
||||
stack.config.file_paths.join(" -f ")
|
||||
};
|
||||
let file_args = stack.compose_file_paths().join(" -f ");
|
||||
|
||||
let env_file = env_file_path
|
||||
.map(|path| format!(" --env-file {path}"))
|
||||
.unwrap_or_default();
|
||||
|
||||
let additional_env_files = stack
|
||||
.config
|
||||
.additional_env_files
|
||||
.iter()
|
||||
.fold(String::new(), |mut output, file| {
|
||||
let _ = write!(output, " --env-file {file}");
|
||||
output
|
||||
});
|
||||
let env_file_args = env_file_args(
|
||||
env_file_path,
|
||||
&stack.config.additional_env_files,
|
||||
)?;
|
||||
|
||||
let project_name = stack.project_name(false);
|
||||
|
||||
@@ -407,7 +402,7 @@ impl Resolve<super::Args> for ComposePull {
|
||||
"Compose Pull",
|
||||
run_directory.as_ref(),
|
||||
format!(
|
||||
"{docker_compose} -p {project_name} -f {file_args}{additional_env_files}{env_file} pull{service_args}",
|
||||
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} pull{service_args}",
|
||||
),
|
||||
)
|
||||
.await;
|
||||
@@ -514,35 +509,23 @@ impl Resolve<super::Args> for ComposeUp {
|
||||
format!(" {}", services.join(" "))
|
||||
};
|
||||
|
||||
let file_args = if stack.config.file_paths.is_empty() {
|
||||
String::from("compose.yaml")
|
||||
} else {
|
||||
stack.config.file_paths.join(" -f ")
|
||||
};
|
||||
let file_args = stack.compose_file_paths().join(" -f ");
|
||||
|
||||
// This will be the last project name, which is the one that needs to be destroyed.
|
||||
// Might be different from the current project name, if user renames stack / changes to custom project name.
|
||||
let last_project_name = stack.project_name(false);
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
let env_file = env_file_path
|
||||
.map(|path| format!(" --env-file {path}"))
|
||||
.unwrap_or_default();
|
||||
|
||||
let additional_env_files = stack
|
||||
.config
|
||||
.additional_env_files
|
||||
.iter()
|
||||
.fold(String::new(), |mut output, file| {
|
||||
let _ = write!(output, " --env-file {file}");
|
||||
output
|
||||
});
|
||||
let env_file_args = env_file_args(
|
||||
env_file_path,
|
||||
&stack.config.additional_env_files,
|
||||
)?;
|
||||
|
||||
// Uses 'docker compose config' command to extract services (including image)
|
||||
// after performing interpolation
|
||||
{
|
||||
let command = format!(
|
||||
"{docker_compose} -p {project_name} -f {file_args}{additional_env_files}{env_file} config",
|
||||
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} config",
|
||||
);
|
||||
let Some(config_log) = run_komodo_command_with_sanitization(
|
||||
"Compose Config",
|
||||
@@ -607,7 +590,7 @@ impl Resolve<super::Args> for ComposeUp {
|
||||
let build_extra_args =
|
||||
parse_extra_args(&stack.config.build_extra_args);
|
||||
let command = format!(
|
||||
"{docker_compose} -p {project_name} -f {file_args}{additional_env_files}{env_file} build{build_extra_args}{service_args}",
|
||||
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} build{build_extra_args}{service_args}",
|
||||
);
|
||||
let Some(log) = run_komodo_command_with_sanitization(
|
||||
"Compose Build",
|
||||
@@ -631,7 +614,7 @@ impl Resolve<super::Args> for ComposeUp {
|
||||
// Pull images before destroying to minimize downtime.
|
||||
// If this fails, do not continue.
|
||||
let command = format!(
|
||||
"{docker_compose} -p {project_name} -f {file_args}{additional_env_files}{env_file} pull{service_args}",
|
||||
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} pull{service_args}",
|
||||
);
|
||||
let log = run_komodo_command(
|
||||
"Compose Pull",
|
||||
@@ -659,7 +642,7 @@ impl Resolve<super::Args> for ComposeUp {
|
||||
// Run compose up
|
||||
let extra_args = parse_extra_args(&stack.config.extra_args);
|
||||
let command = format!(
|
||||
"{docker_compose} -p {project_name} -f {file_args}{additional_env_files}{env_file} up -d{extra_args}{service_args}",
|
||||
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} up -d{extra_args}{service_args}",
|
||||
);
|
||||
|
||||
let Some(log) = run_komodo_command_with_sanitization(
|
||||
@@ -713,3 +696,150 @@ impl Resolve<super::Args> for ComposeExecution {
|
||||
Ok(log)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<super::Args> for ComposeRun {
|
||||
#[instrument(name = "ComposeRun", level = "debug", skip_all, fields(stack = &self.stack.name, service = &self.service))]
|
||||
async fn resolve(self, _: &super::Args) -> serror::Result<Log> {
|
||||
let ComposeRun {
|
||||
mut stack,
|
||||
repo,
|
||||
git_token,
|
||||
registry_token,
|
||||
mut replacers,
|
||||
service,
|
||||
command,
|
||||
no_tty,
|
||||
no_deps,
|
||||
detach,
|
||||
service_ports,
|
||||
env,
|
||||
workdir,
|
||||
user,
|
||||
entrypoint,
|
||||
pull,
|
||||
} = self;
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(None, &periphery_config().secrets);
|
||||
interpolator
|
||||
.interpolate_stack(&mut stack)?
|
||||
.push_logs(&mut Vec::new());
|
||||
replacers.extend(interpolator.secret_replacers);
|
||||
|
||||
let mut res = ComposeRunResponse::default();
|
||||
let (run_directory, env_file_path) = match write_stack(
|
||||
&stack,
|
||||
repo.as_ref(),
|
||||
git_token,
|
||||
replacers.clone(),
|
||||
&mut res,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
return Ok(Log::error(
|
||||
"Write Stack",
|
||||
format_serror(&e.into()),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let run_directory = run_directory.canonicalize().context(
|
||||
"Failed to validate run directory on host after stack write (canonicalize error)",
|
||||
)?;
|
||||
|
||||
maybe_login_registry(&stack, registry_token, &mut Vec::new())
|
||||
.await;
|
||||
|
||||
let docker_compose = docker_compose();
|
||||
|
||||
let file_args = if stack.config.file_paths.is_empty() {
|
||||
String::from("compose.yaml")
|
||||
} else {
|
||||
stack.config.file_paths.join(" -f ")
|
||||
};
|
||||
|
||||
let env_file_args = env_file_args(
|
||||
env_file_path,
|
||||
&stack.config.additional_env_files,
|
||||
)?;
|
||||
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
if pull.unwrap_or_default() {
|
||||
let pull_log = run_komodo_command(
|
||||
"Compose Pull",
|
||||
run_directory.as_ref(),
|
||||
format!(
|
||||
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} pull {service}",
|
||||
),
|
||||
)
|
||||
.await;
|
||||
if !pull_log.success {
|
||||
return Ok(pull_log);
|
||||
}
|
||||
}
|
||||
|
||||
let mut run_flags = String::from(" --rm");
|
||||
if detach.unwrap_or_default() {
|
||||
run_flags.push_str(" -d");
|
||||
}
|
||||
if no_tty.unwrap_or_default() {
|
||||
run_flags.push_str(" --no-tty");
|
||||
}
|
||||
if no_deps.unwrap_or_default() {
|
||||
run_flags.push_str(" --no-deps");
|
||||
}
|
||||
if service_ports.unwrap_or_default() {
|
||||
run_flags.push_str(" --service-ports");
|
||||
}
|
||||
if let Some(dir) = workdir.as_ref() {
|
||||
run_flags.push_str(&format!(" --workdir {dir}"));
|
||||
}
|
||||
if let Some(user) = user.as_ref() {
|
||||
run_flags.push_str(&format!(" --user {user}"));
|
||||
}
|
||||
if let Some(entrypoint) = entrypoint.as_ref() {
|
||||
run_flags.push_str(&format!(" --entrypoint {entrypoint}"));
|
||||
}
|
||||
if let Some(env) = env {
|
||||
for (k, v) in env {
|
||||
run_flags.push_str(&format!(" -e {}={} ", k, v));
|
||||
}
|
||||
}
|
||||
|
||||
let command_args = command
|
||||
.as_ref()
|
||||
.filter(|v| !v.is_empty())
|
||||
.map(|argv| {
|
||||
let joined = argv
|
||||
.iter()
|
||||
.map(|s| escape(Cow::Borrowed(s)).into_owned())
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ");
|
||||
format!(" {joined}")
|
||||
})
|
||||
.unwrap_or_default();
|
||||
|
||||
let command = format!(
|
||||
"{docker_compose} -p {project_name} -f {file_args}{env_file_args} run{run_flags} {service}{command_args}",
|
||||
);
|
||||
|
||||
let Some(log) = run_komodo_command_with_sanitization(
|
||||
"Compose Run",
|
||||
run_directory.as_path(),
|
||||
command,
|
||||
false,
|
||||
&replacers,
|
||||
)
|
||||
.await
|
||||
else {
|
||||
unreachable!()
|
||||
};
|
||||
|
||||
Ok(log)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,6 +85,7 @@ pub enum PeripheryRequest {
|
||||
ComposePull(ComposePull),
|
||||
ComposeUp(ComposeUp),
|
||||
ComposeExecution(ComposeExecution),
|
||||
ComposeRun(ComposeRun),
|
||||
|
||||
// Container (Read)
|
||||
InspectContainer(InspectContainer),
|
||||
|
||||
@@ -347,7 +347,7 @@ async fn execute_command_on_terminal(
|
||||
);
|
||||
|
||||
let full_command = format!(
|
||||
"printf '\n{START_OF_OUTPUT}\n\n'; {command}; rc=$? printf '\n{KOMODO_EXIT_CODE}%d\n{END_OF_OUTPUT}\n' \"$rc\"\n"
|
||||
"printf '\n{START_OF_OUTPUT}\n\n'; {command}; rc=$?; printf '\n{KOMODO_EXIT_CODE}%d\n{END_OF_OUTPUT}\n' \"$rc\"\n"
|
||||
);
|
||||
|
||||
terminal
|
||||
|
||||
@@ -6,7 +6,7 @@ use std::{
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
entities::{EnvironmentVar, Version, update::Log},
|
||||
entities::{EnvironmentVar, update::Log},
|
||||
parsers::QUOTE_PATTERN,
|
||||
};
|
||||
|
||||
@@ -52,28 +52,6 @@ pub async fn write_dockerfile(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn image_tags(
|
||||
image_name: &str,
|
||||
custom_tag: &str,
|
||||
version: &Version,
|
||||
additional: &[String],
|
||||
) -> String {
|
||||
let Version { major, minor, .. } = version;
|
||||
let custom_tag = if custom_tag.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
format!("-{custom_tag}")
|
||||
};
|
||||
let additional = additional
|
||||
.iter()
|
||||
.map(|tag| format!(" -t {image_name}:{tag}{custom_tag}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("");
|
||||
format!(
|
||||
" -t {image_name}:latest{custom_tag} -t {image_name}:{version}{custom_tag} -t {image_name}:{major}.{minor}{custom_tag} -t {image_name}:{major}{custom_tag}{additional}",
|
||||
)
|
||||
}
|
||||
|
||||
pub fn parse_build_args(build_args: &[EnvironmentVar]) -> String {
|
||||
build_args
|
||||
.iter()
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::path::PathBuf;
|
||||
use std::{fmt::Write, path::PathBuf};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::run_komodo_command;
|
||||
use komodo_client::entities::{
|
||||
RepoExecutionArgs, repo::Repo, stack::Stack,
|
||||
@@ -24,6 +24,35 @@ pub fn docker_compose() -> &'static str {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn env_file_args(
|
||||
env_file_path: Option<&str>,
|
||||
additional_env_files: &[String],
|
||||
) -> anyhow::Result<String> {
|
||||
let mut res = String::new();
|
||||
|
||||
for file in additional_env_files.iter().filter(|&path| {
|
||||
let Some(komodo_path) = env_file_path else {
|
||||
return true;
|
||||
};
|
||||
// Filter komodo env out of additional env file if its also in there.
|
||||
// It will be always be added last / have highest priority.
|
||||
path != komodo_path
|
||||
}) {
|
||||
write!(res, " --env-file {file}").with_context(|| {
|
||||
format!("Failed to write --env-file arg for {file}")
|
||||
})?;
|
||||
}
|
||||
|
||||
// Add this last, so it is applied on top
|
||||
if let Some(file) = env_file_path {
|
||||
write!(res, " --env-file {file}").with_context(|| {
|
||||
format!("Failed to write --env-file arg for {file}")
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub async fn down(
|
||||
project: &str,
|
||||
services: &[String],
|
||||
|
||||
@@ -3,7 +3,9 @@ use std::path::{Path, PathBuf};
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::entities::{
|
||||
FileContents, stack::Stack, update::Log,
|
||||
FileContents,
|
||||
stack::{Stack, StackRemoteFileContents},
|
||||
update::Log,
|
||||
};
|
||||
use periphery_client::api::compose::ComposeUpResponse;
|
||||
use tokio::fs;
|
||||
@@ -16,20 +18,24 @@ pub async fn validate_files(
|
||||
res: &mut ComposeUpResponse,
|
||||
) {
|
||||
let file_paths = stack
|
||||
.file_paths()
|
||||
.iter()
|
||||
.map(|path| {
|
||||
.all_file_dependencies()
|
||||
.into_iter()
|
||||
.map(|file| {
|
||||
(
|
||||
path,
|
||||
// This will remove any intermediate uneeded '/./' in the path
|
||||
run_directory.join(path).components().collect::<PathBuf>(),
|
||||
run_directory
|
||||
.join(&file.path)
|
||||
.components()
|
||||
.collect::<PathBuf>(),
|
||||
file,
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for (path, full_path) in &file_paths {
|
||||
// First validate no missing files
|
||||
for (full_path, file) in &file_paths {
|
||||
if !full_path.exists() {
|
||||
res.missing_files.push(path.to_string());
|
||||
res.missing_files.push(file.path.clone());
|
||||
}
|
||||
}
|
||||
if !res.missing_files.is_empty() {
|
||||
@@ -37,21 +43,20 @@ pub async fn validate_files(
|
||||
"Validate Files",
|
||||
format_serror(
|
||||
&anyhow!(
|
||||
"Ensure the run_directory and file_paths are correct."
|
||||
"Missing files: {}", res.missing_files.join(", ")
|
||||
)
|
||||
.context("A compose file doesn't exist after writing stack.")
|
||||
.context("Ensure the run_directory and all file paths are correct.")
|
||||
.context("A file doesn't exist after writing stack.")
|
||||
.into(),
|
||||
),
|
||||
));
|
||||
return;
|
||||
}
|
||||
|
||||
for (path, full_path) in &file_paths {
|
||||
for (full_path, file) in file_paths {
|
||||
let file_contents =
|
||||
match fs::read_to_string(&full_path).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to read compose file contents at {full_path:?}"
|
||||
)
|
||||
format!("Failed to read file contents at {full_path:?}")
|
||||
}) {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
@@ -61,15 +66,17 @@ pub async fn validate_files(
|
||||
.push(Log::error("Read Compose File", error.clone()));
|
||||
// This should only happen for repo stacks, ie remote error
|
||||
res.remote_errors.push(FileContents {
|
||||
path: path.to_string(),
|
||||
path: file.path,
|
||||
contents: error,
|
||||
});
|
||||
return;
|
||||
}
|
||||
};
|
||||
res.file_contents.push(FileContents {
|
||||
path: path.to_string(),
|
||||
res.file_contents.push(StackRemoteFileContents {
|
||||
path: file.path,
|
||||
contents: file_contents,
|
||||
services: file.services,
|
||||
requires: file.requires,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,9 @@ use komodo_client::entities::{
|
||||
stack::Stack, to_path_compatible_name, update::Log,
|
||||
};
|
||||
use periphery_client::api::{
|
||||
compose::{ComposePullResponse, ComposeUpResponse},
|
||||
compose::{
|
||||
ComposePullResponse, ComposeRunResponse, ComposeUpResponse,
|
||||
},
|
||||
git::{CloneRepo, PullOrCloneRepo},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
@@ -43,6 +45,12 @@ impl WriteStackRes for &mut ComposePullResponse {
|
||||
}
|
||||
}
|
||||
|
||||
impl WriteStackRes for &mut ComposeRunResponse {
|
||||
fn logs(&mut self) -> &mut Vec<Log> {
|
||||
&mut self.logs
|
||||
}
|
||||
}
|
||||
|
||||
/// Either writes the stack file_contents to a file, or clones the repo.
|
||||
/// Asssumes all interpolation is already complete.
|
||||
/// Returns (run_directory, env_file_path, periphery_replacers)
|
||||
|
||||
@@ -37,7 +37,7 @@ pub fn periphery_config() -> &'static PeripheryConfig {
|
||||
.iter()
|
||||
.map(String::as_str)
|
||||
.collect::<Vec<_>>(),
|
||||
include_file_name: ".peripheryignore",
|
||||
include_file_name: ".peripheryinclude",
|
||||
merge_nested: args
|
||||
.merge_nested_config
|
||||
.unwrap_or(env.periphery_merge_nested_config),
|
||||
|
||||
@@ -60,7 +60,14 @@ async fn update_container_stats() {
|
||||
pub async fn get_container_stats(
|
||||
container_name: Option<String>,
|
||||
) -> anyhow::Result<Vec<ContainerStats>> {
|
||||
let format = "--format \"{{ json . }}\"";
|
||||
let format = "--format '{\"BlockIO\":\"{{ .BlockIO }}\", \
|
||||
\"CPUPerc\":\"{{ .CPUPerc }}\", \
|
||||
\"ID\":\"{{ .ID }}\", \
|
||||
\"MemPerc\":\"{{ .MemPerc }}\", \
|
||||
\"MemUsage\":\"{{ .MemUsage }}\", \
|
||||
\"Name\":\"{{ .Name }}\", \
|
||||
\"NetIO\":\"{{ .NetIO }}\",\
|
||||
\"PIDs\":\"{{ .PIDs }}\"}'";
|
||||
let container_name = match container_name {
|
||||
Some(name) => format!(" {name}"),
|
||||
None => "".to_string(),
|
||||
|
||||
@@ -2,7 +2,8 @@ use std::{cmp::Ordering, sync::OnceLock};
|
||||
|
||||
use async_timing_util::wait_until_timelength;
|
||||
use komodo_client::entities::stats::{
|
||||
SingleDiskUsage, SystemInformation, SystemProcess, SystemStats,
|
||||
SingleDiskUsage, SystemInformation, SystemLoadAverage,
|
||||
SystemProcess, SystemStats,
|
||||
};
|
||||
use sysinfo::{ProcessRefreshKind, ProcessesToUpdate, System};
|
||||
use tokio::sync::RwLock;
|
||||
@@ -95,8 +96,15 @@ impl StatsClient {
|
||||
network_egress_bytes += network.transmitted();
|
||||
}
|
||||
|
||||
let load_avg = System::load_average();
|
||||
|
||||
SystemStats {
|
||||
cpu_perc: self.system.global_cpu_usage(),
|
||||
load_average: SystemLoadAverage {
|
||||
one: load_avg.one,
|
||||
five: load_avg.five,
|
||||
fifteen: load_avg.fifteen,
|
||||
},
|
||||
mem_free_gb: self.system.free_memory() as f64 / BYTES_PER_GB,
|
||||
mem_used_gb: (total_mem - available_mem) as f64 / BYTES_PER_GB,
|
||||
mem_total_gb: total_mem as f64 / BYTES_PER_GB,
|
||||
|
||||
@@ -28,6 +28,7 @@ resolver_api.workspace = true
|
||||
# external
|
||||
tokio-tungstenite.workspace = true
|
||||
derive_builder.workspace = true
|
||||
urlencoding.workspace = true
|
||||
serde_json.workspace = true
|
||||
tokio-util.workspace = true
|
||||
thiserror.workspace = true
|
||||
|
||||
@@ -11,6 +11,8 @@ pub trait KomodoAuthRequest: HasResponse {}
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct JwtResponse {
|
||||
/// User ID for signed in user.
|
||||
pub user_id: String,
|
||||
/// A token the user can use to authenticate their requests.
|
||||
pub jwt: String,
|
||||
}
|
||||
@@ -47,19 +49,20 @@ pub struct GetLoginOptionsResponse {
|
||||
|
||||
//
|
||||
|
||||
/// Create a new local user account. Will fail if a user with the
|
||||
/// Sign up a new local user account. Will fail if a user with the
|
||||
/// given username already exists.
|
||||
/// Response: [CreateLocalUserResponse].
|
||||
/// Response: [SignUpLocalUserResponse].
|
||||
///
|
||||
/// Note. This method is only available if the core api has `local_auth` enabled.
|
||||
/// Note. This method is only available if the core api has `local_auth` enabled,
|
||||
/// and if user registration is not disabled (after the first user).
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoAuthRequest)]
|
||||
#[response(CreateLocalUserResponse)]
|
||||
#[response(SignUpLocalUserResponse)]
|
||||
#[error(serror::Error)]
|
||||
pub struct CreateLocalUser {
|
||||
pub struct SignUpLocalUser {
|
||||
/// The username for the new user.
|
||||
pub username: String,
|
||||
/// The password for the new user.
|
||||
@@ -67,9 +70,9 @@ pub struct CreateLocalUser {
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
/// Response for [CreateLocalUser].
|
||||
/// Response for [SignUpLocalUser].
|
||||
#[typeshare]
|
||||
pub type CreateLocalUserResponse = JwtResponse;
|
||||
pub type SignUpLocalUserResponse = JwtResponse;
|
||||
|
||||
//
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ pub struct RunAction {
|
||||
|
||||
/// Custom arguments which are merged on top of the default arguments.
|
||||
/// CLI Format: `"VAR1=val1&VAR2=val2"`
|
||||
///
|
||||
///
|
||||
/// Webhook-triggered actions use this to pass WEBHOOK_BRANCH and WEBHOOK_BODY.
|
||||
#[clap(value_parser = args_parser)]
|
||||
pub args: Option<JsonObject>,
|
||||
@@ -60,7 +60,7 @@ pub struct BatchRunAction {
|
||||
/// Supports multiline and comma delineated combinations of the above.
|
||||
///
|
||||
/// Example:
|
||||
/// ```
|
||||
/// ```text
|
||||
/// # match all foo-* actions
|
||||
/// foo-*
|
||||
/// # add some more
|
||||
|
||||
@@ -4,7 +4,7 @@ use resolver_api::Resolve;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::entities::update::Update;
|
||||
use crate::entities::{alert::SeverityLevel, update::Update};
|
||||
|
||||
use super::KomodoExecuteRequest;
|
||||
|
||||
@@ -27,3 +27,39 @@ pub struct TestAlerter {
|
||||
/// Name or id
|
||||
pub alerter: String,
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Send a custom alert message to configured Alerters. Response: [Update]
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize,
|
||||
Deserialize,
|
||||
Debug,
|
||||
Clone,
|
||||
PartialEq,
|
||||
Resolve,
|
||||
EmptyTraits,
|
||||
Parser,
|
||||
)]
|
||||
#[empty_traits(KomodoExecuteRequest)]
|
||||
#[response(Update)]
|
||||
#[error(serror::Error)]
|
||||
pub struct SendAlert {
|
||||
/// The alert level.
|
||||
#[serde(default)]
|
||||
#[clap(long, short = 'l', default_value_t = SeverityLevel::Ok)]
|
||||
pub level: SeverityLevel,
|
||||
/// The alert message. Required.
|
||||
pub message: String,
|
||||
/// The alert details. Optional.
|
||||
#[serde(default)]
|
||||
#[arg(long, short = 'd', default_value_t = String::new())]
|
||||
pub details: String,
|
||||
/// Specific alerter names or ids.
|
||||
/// If empty / not passed, sends to all configured alerters
|
||||
/// with the `Custom` alert type whitelisted / not blacklisted.
|
||||
#[serde(default)]
|
||||
#[arg(long, short = 'a')]
|
||||
pub alerters: Vec<String>,
|
||||
}
|
||||
|
||||
@@ -64,7 +64,7 @@ pub struct BatchRunBuild {
|
||||
/// Supports multiline and comma delineated combinations of the above.
|
||||
///
|
||||
/// Example:
|
||||
/// ```
|
||||
/// ```text
|
||||
/// # match all foo-* builds
|
||||
/// foo-*
|
||||
/// # add some more
|
||||
|
||||
@@ -62,7 +62,7 @@ pub struct BatchDeploy {
|
||||
/// Supports multiline and comma delineated combinations of the above.
|
||||
///
|
||||
/// Example:
|
||||
/// ```
|
||||
/// ```text
|
||||
/// # match all foo-* deployments
|
||||
/// foo-*
|
||||
/// # add some more
|
||||
@@ -270,7 +270,7 @@ pub struct BatchDestroyDeployment {
|
||||
/// Supports multiline and comma delineated combinations of the above.
|
||||
///
|
||||
/// Example:
|
||||
/// ```
|
||||
/// ```text
|
||||
/// # match all foo-* deployments
|
||||
/// foo-*
|
||||
/// # add some more
|
||||
|
||||
@@ -9,6 +9,7 @@ use crate::entities::update::Update;
|
||||
use super::KomodoExecuteRequest;
|
||||
|
||||
/// Clears all repos from the Core repo cache. Admin only.
|
||||
/// Response: [Update]
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Debug,
|
||||
@@ -25,13 +26,14 @@ use super::KomodoExecuteRequest;
|
||||
#[error(serror::Error)]
|
||||
pub struct ClearRepoCache {}
|
||||
|
||||
/// Backs up the database to compressed jsonl files. Admin only.
|
||||
/// Backs up the Komodo Core database to compressed jsonl files.
|
||||
/// Admin only. Response: [Update]
|
||||
///
|
||||
/// Mount a folder to `/backups`, and Core will use it to create
|
||||
/// timestamped database dumps, which can be restored using
|
||||
/// `ghcr.io/moghtech/komodo-util`.
|
||||
/// the Komodo CLI.
|
||||
///
|
||||
/// TODO: Link to docs
|
||||
/// https://komo.do/docs/setup/backup
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Debug,
|
||||
@@ -46,4 +48,26 @@ pub struct ClearRepoCache {}
|
||||
#[empty_traits(KomodoExecuteRequest)]
|
||||
#[response(Update)]
|
||||
#[error(serror::Error)]
|
||||
pub struct BackupDatabase {}
|
||||
pub struct BackupCoreDatabase {}
|
||||
|
||||
/// Trigger a global poll for image updates on Stacks and Deployments
|
||||
/// with `poll_for_updates` or `auto_update` enabled.
|
||||
/// Admin only. Response: [Update]
|
||||
///
|
||||
/// 1. `docker compose pull` any Stacks / Deployments with `poll_for_updates` or `auto_update` enabled. This will pick up any available updates.
|
||||
/// 2. Redeploy Stacks / Deployments that have updates found and 'auto_update' enabled.
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Debug,
|
||||
Clone,
|
||||
PartialEq,
|
||||
Serialize,
|
||||
Deserialize,
|
||||
Resolve,
|
||||
EmptyTraits,
|
||||
Parser,
|
||||
)]
|
||||
#[empty_traits(KomodoExecuteRequest)]
|
||||
#[response(Update)]
|
||||
#[error(serror::Error)]
|
||||
pub struct GlobalAutoUpdate {}
|
||||
|
||||
@@ -152,12 +152,17 @@ pub enum Execution {
|
||||
StopStack(StopStack),
|
||||
DestroyStack(DestroyStack),
|
||||
BatchDestroyStack(BatchDestroyStack),
|
||||
RunStackService(RunStackService),
|
||||
|
||||
// ALERTER
|
||||
TestAlerter(TestAlerter),
|
||||
#[clap(alias = "alert")]
|
||||
SendAlert(SendAlert),
|
||||
|
||||
// CORE
|
||||
// MAINTENANCE
|
||||
ClearRepoCache(ClearRepoCache),
|
||||
BackupCoreDatabase(BackupCoreDatabase),
|
||||
GlobalAutoUpdate(GlobalAutoUpdate),
|
||||
|
||||
// SLEEP
|
||||
Sleep(Sleep),
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user