mirror of
https://github.com/moghtech/komodo.git
synced 2026-03-16 21:51:05 -05:00
Compare commits
156 Commits
v2.0.0-dev
...
v1.19.0-de
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
54d167fe1e | ||
|
|
6b6743876e | ||
|
|
38a4c4a470 | ||
|
|
84368411ca | ||
|
|
e41f350776 | ||
|
|
3fef8644cc | ||
|
|
baef9cc1b1 | ||
|
|
7c2403b7d1 | ||
|
|
95ffa5b4f5 | ||
|
|
72a095f6be | ||
|
|
67c869931f | ||
|
|
42ea59aa23 | ||
|
|
3abbab87e6 | ||
|
|
d16ebbe526 | ||
|
|
b44a6f7e57 | ||
|
|
1f078a0432 | ||
|
|
7f9b7aa6cf | ||
|
|
96aefa75a9 | ||
|
|
9cf2df74ef | ||
|
|
47f5e5bd37 | ||
|
|
51683612f5 | ||
|
|
108c9215d1 | ||
|
|
1fd3836a4a | ||
|
|
b765e9576d | ||
|
|
cea6fd2038 | ||
|
|
60c3eda08a | ||
|
|
b09419cd4b | ||
|
|
13ff216ea7 | ||
|
|
d56d34c448 | ||
|
|
d0da77a6b8 | ||
|
|
7725cfa5bd | ||
|
|
cc5d947412 | ||
|
|
3badb678db | ||
|
|
f8af6da1a2 | ||
|
|
dca0ae772b | ||
|
|
558fb7bade | ||
|
|
cb7943ed5b | ||
|
|
92967d3278 | ||
|
|
b796dc8398 | ||
|
|
7bdbaca4b4 | ||
|
|
a90971eea6 | ||
|
|
5643424ed8 | ||
|
|
b1f5cccc0a | ||
|
|
e82a268861 | ||
|
|
77b3c6d325 | ||
|
|
9f90a913fc | ||
|
|
76e84ddd6e | ||
|
|
2dd759e375 | ||
|
|
b2b9df0e2a | ||
|
|
6a42d47b6f | ||
|
|
ca440c14b6 | ||
|
|
f2478de853 | ||
|
|
ebb4ea3c87 | ||
|
|
77ffb14f7d | ||
|
|
7430798f31 | ||
|
|
881542a21c | ||
|
|
0857583778 | ||
|
|
7d9eab92e7 | ||
|
|
d9abbbadaf | ||
|
|
877a013fc0 | ||
|
|
a6811783ec | ||
|
|
7c406fd9db | ||
|
|
c84083062c | ||
|
|
4efc27b4db | ||
|
|
9f413fec21 | ||
|
|
e8e1d1dfbc | ||
|
|
bceb1e6ebf | ||
|
|
0222ad3b3c | ||
|
|
3dad87e18f | ||
|
|
1a5779ca14 | ||
|
|
a24fefca52 | ||
|
|
25e957ef0c | ||
|
|
0629744356 | ||
|
|
5f6469cb01 | ||
|
|
7b502cc6ca | ||
|
|
2c328f27e4 | ||
|
|
34da2063f9 | ||
|
|
cac20e4ff2 | ||
|
|
6c17049cb7 | ||
|
|
9f4b246e43 | ||
|
|
5dd865a856 | ||
|
|
7293a7004a | ||
|
|
b82ec53c33 | ||
|
|
7ce1fdaf78 | ||
|
|
ce0425befd | ||
|
|
4cbd9d2ac7 | ||
|
|
8b92d1e435 | ||
|
|
593967b6c7 | ||
|
|
e2a38a75f8 | ||
|
|
9847b9499a | ||
|
|
2e1d001bfe | ||
|
|
65be710adc | ||
|
|
f122a3e18b | ||
|
|
250bc017ca | ||
|
|
4f93ef4d52 | ||
|
|
9c871497e4 | ||
|
|
fa5ac54243 | ||
|
|
3720c92908 | ||
|
|
aa99982687 | ||
|
|
232437a9d8 | ||
|
|
ea0381ad41 | ||
|
|
10476e561d | ||
|
|
f3e77ccffe | ||
|
|
c94d83bc3a | ||
|
|
9ede3977c5 | ||
|
|
160709788a | ||
|
|
3c89a72841 | ||
|
|
9e71f008df | ||
|
|
c4cbfada00 | ||
|
|
24890e8aed | ||
|
|
5a272d1e98 | ||
|
|
1444e803de | ||
|
|
dd59f56b6d | ||
|
|
4729dc2e3c | ||
|
|
178203c07c | ||
|
|
919ae99e2c | ||
|
|
62ea22af7f | ||
|
|
d17ba3f9c4 | ||
|
|
03f3b2c80d | ||
|
|
e2c52fea6b | ||
|
|
ae9f2d9f74 | ||
|
|
842c67bb50 | ||
|
|
056050e534 | ||
|
|
88ac4cf9dd | ||
|
|
c58ec67f0a | ||
|
|
2ba2cb1e0b | ||
|
|
ebe9bd75ad | ||
|
|
4faf031b8a | ||
|
|
738c347155 | ||
|
|
4c1d7a61dc | ||
|
|
7af8e5b287 | ||
|
|
8bf4ea482a | ||
|
|
527b300caa | ||
|
|
d0067f0d2b | ||
|
|
cca040c718 | ||
|
|
63be48e2fb | ||
|
|
d723515d28 | ||
|
|
0c34695d1a | ||
|
|
3c4551ae19 | ||
|
|
da5ac544f6 | ||
|
|
c3dc710598 | ||
|
|
6c3b7cc30b | ||
|
|
8b19fdba40 | ||
|
|
5e3ef5910a | ||
|
|
08e2c51301 | ||
|
|
7943655fb8 | ||
|
|
e22eda939c | ||
|
|
a28892e726 | ||
|
|
295f9927d4 | ||
|
|
a9aad11a51 | ||
|
|
aafb5a68ed | ||
|
|
1e4c7b8498 | ||
|
|
aca4c09bf8 | ||
|
|
5af8dd4553 | ||
|
|
75627901ce | ||
|
|
adcb0c0755 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,6 +1,7 @@
|
||||
target
|
||||
node_modules
|
||||
dist
|
||||
deno.lock
|
||||
.env
|
||||
.env.development
|
||||
.DS_Store
|
||||
@@ -9,5 +10,4 @@ dist
|
||||
/frontend/build
|
||||
/lib/ts_client/build
|
||||
|
||||
creds.toml
|
||||
.dev
|
||||
|
||||
1
.kminclude
Normal file
1
.kminclude
Normal file
@@ -0,0 +1 @@
|
||||
.dev
|
||||
1393
Cargo.lock
generated
1393
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
52
Cargo.toml
52
Cargo.toml
@@ -8,7 +8,7 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.18.4"
|
||||
version = "1.19.0-dev-9"
|
||||
edition = "2024"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
@@ -23,8 +23,10 @@ environment_file = { path = "lib/environment_file" }
|
||||
environment = { path = "lib/environment" }
|
||||
interpolate = { path = "lib/interpolate" }
|
||||
formatting = { path = "lib/formatting" }
|
||||
database = { path = "lib/database" }
|
||||
response = { path = "lib/response" }
|
||||
command = { path = "lib/command" }
|
||||
config = { path = "lib/config" }
|
||||
logger = { path = "lib/logger" }
|
||||
cache = { path = "lib/cache" }
|
||||
git = { path = "lib/git" }
|
||||
@@ -35,20 +37,19 @@ serror = { version = "0.5.0", default-features = false }
|
||||
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
merge_config_files = "0.1.5"
|
||||
async_timing_util = "1.0.0"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "2.0.1"
|
||||
mongo_indexed = "2.0.2"
|
||||
resolver_api = "3.0.0"
|
||||
toml_pretty = "1.1.2"
|
||||
mungos = "3.2.0"
|
||||
svi = "1.1.0"
|
||||
mungos = "3.2.1"
|
||||
svi = "1.2.0"
|
||||
|
||||
# ASYNC
|
||||
reqwest = { version = "0.12.20", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.45.1", features = ["full"] }
|
||||
tokio-util = { version = "0.7.15", features = ["io", "codec"] }
|
||||
reqwest = { version = "0.12.22", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.47.1", features = ["full"] }
|
||||
tokio-util = { version = "0.7.16", features = ["io", "codec"] }
|
||||
tokio-stream = { version = "0.1.17", features = ["sync"] }
|
||||
pin-project-lite = "0.2.16"
|
||||
futures = "0.3.31"
|
||||
@@ -63,13 +64,14 @@ axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
|
||||
|
||||
# SER/DE
|
||||
indexmap = { version = "2.9.0", features = ["serde"] }
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
indexmap = { version = "2.10.0", features = ["serde"] }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
strum = { version = "0.27.1", features = ["derive"] }
|
||||
serde_json = "1.0.140"
|
||||
serde_yaml = "0.9.34"
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
serde_yaml_ng = "0.10.0"
|
||||
serde_json = "1.0.142"
|
||||
serde_qs = "0.15.0"
|
||||
toml = "0.8.23"
|
||||
toml = "0.9.5"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.98"
|
||||
@@ -85,42 +87,44 @@ opentelemetry = "0.30.0"
|
||||
tracing = "0.1.41"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.40", features = ["derive"] }
|
||||
clap = { version = "4.5.43", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO / AUTH
|
||||
uuid = { version = "1.17.0", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "9.3.1", default-features = false }
|
||||
openidconnect = "4.0.0"
|
||||
openidconnect = "4.0.1"
|
||||
urlencoding = "2.1.3"
|
||||
nom_pem = "4.0.0"
|
||||
bcrypt = "0.17.0"
|
||||
base64 = "0.22.1"
|
||||
rustls = "0.23.27"
|
||||
rustls = "0.23.31"
|
||||
hmac = "0.12.1"
|
||||
sha2 = "0.10.9"
|
||||
rand = "0.9.1"
|
||||
rand = "0.9.2"
|
||||
hex = "0.4.3"
|
||||
|
||||
# SYSTEM
|
||||
portable-pty = "0.9.0"
|
||||
bollard = "0.19.1"
|
||||
sysinfo = "0.35.2"
|
||||
bollard = "0.19.2"
|
||||
sysinfo = "0.36.1"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.8.0"
|
||||
aws-sdk-ec2 = "1.139.0"
|
||||
aws-credential-types = "1.2.3"
|
||||
aws-config = "1.8.4"
|
||||
aws-sdk-ec2 = "1.156.0"
|
||||
aws-credential-types = "1.2.5"
|
||||
|
||||
## CRON
|
||||
english-to-cron = "0.1.6"
|
||||
chrono-tz = "0.10.3"
|
||||
chrono-tz = "0.10.4"
|
||||
chrono = "0.4.41"
|
||||
croner = "2.1.0"
|
||||
croner = "3.0.0"
|
||||
|
||||
# MISC
|
||||
async-compression = { version = "0.4.27", features = ["tokio", "gzip"] }
|
||||
derive_builder = "0.20.2"
|
||||
comfy-table = "7.1.4"
|
||||
typeshare = "1.0.4"
|
||||
octorust = "0.10.0"
|
||||
dashmap = "6.1.0"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
FROM rust:1.87.0-bullseye AS builder
|
||||
FROM rust:1.88.0-bullseye AS builder
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -10,20 +10,20 @@ COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/core ./bin/core
|
||||
COPY ./bin/periphery ./bin/periphery
|
||||
COPY ./bin/util ./bin/util
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN \
|
||||
cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_periphery --release && \
|
||||
cargo build -p komodo_util --release
|
||||
cargo build -p komodo_cli --release
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/util /util
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
|
||||
@@ -1,30 +1,36 @@
|
||||
[package]
|
||||
name = "komodo_cli"
|
||||
description = "Command line tool to execute Komodo actions"
|
||||
description = "Command line tool for Komodo"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
homepage.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "komodo"
|
||||
name = "km"
|
||||
path = "src/main.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
# komodo_client = "1.16.12"
|
||||
environment_file.workspace = true
|
||||
komodo_client.workspace = true
|
||||
database.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
# external
|
||||
tracing-subscriber.workspace = true
|
||||
merge_config_files.workspace = true
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
comfy-table.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_qs.workspace = true
|
||||
wildcard.workspace = true
|
||||
tracing.workspace = true
|
||||
colored.workspace = true
|
||||
dotenvy.workspace = true
|
||||
anyhow.workspace = true
|
||||
chrono.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
clap.workspace = true
|
||||
envy.workspace = true
|
||||
@@ -1,22 +1,24 @@
|
||||
FROM rust:1.87.0-bullseye AS builder
|
||||
FROM rust:1.88.0-bullseye AS builder
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY ./lib ./lib
|
||||
COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/util ./bin/util
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN cargo build -p komodo_util --release
|
||||
RUN cargo build -p komodo_cli --release
|
||||
|
||||
# Copy binaries to distroless base
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
COPY --from=builder /builder/target/release/util /usr/local/bin/util
|
||||
COPY --from=builder /builder/target/release/km /usr/local/bin/km
|
||||
|
||||
CMD [ "util" ]
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Util"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
@@ -14,14 +14,16 @@ FROM debian:bullseye-slim
|
||||
WORKDIR /app
|
||||
|
||||
## Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /util /app/arch/linux/amd64
|
||||
COPY --from=aarch64 /util /app/arch/linux/arm64
|
||||
COPY --from=x86_64 /km /app/arch/linux/amd64
|
||||
COPY --from=aarch64 /km /app/arch/linux/arm64
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/util && rm -r /app/arch
|
||||
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/arch
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Util"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD [ "util" ]
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
4
bin/cli/runfile.toml
Normal file
4
bin/cli/runfile.toml
Normal file
@@ -0,0 +1,4 @@
|
||||
[install-cli]
|
||||
alias = "ic"
|
||||
description = "installs the komodo-cli, available on the command line as 'km'"
|
||||
cmd = "cargo install --path ."
|
||||
@@ -7,10 +7,12 @@ FROM ${BINARIES_IMAGE} AS binaries
|
||||
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
COPY --from=binaries /util /usr/local/bin/util
|
||||
COPY --from=binaries /km /usr/local/bin/km
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Util"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD [ "util" ]
|
||||
@@ -1,55 +0,0 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use komodo_client::api::execute::Execution;
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
pub struct CliArgs {
|
||||
/// Sync or Exec
|
||||
#[command(subcommand)]
|
||||
pub command: Command,
|
||||
|
||||
/// The path to a creds file.
|
||||
///
|
||||
/// Note: If each of `url`, `key` and `secret` are passed,
|
||||
/// no file is required at this path.
|
||||
#[arg(long, default_value_t = default_creds())]
|
||||
pub creds: String,
|
||||
|
||||
/// Pass url in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub url: Option<String>,
|
||||
/// Pass api key in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub key: Option<String>,
|
||||
/// Pass api secret in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub secret: Option<String>,
|
||||
|
||||
/// Always continue on user confirmation prompts.
|
||||
#[arg(long, short, default_value_t = false)]
|
||||
pub yes: bool,
|
||||
}
|
||||
|
||||
fn default_creds() -> String {
|
||||
let home =
|
||||
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
|
||||
format!("{home}/.config/komodo/creds.toml")
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Subcommand)]
|
||||
pub enum Command {
|
||||
/// Runs an execution
|
||||
Execute {
|
||||
#[command(subcommand)]
|
||||
execution: Execution,
|
||||
},
|
||||
// Room for more
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CredsFile {
|
||||
pub url: String,
|
||||
pub key: String,
|
||||
pub secret: String,
|
||||
}
|
||||
281
bin/cli/src/command/container.rs
Normal file
281
bin/cli/src/command/container.rs
Normal file
@@ -0,0 +1,281 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use comfy_table::{Attribute, Cell, Color};
|
||||
use futures_util::{
|
||||
FutureExt, TryStreamExt, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
InspectDockerContainer, ListAllDockerContainers, ListServers,
|
||||
},
|
||||
entities::{
|
||||
config::cli::args::container::{
|
||||
Container, ContainerCommand, InspectContainer,
|
||||
},
|
||||
docker::container::{
|
||||
ContainerListItem, ContainerStateStatusEnum,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
command::{
|
||||
PrintTable, matches_wildcards, parse_wildcards, print_items,
|
||||
},
|
||||
config::cli_config,
|
||||
};
|
||||
|
||||
pub async fn handle(container: &Container) -> anyhow::Result<()> {
|
||||
match &container.command {
|
||||
None => list_containers(container).await,
|
||||
Some(ContainerCommand::Inspect(inspect)) => {
|
||||
inspect_container(inspect).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_containers(
|
||||
Container {
|
||||
all,
|
||||
down,
|
||||
reverse,
|
||||
containers: names,
|
||||
images,
|
||||
networks,
|
||||
servers,
|
||||
format,
|
||||
command: _,
|
||||
}: &Container,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = super::komodo_client().await?;
|
||||
let (server_map, containers) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers::default())
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default()
|
||||
}),
|
||||
)?;
|
||||
|
||||
// (Option<Server Name>, Container)
|
||||
let containers = containers.into_iter().map(|c| {
|
||||
let server = if let Some(server_id) = c.server_id.as_ref()
|
||||
&& let Some(server) = server_map.get(server_id)
|
||||
{
|
||||
server
|
||||
} else {
|
||||
return (None, c);
|
||||
};
|
||||
(Some(server.name.as_str()), c)
|
||||
});
|
||||
|
||||
let names = parse_wildcards(names);
|
||||
let servers = parse_wildcards(servers);
|
||||
let images = parse_wildcards(images);
|
||||
let networks = parse_wildcards(networks);
|
||||
|
||||
let mut containers = containers
|
||||
.into_iter()
|
||||
.filter(|(server_name, c)| {
|
||||
let state_check = if *all {
|
||||
true
|
||||
} else if *down {
|
||||
!matches!(c.state, ContainerStateStatusEnum::Running)
|
||||
} else {
|
||||
matches!(c.state, ContainerStateStatusEnum::Running)
|
||||
};
|
||||
let network_check = matches_wildcards(
|
||||
&networks,
|
||||
&c.network_mode
|
||||
.as_deref()
|
||||
.map(|n| vec![n])
|
||||
.unwrap_or_default(),
|
||||
) || matches_wildcards(
|
||||
&networks,
|
||||
&c.networks.iter().map(String::as_str).collect::<Vec<_>>(),
|
||||
);
|
||||
state_check
|
||||
&& network_check
|
||||
&& matches_wildcards(&names, &[c.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&server_name
|
||||
.as_deref()
|
||||
.map(|i| vec![i])
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
&& matches_wildcards(
|
||||
&images,
|
||||
&c.image.as_deref().map(|i| vec![i]).unwrap_or_default(),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
containers.sort_by(|(a_s, a), (b_s, b)| {
|
||||
a.state
|
||||
.cmp(&b.state)
|
||||
.then(a.name.cmp(&b.name))
|
||||
.then(a_s.cmp(b_s))
|
||||
.then(a.network_mode.cmp(&b.network_mode))
|
||||
.then(a.image.cmp(&b.image))
|
||||
});
|
||||
if *reverse {
|
||||
containers.reverse();
|
||||
}
|
||||
print_items(containers, *format)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn inspect_container(
|
||||
inspect: &InspectContainer,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = super::komodo_client().await?;
|
||||
let (server_map, mut containers) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers::default())
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default()
|
||||
}),
|
||||
)?;
|
||||
|
||||
containers.iter_mut().for_each(|c| {
|
||||
let Some(server_id) = c.server_id.as_ref() else {
|
||||
return;
|
||||
};
|
||||
let Some(server) = server_map.get(server_id) else {
|
||||
c.server_id = Some(String::from("Unknown"));
|
||||
return;
|
||||
};
|
||||
c.server_id = Some(server.name.clone());
|
||||
});
|
||||
|
||||
let names = [inspect.container.to_string()];
|
||||
let names = parse_wildcards(&names);
|
||||
let servers = parse_wildcards(&inspect.servers);
|
||||
|
||||
let mut containers = containers
|
||||
.into_iter()
|
||||
.filter(|c| {
|
||||
matches_wildcards(&names, &[c.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&c.server_id
|
||||
.as_deref()
|
||||
.map(|i| vec![i])
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
})
|
||||
.map(|c| async move {
|
||||
client
|
||||
.read(InspectDockerContainer {
|
||||
container: c.name,
|
||||
server: c.server_id.context("No server...")?,
|
||||
})
|
||||
.await
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
|
||||
containers.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
match containers.len() {
|
||||
0 => {
|
||||
println!(
|
||||
"{}: Did not find any containers matching '{}'",
|
||||
"INFO".green(),
|
||||
inspect.container.bold()
|
||||
);
|
||||
}
|
||||
1 => {
|
||||
println!(
|
||||
"{}",
|
||||
serde_json::to_string_pretty(&containers[0])
|
||||
.context("Failed to serialize items to JSON")?
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
println!(
|
||||
"{}",
|
||||
serde_json::to_string_pretty(&containers)
|
||||
.context("Failed to serialize items to JSON")?
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// (Option<Server Name>, Container)
|
||||
impl PrintTable for (Option<&'_ str>, ContainerListItem) {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&[
|
||||
"Container",
|
||||
"State",
|
||||
"Server",
|
||||
"Ports",
|
||||
"Networks",
|
||||
"Image",
|
||||
"Link",
|
||||
]
|
||||
}
|
||||
fn row(self) -> Vec<Cell> {
|
||||
let color = match self.1.state {
|
||||
ContainerStateStatusEnum::Running => Color::Green,
|
||||
ContainerStateStatusEnum::Paused => Color::DarkYellow,
|
||||
ContainerStateStatusEnum::Empty => Color::Grey,
|
||||
_ => Color::Red,
|
||||
};
|
||||
let mut networks = HashSet::new();
|
||||
if let Some(network) = self.1.network_mode {
|
||||
networks.insert(network);
|
||||
}
|
||||
for network in self.1.networks {
|
||||
networks.insert(network);
|
||||
}
|
||||
let mut networks = networks.into_iter().collect::<Vec<_>>();
|
||||
networks.sort();
|
||||
let mut ports = self
|
||||
.1
|
||||
.ports
|
||||
.into_iter()
|
||||
.flat_map(|p| p.public_port.map(|p| p.to_string()))
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
ports.sort();
|
||||
let ports = if ports.is_empty() {
|
||||
Cell::new("")
|
||||
} else {
|
||||
Cell::new(format!(":{}", ports.join(", :")))
|
||||
};
|
||||
let link = if let Some(server_id) = self.1.server_id {
|
||||
format!(
|
||||
"{}/servers/{server_id}/container/{}",
|
||||
cli_config().host,
|
||||
self.1.name
|
||||
)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.1.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.1.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.0.unwrap_or("Unknown")),
|
||||
ports,
|
||||
Cell::new(networks.join(", ")),
|
||||
Cell::new(self.1.image.as_deref().unwrap_or("Unknown")),
|
||||
Cell::new(link),
|
||||
]
|
||||
}
|
||||
}
|
||||
320
bin/cli/src/command/database.rs
Normal file
320
bin/cli/src/command/database.rs
Normal file
@@ -0,0 +1,320 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::entities::{
|
||||
config::cli::args::database::DatabaseCommand, optional_string,
|
||||
};
|
||||
|
||||
use crate::{command::sanitize_uri, config::cli_config};
|
||||
|
||||
pub async fn handle(command: &DatabaseCommand) -> anyhow::Result<()> {
|
||||
match command {
|
||||
DatabaseCommand::Backup { yes, .. } => backup(*yes).await,
|
||||
DatabaseCommand::Restore {
|
||||
restore_folder,
|
||||
index,
|
||||
yes,
|
||||
..
|
||||
} => restore(restore_folder.as_deref(), *index, *yes).await,
|
||||
DatabaseCommand::Prune { yes, .. } => prune(*yes).await,
|
||||
DatabaseCommand::Copy { yes, index, .. } => {
|
||||
copy(*index, *yes).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn backup(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Backup".green().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Backup all database contents to gzip compressed files."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Source Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Source Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Source Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
println!(
|
||||
"{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if config.max_backups == 0 {
|
||||
println!(
|
||||
"{}{}",
|
||||
" - Backup pruning".dimmed(),
|
||||
"disabled".red().dimmed()
|
||||
);
|
||||
} else {
|
||||
println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start backup", yes)?;
|
||||
|
||||
let db = database::init(&config.database).await?;
|
||||
|
||||
database::utils::backup(&db, &config.backups_folder).await?;
|
||||
|
||||
// Early return if backup pruning disabled
|
||||
if config.max_backups == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Know that new backup was taken successfully at this point,
|
||||
// safe to prune old backup folders
|
||||
|
||||
prune_inner().await
|
||||
}
|
||||
|
||||
async fn restore(
|
||||
restore_folder: Option<&Path>,
|
||||
index: bool,
|
||||
yes: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Restore".purple().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Restores database contents from gzip compressed files."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database_target.uri) {
|
||||
println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) =
|
||||
optional_string(&config.database_target.address)
|
||||
{
|
||||
println!("{}: {address}", " - Target Address".dimmed());
|
||||
}
|
||||
if let Some(username) =
|
||||
optional_string(&config.database_target.username)
|
||||
{
|
||||
println!("{}: {username}", " - Target Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Name".dimmed(),
|
||||
config.database_target.db_name,
|
||||
);
|
||||
if !index {
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Indexing".dimmed(),
|
||||
"DISABLED".red(),
|
||||
);
|
||||
}
|
||||
println!(
|
||||
"\n{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if let Some(restore_folder) = restore_folder {
|
||||
println!("{}: {restore_folder:?}", " - Restore Folder".dimmed());
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start restore", yes)?;
|
||||
|
||||
let db = if index {
|
||||
database::Client::new(&config.database_target).await?.db
|
||||
} else {
|
||||
database::init(&config.database_target).await?
|
||||
};
|
||||
|
||||
database::utils::restore(
|
||||
&db,
|
||||
&config.backups_folder,
|
||||
restore_folder,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn prune(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Backup Prune".cyan().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Prunes database backup folders when greater than the configured amount."
|
||||
.dimmed()
|
||||
);
|
||||
println!(
|
||||
"{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if config.max_backups == 0 {
|
||||
println!(
|
||||
"{}{}",
|
||||
" - Backup pruning".dimmed(),
|
||||
"disabled".red().dimmed()
|
||||
);
|
||||
} else {
|
||||
println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
|
||||
}
|
||||
|
||||
// Early return if backup pruning disabled
|
||||
if config.max_backups == 0 {
|
||||
info!(
|
||||
"Backup pruning is disabled, enabled using 'max_backups' (KOMODO_CLI_MAX_BACKUPS)"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start backup prune", yes)?;
|
||||
|
||||
prune_inner().await
|
||||
}
|
||||
|
||||
async fn prune_inner() -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
let mut backups_dir =
|
||||
match tokio::fs::read_dir(&config.backups_folder)
|
||||
.await
|
||||
.context("Failed to read backups folder for prune")
|
||||
{
|
||||
Ok(backups_dir) => backups_dir,
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let mut backup_folders = Vec::new();
|
||||
loop {
|
||||
match backups_dir.next_entry().await {
|
||||
Ok(Some(entry)) => {
|
||||
let Ok(metadata) = entry.metadata().await else {
|
||||
continue;
|
||||
};
|
||||
if metadata.is_dir() {
|
||||
backup_folders.push(entry.path());
|
||||
}
|
||||
}
|
||||
Ok(None) => break,
|
||||
Err(_) => {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ordered from oldest -> newest
|
||||
backup_folders.sort();
|
||||
|
||||
let max_backups = config.max_backups as usize;
|
||||
let backup_folders_len = backup_folders.len();
|
||||
|
||||
// Early return if under the backup count threshold
|
||||
if backup_folders_len <= max_backups {
|
||||
info!("No backups to prune");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let to_delete =
|
||||
&backup_folders[..(backup_folders_len - max_backups)];
|
||||
|
||||
info!("Pruning old backups: {to_delete:?}");
|
||||
|
||||
for path in to_delete {
|
||||
if let Err(e) =
|
||||
tokio::fs::remove_dir_all(path).await.with_context(|| {
|
||||
format!("Failed to delete backup folder at {path:?}")
|
||||
})
|
||||
{
|
||||
warn!("{e:#}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn copy(index: bool, yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Copy".blue().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Copies database contents to another database.".dimmed()
|
||||
);
|
||||
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Source Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Source Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Source Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
|
||||
if let Some(uri) = optional_string(&config.database_target.uri) {
|
||||
println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) =
|
||||
optional_string(&config.database_target.address)
|
||||
{
|
||||
println!("{}: {address}", " - Target Address".dimmed());
|
||||
}
|
||||
if let Some(username) =
|
||||
optional_string(&config.database_target.username)
|
||||
{
|
||||
println!("{}: {username}", " - Target Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Name".dimmed(),
|
||||
config.database_target.db_name,
|
||||
);
|
||||
if !index {
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Indexing".dimmed(),
|
||||
"DISABLED".red(),
|
||||
);
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start copy", yes)?;
|
||||
|
||||
let source_db = database::init(&config.database).await?;
|
||||
let target_db = if index {
|
||||
database::Client::new(&config.database_target).await?.db
|
||||
} else {
|
||||
database::init(&config.database_target).await?
|
||||
};
|
||||
|
||||
database::utils::copy(&source_db, &target_db).await
|
||||
}
|
||||
@@ -1,22 +1,25 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use colored::Colorize;
|
||||
use futures_util::{StreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::{BatchExecutionResponse, Execution},
|
||||
entities::update::Update,
|
||||
api::execute::{
|
||||
BatchExecutionResponse, BatchExecutionResponseItem, Execution,
|
||||
},
|
||||
entities::{resource_link, update::Update},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
helpers::wait_for_enter,
|
||||
state::{cli_args, komodo_client},
|
||||
};
|
||||
use crate::config::cli_config;
|
||||
|
||||
pub enum ExecutionResult {
|
||||
enum ExecutionResult {
|
||||
Single(Box<Update>),
|
||||
Batch(BatchExecutionResponse),
|
||||
}
|
||||
|
||||
pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
pub async fn handle(
|
||||
execution: &Execution,
|
||||
yes: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
if matches!(execution, Execution::None(_)) {
|
||||
println!("Got 'none' execution. Doing nothing...");
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
@@ -25,7 +28,7 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
}
|
||||
|
||||
println!("\n{}: Execution", "Mode".dimmed());
|
||||
match &execution {
|
||||
match execution {
|
||||
Execution::None(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
@@ -212,259 +215,254 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
Execution::TestAlerter(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::ClearRepoCache(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
}
|
||||
|
||||
if !cli_args().yes {
|
||||
wait_for_enter("run execution")?;
|
||||
}
|
||||
super::wait_for_enter("run execution", yes)?;
|
||||
|
||||
info!("Running Execution...");
|
||||
|
||||
let res = match execution {
|
||||
Execution::RunAction(request) => komodo_client()
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let res = match execution.clone() {
|
||||
Execution::RunAction(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunAction(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::RunProcedure(request) => komodo_client()
|
||||
Execution::BatchRunAction(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunProcedure(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunProcedure(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::RunBuild(request) => komodo_client()
|
||||
Execution::BatchRunProcedure(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CancelBuild(request) => komodo_client()
|
||||
Execution::BatchRunBuild(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CancelBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Deploy(request) => komodo_client()
|
||||
Execution::Deploy(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeploy(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullDeployment(request) => komodo_client()
|
||||
Execution::BatchDeploy(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartDeployment(request) => komodo_client()
|
||||
Execution::StartDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartDeployment(request) => komodo_client()
|
||||
Execution::RestartDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseDeployment(request) => komodo_client()
|
||||
Execution::PauseDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseDeployment(request) => komodo_client()
|
||||
Execution::UnpauseDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopDeployment(request) => komodo_client()
|
||||
Execution::StopDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyDeployment(request) => komodo_client()
|
||||
Execution::DestroyDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDestroyDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CloneRepo(request) => komodo_client()
|
||||
Execution::BatchDestroyDeployment(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CloneRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchCloneRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullRepo(request) => komodo_client()
|
||||
Execution::BatchCloneRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchPullRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::BuildRepo(request) => komodo_client()
|
||||
Execution::BatchPullRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::BuildRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchBuildRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CancelRepoBuild(request) => komodo_client()
|
||||
Execution::BatchBuildRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CancelRepoBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartContainer(request) => komodo_client()
|
||||
Execution::StartContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartContainer(request) => komodo_client()
|
||||
Execution::RestartContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseContainer(request) => komodo_client()
|
||||
Execution::PauseContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseContainer(request) => komodo_client()
|
||||
Execution::UnpauseContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopContainer(request) => komodo_client()
|
||||
Execution::StopContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyContainer(request) => komodo_client()
|
||||
Execution::DestroyContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartAllContainers(request) => komodo_client()
|
||||
Execution::StartAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartAllContainers(request) => komodo_client()
|
||||
Execution::RestartAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseAllContainers(request) => komodo_client()
|
||||
Execution::PauseAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseAllContainers(request) => komodo_client()
|
||||
Execution::UnpauseAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopAllContainers(request) => komodo_client()
|
||||
Execution::StopAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneContainers(request) => komodo_client()
|
||||
Execution::PruneContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteNetwork(request) => komodo_client()
|
||||
Execution::DeleteNetwork(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneNetworks(request) => komodo_client()
|
||||
Execution::PruneNetworks(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteImage(request) => komodo_client()
|
||||
Execution::DeleteImage(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneImages(request) => komodo_client()
|
||||
Execution::PruneImages(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteVolume(request) => komodo_client()
|
||||
Execution::DeleteVolume(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneVolumes(request) => komodo_client()
|
||||
Execution::PruneVolumes(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneDockerBuilders(request) => komodo_client()
|
||||
Execution::PruneDockerBuilders(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneBuildx(request) => komodo_client()
|
||||
Execution::PruneBuildx(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneSystem(request) => komodo_client()
|
||||
Execution::PruneSystem(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RunSync(request) => komodo_client()
|
||||
Execution::RunSync(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::CommitSync(request) => komodo_client()
|
||||
Execution::CommitSync(request) => client
|
||||
.write(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeployStack(request) => komodo_client()
|
||||
Execution::DeployStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeployStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::DeployStackIfChanged(request) => komodo_client()
|
||||
Execution::BatchDeployStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::DeployStackIfChanged(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeployStackIfChanged(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullStack(request) => komodo_client()
|
||||
Execution::BatchDeployStackIfChanged(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchPullStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::StartStack(request) => komodo_client()
|
||||
Execution::BatchPullStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::StartStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartStack(request) => komodo_client()
|
||||
Execution::RestartStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseStack(request) => komodo_client()
|
||||
Execution::PauseStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseStack(request) => komodo_client()
|
||||
Execution::UnpauseStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopStack(request) => komodo_client()
|
||||
Execution::StopStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyStack(request) => komodo_client()
|
||||
Execution::DestroyStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDestroyStack(request) => komodo_client()
|
||||
Execution::BatchDestroyStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::TestAlerter(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::TestAlerter(request) => komodo_client()
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::ClearRepoCache(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
@@ -480,13 +478,67 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
|
||||
match res {
|
||||
Ok(ExecutionResult::Single(update)) => {
|
||||
println!("\n{}: {update:#?}", "SUCCESS".green())
|
||||
poll_update_until_complete(&update).await
|
||||
}
|
||||
Ok(ExecutionResult::Batch(update)) => {
|
||||
println!("\n{}: {update:#?}", "SUCCESS".green())
|
||||
Ok(ExecutionResult::Batch(updates)) => {
|
||||
let mut handles = updates
|
||||
.iter()
|
||||
.map(|update| async move {
|
||||
match update {
|
||||
BatchExecutionResponseItem::Ok(update) => {
|
||||
poll_update_until_complete(update).await
|
||||
}
|
||||
BatchExecutionResponseItem::Err(e) => {
|
||||
error!("{e:#?}");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
while let Some(res) = handles.next().await {
|
||||
match res {
|
||||
Ok(()) => {}
|
||||
Err(e) => {
|
||||
error!("{e:#?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("{e:#?}");
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn poll_update_until_complete(
|
||||
update: &Update,
|
||||
) -> anyhow::Result<()> {
|
||||
let link = if update.id.is_empty() {
|
||||
let (resource_type, id) = update.target.extract_variant_id();
|
||||
resource_link(&cli_config().host, resource_type, id)
|
||||
} else {
|
||||
format!("{}/updates/{}", cli_config().host, update.id)
|
||||
};
|
||||
info!("Link: '{}'", link.bold());
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let timer = tokio::time::Instant::now();
|
||||
let update = client.poll_update_until_complete(&update.id).await?;
|
||||
if update.success {
|
||||
info!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION SUCCESSFUL".green(),
|
||||
);
|
||||
} else {
|
||||
warn!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION FAILED".red(),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
968
bin/cli/src/command/list.rs
Normal file
968
bin/cli/src/command/list.rs
Normal file
@@ -0,0 +1,968 @@
|
||||
use std::{cmp::Ordering, collections::HashMap};
|
||||
|
||||
use comfy_table::{Attribute, Cell, Color};
|
||||
use futures_util::{FutureExt, try_join};
|
||||
use komodo_client::{
|
||||
KomodoClient,
|
||||
api::read::{
|
||||
ListActions, ListAlerters, ListBuilders, ListBuilds,
|
||||
ListDeployments, ListProcedures, ListRepos, ListResourceSyncs,
|
||||
ListSchedules, ListServers, ListStacks, ListTags,
|
||||
},
|
||||
entities::{
|
||||
ResourceTargetVariant,
|
||||
action::{ActionListItem, ActionListItemInfo, ActionState},
|
||||
alerter::{AlerterListItem, AlerterListItemInfo},
|
||||
build::{BuildListItem, BuildListItemInfo, BuildState},
|
||||
builder::{BuilderListItem, BuilderListItemInfo},
|
||||
config::cli::args::{
|
||||
self,
|
||||
list::{ListCommand, ResourceFilters},
|
||||
},
|
||||
deployment::{
|
||||
DeploymentListItem, DeploymentListItemInfo, DeploymentState,
|
||||
},
|
||||
procedure::{
|
||||
ProcedureListItem, ProcedureListItemInfo, ProcedureState,
|
||||
},
|
||||
repo::{RepoListItem, RepoListItemInfo, RepoState},
|
||||
resource::{ResourceListItem, ResourceQuery},
|
||||
resource_link,
|
||||
schedule::Schedule,
|
||||
server::{ServerListItem, ServerListItemInfo, ServerState},
|
||||
stack::{StackListItem, StackListItemInfo, StackState},
|
||||
sync::{
|
||||
ResourceSyncListItem, ResourceSyncListItemInfo,
|
||||
ResourceSyncState,
|
||||
},
|
||||
},
|
||||
};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{
|
||||
command::{
|
||||
PrintTable, format_timetamp, matches_wildcards, parse_wildcards,
|
||||
print_items,
|
||||
},
|
||||
config::cli_config,
|
||||
};
|
||||
|
||||
pub async fn handle(list: &args::list::List) -> anyhow::Result<()> {
|
||||
match &list.command {
|
||||
None => list_all(list).await,
|
||||
Some(ListCommand::Servers(filters)) => {
|
||||
list_resources::<ServerListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Stacks(filters)) => {
|
||||
list_resources::<StackListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Deployments(filters)) => {
|
||||
list_resources::<DeploymentListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Builds(filters)) => {
|
||||
list_resources::<BuildListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Repos(filters)) => {
|
||||
list_resources::<RepoListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Procedures(filters)) => {
|
||||
list_resources::<ProcedureListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Actions(filters)) => {
|
||||
list_resources::<ActionListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Syncs(filters)) => {
|
||||
list_resources::<ResourceSyncListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Schedules(filters)) => {
|
||||
list_schedules(filters).await
|
||||
}
|
||||
Some(ListCommand::Builders(filters)) => {
|
||||
list_resources::<BuilderListItem>(filters).await
|
||||
}
|
||||
Some(ListCommand::Alerters(filters)) => {
|
||||
list_resources::<AlerterListItem>(filters).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Includes all resources besides builds and alerters.
|
||||
async fn list_all(list: &args::list::List) -> anyhow::Result<()> {
|
||||
let filters: ResourceFilters = list.clone().into();
|
||||
let client = super::komodo_client().await?;
|
||||
let (
|
||||
tags,
|
||||
mut servers,
|
||||
mut stacks,
|
||||
mut deployments,
|
||||
mut builds,
|
||||
mut repos,
|
||||
mut procedures,
|
||||
mut actions,
|
||||
mut syncs,
|
||||
) = try_join!(
|
||||
client.read(ListTags::default()).map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
ServerListItem::list(client, &filters),
|
||||
StackListItem::list(client, &filters),
|
||||
DeploymentListItem::list(client, &filters),
|
||||
BuildListItem::list(client, &filters),
|
||||
RepoListItem::list(client, &filters),
|
||||
ProcedureListItem::list(client, &filters),
|
||||
ActionListItem::list(client, &filters),
|
||||
ResourceSyncListItem::list(client, &filters),
|
||||
)?;
|
||||
|
||||
if !servers.is_empty() {
|
||||
fix_tags(&mut servers, &tags);
|
||||
print_items(servers, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !stacks.is_empty() {
|
||||
fix_tags(&mut stacks, &tags);
|
||||
print_items(stacks, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !deployments.is_empty() {
|
||||
fix_tags(&mut deployments, &tags);
|
||||
print_items(deployments, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !builds.is_empty() {
|
||||
fix_tags(&mut builds, &tags);
|
||||
print_items(builds, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !repos.is_empty() {
|
||||
fix_tags(&mut repos, &tags);
|
||||
print_items(repos, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !procedures.is_empty() {
|
||||
fix_tags(&mut procedures, &tags);
|
||||
print_items(procedures, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !actions.is_empty() {
|
||||
fix_tags(&mut actions, &tags);
|
||||
print_items(actions, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
if !syncs.is_empty() {
|
||||
fix_tags(&mut syncs, &tags);
|
||||
print_items(syncs, filters.format)?;
|
||||
println!();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_resources<T>(
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
T: ListResources,
|
||||
ResourceListItem<T::Info>: PrintTable + Serialize,
|
||||
{
|
||||
let client = crate::command::komodo_client().await?;
|
||||
let (mut resources, tags) = tokio::try_join!(
|
||||
T::list(client, filters),
|
||||
client.read(ListTags::default()).map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>()))
|
||||
)?;
|
||||
fix_tags(&mut resources, &tags);
|
||||
if !resources.is_empty() {
|
||||
print_items(resources, filters.format)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_schedules(
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
let (mut schedules, tags) = tokio::try_join!(
|
||||
client
|
||||
.read(ListSchedules {
|
||||
tags: filters.tags.clone(),
|
||||
tag_behavior: Default::default(),
|
||||
})
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.filter(|s| s.next_scheduled_run.is_some())
|
||||
.collect::<Vec<_>>())),
|
||||
client.read(ListTags::default()).map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>()))
|
||||
)?;
|
||||
schedules.iter_mut().for_each(|resource| {
|
||||
resource.tags.iter_mut().for_each(|id| {
|
||||
let Some(name) = tags.get(id) else {
|
||||
*id = String::new();
|
||||
return;
|
||||
};
|
||||
id.clone_from(name);
|
||||
});
|
||||
});
|
||||
schedules.sort_by(|a, b| {
|
||||
match (a.next_scheduled_run, b.next_scheduled_run) {
|
||||
(Some(_), None) => return Ordering::Less,
|
||||
(None, Some(_)) => return Ordering::Greater,
|
||||
(Some(a), Some(b)) => return a.cmp(&b),
|
||||
(None, None) => {}
|
||||
}
|
||||
a.name.cmp(&b.name).then(a.enabled.cmp(&b.enabled))
|
||||
});
|
||||
if !schedules.is_empty() {
|
||||
print_items(schedules, filters.format)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fix_tags<T>(
|
||||
resources: &mut Vec<ResourceListItem<T>>,
|
||||
tags: &HashMap<String, String>,
|
||||
) {
|
||||
resources.iter_mut().for_each(|resource| {
|
||||
resource.tags.iter_mut().for_each(|id| {
|
||||
let Some(name) = tags.get(id) else {
|
||||
*id = String::new();
|
||||
return;
|
||||
};
|
||||
id.clone_from(name);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
trait ListResources: Sized
|
||||
where
|
||||
ResourceListItem<Self::Info>: PrintTable,
|
||||
{
|
||||
type Info;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<ResourceListItem<Self::Info>>>;
|
||||
}
|
||||
|
||||
// LIST
|
||||
|
||||
impl ListResources for ServerListItem {
|
||||
type Info = ServerListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let servers = client
|
||||
.read(ListServers {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
.await?;
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let server_wildcards = parse_wildcards(&filters.servers);
|
||||
let mut servers = servers
|
||||
.into_iter()
|
||||
.filter(|server| {
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
!matches!(server.info.state, ServerState::Ok)
|
||||
} else {
|
||||
matches!(server.info.state, ServerState::Ok)
|
||||
};
|
||||
let name_items = &[server.name.as_str()];
|
||||
state_check
|
||||
&& matches_wildcards(&names, name_items)
|
||||
&& matches_wildcards(&server_wildcards, name_items)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
servers.sort_by(|a, b| {
|
||||
a.info.state.cmp(&b.info.state).then(a.name.cmp(&b.name))
|
||||
});
|
||||
Ok(servers)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for StackListItem {
|
||||
type Info = StackListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let (servers, mut stacks) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers {
|
||||
query: ResourceQuery::builder().build(),
|
||||
})
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListStacks {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
)?;
|
||||
stacks.iter_mut().for_each(|stack| {
|
||||
if stack.info.server_id.is_empty() {
|
||||
return;
|
||||
}
|
||||
let Some(server) = servers.get(&stack.info.server_id) else {
|
||||
return;
|
||||
};
|
||||
stack.info.server_id.clone_from(&server.name);
|
||||
});
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let servers = parse_wildcards(&filters.servers);
|
||||
let mut stacks = stacks
|
||||
.into_iter()
|
||||
.filter(|stack| {
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
!matches!(stack.info.state, StackState::Running)
|
||||
} else {
|
||||
matches!(stack.info.state, StackState::Running)
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[stack.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&[stack.info.server_id.as_str()],
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
stacks.sort_by(|a, b| {
|
||||
a.info
|
||||
.state
|
||||
.cmp(&b.info.state)
|
||||
.then(a.name.cmp(&b.name))
|
||||
.then(a.info.server_id.cmp(&b.info.server_id))
|
||||
});
|
||||
Ok(stacks)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for DeploymentListItem {
|
||||
type Info = DeploymentListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let (servers, mut deployments) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers {
|
||||
query: ResourceQuery::builder().build(),
|
||||
})
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListDeployments {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
)?;
|
||||
deployments.iter_mut().for_each(|deployment| {
|
||||
if deployment.info.server_id.is_empty() {
|
||||
return;
|
||||
}
|
||||
let Some(server) = servers.get(&deployment.info.server_id)
|
||||
else {
|
||||
return;
|
||||
};
|
||||
deployment.info.server_id.clone_from(&server.name);
|
||||
});
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let servers = parse_wildcards(&filters.servers);
|
||||
let mut deployments = deployments
|
||||
.into_iter()
|
||||
.filter(|deployment| {
|
||||
let state_check = if filters.all {
|
||||
true
|
||||
} else if filters.down {
|
||||
!matches!(deployment.info.state, DeploymentState::Running)
|
||||
} else {
|
||||
matches!(deployment.info.state, DeploymentState::Running)
|
||||
};
|
||||
state_check
|
||||
&& matches_wildcards(&names, &[deployment.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&[deployment.info.server_id.as_str()],
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
deployments.sort_by(|a, b| {
|
||||
a.info
|
||||
.state
|
||||
.cmp(&b.info.state)
|
||||
.then(a.name.cmp(&b.name))
|
||||
.then(a.info.server_id.cmp(&b.info.server_id))
|
||||
});
|
||||
Ok(deployments)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for BuildListItem {
  type Info = BuildListItemInfo;

  /// List builds matching the CLI filters.
  ///
  /// Fetches all builders and the tag-filtered builds concurrently,
  /// replaces each build's `builder_id` with the builder's
  /// human-readable name, applies the name / builder wildcard
  /// filters, then sorts by name, builder, state.
  async fn list(
    client: &KomodoClient,
    filters: &ResourceFilters,
  ) -> anyhow::Result<Vec<Self>> {
    // Fetch builders (keyed by id) and builds concurrently;
    // either failure aborts the join.
    let (builders, mut builds) = tokio::try_join!(
      client
        .read(ListBuilders {
          query: ResourceQuery::builder().build(),
        })
        .map(|res| res.map(|res| res
          .into_iter()
          .map(|s| (s.id.clone(), s))
          .collect::<HashMap<_, _>>())),
      client.read(ListBuilds {
        query: ResourceQuery::builder()
          .tags(filters.tags.clone())
          // .tag_behavior(TagQueryBehavior::Any)
          .build(),
      })
    )?;
    // Swap each build's builder id for the builder name when the
    // builder is known; unknown / empty ids are left as-is.
    builds.iter_mut().for_each(|build| {
      if build.info.builder_id.is_empty() {
        return;
      }
      let Some(builder) = builders.get(&build.info.builder_id) else {
        return;
      };
      build.info.builder_id.clone_from(&builder.name);
    });
    let names = parse_wildcards(&filters.names);
    let builders = parse_wildcards(&filters.builders);
    // NOTE: `builder_id` now holds the builder *name* (see above),
    // so the builders filter matches against names.
    let mut builds = builds
      .into_iter()
      .filter(|build| {
        matches_wildcards(&names, &[build.name.as_str()])
          && matches_wildcards(
            &builders,
            &[build.info.builder_id.as_str()],
          )
      })
      .collect::<Vec<_>>();
    builds.sort_by(|a, b| {
      a.name
        .cmp(&b.name)
        .then(a.info.builder_id.cmp(&b.info.builder_id))
        .then(a.info.state.cmp(&b.info.state))
    });
    Ok(builds)
  }
}
|
||||
|
||||
impl ListResources for RepoListItem {
|
||||
type Info = RepoListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut repos = client
|
||||
.read(ListRepos {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|repo| matches_wildcards(&names, &[repo.name.as_str()]))
|
||||
.collect::<Vec<_>>();
|
||||
repos.sort_by(|a, b| {
|
||||
a.name
|
||||
.cmp(&b.name)
|
||||
.then(a.info.server_id.cmp(&b.info.server_id))
|
||||
.then(a.info.builder_id.cmp(&b.info.builder_id))
|
||||
});
|
||||
Ok(repos)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for ProcedureListItem {
|
||||
type Info = ProcedureListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut procedures = client
|
||||
.read(ListProcedures {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|procedure| {
|
||||
matches_wildcards(&names, &[procedure.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
procedures.sort_by(|a, b| {
|
||||
match (a.info.next_scheduled_run, b.info.next_scheduled_run) {
|
||||
(Some(_), None) => return Ordering::Less,
|
||||
(None, Some(_)) => return Ordering::Greater,
|
||||
(Some(a), Some(b)) => return a.cmp(&b),
|
||||
(None, None) => {}
|
||||
}
|
||||
a.name.cmp(&b.name).then(a.info.state.cmp(&b.info.state))
|
||||
});
|
||||
Ok(procedures)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for ActionListItem {
|
||||
type Info = ActionListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut actions = client
|
||||
.read(ListActions {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|action| {
|
||||
matches_wildcards(&names, &[action.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
actions.sort_by(|a, b| {
|
||||
match (a.info.next_scheduled_run, b.info.next_scheduled_run) {
|
||||
(Some(_), None) => return Ordering::Less,
|
||||
(None, Some(_)) => return Ordering::Greater,
|
||||
(Some(a), Some(b)) => return a.cmp(&b),
|
||||
(None, None) => {}
|
||||
}
|
||||
a.name.cmp(&b.name).then(a.info.state.cmp(&b.info.state))
|
||||
});
|
||||
Ok(actions)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for ResourceSyncListItem {
|
||||
type Info = ResourceSyncListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut syncs = client
|
||||
.read(ListResourceSyncs {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|sync| matches_wildcards(&names, &[sync.name.as_str()]))
|
||||
.collect::<Vec<_>>();
|
||||
syncs.sort_by(|a, b| {
|
||||
a.name.cmp(&b.name).then(a.info.state.cmp(&b.info.state))
|
||||
});
|
||||
Ok(syncs)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for BuilderListItem {
|
||||
type Info = BuilderListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut builders = client
|
||||
.read(ListBuilders {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|builder| {
|
||||
matches_wildcards(&names, &[builder.name.as_str()])
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
builders.sort_by(|a, b| {
|
||||
a.name
|
||||
.cmp(&b.name)
|
||||
.then(a.info.builder_type.cmp(&b.info.builder_type))
|
||||
});
|
||||
Ok(builders)
|
||||
}
|
||||
}
|
||||
|
||||
impl ListResources for AlerterListItem {
|
||||
type Info = AlerterListItemInfo;
|
||||
async fn list(
|
||||
client: &KomodoClient,
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<Vec<Self>> {
|
||||
let names = parse_wildcards(&filters.names);
|
||||
let mut syncs = client
|
||||
.read(ListAlerters {
|
||||
query: ResourceQuery::builder()
|
||||
.tags(filters.tags.clone())
|
||||
// .tag_behavior(TagQueryBehavior::Any)
|
||||
.build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|sync| matches_wildcards(&names, &[sync.name.as_str()]))
|
||||
.collect::<Vec<_>>();
|
||||
syncs.sort_by(|a, b| {
|
||||
a.info
|
||||
.enabled
|
||||
.cmp(&b.info.enabled)
|
||||
.then(a.name.cmp(&b.name))
|
||||
.then(a.info.endpoint_type.cmp(&b.info.endpoint_type))
|
||||
});
|
||||
Ok(syncs)
|
||||
}
|
||||
}
|
||||
|
||||
// TABLE
|
||||
|
||||
impl PrintTable for ResourceListItem<ServerListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Server", "State", "Address", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<Cell> {
|
||||
let color = match self.info.state {
|
||||
ServerState::Ok => Color::Green,
|
||||
ServerState::NotOk => Color::Red,
|
||||
ServerState::Disabled => Color::Blue,
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.address),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Server,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<StackListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Stack", "State", "Server", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
StackState::Down => Color::Blue,
|
||||
StackState::Running => Color::Green,
|
||||
StackState::Paused => Color::DarkYellow,
|
||||
StackState::Unknown => Color::Magenta,
|
||||
_ => Color::Red,
|
||||
};
|
||||
// let source = if self.info.files_on_host {
|
||||
// "On Host"
|
||||
// } else if !self.info.repo.is_empty() {
|
||||
// self.info.repo_link.as_str()
|
||||
// } else {
|
||||
// "UI Defined"
|
||||
// };
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.server_id),
|
||||
// Cell::new(source),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Stack,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<DeploymentListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Deployment", "State", "Server", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
DeploymentState::NotDeployed => Color::Blue,
|
||||
DeploymentState::Running => Color::Green,
|
||||
DeploymentState::Paused => Color::DarkYellow,
|
||||
DeploymentState::Unknown => Color::Magenta,
|
||||
_ => Color::Red,
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.server_id),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Deployment,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<BuildListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Build", "State", "Builder", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
BuildState::Ok => Color::Green,
|
||||
BuildState::Building => Color::DarkYellow,
|
||||
BuildState::Unknown => Color::Magenta,
|
||||
BuildState::Failed => Color::Red,
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.builder_id),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Build,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<RepoListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Repo", "State", "Link", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
RepoState::Ok => Color::Green,
|
||||
RepoState::Building
|
||||
| RepoState::Cloning
|
||||
| RepoState::Pulling => Color::DarkYellow,
|
||||
RepoState::Unknown => Color::Magenta,
|
||||
RepoState::Failed => Color::Red,
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.repo_link),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Repo,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<ProcedureListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Procedure", "State", "Next Run", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
ProcedureState::Ok => Color::Green,
|
||||
ProcedureState::Running => Color::DarkYellow,
|
||||
ProcedureState::Unknown => Color::Magenta,
|
||||
ProcedureState::Failed => Color::Red,
|
||||
};
|
||||
let next_run = if let Some(ts) = self.info.next_scheduled_run {
|
||||
Cell::new(
|
||||
format_timetamp(ts)
|
||||
.unwrap_or(String::from("Invalid next ts")),
|
||||
)
|
||||
.add_attribute(Attribute::Bold)
|
||||
} else {
|
||||
Cell::new(String::from("None"))
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
next_run,
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Procedure,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<ActionListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Action", "State", "Next Run", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
ActionState::Ok => Color::Green,
|
||||
ActionState::Running => Color::DarkYellow,
|
||||
ActionState::Unknown => Color::Magenta,
|
||||
ActionState::Failed => Color::Red,
|
||||
};
|
||||
let next_run = if let Some(ts) = self.info.next_scheduled_run {
|
||||
Cell::new(
|
||||
format_timetamp(ts)
|
||||
.unwrap_or(String::from("Invalid next ts")),
|
||||
)
|
||||
.add_attribute(Attribute::Bold)
|
||||
} else {
|
||||
Cell::new(String::from("None"))
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
next_run,
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Action,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<ResourceSyncListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Sync", "State", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
let color = match self.info.state {
|
||||
ResourceSyncState::Ok => Color::Green,
|
||||
ResourceSyncState::Pending | ResourceSyncState::Syncing => {
|
||||
Color::DarkYellow
|
||||
}
|
||||
ResourceSyncState::Unknown => Color::Magenta,
|
||||
ResourceSyncState::Failed => Color::Red,
|
||||
};
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::ResourceSync,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<BuilderListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Builder", "Type", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.builder_type),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Builder,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for ResourceListItem<AlerterListItemInfo> {
|
||||
fn header() -> &'static [&'static str] {
|
||||
&["Alerter", "Type", "Enabled", "Tags", "Link"]
|
||||
}
|
||||
fn row(self) -> Vec<comfy_table::Cell> {
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.endpoint_type),
|
||||
if self.info.enabled {
|
||||
Cell::new(self.info.enabled.to_string()).fg(Color::Green)
|
||||
} else {
|
||||
Cell::new(self.info.enabled.to_string()).fg(Color::Red)
|
||||
},
|
||||
Cell::new(self.tags.join(", ")),
|
||||
Cell::new(resource_link(
|
||||
&cli_config().host,
|
||||
ResourceTargetVariant::Alerter,
|
||||
&self.id,
|
||||
)),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for Schedule {
  fn header() -> &'static [&'static str] {
    &["Name", "Type", "Next Run", "Tags", "Link"]
  }

  /// Render one schedule row: name, target resource type, next
  /// scheduled run ("None" when unscheduled), tags, and UI link.
  fn row(self) -> Vec<comfy_table::Cell> {
    // Bold local-time next run, or plain "None" when unscheduled.
    let next_run = if let Some(ts) = self.next_scheduled_run {
      Cell::new(
        format_timetamp(ts)
          .unwrap_or(String::from("Invalid next ts")),
      )
      .add_attribute(Attribute::Bold)
    } else {
      Cell::new(String::from("None"))
    };
    let (resource_type, id) = self.target.extract_variant_id();
    // NOTE(review): the "Type" cell calls extract_variant_id() a
    // second time instead of reusing `resource_type` from above —
    // presumably to sidestep a move/borrow; confirm before
    // simplifying.
    vec![
      Cell::new(self.name).add_attribute(Attribute::Bold),
      Cell::new(self.target.extract_variant_id().0),
      next_run,
      Cell::new(self.tags.join(", ")),
      Cell::new(resource_link(&cli_config().host, resource_type, id)),
    ]
  }
}
|
||||
161
bin/cli/src/command/mod.rs
Normal file
161
bin/cli/src/command/mod.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use chrono::TimeZone;
|
||||
use colored::Colorize;
|
||||
use comfy_table::{Attribute, Cell, Table};
|
||||
use komodo_client::{
|
||||
KomodoClient, entities::config::cli::args::CliFormat,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use tokio::sync::OnceCell;
|
||||
use wildcard::Wildcard;
|
||||
|
||||
use crate::config::cli_config;
|
||||
|
||||
pub mod container;
|
||||
pub mod database;
|
||||
pub mod execute;
|
||||
pub mod list;
|
||||
pub mod update;
|
||||
|
||||
async fn komodo_client() -> anyhow::Result<&'static KomodoClient> {
|
||||
static KOMODO_CLIENT: OnceCell<KomodoClient> =
|
||||
OnceCell::const_new();
|
||||
KOMODO_CLIENT
|
||||
.get_or_try_init(|| async {
|
||||
let config = cli_config();
|
||||
let (Some(key), Some(secret)) =
|
||||
(&config.cli_key, &config.cli_secret)
|
||||
else {
|
||||
return Err(anyhow!(
|
||||
"Must provide both cli_key and cli_secret"
|
||||
));
|
||||
};
|
||||
KomodoClient::new(&config.host, key, secret)
|
||||
.with_healthcheck()
|
||||
.await
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
fn wait_for_enter(
|
||||
press_enter_to: &str,
|
||||
skip: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
if skip {
|
||||
println!();
|
||||
return Ok(());
|
||||
}
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sanitize a connection URI of the form
/// `protocol://username:password@address`, masking the password
/// (or the whole credential segment when there is no `:`
/// separator).
///
/// URIs without a `://` protocol marker, or without an `@`
/// credential separator, are returned unchanged.
fn sanitize_uri(uri: &str) -> String {
  // e.g. protocol: `mongodb`, rest: `username:password@address`
  let Some((protocol, rest)) = uri.split_once("://") else {
    // No protocol marker: nothing to sanitize.
    return uri.to_string();
  };
  let Some((credentials, address)) = rest.split_once('@') else {
    // No credentials present: return as-is.
    return uri.to_string();
  };
  // Mask only the password when `username:password` is present,
  // otherwise mask the entire credential segment.
  if let Some((username, _password)) = credentials.split_once(':') {
    format!("{protocol}://{username}:*****@{address}")
  } else {
    format!("{protocol}://*****@{address}")
  }
}
|
||||
|
||||
fn print_items<T: PrintTable + Serialize>(
|
||||
items: Vec<T>,
|
||||
format: CliFormat,
|
||||
) -> anyhow::Result<()> {
|
||||
match format {
|
||||
CliFormat::Table => {
|
||||
let mut table = Table::new();
|
||||
table
|
||||
.load_preset(comfy_table::presets::UTF8_FULL)
|
||||
.set_header(
|
||||
T::header()
|
||||
.into_iter()
|
||||
.map(|h| Cell::new(h).add_attribute(Attribute::Bold)),
|
||||
);
|
||||
for item in items {
|
||||
table.add_row(item.row());
|
||||
}
|
||||
println!("{table}");
|
||||
}
|
||||
CliFormat::Json => {
|
||||
println!(
|
||||
"{}",
|
||||
serde_json::to_string_pretty(&items)
|
||||
.context("Failed to serialize items to JSON")?
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// A resource list item that can be rendered as a terminal table.
trait PrintTable {
  /// Column headers for this resource type.
  fn header() -> &'static [&'static str];
  /// Consume the item, producing one table row of cells.
  /// Must align one-to-one with [`PrintTable::header`].
  fn row(self) -> Vec<Cell>;
}
|
||||
|
||||
fn parse_wildcards(items: &[String]) -> Vec<Wildcard<'_>> {
|
||||
items
|
||||
.iter()
|
||||
.flat_map(|i| {
|
||||
Wildcard::new(i.as_bytes()).inspect_err(|e| {
|
||||
warn!("Failed to parse wildcard: {i} | {e:?}")
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn matches_wildcards(
|
||||
wildcards: &[Wildcard<'_>],
|
||||
items: &[&str],
|
||||
) -> bool {
|
||||
if wildcards.is_empty() {
|
||||
return true;
|
||||
}
|
||||
items.iter().any(|item| {
|
||||
wildcards.iter().any(|wc| wc.is_match(item.as_bytes()))
|
||||
})
|
||||
}
|
||||
|
||||
fn format_timetamp(ts: i64) -> anyhow::Result<String> {
|
||||
let ts = chrono::Local
|
||||
.timestamp_millis_opt(ts)
|
||||
.single()
|
||||
.context("Invalid ts")?
|
||||
.format("%m/%d %H:%M:%S")
|
||||
.to_string();
|
||||
Ok(ts)
|
||||
}
|
||||
|
||||
// fn text_link(link: &str, text: &str) -> String {
|
||||
// format!("\x1b]8;;{link}\x07{text}\x1b]8;;\x07")
|
||||
// }
|
||||
43
bin/cli/src/command/update/mod.rs
Normal file
43
bin/cli/src/command/update/mod.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use komodo_client::entities::{
|
||||
build::PartialBuildConfig,
|
||||
config::cli::args::update::UpdateCommand,
|
||||
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
|
||||
server::PartialServerConfig, stack::PartialStackConfig,
|
||||
sync::PartialResourceSyncConfig,
|
||||
};
|
||||
|
||||
mod resource;
|
||||
mod user;
|
||||
mod variable;
|
||||
|
||||
/// Dispatch an `update` subcommand to the matching resource,
/// variable, or user handler.
pub async fn handle(command: &UpdateCommand) -> anyhow::Result<()> {
  match command {
    UpdateCommand::Build(update) => {
      resource::update::<PartialBuildConfig>(update).await
    }
    UpdateCommand::Deployment(update) => {
      resource::update::<PartialDeploymentConfig>(update).await
    }
    UpdateCommand::Repo(update) => {
      resource::update::<PartialRepoConfig>(update).await
    }
    UpdateCommand::Server(update) => {
      resource::update::<PartialServerConfig>(update).await
    }
    UpdateCommand::Stack(update) => {
      resource::update::<PartialStackConfig>(update).await
    }
    UpdateCommand::Sync(update) => {
      resource::update::<PartialResourceSyncConfig>(update).await
    }
    UpdateCommand::Variable {
      name,
      value,
      secret,
      yes,
    } => variable::update(name, value, *secret, *yes).await,
    UpdateCommand::User { username, command } => {
      user::update(username, command).await
    }
  }
}
|
||||
152
bin/cli/src/command/update/resource.rs
Normal file
152
bin/cli/src/command/update/resource.rs
Normal file
@@ -0,0 +1,152 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
UpdateBuild, UpdateDeployment, UpdateRepo, UpdateResourceSync,
|
||||
UpdateServer, UpdateStack,
|
||||
},
|
||||
entities::{
|
||||
build::PartialBuildConfig,
|
||||
config::cli::args::update::UpdateResource,
|
||||
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
|
||||
server::PartialServerConfig, stack::PartialStackConfig,
|
||||
sync::PartialResourceSyncConfig,
|
||||
},
|
||||
};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
|
||||
/// Interactively update a single resource's config.
///
/// `update` is a query-string-encoded partial config (serde_qs),
/// e.g. `field=value&nested[key]=value`. Prints the parsed update,
/// waits for confirmation (unless `yes`), then applies it via the
/// type's [`ResourceUpdate`] impl.
pub async fn update<
  T: std::fmt::Debug + Serialize + DeserializeOwned + ResourceUpdate,
>(
  UpdateResource {
    resource,
    update,
    yes,
  }: &UpdateResource,
) -> anyhow::Result<()> {
  println!("\n{}: Update {}\n", "Mode".dimmed(), T::resource_type());
  println!(" - {}: {resource}", "Name".dimmed());

  // Parse the query-string style update into the partial config.
  let config = serde_qs::from_str::<T>(update)
    .context("Failed to deserialize config")?;

  // Prefer pretty JSON for display; fall back to Debug output if
  // the config fails to serialize.
  match serde_json::to_string_pretty(&config) {
    Ok(config) => {
      println!(" - {}: {config}", "Update".dimmed());
    }
    Err(_) => {
      println!(" - {}: {config:#?}", "Update".dimmed());
    }
  }

  crate::command::wait_for_enter("update resource", *yes)?;

  config.apply(resource).await
}
|
||||
|
||||
/// A partial resource config that can be pushed to the Komodo API.
pub trait ResourceUpdate {
  /// Human-readable resource type name, used in CLI output.
  fn resource_type() -> &'static str;
  /// Apply this partial config to the resource identified by
  /// `resource` (name or id).
  async fn apply(self, resource: &str) -> anyhow::Result<()>;
}
|
||||
|
||||
impl ResourceUpdate for PartialBuildConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Build"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateBuild {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update build config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialDeploymentConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Deployment"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateDeployment {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update deployment config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialRepoConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Repo"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateRepo {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update repo config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialServerConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Server"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateServer {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update server config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialStackConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Stack"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateStack {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update stack config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialResourceSyncConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Sync"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateResourceSync {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update sync config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
122
bin/cli/src/command/update/user.rs
Normal file
122
bin/cli/src/command/update/user.rs
Normal file
@@ -0,0 +1,122 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::{
|
||||
config::{
|
||||
cli::args::{CliEnabled, update::UpdateUserCommand},
|
||||
empty_or_redacted,
|
||||
},
|
||||
optional_string,
|
||||
};
|
||||
|
||||
use crate::{command::sanitize_uri, config::cli_config};
|
||||
|
||||
/// Dispatch an `update user` subcommand for the given username.
pub async fn update(
  username: &str,
  command: &UpdateUserCommand,
) -> anyhow::Result<()> {
  match command {
    UpdateUserCommand::Password {
      password,
      unsanitized,
      yes,
    } => {
      update_password(username, password, *unsanitized, *yes).await
    }
    UpdateUserCommand::SuperAdmin { enabled, yes } => {
      update_super_admin(username, *enabled, *yes).await
    }
  }
}
|
||||
|
||||
/// Set a user's password directly in the database.
///
/// Prints the target user and the (redacted unless `unsanitized`)
/// password, waits for confirmation unless `yes`, then writes the
/// new password via the database client.
async fn update_password(
  username: &str,
  password: &str,
  unsanitized: bool,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Password\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  if unsanitized {
    // Caller explicitly asked to see the plaintext password.
    println!(" - {}: {password}", "Password".dimmed());
  } else {
    println!(
      " - {}: {}",
      "Password".dimmed(),
      empty_or_redacted(password)
    );
  }

  crate::command::wait_for_enter("update password", yes)?;

  info!("Updating password...");

  let db = database::Client::new(&cli_config().database).await?;

  // Resolve the user first so a bad username fails loudly before
  // any write happens.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;

  db.set_user_password(&user, password).await?;

  info!("Password updated ✅");

  Ok(())
}
|
||||
|
||||
/// Toggle a user's super admin flag directly in the database.
///
/// Prints the target user, the new flag value, and the (sanitized)
/// database connection details, waits for confirmation unless
/// `yes`, then updates the user document.
async fn update_super_admin(
  username: &str,
  super_admin: CliEnabled,
  yes: bool,
) -> anyhow::Result<()> {
  let config = cli_config();

  println!("\n{}: Update Super Admin\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  println!(" - {}: {super_admin}\n", "Super Admin".dimmed());

  // Show which database will be modified, masking credentials
  // in the URI.
  if let Some(uri) = optional_string(&config.database.uri) {
    println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) = optional_string(&config.database.address) {
    println!("{}: {address}", " - Source Address".dimmed());
  }
  if let Some(username) = optional_string(&config.database.username) {
    println!("{}: {username}", " - Source Username".dimmed());
  }
  println!(
    "{}: {}",
    " - Source Db Name".dimmed(),
    config.database.db_name,
  );

  crate::command::wait_for_enter("update super admin", yes)?;

  info!("Updating super admin...");

  let db = database::Client::new(&config.database).await?;

  // Make sure the user exists first before saying it is successful.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;

  let super_admin: bool = super_admin.into();
  db.users
    .update_one(
      doc! { "username": user.username },
      doc! { "$set": { "super_admin": super_admin } },
    )
    .await
    .context("Failed to update user super admin on db")?;

  info!("Super admin updated ✅");

  Ok(())
}
|
||||
70
bin/cli/src/command/update/variable.rs
Normal file
70
bin/cli/src/command/update/variable.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::api::{
|
||||
read::GetVariable,
|
||||
write::{
|
||||
CreateVariable, UpdateVariableIsSecret, UpdateVariableValue,
|
||||
},
|
||||
};
|
||||
|
||||
/// Create or update a Komodo variable.
///
/// If no variable named `name` can be read, creates it with
/// `value` and the optional secret flag. Otherwise updates its
/// value, and updates the secret flag only when `secret` is
/// provided and differs from the current setting.
pub async fn update(
  name: &str,
  value: &str,
  secret: Option<bool>,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Variable\n", "Mode".dimmed());
  println!(" - {}: {name}", "Name".dimmed());
  println!(" - {}: {value}", "Value".dimmed());
  if let Some(secret) = secret {
    println!(" - {}: {secret}", "Is Secret".dimmed());
  }

  crate::command::wait_for_enter("update variable", yes)?;

  let client = crate::command::komodo_client().await?;

  // NOTE(review): any error from GetVariable (not just "not
  // found") falls through to the create path — confirm intended.
  let Ok(existing) = client
    .read(GetVariable {
      name: name.to_string(),
    })
    .await
  else {
    // Create the variable
    client
      .write(CreateVariable {
        name: name.to_string(),
        value: value.to_string(),
        is_secret: secret.unwrap_or_default(),
        description: Default::default(),
      })
      .await
      .context("Failed to create variable")?;
    info!("Variable created ✅");
    return Ok(());
  };

  client
    .write(UpdateVariableValue {
      name: name.to_string(),
      value: value.to_string(),
    })
    .await
    .context("Failed to update variable 'value'")?;
  info!("Variable 'value' updated ✅");

  // No explicit secret flag requested: done.
  let Some(secret) = secret else { return Ok(()) };

  // Only call the API when the flag actually changes.
  if secret != existing.is_secret {
    client
      .write(UpdateVariableIsSecret {
        name: name.to_string(),
        is_secret: secret,
      })
      .await
      .context("Failed to update variable 'is_secret'")?;
    info!("Variable 'is_secret' updated to {secret} ✅");
  }

  Ok(())
}
|
||||
271
bin/cli/src/config.rs
Normal file
271
bin/cli/src/config.rs
Normal file
@@ -0,0 +1,271 @@
|
||||
use std::{path::PathBuf, sync::OnceLock};
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::Parser;
|
||||
use colored::Colorize;
|
||||
use environment_file::maybe_read_item_from_file;
|
||||
use komodo_client::entities::{
|
||||
config::{
|
||||
DatabaseConfig,
|
||||
cli::{
|
||||
CliConfig, Env,
|
||||
args::{CliArgs, Command, Execute, database::DatabaseCommand},
|
||||
},
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
|
||||
pub fn cli_args() -> &'static CliArgs {
|
||||
static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(CliArgs::parse)
|
||||
}
|
||||
|
||||
pub fn cli_env() -> &'static Env {
|
||||
static CLI_ARGS: OnceLock<Env> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(|| {
|
||||
match envy::from_env()
|
||||
.context("Failed to parse Komodo CLI environment")
|
||||
{
|
||||
Ok(env) => env,
|
||||
Err(e) => {
|
||||
panic!("{e:?}");
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn cli_config() -> &'static CliConfig {
|
||||
static CLI_CONFIG: OnceLock<CliConfig> = OnceLock::new();
|
||||
CLI_CONFIG.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let env = cli_env().clone();
|
||||
let config_paths = args
|
||||
.config_path
|
||||
.clone()
|
||||
.unwrap_or(env.komodo_cli_config_paths);
|
||||
let debug_startup =
|
||||
args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
|
||||
|
||||
if debug_startup {
|
||||
println!(
|
||||
"{}: Komodo CLI version: {}",
|
||||
"DEBUG".cyan(),
|
||||
env!("CARGO_PKG_VERSION").blue().bold()
|
||||
);
|
||||
println!(
|
||||
"{}: {}: {config_paths:?}",
|
||||
"DEBUG".cyan(),
|
||||
"Config Paths".dimmed(),
|
||||
);
|
||||
}
|
||||
|
||||
let config_keywords = args
|
||||
.config_keyword
|
||||
.clone()
|
||||
.unwrap_or(env.komodo_cli_config_keywords);
|
||||
let config_keywords = config_keywords
|
||||
.iter()
|
||||
.map(String::as_str)
|
||||
.collect::<Vec<_>>();
|
||||
if debug_startup {
|
||||
println!(
|
||||
"{}: {}: {config_keywords:?}",
|
||||
"DEBUG".cyan(),
|
||||
"Config File Keywords".dimmed(),
|
||||
);
|
||||
}
|
||||
let mut unparsed_config = (config::ConfigLoader {
|
||||
paths: &config_paths
|
||||
.iter()
|
||||
.map(PathBuf::as_path)
|
||||
.collect::<Vec<_>>(),
|
||||
match_wildcards: &config_keywords,
|
||||
include_file_name: ".kminclude",
|
||||
merge_nested: env.komodo_cli_merge_nested_config,
|
||||
extend_array: env.komodo_cli_extend_config_arrays,
|
||||
debug_print: debug_startup,
|
||||
})
|
||||
.load::<serde_json::Map<String, serde_json::Value>>()
|
||||
.expect("failed at parsing config from paths");
|
||||
let init_parsed_config = serde_json::from_value::<CliConfig>(
|
||||
serde_json::Value::Object(unparsed_config.clone()),
|
||||
)
|
||||
.context("Failed to parse config")
|
||||
.unwrap();
|
||||
|
||||
let (host, key, secret) = match &args.command {
|
||||
Command::Execute(Execute {
|
||||
host, key, secret, ..
|
||||
}) => (host.clone(), key.clone(), secret.clone()),
|
||||
_ => (None, None, None),
|
||||
};
|
||||
|
||||
let backups_folder = match &args.command {
|
||||
Command::Database {
|
||||
command: DatabaseCommand::Backup { backups_folder, .. },
|
||||
} => backups_folder.clone(),
|
||||
Command::Database {
|
||||
command: DatabaseCommand::Restore { backups_folder, .. },
|
||||
} => backups_folder.clone(),
|
||||
_ => None,
|
||||
};
|
||||
let (uri, address, username, password, db_name) =
|
||||
match &args.command {
|
||||
Command::Database {
|
||||
command:
|
||||
DatabaseCommand::Copy {
|
||||
uri,
|
||||
address,
|
||||
username,
|
||||
password,
|
||||
db_name,
|
||||
..
|
||||
},
|
||||
} => (
|
||||
uri.clone(),
|
||||
address.clone(),
|
||||
username.clone(),
|
||||
password.clone(),
|
||||
db_name.clone(),
|
||||
),
|
||||
_ => (None, None, None, None, None),
|
||||
};
|
||||
|
||||
let profile = args
|
||||
.profile
|
||||
.as_ref()
|
||||
.or(init_parsed_config.default_profile.as_ref());
|
||||
|
||||
let unparsed_config = if let Some(profile) = profile
|
||||
&& !profile.is_empty()
|
||||
{
|
||||
// Find the profile config,
|
||||
// then merge it with the Default config.
|
||||
let serde_json::Value::Array(profiles) = unparsed_config
|
||||
.remove("profile")
|
||||
.context("Config has no profiles, but a profile is required")
|
||||
.unwrap()
|
||||
else {
|
||||
panic!("`config.profile` is not array");
|
||||
};
|
||||
let Some(profile_config) = profiles.into_iter().find(|p| {
|
||||
let Ok(parsed) =
|
||||
serde_json::from_value::<CliConfig>(p.clone())
|
||||
else {
|
||||
return false;
|
||||
};
|
||||
&parsed.config_profile == profile
|
||||
|| parsed
|
||||
.config_aliases
|
||||
.iter()
|
||||
.any(|alias| alias == profile)
|
||||
}) else {
|
||||
panic!("No profile matching '{profile}' was found.");
|
||||
};
|
||||
let serde_json::Value::Object(profile_config) = profile_config
|
||||
else {
|
||||
panic!("Profile config is not Object type.");
|
||||
};
|
||||
config::merge_config(
|
||||
unparsed_config,
|
||||
profile_config.clone(),
|
||||
env.komodo_cli_merge_nested_config,
|
||||
env.komodo_cli_extend_config_arrays,
|
||||
)
|
||||
.unwrap_or(profile_config)
|
||||
} else {
|
||||
unparsed_config
|
||||
};
|
||||
let config = serde_json::from_value::<CliConfig>(
|
||||
serde_json::Value::Object(unparsed_config),
|
||||
)
|
||||
.context("Failed to parse final config")
|
||||
.unwrap();
|
||||
let config_profile = if config.config_profile.is_empty() {
|
||||
String::from("None")
|
||||
} else {
|
||||
config.config_profile
|
||||
};
|
||||
|
||||
CliConfig {
|
||||
config_profile,
|
||||
config_aliases: config.config_aliases,
|
||||
default_profile: config.default_profile,
|
||||
host: host
|
||||
.or(env.komodo_cli_host)
|
||||
.or(env.komodo_host)
|
||||
.unwrap_or(config.host),
|
||||
cli_key: key.or(env.komodo_cli_key).or(config.cli_key),
|
||||
cli_secret: secret
|
||||
.or(env.komodo_cli_secret)
|
||||
.or(config.cli_secret),
|
||||
backups_folder: backups_folder
|
||||
.or(env.komodo_cli_backups_folder)
|
||||
.unwrap_or(config.backups_folder),
|
||||
max_backups: env
|
||||
.komodo_cli_max_backups
|
||||
.unwrap_or(config.max_backups),
|
||||
database_target: DatabaseConfig {
|
||||
uri: uri
|
||||
.or(env.komodo_cli_database_target_uri)
|
||||
.unwrap_or(config.database_target.uri),
|
||||
address: address
|
||||
.or(env.komodo_cli_database_target_address)
|
||||
.unwrap_or(config.database_target.address),
|
||||
username: username
|
||||
.or(env.komodo_cli_database_target_username)
|
||||
.unwrap_or(config.database_target.username),
|
||||
password: password
|
||||
.or(env.komodo_cli_database_target_password)
|
||||
.unwrap_or(config.database_target.password),
|
||||
db_name: db_name
|
||||
.or(env.komodo_cli_database_target_db_name)
|
||||
.unwrap_or(config.database_target.db_name),
|
||||
app_name: config.database_target.app_name,
|
||||
},
|
||||
database: DatabaseConfig {
|
||||
uri: maybe_read_item_from_file(
|
||||
env.komodo_database_uri_file,
|
||||
env.komodo_database_uri,
|
||||
)
|
||||
.unwrap_or(config.database.uri),
|
||||
address: env
|
||||
.komodo_database_address
|
||||
.unwrap_or(config.database.address),
|
||||
username: maybe_read_item_from_file(
|
||||
env.komodo_database_username_file,
|
||||
env.komodo_database_username,
|
||||
)
|
||||
.unwrap_or(config.database.username),
|
||||
password: maybe_read_item_from_file(
|
||||
env.komodo_database_password_file,
|
||||
env.komodo_database_password,
|
||||
)
|
||||
.unwrap_or(config.database.password),
|
||||
db_name: env
|
||||
.komodo_database_db_name
|
||||
.unwrap_or(config.database.db_name),
|
||||
app_name: config.database.app_name,
|
||||
},
|
||||
cli_logging: LogConfig {
|
||||
level: env
|
||||
.komodo_cli_logging_level
|
||||
.unwrap_or(config.cli_logging.level),
|
||||
stdio: env
|
||||
.komodo_cli_logging_stdio
|
||||
.unwrap_or(config.cli_logging.stdio),
|
||||
pretty: env
|
||||
.komodo_cli_logging_pretty
|
||||
.unwrap_or(config.cli_logging.pretty),
|
||||
location: false,
|
||||
otlp_endpoint: env
|
||||
.komodo_cli_logging_otlp_endpoint
|
||||
.unwrap_or(config.cli_logging.otlp_endpoint),
|
||||
opentelemetry_service_name: env
|
||||
.komodo_cli_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.cli_logging.opentelemetry_service_name),
|
||||
},
|
||||
profile: config.profile,
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
|
||||
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,32 +1,72 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
use colored::Colorize;
|
||||
use komodo_client::api::read::GetVersion;
|
||||
use anyhow::Context;
|
||||
use komodo_client::entities::config::cli::args;
|
||||
|
||||
mod args;
|
||||
mod exec;
|
||||
mod helpers;
|
||||
mod state;
|
||||
use crate::config::cli_config;
|
||||
|
||||
mod command;
|
||||
mod config;
|
||||
|
||||
async fn app() -> anyhow::Result<()> {
|
||||
dotenvy::dotenv().ok();
|
||||
logger::init(&config::cli_config().cli_logging)?;
|
||||
let args = config::cli_args();
|
||||
let env = config::cli_env();
|
||||
let debug_load =
|
||||
args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
|
||||
|
||||
match &args.command {
|
||||
args::Command::Config {
|
||||
all_profiles,
|
||||
unsanitized,
|
||||
} => {
|
||||
let mut config = if *unsanitized {
|
||||
cli_config().clone()
|
||||
} else {
|
||||
cli_config().sanitized()
|
||||
};
|
||||
if !*all_profiles {
|
||||
config.profile = Default::default();
|
||||
}
|
||||
if debug_load {
|
||||
println!("\n{config:#?}");
|
||||
} else {
|
||||
println!(
|
||||
"\nCLI Config {}",
|
||||
serde_json::to_string_pretty(&config)
|
||||
.context("Failed to serialize config for pretty print")?
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
args::Command::Container(container) => {
|
||||
command::container::handle(container).await
|
||||
}
|
||||
args::Command::Inspect(inspect) => {
|
||||
command::container::inspect_container(inspect).await
|
||||
}
|
||||
args::Command::List(list) => command::list::handle(list).await,
|
||||
args::Command::Execute(args) => {
|
||||
command::execute::handle(&args.execution, args.yes).await
|
||||
}
|
||||
args::Command::Update { command } => {
|
||||
command::update::handle(command).await
|
||||
}
|
||||
args::Command::Database { command } => {
|
||||
command::database::handle(command).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt().with_target(false).init();
|
||||
|
||||
info!(
|
||||
"Komodo CLI version: {}",
|
||||
env!("CARGO_PKG_VERSION").blue().bold()
|
||||
);
|
||||
|
||||
let version =
|
||||
state::komodo_client().read(GetVersion {}).await?.version;
|
||||
info!("Komodo Core version: {}", version.blue().bold());
|
||||
|
||||
match &state::cli_args().command {
|
||||
args::Command::Execute { execution } => {
|
||||
exec::run(execution.to_owned()).await?
|
||||
}
|
||||
let mut term_signal = tokio::signal::unix::signal(
|
||||
tokio::signal::unix::SignalKind::terminate(),
|
||||
)?;
|
||||
tokio::select! {
|
||||
res = tokio::spawn(app()) => res?,
|
||||
_ = term_signal.recv() => Ok(()),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use clap::Parser;
|
||||
use komodo_client::KomodoClient;
|
||||
use merge_config_files::parse_config_file;
|
||||
|
||||
pub fn cli_args() -> &'static crate::args::CliArgs {
|
||||
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
|
||||
}
|
||||
|
||||
pub fn komodo_client() -> &'static KomodoClient {
|
||||
static KOMODO_CLIENT: OnceLock<KomodoClient> = OnceLock::new();
|
||||
KOMODO_CLIENT.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let crate::args::CredsFile { url, key, secret } =
|
||||
match (&args.url, &args.key, &args.secret) {
|
||||
(Some(url), Some(key), Some(secret)) => {
|
||||
crate::args::CredsFile {
|
||||
url: url.clone(),
|
||||
key: key.clone(),
|
||||
secret: secret.clone(),
|
||||
}
|
||||
}
|
||||
(url, key, secret) => {
|
||||
let mut creds: crate::args::CredsFile =
|
||||
parse_config_file(cli_args().creds.as_str())
|
||||
.expect("failed to parse Komodo credentials");
|
||||
|
||||
if let Some(url) = url {
|
||||
creds.url.clone_from(url);
|
||||
}
|
||||
if let Some(key) = key {
|
||||
creds.key.clone_from(key);
|
||||
}
|
||||
if let Some(secret) = secret {
|
||||
creds.secret.clone_from(secret);
|
||||
}
|
||||
|
||||
creds
|
||||
}
|
||||
};
|
||||
futures::executor::block_on(
|
||||
KomodoClient::new(url, key, secret).with_healthcheck(),
|
||||
)
|
||||
.expect("failed to initialize Komodo client")
|
||||
})
|
||||
}
|
||||
@@ -20,21 +20,20 @@ periphery_client.workspace = true
|
||||
environment_file.workspace = true
|
||||
interpolate.workspace = true
|
||||
formatting.workspace = true
|
||||
database.workspace = true
|
||||
response.workspace = true
|
||||
command.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
cache.workspace = true
|
||||
git.workspace = true
|
||||
# mogh
|
||||
serror = { workspace = true, features = ["axum"] }
|
||||
merge_config_files.workspace = true
|
||||
async_timing_util.workspace = true
|
||||
partial_derive2.workspace = true
|
||||
derive_variants.workspace = true
|
||||
mongo_indexed.workspace = true
|
||||
resolver_api.workspace = true
|
||||
toml_pretty.workspace = true
|
||||
mungos.workspace = true
|
||||
slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
@@ -51,13 +50,14 @@ tokio-util.workspace = true
|
||||
axum-extra.workspace = true
|
||||
tower-http.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
serde_yaml_ng.workspace = true
|
||||
typeshare.workspace = true
|
||||
chrono-tz.workspace = true
|
||||
indexmap.workspace = true
|
||||
octorust.workspace = true
|
||||
wildcard.workspace = true
|
||||
arc-swap.workspace = true
|
||||
colored.workspace = true
|
||||
dashmap.workspace = true
|
||||
tracing.workspace = true
|
||||
reqwest.workspace = true
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
## All in one, multi stage compile + runtime Docker build for your architecture.
|
||||
|
||||
# Build Core
|
||||
FROM rust:1.87.0-bullseye AS core-builder
|
||||
FROM rust:1.88.0-bullseye AS core-builder
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -9,9 +9,11 @@ COPY ./lib ./lib
|
||||
COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/core ./bin/core
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile app
|
||||
RUN cargo build -p komodo_core --release
|
||||
RUN cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_cli --release
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine AS frontend-builder
|
||||
@@ -24,7 +26,7 @@ RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /config/starship.toml
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
|
||||
@@ -32,9 +34,10 @@ RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
WORKDIR /app
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
COPY --from=core-builder /builder/target/release/core /usr/local/bin/core
|
||||
COPY --from=core-builder /builder/target/release/km /usr/local/bin/km
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
# Set $DENO_DIR and preload external Deno deps
|
||||
@@ -46,9 +49,13 @@ RUN mkdir /action-cache && \
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
ENTRYPOINT [ "core" ]
|
||||
|
||||
@@ -3,12 +3,12 @@
|
||||
## Core deps installer
|
||||
|
||||
apt-get update
|
||||
apt-get install -y git curl ca-certificates
|
||||
apt-get install -y git curl ca-certificates iproute2
|
||||
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Starship prompt
|
||||
curl -sS https://starship.rs/install.sh | sh -s -- --yes --bin-dir /usr/local/bin
|
||||
echo 'export STARSHIP_CONFIG=/config/starship.toml' >> /root/.bashrc
|
||||
echo 'export STARSHIP_CONFIG=/starship.toml' >> /root/.bashrc
|
||||
echo 'eval "$(starship init bash)"' >> /root/.bashrc
|
||||
|
||||
|
||||
@@ -15,20 +15,26 @@ FROM ${FRONTEND_IMAGE} AS frontend
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /config/starship.toml
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /core /app/arch/linux/amd64
|
||||
COPY --from=aarch64 /core /app/arch/linux/arm64
|
||||
ARG TARGETPLATFORM
|
||||
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/arch
|
||||
|
||||
# Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /core /app/core/linux/amd64
|
||||
COPY --from=aarch64 /core /app/core/linux/arm64
|
||||
RUN mv /app/core/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/core
|
||||
|
||||
# Same for util
|
||||
COPY --from=x86_64 /km /app/km/linux/amd64
|
||||
COPY --from=aarch64 /km /app/km/linux/arm64
|
||||
RUN mv /app/km/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/km
|
||||
|
||||
# Copy default config / static frontend / deno binary
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY --from=frontend /frontend /app/frontend
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
@@ -41,9 +47,13 @@ RUN mkdir /action-cache && \
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD [ "core" ]
|
||||
@@ -16,14 +16,15 @@ RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /config/starship.toml
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
COPY --from=binaries /core /usr/local/bin/core
|
||||
COPY --from=binaries /km /usr/local/bin/km
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
# Set $DENO_DIR and preload external Deno deps
|
||||
@@ -35,9 +36,13 @@ RUN mkdir /action-cache && \
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD [ "core" ]
|
||||
@@ -229,8 +229,7 @@ pub async fn send_alert(
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {}",
|
||||
sanitized_error
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use ::slack::types::Block;
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use derive_variants::ExtractVariant;
|
||||
use futures::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
@@ -11,7 +12,6 @@ use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
stack::StackState,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use tracing::Instrument;
|
||||
|
||||
use crate::helpers::query::get_variables_and_secrets;
|
||||
@@ -188,8 +188,7 @@ async fn send_custom_alert(
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with request: {}",
|
||||
sanitized_error
|
||||
"Error with request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
.context("failed at post request to alerter")?;
|
||||
@@ -245,35 +244,9 @@ fn resource_link(
|
||||
resource_type: ResourceTargetVariant,
|
||||
id: &str,
|
||||
) -> String {
|
||||
let path = match resource_type {
|
||||
ResourceTargetVariant::System => unreachable!(),
|
||||
ResourceTargetVariant::Build => format!("/builds/{id}"),
|
||||
ResourceTargetVariant::Builder => {
|
||||
format!("/builders/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Deployment => {
|
||||
format!("/deployments/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Stack => {
|
||||
format!("/stacks/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Server => {
|
||||
format!("/servers/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Repo => format!("/repos/{id}"),
|
||||
ResourceTargetVariant::Alerter => {
|
||||
format!("/alerters/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Procedure => {
|
||||
format!("/procedures/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Action => {
|
||||
format!("/actions/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ResourceSync => {
|
||||
format!("/resource-syncs/{id}")
|
||||
}
|
||||
};
|
||||
|
||||
format!("{}{path}", core_config().host)
|
||||
komodo_client::entities::resource_link(
|
||||
&core_config().host,
|
||||
resource_type,
|
||||
id,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -13,8 +13,7 @@ pub async fn send_alert(
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {} is working\n{link}",
|
||||
name,
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
@@ -27,19 +26,15 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {}{} is now reachable\n{link}",
|
||||
name, region
|
||||
)
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {:#?}", e))
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {}{} is unreachable ❌\n{link}{err}",
|
||||
name, region
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
@@ -54,8 +49,7 @@ pub async fn send_alert(
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
|
||||
name, region,
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
@@ -69,8 +63,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region,
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
@@ -85,8 +78,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region, path,
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
@@ -100,8 +92,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
@@ -113,8 +104,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
@@ -126,8 +116,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
@@ -141,8 +130,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
@@ -155,8 +143,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
|
||||
name, server_name, service, image,
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
@@ -171,8 +158,7 @@ pub async fn send_alert(
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
|
||||
name, server_name, images_label, images_str,
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
@@ -180,28 +166,25 @@ pub async fn send_alert(
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
|
||||
instance_id, message,
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {}\n{link}",
|
||||
name,
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {} failed\nversion: v{}\n{link}",
|
||||
name, version,
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {} failed\n{link}", name,)
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
@@ -254,8 +237,7 @@ async fn send_message(
|
||||
} else {
|
||||
let text = response.text().await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send message to ntfy | {} | failed to get response text",
|
||||
status
|
||||
"Failed to send message to ntfy | {status} | failed to get response text"
|
||||
)
|
||||
})?;
|
||||
Err(anyhow!(
|
||||
|
||||
@@ -12,8 +12,7 @@ pub async fn send_alert(
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {} is working\n{link}",
|
||||
name,
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
@@ -26,19 +25,15 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {}{} is now reachable\n{link}",
|
||||
name, region
|
||||
)
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {:#?}", e))
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {}{} is unreachable ❌\n{link}{err}",
|
||||
name, region
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
@@ -53,8 +48,7 @@ pub async fn send_alert(
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
|
||||
name, region,
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
@@ -68,8 +62,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region,
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
@@ -84,8 +77,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region, path,
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
@@ -99,8 +91,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
@@ -112,8 +103,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
@@ -125,8 +115,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
@@ -140,8 +129,7 @@ pub async fn send_alert(
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
@@ -154,8 +142,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
|
||||
name, server_name, service, image,
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
@@ -170,8 +157,7 @@ pub async fn send_alert(
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
|
||||
name, server_name, images_label, images_str,
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
@@ -179,16 +165,14 @@ pub async fn send_alert(
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
|
||||
instance_id, message,
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {}\n{link}",
|
||||
name,
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
@@ -199,7 +183,7 @@ pub async fn send_alert(
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {} failed\n{link}", name,)
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
@@ -252,8 +236,7 @@ async fn send_message(
|
||||
} else {
|
||||
let text = response.text().await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send message to pushover | {} | failed to get response text",
|
||||
status
|
||||
"Failed to send message to pushover | {status} | failed to get response text"
|
||||
)
|
||||
})?;
|
||||
Err(anyhow!(
|
||||
|
||||
@@ -450,8 +450,7 @@ pub async fn send_alert(
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {}",
|
||||
sanitized_error
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ pub fn router() -> Router {
|
||||
}
|
||||
|
||||
if google_oauth_client().is_some() {
|
||||
info!("🔑 Github Login Enabled");
|
||||
info!("🔑 Google Login Enabled");
|
||||
router = router.nest("/google", google::router())
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,10 @@ use std::{
|
||||
|
||||
use anyhow::Context;
|
||||
use command::run_komodo_command;
|
||||
use config::merge_objects;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::{
|
||||
@@ -14,6 +18,7 @@ use komodo_client::{
|
||||
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
|
||||
},
|
||||
entities::{
|
||||
FileFormat, JsonObject,
|
||||
action::Action,
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
config::core::CoreConfig,
|
||||
@@ -22,8 +27,8 @@ use komodo_client::{
|
||||
update::Update,
|
||||
user::action_user,
|
||||
},
|
||||
parsers::parse_key_value_list,
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
@@ -46,7 +51,10 @@ use super::ExecuteArgs;
|
||||
impl super::BatchExecute for BatchRunAction {
|
||||
type Resource = Action;
|
||||
fn single_request(action: String) -> ExecuteRequest {
|
||||
ExecuteRequest::RunAction(RunAction { action })
|
||||
ExecuteRequest::RunAction(RunAction {
|
||||
action,
|
||||
args: Default::default(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,6 +99,23 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let default_args = parse_action_arguments(
|
||||
&action.config.arguments,
|
||||
action.config.arguments_format,
|
||||
)
|
||||
.context("Failed to parse default Action arguments")?;
|
||||
|
||||
let args = merge_objects(
|
||||
default_args,
|
||||
self.args.unwrap_or_default(),
|
||||
true,
|
||||
true,
|
||||
)
|
||||
.context("Failed to merge request args with default args")?;
|
||||
|
||||
let args = serde_json::to_string(&args)
|
||||
.context("Failed to serialize action run arguments")?;
|
||||
|
||||
let CreateApiKeyResponse { key, secret } = CreateApiKey {
|
||||
name: update.id.clone(),
|
||||
expires: 0,
|
||||
@@ -103,7 +128,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let contents = &mut action.config.file_contents;
|
||||
|
||||
// Wrap the file contents in the execution context.
|
||||
*contents = full_contents(contents, &key, &secret);
|
||||
*contents = full_contents(contents, &args, &key, &secret);
|
||||
|
||||
let replacers =
|
||||
interpolate(contents, &mut update, key.clone(), secret.clone())
|
||||
@@ -179,7 +204,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -236,7 +261,13 @@ async fn interpolate(
|
||||
Ok(interpolator.secret_replacers)
|
||||
}
|
||||
|
||||
fn full_contents(contents: &str, key: &str, secret: &str) -> String {
|
||||
fn full_contents(
|
||||
contents: &str,
|
||||
// Pre-serialized to JSON string.
|
||||
args: &str,
|
||||
key: &str,
|
||||
secret: &str,
|
||||
) -> String {
|
||||
let CoreConfig {
|
||||
port, ssl_enabled, ..
|
||||
} = core_config();
|
||||
@@ -261,6 +292,8 @@ const TOML = {{
|
||||
parseCargoToml: __TOML__.parse,
|
||||
}}
|
||||
|
||||
const ARGS = {args};
|
||||
|
||||
const komodo = KomodoClient('{base_url}', {{
|
||||
type: 'api-key',
|
||||
params: {{ key: '{key}', secret: '{secret}' }}
|
||||
@@ -366,3 +399,25 @@ fn delete_file(
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_action_arguments(
|
||||
args: &str,
|
||||
format: FileFormat,
|
||||
) -> anyhow::Result<JsonObject> {
|
||||
match format {
|
||||
FileFormat::KeyValue => {
|
||||
let args = parse_key_value_list(args)
|
||||
.context("Failed to parse args as key value list")?
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, serde_json::Value::String(v)))
|
||||
.collect();
|
||||
Ok(args)
|
||||
}
|
||||
FileFormat::Toml => toml::from_str(args)
|
||||
.context("Failed to parse Toml to Action args"),
|
||||
FileFormat::Yaml => serde_yaml_ng::from_str(args)
|
||||
.context("Failed to parse Yaml to action args"),
|
||||
FileFormat::Json => serde_json::from_str(args)
|
||||
.context("Failed to parse Json to action args"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,14 @@
|
||||
use std::{future::IntoFuture, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{doc, to_bson, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
@@ -22,14 +30,6 @@ use komodo_client::{
|
||||
user::auto_redeploy_user,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{doc, to_bson, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
@@ -352,7 +352,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -408,7 +408,7 @@ async fn handle_early_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
use std::{pin::Pin, time::Instant};
|
||||
use std::{pin::Pin, sync::OnceLock, time::Instant};
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use axum_extra::{TypedHeader, headers::ContentType};
|
||||
use database::mungos::by_id::find_one_by_id;
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
@@ -17,17 +18,19 @@ use komodo_client::{
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::by_id::find_one_by_id;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use response::JsonString;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use serror::{AddStatusCodeError, Json};
|
||||
use tokio::sync::Mutex;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
config::core_config,
|
||||
helpers::update::{init_execution_update, update_update},
|
||||
resource::{KomodoResource, list_full_for_user_using_pattern},
|
||||
state::db_client,
|
||||
@@ -141,6 +144,9 @@ pub enum ExecuteRequest {
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
|
||||
// ==== MAINTENANCE ====
|
||||
ClearRepoCache(ClearRepoCache),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -195,8 +201,10 @@ pub fn inner_handler(
|
||||
Box::pin(async move {
|
||||
let req_id = Uuid::new_v4();
|
||||
|
||||
// need to validate no cancel is active before any update is created.
|
||||
// Need to validate no cancel is active before any update is created.
|
||||
// This ensures no double update created if Cancel is called more than once for the same request.
|
||||
build::validate_cancel_build(&request).await?;
|
||||
repo::validate_cancel_repo_build(&request).await?;
|
||||
|
||||
let update = init_execution_update(&request, &user).await?;
|
||||
|
||||
@@ -326,3 +334,71 @@ async fn batch_execute<E: BatchExecute>(
|
||||
});
|
||||
Ok(join_all(futures).await)
|
||||
}
|
||||
|
||||
fn clear_repo_cache_lock() -> &'static Mutex<()> {
|
||||
static CLEAR_REPO_CACHE_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
CLEAR_REPO_CACHE_LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for ClearRepoCache {
|
||||
#[instrument(name = "ClearRepoCache", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = clear_repo_cache_lock()
|
||||
.try_lock()
|
||||
.context("Clear already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let mut contents =
|
||||
tokio::fs::read_dir(&core_config().repo_directory)
|
||||
.await
|
||||
.context("Failed to read repo cache directory")?;
|
||||
|
||||
loop {
|
||||
let path = match contents
|
||||
.next_entry()
|
||||
.await
|
||||
.context("Failed to read contents at path")
|
||||
{
|
||||
Ok(Some(contents)) => contents.path(),
|
||||
Ok(None) => break,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Read Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if path.is_dir() {
|
||||
match tokio::fs::remove_dir_all(&path)
|
||||
.await
|
||||
.context("Failed to clear contents at path")
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Clear Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
use std::pin::Pin;
|
||||
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use formatting::{Color, bold, colored, format_serror, muted};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
@@ -14,7 +17,6 @@ use komodo_client::{
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
@@ -134,7 +136,7 @@ fn resolve_inner(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -1,6 +1,13 @@
|
||||
use std::{collections::HashSet, future::IntoFuture, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::{
|
||||
bson::{doc, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
@@ -15,13 +22,6 @@ use komodo_client::{
|
||||
update::{Log, Update},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::{
|
||||
bson::{doc, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
@@ -287,7 +287,7 @@ async fn handle_repo_update_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -520,7 +520,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -569,7 +569,7 @@ async fn handle_builder_early_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::mongodb::bson::{doc, to_document};
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
@@ -11,7 +12,6 @@ use komodo_client::{
|
||||
update::{Log, Update},
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, to_document};
|
||||
use periphery_client::api::compose::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -123,10 +123,10 @@ impl Resolve<ExecuteArgs> for DeployStack {
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_stack(&mut stack)?;
|
||||
if let Some(repo) = repo.as_mut() {
|
||||
if !repo.config.skip_secret_interp {
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
if let Some(repo) = repo.as_mut()
|
||||
&& !repo.config.skip_secret_interp
|
||||
{
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
interpolator.push_logs(&mut update.logs);
|
||||
|
||||
@@ -378,16 +378,16 @@ pub async fn pull_stack_inner(
|
||||
mut repo: Option<Repo>,
|
||||
mut update: Option<&mut Update>,
|
||||
) -> anyhow::Result<ComposePullResponse> {
|
||||
if let Some(update) = update.as_mut() {
|
||||
if !services.is_empty() {
|
||||
update.logs.push(Log::simple(
|
||||
"Service/s",
|
||||
format!(
|
||||
"Execution requested for Stack service/s {}",
|
||||
services.join(", ")
|
||||
),
|
||||
))
|
||||
}
|
||||
if let Some(update) = update.as_mut()
|
||||
&& !services.is_empty()
|
||||
{
|
||||
update.logs.push(Log::simple(
|
||||
"Service/s",
|
||||
format!(
|
||||
"Execution requested for Stack service/s {}",
|
||||
services.join(", ")
|
||||
),
|
||||
))
|
||||
}
|
||||
|
||||
let git_token = stack_git_token(&mut stack, repo.as_mut()).await?;
|
||||
@@ -408,10 +408,10 @@ pub async fn pull_stack_inner(
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_stack(&mut stack)?;
|
||||
if let Some(repo) = repo.as_mut() {
|
||||
if !repo.config.skip_secret_interp {
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
if let Some(repo) = repo.as_mut()
|
||||
&& !repo.config.skip_secret_interp
|
||||
{
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
if let Some(update) = update {
|
||||
interpolator.push_logs(&mut update.logs);
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use formatting::{Color, colored, format_serror};
|
||||
use komodo_client::{
|
||||
api::{execute::RunSync, write::RefreshResourceSyncPending},
|
||||
@@ -22,8 +26,6 @@ use komodo_client::{
|
||||
user::sync_user,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
|
||||
@@ -8,11 +13,6 @@ use komodo_client::{
|
||||
sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -6,8 +8,6 @@ use komodo_client::{
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -2,6 +2,10 @@ use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
@@ -13,10 +17,6 @@ use komodo_client::{
|
||||
update::UpdateStatus,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -6,8 +8,6 @@ use komodo_client::{
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -290,6 +290,7 @@ fn core_info() -> &'static GetCoreInfoResponse {
|
||||
disable_confirm_dialog: config.disable_confirm_dialog,
|
||||
disable_non_admin_create: config.disable_non_admin_create,
|
||||
disable_websocket_reconnect: config.disable_websocket_reconnect,
|
||||
enable_fancy_toml: config.enable_fancy_toml,
|
||||
github_webhook_owners: config
|
||||
.github_webhook_app
|
||||
.installations
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetPermission, GetPermissionResponse, ListPermissions,
|
||||
@@ -7,7 +8,6 @@ use komodo_client::{
|
||||
},
|
||||
entities::permission::PermissionLevel,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use komodo_client::api::read::*;
|
||||
use mongo_indexed::{Document, doc};
|
||||
use mungos::{
|
||||
use database::mongo_indexed::{Document, doc};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id, find::find_collect,
|
||||
mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -8,6 +8,10 @@ use anyhow::{Context, anyhow};
|
||||
use async_timing_util::{
|
||||
FIFTEEN_SECONDS_MS, get_timelength_in_ms, unix_timestamp_ms,
|
||||
};
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -32,10 +36,6 @@ use komodo_client::{
|
||||
update::Log,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use periphery_client::api::{
|
||||
self as periphery,
|
||||
container::InspectContainer,
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
find::find_collect, mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{GetTag, ListTags},
|
||||
entities::tag::Tag,
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_tag, state::db_client};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::find::find_collect;
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
|
||||
@@ -13,7 +14,6 @@ use komodo_client::{
|
||||
sync::ResourceSync, toml::ResourcesToml, user::User,
|
||||
},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
|
||||
entities::{
|
||||
@@ -20,11 +25,6 @@ use komodo_client::{
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
|
||||
@@ -8,11 +13,6 @@ use komodo_client::{
|
||||
},
|
||||
entities::user::{UserConfig, admin_service_user},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_user, state::db_client};
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use komodo_client::api::read::*;
|
||||
use mungos::{
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId},
|
||||
options::FindOptions,
|
||||
},
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
find::find_collect, mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_variable, state::db_client};
|
||||
|
||||
@@ -4,13 +4,15 @@ use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
Extension, Json, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_bson,
|
||||
};
|
||||
use derive_variants::EnumVariants;
|
||||
use komodo_client::{
|
||||
api::user::*,
|
||||
entities::{api_key::ApiKey, komodo_timestamp, user::User},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -116,7 +118,7 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(update),
|
||||
database::mungos::update::Update::Set(update),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
@@ -141,7 +143,7 @@ impl Resolve<UserArgs> for SetLastSeenUpdate {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(doc! {
|
||||
database::mungos::update::Update::Set(doc! {
|
||||
"last_update_view": komodo_timestamp()
|
||||
}),
|
||||
None,
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use std::{path::PathBuf, str::FromStr, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::mongodb::bson::to_document;
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
@@ -16,8 +18,6 @@ use komodo_client::{
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::mongodb::bson::to_document;
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -15,7 +16,6 @@ use komodo_client::{
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use periphery_client::api::{self, container::InspectContainer};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -227,7 +227,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
update_one_by_id(
|
||||
&db_client().deployments,
|
||||
&deployment.id,
|
||||
mungos::update::Update::Set(
|
||||
database::mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
|
||||
@@ -1,6 +1,13 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId, to_bson},
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -8,13 +15,6 @@ use komodo_client::{
|
||||
permission::{UserTarget, UserTargetVariant},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId, to_bson},
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_user, state::db_client};
|
||||
@@ -107,7 +107,7 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user_id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -6,10 +10,6 @@ use komodo_client::{
|
||||
provider::{DockerRegistryAccount, GitProviderAccount},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
@@ -90,22 +90,22 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(domain) = &self.account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty domain")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(domain) = &self.account.domain
|
||||
&& domain.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty domain")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(username) = &self.account.username {
|
||||
if username.is_empty() {
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty username")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(username) = &self.account.username
|
||||
&& username.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty username")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
// Ensure update does not change id
|
||||
@@ -283,26 +283,26 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(domain) = &self.account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty domain"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(domain) = &self.account.domain
|
||||
&& domain.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty domain"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(username) = &self.account.username {
|
||||
if username.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty username"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(username) = &self.account.username
|
||||
&& username.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty username"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
self.account.id = None;
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
@@ -13,8 +17,6 @@ use komodo_client::{
|
||||
update::{Log, Update},
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
@@ -117,7 +119,7 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
update_one_by_id(
|
||||
&db_client().repos,
|
||||
&repo.id,
|
||||
mungos::update::Update::Set(
|
||||
database::mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::{user::CreateApiKey, write::*},
|
||||
entities::{
|
||||
@@ -8,10 +12,6 @@ use komodo_client::{
|
||||
user::{User, UserConfig},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{api::user::UserArgs, state::db_client};
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{doc, to_document};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
FileContents, NoData, Operation,
|
||||
FileContents, NoData, Operation, RepoExecutionArgs,
|
||||
all_logs_success,
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
@@ -13,13 +17,12 @@ use komodo_client::{
|
||||
user::stack_user,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, to_document};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
|
||||
WriteCommitComposeContents, WriteComposeContentsToHost,
|
||||
WriteComposeContentsToHost,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -35,7 +38,6 @@ use crate::{
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
stack::{
|
||||
get_stack_and_server,
|
||||
remote::{RemoteComposeContents, get_repo_compose_contents},
|
||||
services::extract_services_into_res,
|
||||
},
|
||||
@@ -114,24 +116,13 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
|
||||
file_path,
|
||||
contents,
|
||||
} = self;
|
||||
let (mut stack, server) = get_stack_and_server(
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut repo = if !stack.config.files_on_host
|
||||
&& !stack.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&stack.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if !stack.config.files_on_host
|
||||
&& stack.config.repo.is_empty()
|
||||
&& stack.config.linked_repo.is_empty()
|
||||
@@ -146,77 +137,231 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
|
||||
|
||||
update.push_simple_log("File contents to write", &contents);
|
||||
|
||||
let stack_id = stack.id.clone();
|
||||
|
||||
if stack.config.files_on_host {
|
||||
match periphery_client(&server)?
|
||||
.request(WriteComposeContentsToHost {
|
||||
name: stack.name,
|
||||
run_directory: stack.config.run_directory,
|
||||
file_path,
|
||||
contents,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write contents to host")
|
||||
{
|
||||
Ok(log) => {
|
||||
update.logs.push(log);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Write File Contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
let git_token =
|
||||
stack_git_token(&mut stack, repo.as_mut()).await?;
|
||||
match periphery_client(&server)?
|
||||
.request(WriteCommitComposeContents {
|
||||
stack,
|
||||
repo,
|
||||
username: Some(user.username.clone()),
|
||||
file_path,
|
||||
contents,
|
||||
git_token,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write contents to host")
|
||||
{
|
||||
Ok(res) => {
|
||||
update.logs.extend(res.logs);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Write File Contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if let Err(e) = (RefreshStackCache { stack: stack_id })
|
||||
.resolve(&WriteArgs {
|
||||
user: stack_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context(
|
||||
"Failed to refresh stack cache after writing file contents",
|
||||
write_stack_file_contents_on_host(
|
||||
stack, file_path, contents, update,
|
||||
)
|
||||
{
|
||||
.await
|
||||
} else {
|
||||
write_stack_file_contents_git(
|
||||
stack,
|
||||
&file_path,
|
||||
&contents,
|
||||
&user.username,
|
||||
update,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn write_stack_file_contents_on_host(
|
||||
stack: Stack,
|
||||
file_path: String,
|
||||
contents: String,
|
||||
mut update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
if stack.config.server_id.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Cannot write file, Files on host Stack has not configured a Server"
|
||||
).into());
|
||||
}
|
||||
let (server, state) =
|
||||
get_server_with_state(&stack.config.server_id).await?;
|
||||
if state != ServerState::Ok {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot write file when server is unreachable or disabled"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
match periphery_client(&server)?
|
||||
.request(WriteComposeContentsToHost {
|
||||
name: stack.name,
|
||||
run_directory: stack.config.run_directory,
|
||||
file_path,
|
||||
contents,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write contents to host")
|
||||
{
|
||||
Ok(log) => {
|
||||
update.logs.push(log);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Refresh stack cache",
|
||||
"Write File Contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
// Finish with a cache refresh
|
||||
if let Err(e) = (RefreshStackCache { stack: stack.id })
|
||||
.resolve(&WriteArgs {
|
||||
user: stack_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context(
|
||||
"Failed to refresh stack cache after writing file contents",
|
||||
)
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh stack cache",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
async fn write_stack_file_contents_git(
|
||||
mut stack: Stack,
|
||||
file_path: &str,
|
||||
contents: &str,
|
||||
username: &str,
|
||||
mut update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = if !stack.config.linked_repo.is_empty() {
|
||||
crate::resource::get::<Repo>(&stack.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let git_token = stack_git_token(&mut stack, repo.as_mut()).await?;
|
||||
|
||||
let mut repo_args: RepoExecutionArgs = if let Some(repo) = &repo {
|
||||
repo.into()
|
||||
} else {
|
||||
(&stack).into()
|
||||
};
|
||||
let root = repo_args.unique_path(&core_config().repo_directory)?;
|
||||
repo_args.destination = Some(root.display().to_string());
|
||||
|
||||
let file_path = stack
|
||||
.config
|
||||
.run_directory
|
||||
.parse::<PathBuf>()
|
||||
.context("Run directory is not a valid path")?
|
||||
.join(file_path);
|
||||
let full_path =
|
||||
root.join(&file_path).components().collect::<PathBuf>();
|
||||
|
||||
if let Some(parent) = full_path.parent() {
|
||||
tokio::fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to initialize stack file parent directory {parent:?}"
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
// Ensure the folder is initialized as git repo.
|
||||
// This allows a new file to be committed on a branch that may not exist.
|
||||
if !root.join(".git").exists() {
|
||||
git::init_folder_as_repo(
|
||||
&root,
|
||||
&repo_args,
|
||||
git_token.as_deref(),
|
||||
&mut update.logs,
|
||||
)
|
||||
.await;
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
}
|
||||
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
&core_config().repo_directory,
|
||||
git_token,
|
||||
)
|
||||
.await
|
||||
.context("Failed to pull latest changes before commit")
|
||||
{
|
||||
Ok((res, _)) => update.logs.extend(res.logs),
|
||||
Err(e) => {
|
||||
update.push_error_log("Pull Repo", format_serror(&e.into()));
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
if let Err(e) = tokio::fs::write(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to write compose file contents to {full_path:?}"
|
||||
)
|
||||
})
|
||||
{
|
||||
update.push_error_log("Write File", format_serror(&e.into()));
|
||||
} else {
|
||||
update.push_simple_log(
|
||||
"Write File",
|
||||
format!("File written to {full_path:?}"),
|
||||
);
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
let commit_res = git::commit_file(
|
||||
&format!("{username}: Write Stack File"),
|
||||
&root,
|
||||
&file_path,
|
||||
&stack.config.branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
update.logs.extend(commit_res.logs);
|
||||
|
||||
// Finish with a cache refresh
|
||||
if let Err(e) = (RefreshStackCache { stack: stack.id })
|
||||
.resolve(&WriteArgs {
|
||||
user: stack_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context(
|
||||
"Failed to refresh stack cache after writing file contents",
|
||||
)
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh stack cache",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
|
||||
@@ -4,6 +4,10 @@ use std::{
|
||||
};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::{read::ExportAllResourcesToToml, write::*},
|
||||
@@ -32,15 +36,10 @@ use komodo_client::{
|
||||
user::sync_user,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
@@ -206,15 +205,16 @@ async fn write_sync_file_contents_on_host(
|
||||
let full_path = root.join(&resource_path).join(&file_path);
|
||||
|
||||
if let Some(parent) = full_path.parent() {
|
||||
fs::create_dir_all(parent).await.with_context(|| {
|
||||
tokio::fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to initialize resource file parent directory {parent:?}"
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Err(e) =
|
||||
fs::write(&full_path, &contents).await.with_context(|| {
|
||||
if let Err(e) = tokio::fs::write(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to write resource file contents to {full_path:?}"
|
||||
)
|
||||
@@ -265,29 +265,32 @@ async fn write_sync_file_contents_git(
|
||||
contents,
|
||||
} = req;
|
||||
|
||||
let mut clone_args: RepoExecutionArgs = if let Some(repo) = &repo {
|
||||
let mut repo_args: RepoExecutionArgs = if let Some(repo) = &repo {
|
||||
repo.into()
|
||||
} else {
|
||||
(&sync).into()
|
||||
};
|
||||
let root = clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(root.display().to_string());
|
||||
let root = repo_args.unique_path(&core_config().repo_directory)?;
|
||||
repo_args.destination = Some(root.display().to_string());
|
||||
|
||||
let access_token = if let Some(account) = &clone_args.account {
|
||||
git_token(&clone_args.provider, account, |https| clone_args.https = https)
|
||||
let git_token = if let Some(account) = &repo_args.account {
|
||||
git_token(&repo_args.provider, account, |https| repo_args.https = https)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", repo_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let file_path =
|
||||
file_path.parse::<PathBuf>().context("Invalid file path")?;
|
||||
let resource_path = resource_path
|
||||
.parse::<PathBuf>()
|
||||
.context("Invalid resource path")?;
|
||||
file_path.parse::<PathBuf>().with_context(|| {
|
||||
format!("File path is not a valid path: {file_path}")
|
||||
})?;
|
||||
let resource_path =
|
||||
resource_path.parse::<PathBuf>().with_context(|| {
|
||||
format!("Resource path is not a valid path: {resource_path}")
|
||||
})?;
|
||||
let full_path = root
|
||||
.join(&resource_path)
|
||||
.join(&file_path)
|
||||
@@ -295,7 +298,7 @@ async fn write_sync_file_contents_git(
|
||||
.collect::<PathBuf>();
|
||||
|
||||
if let Some(parent) = full_path.parent() {
|
||||
fs::create_dir_all(parent).await.with_context(|| {
|
||||
tokio::fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to initialize resource file parent directory {parent:?}"
|
||||
)
|
||||
@@ -307,8 +310,8 @@ async fn write_sync_file_contents_git(
|
||||
if !root.join(".git").exists() {
|
||||
git::init_folder_as_repo(
|
||||
&root,
|
||||
&clone_args,
|
||||
access_token.as_deref(),
|
||||
&repo_args,
|
||||
git_token.as_deref(),
|
||||
&mut update.logs,
|
||||
)
|
||||
.await;
|
||||
@@ -322,9 +325,9 @@ async fn write_sync_file_contents_git(
|
||||
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
clone_args,
|
||||
repo_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
git_token,
|
||||
)
|
||||
.await
|
||||
.context("Failed to pull latest changes before commit")
|
||||
@@ -343,8 +346,9 @@ async fn write_sync_file_contents_git(
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
if let Err(e) =
|
||||
fs::write(&full_path, &contents).await.with_context(|| {
|
||||
if let Err(e) = tokio::fs::write(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to write resource file contents to {full_path:?}"
|
||||
)
|
||||
@@ -378,10 +382,14 @@ async fn write_sync_file_contents_git(
|
||||
if let Err(e) = (RefreshResourceSyncPending { sync: sync.name })
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context(
|
||||
"Failed to refresh sync pending after writing file contents",
|
||||
)
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh sync pending",
|
||||
format_serror(&e.error.into()),
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -480,7 +488,7 @@ impl Resolve<WriteArgs> for CommitSync {
|
||||
.join(to_path_compatible_name(&sync.name))
|
||||
.join(&resource_path);
|
||||
if let Some(parent) = file_path.parent() {
|
||||
fs::create_dir_all(parent)
|
||||
tokio::fs::create_dir_all(parent)
|
||||
.await
|
||||
.with_context(|| format!("Failed to initialize resource file parent directory {parent:?}"))?;
|
||||
};
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::{delete_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::write::{CreateTag, DeleteTag, RenameTag, UpdateTagColor},
|
||||
entities::{
|
||||
@@ -17,10 +21,6 @@ use komodo_client::{
|
||||
tag::{Tag, TagColor},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
@@ -46,6 +46,7 @@ impl Resolve<WriteArgs> for CreateTag {
|
||||
name: self.name,
|
||||
color: TagColor::Slate,
|
||||
owner: user.id.clone(),
|
||||
unused: false,
|
||||
};
|
||||
|
||||
tag.id = db_client()
|
||||
|
||||
@@ -1,20 +1,18 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
DeleteUser, DeleteUserResponse, UpdateUserPassword,
|
||||
UpdateUserPasswordResponse, UpdateUserUsername,
|
||||
UpdateUserUsernameResponse,
|
||||
},
|
||||
entities::{NoData, user::UserConfig},
|
||||
entities::NoData,
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config, helpers::hash_password, state::db_client,
|
||||
};
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
@@ -78,25 +76,7 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
|
||||
);
|
||||
}
|
||||
}
|
||||
let UserConfig::Local { .. } = user.config else {
|
||||
return Err(anyhow!("User is not local user").into());
|
||||
};
|
||||
if self.password.is_empty() {
|
||||
return Err(anyhow!("Password cannot be empty.").into());
|
||||
}
|
||||
let id = ObjectId::from_str(&user.id)
|
||||
.context("User id not valid ObjectId.")?;
|
||||
let hashed_password = hash_password(self.password)?;
|
||||
db_client()
|
||||
.users
|
||||
.update_one(
|
||||
doc! { "_id": id },
|
||||
doc! { "$set": {
|
||||
"config.data.password": hashed_password
|
||||
} },
|
||||
)
|
||||
.await
|
||||
.context("Failed to update user password on database.")?;
|
||||
db_client().set_user_password(user, &self.password).await?;
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{komodo_timestamp, user_group::UserGroup},
|
||||
};
|
||||
use mungos::{
|
||||
use database::mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{komodo_timestamp, user_group::UserGroup},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{Operation, ResourceTarget, variable::Variable},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
|
||||
@@ -2,12 +2,12 @@ use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
Router, extract::Query, response::Redirect, routing::get,
|
||||
};
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
@@ -134,7 +134,7 @@ async fn callback(
|
||||
format!("{}?token={exchange_token}", core_config().host)
|
||||
} else {
|
||||
let splitter = if redirect.contains('?') { '&' } else { '?' };
|
||||
format!("{}{splitter}token={exchange_token}", redirect)
|
||||
format!("{redirect}{splitter}token={exchange_token}")
|
||||
};
|
||||
Ok(Redirect::to(&redirect_url))
|
||||
}
|
||||
|
||||
@@ -3,9 +3,9 @@ use async_timing_util::unix_timestamp_ms;
|
||||
use axum::{
|
||||
Router, extract::Query, response::Redirect, routing::get,
|
||||
};
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::user::{User, UserConfig};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
@@ -148,7 +148,7 @@ async fn callback(
|
||||
format!("{}?token={exchange_token}", core_config().host)
|
||||
} else {
|
||||
let splitter = if redirect.contains('?') { '&' } else { '?' };
|
||||
format!("{}{splitter}token={exchange_token}", redirect)
|
||||
format!("{redirect}{splitter}token={exchange_token}")
|
||||
};
|
||||
Ok(Redirect::to(&redirect_url))
|
||||
}
|
||||
|
||||
@@ -4,11 +4,11 @@ use anyhow::{Context, anyhow};
|
||||
use async_timing_util::{
|
||||
Timelength, get_timelength_in_ms, unix_timestamp_ms,
|
||||
};
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use jsonwebtoken::{
|
||||
DecodingKey, EncodingKey, Header, Validation, decode, encode,
|
||||
};
|
||||
use komodo_client::entities::config::core::CoreConfig;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
|
||||
@@ -2,6 +2,10 @@ use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use database::{
|
||||
hash_password,
|
||||
mungos::mongodb::bson::{Document, doc, oid::ObjectId},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::auth::{
|
||||
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
|
||||
@@ -9,14 +13,11 @@ use komodo_client::{
|
||||
},
|
||||
entities::user::{User, UserConfig},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::auth::AuthArgs,
|
||||
config::core_config,
|
||||
helpers::hash_password,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
|
||||
@@ -4,8 +4,8 @@ use axum::{
|
||||
extract::Request, http::HeaderMap, middleware::Next,
|
||||
response::Response,
|
||||
};
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::{komodo_timestamp, user::User};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
@@ -6,11 +6,11 @@ use axum::{
|
||||
};
|
||||
use client::oidc_client;
|
||||
use dashmap::DashMap;
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use mungos::mongodb::bson::{Document, doc};
|
||||
use openidconnect::{
|
||||
AccessTokenHash, AuthorizationCode, CsrfToken,
|
||||
EmptyAdditionalClaims, Nonce, OAuth2TokenResponse,
|
||||
@@ -31,11 +31,15 @@ use super::RedirectQuery;
|
||||
|
||||
pub mod client;
|
||||
|
||||
static APP_USER_AGENT: &str =
|
||||
concat!("Komodo/", env!("CARGO_PKG_VERSION"),);
|
||||
|
||||
fn reqwest_client() -> &'static reqwest::Client {
|
||||
static REQWEST: OnceLock<reqwest::Client> = OnceLock::new();
|
||||
REQWEST.get_or_init(|| {
|
||||
reqwest::Client::builder()
|
||||
.redirect(reqwest::redirect::Policy::none())
|
||||
.user_agent(APP_USER_AGENT)
|
||||
.build()
|
||||
.expect("Invalid OIDC reqwest client")
|
||||
})
|
||||
@@ -312,7 +316,7 @@ async fn callback(
|
||||
let exchange_token = jwt_client().create_exchange_token(jwt).await;
|
||||
let redirect_url = if let Some(redirect) = redirect {
|
||||
let splitter = if redirect.contains('?') { '&' } else { '?' };
|
||||
format!("{}{splitter}token={exchange_token}", redirect)
|
||||
format!("{redirect}{splitter}token={exchange_token}")
|
||||
} else {
|
||||
format!("{}?token={exchange_token}", core_config().host)
|
||||
};
|
||||
|
||||
@@ -1,36 +1,69 @@
|
||||
use std::sync::OnceLock;
|
||||
use std::{path::PathBuf, sync::OnceLock};
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use config::ConfigLoader;
|
||||
use environment_file::{
|
||||
maybe_read_item_from_file, maybe_read_list_from_file,
|
||||
};
|
||||
use komodo_client::entities::{
|
||||
config::core::{
|
||||
AwsCredentials, CoreConfig, DatabaseConfig, Env,
|
||||
GithubWebhookAppConfig, GithubWebhookAppInstallationConfig,
|
||||
OauthCredentials,
|
||||
config::{
|
||||
DatabaseConfig,
|
||||
core::{
|
||||
AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig,
|
||||
GithubWebhookAppInstallationConfig, OauthCredentials,
|
||||
},
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
use merge_config_files::parse_config_file;
|
||||
|
||||
pub fn core_config() -> &'static CoreConfig {
|
||||
static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
|
||||
CORE_CONFIG.get_or_init(|| {
|
||||
let env: Env = match envy::from_env()
|
||||
.context("failed to parse core Env") {
|
||||
.context("Failed to parse Komodo Core environment") {
|
||||
Ok(env) => env,
|
||||
Err(e) => {
|
||||
panic!("{e:#?}");
|
||||
panic!("{e:?}");
|
||||
}
|
||||
};
|
||||
let config_path = &env.komodo_config_path;
|
||||
let config =
|
||||
parse_config_file::<CoreConfig>(config_path.as_str())
|
||||
.unwrap_or_else(|e| {
|
||||
panic!("failed at parsing config at {config_path} | {e:#}")
|
||||
});
|
||||
let installations = match (maybe_read_list_from_file(env.komodo_github_webhook_app_installations_ids_file,env.komodo_github_webhook_app_installations_ids), env.komodo_github_webhook_app_installations_namespaces) {
|
||||
let config = if env.komodo_config_paths.is_empty() {
|
||||
println!(
|
||||
"{}: No config paths found, using default config",
|
||||
"INFO".green(),
|
||||
);
|
||||
CoreConfig::default()
|
||||
} else {
|
||||
let config_keywords = env.komodo_config_keywords
|
||||
.iter()
|
||||
.map(String::as_str)
|
||||
.collect::<Vec<_>>();
|
||||
println!(
|
||||
"{}: {}: {config_keywords:?}",
|
||||
"INFO".green(),
|
||||
"Config File Keywords".dimmed(),
|
||||
);
|
||||
(ConfigLoader {
|
||||
paths: &env.komodo_config_paths
|
||||
.iter()
|
||||
.map(PathBuf::as_path)
|
||||
.collect::<Vec<_>>(),
|
||||
match_wildcards: &config_keywords,
|
||||
include_file_name: ".kcoreinclude",
|
||||
merge_nested: env.komodo_merge_nested_config,
|
||||
extend_array: env.komodo_extend_config_arrays,
|
||||
debug_print: env.komodo_config_debug,
|
||||
}).load::<CoreConfig>()
|
||||
.expect("Failed at parsing config from paths")
|
||||
};
|
||||
|
||||
let installations = match (
|
||||
maybe_read_list_from_file(
|
||||
env.komodo_github_webhook_app_installations_ids_file,
|
||||
env.komodo_github_webhook_app_installations_ids
|
||||
),
|
||||
env.komodo_github_webhook_app_installations_namespaces
|
||||
) {
|
||||
(Some(ids), Some(namespaces)) => {
|
||||
if ids.len() != namespaces.len() {
|
||||
panic!("KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
|
||||
@@ -181,6 +214,8 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.unwrap_or(config.disable_user_registration),
|
||||
disable_non_admin_create: env.komodo_disable_non_admin_create
|
||||
.unwrap_or(config.disable_non_admin_create),
|
||||
enable_fancy_toml: env.komodo_enable_fancy_toml
|
||||
.unwrap_or(config.enable_fancy_toml),
|
||||
lock_login_credentials_for: env.komodo_lock_login_credentials_for
|
||||
.unwrap_or(config.lock_login_credentials_for),
|
||||
local_auth: env.komodo_local_auth
|
||||
@@ -192,7 +227,10 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
stdio: env
|
||||
.komodo_logging_stdio
|
||||
.unwrap_or(config.logging.stdio),
|
||||
pretty: env.komodo_logging_pretty.unwrap_or(config.logging.pretty),
|
||||
pretty: env.komodo_logging_pretty
|
||||
.unwrap_or(config.logging.pretty),
|
||||
location: env.komodo_logging_location
|
||||
.unwrap_or(config.logging.location),
|
||||
otlp_endpoint: env
|
||||
.komodo_logging_otlp_endpoint
|
||||
.unwrap_or(config.logging.otlp_endpoint),
|
||||
@@ -201,6 +239,7 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.unwrap_or(config.logging.opentelemetry_service_name),
|
||||
},
|
||||
pretty_startup_config: env.komodo_pretty_startup_config.unwrap_or(config.pretty_startup_config),
|
||||
internet_interface: env.komodo_internet_interface.unwrap_or(config.internet_interface),
|
||||
ssl_enabled: env.komodo_ssl_enabled.unwrap_or(config.ssl_enabled),
|
||||
ssl_key_file: env.komodo_ssl_key_file.unwrap_or(config.ssl_key_file),
|
||||
ssl_cert_file: env.komodo_ssl_cert_file.unwrap_or(config.ssl_cert_file),
|
||||
|
||||
@@ -63,7 +63,7 @@ impl<States: Default + Busy + Copy + Send + 'static>
|
||||
pub fn update(
|
||||
&self,
|
||||
handler: impl Fn(&mut States),
|
||||
) -> anyhow::Result<UpdateGuard<States>> {
|
||||
) -> anyhow::Result<UpdateGuard<'_, States>> {
|
||||
let mut lock = self
|
||||
.0
|
||||
.lock()
|
||||
|
||||
@@ -128,8 +128,7 @@ async fn get_aws_builder(
|
||||
stage: "build instance connected".to_string(),
|
||||
success: true,
|
||||
stdout: format!(
|
||||
"established contact with periphery on builder\nperiphery version: v{}",
|
||||
version
|
||||
"established contact with periphery on builder\nperiphery version: v{version}"
|
||||
),
|
||||
start_ts: start_connect_ts,
|
||||
end_ts: komodo_timestamp(),
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
use std::{collections::HashMap, hash::Hash};
|
||||
|
||||
use komodo_client::busy::Busy;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Default)]
|
||||
@@ -34,7 +33,7 @@ impl<
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_list(&self) -> Vec<T> {
|
||||
let cache = self.cache.read().await;
|
||||
cache.iter().map(|(_, e)| e.clone()).collect()
|
||||
cache.values().cloned().collect()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
@@ -46,22 +45,22 @@ impl<
|
||||
self.cache.write().await.insert(key.into(), val);
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self, handler))]
|
||||
pub async fn update_entry<Key>(
|
||||
&self,
|
||||
key: Key,
|
||||
handler: impl Fn(&mut T),
|
||||
) where
|
||||
Key: Into<K> + std::fmt::Debug,
|
||||
{
|
||||
let mut cache = self.cache.write().await;
|
||||
handler(cache.entry(key.into()).or_default());
|
||||
}
|
||||
// #[instrument(level = "debug", skip(self, handler))]
|
||||
// pub async fn update_entry<Key>(
|
||||
// &self,
|
||||
// key: Key,
|
||||
// handler: impl Fn(&mut T),
|
||||
// ) where
|
||||
// Key: Into<K> + std::fmt::Debug,
|
||||
// {
|
||||
// let mut cache = self.cache.write().await;
|
||||
// handler(cache.entry(key.into()).or_default());
|
||||
// }
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn clear(&self) {
|
||||
self.cache.write().await.clear();
|
||||
}
|
||||
// #[instrument(level = "debug", skip(self))]
|
||||
// pub async fn clear(&self) {
|
||||
// self.cache.write().await.clear();
|
||||
// }
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn remove(&self, key: &K) {
|
||||
@@ -69,16 +68,16 @@ impl<
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
|
||||
T: Clone + Default + Busy,
|
||||
> Cache<K, T>
|
||||
{
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn busy(&self, id: &K) -> bool {
|
||||
match self.get(id).await {
|
||||
Some(state) => state.busy(),
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
// impl<
|
||||
// K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
|
||||
// T: Clone + Default + Busy,
|
||||
// > Cache<K, T>
|
||||
// {
|
||||
// #[instrument(level = "debug", skip(self))]
|
||||
// pub async fn busy(&self, id: &K) -> bool {
|
||||
// match self.get(id).await {
|
||||
// Some(state) => state.busy(),
|
||||
// None => false,
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use std::{fmt::Write, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::{Bson, doc};
|
||||
use indexmap::IndexSet;
|
||||
use komodo_client::entities::{
|
||||
ResourceTarget,
|
||||
@@ -13,8 +15,6 @@ use komodo_client::entities::{
|
||||
stack::Stack,
|
||||
user::User,
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::{Bson, doc};
|
||||
use periphery_client::PeripheryClient;
|
||||
use rand::Rng;
|
||||
|
||||
@@ -54,15 +54,6 @@ pub fn random_string(length: usize) -> String {
|
||||
.collect()
|
||||
}
|
||||
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
pub fn hash_password<P>(password: P) -> anyhow::Result<String>
|
||||
where
|
||||
P: AsRef<[u8]>,
|
||||
{
|
||||
bcrypt::hash(password, BCRYPT_COST)
|
||||
.context("failed to hash password")
|
||||
}
|
||||
|
||||
/// First checks db for token, then checks core config.
|
||||
/// Only errors if db call errors.
|
||||
/// Returns (token, use_https)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::by_id::find_one_by_id;
|
||||
use formatting::{Color, bold, colored, format_serror, muted};
|
||||
use futures::future::join_all;
|
||||
use komodo_client::{
|
||||
@@ -17,7 +18,6 @@ use komodo_client::{
|
||||
user::procedure_user,
|
||||
},
|
||||
};
|
||||
use mungos::by_id::find_one_by_id;
|
||||
use resolver_api::Resolve;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
@@ -1124,6 +1124,23 @@ async fn execute_execution(
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::ClearRepoCache(req) => {
|
||||
let req = ExecuteRequest::ClearRepoCache(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::ClearRepoCache(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at TestAlerter"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::Sleep(req) => {
|
||||
let duration = Duration::from_millis(req.duration_ms as u64);
|
||||
tokio::time::sleep(duration).await;
|
||||
@@ -1215,7 +1232,10 @@ impl ExtendBatch for BatchRunProcedure {
|
||||
impl ExtendBatch for BatchRunAction {
|
||||
type Resource = Action;
|
||||
fn single_execution(action: String) -> Execution {
|
||||
Execution::RunAction(RunAction { action })
|
||||
Execution::RunAction(RunAction {
|
||||
action,
|
||||
args: Default::default(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,8 +2,8 @@ use anyhow::Context;
|
||||
use async_timing_util::{
|
||||
ONE_DAY_MS, Timelength, unix_timestamp_ms, wait_until_timelength,
|
||||
};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use futures::future::join_all;
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use periphery_client::api::image::PruneImages;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
@@ -6,6 +6,13 @@ use std::{
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use async_timing_util::{ONE_MIN_MS, unix_timestamp_ms};
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use komodo_client::entities::{
|
||||
Operation, ResourceTarget, ResourceTargetVariant,
|
||||
action::{Action, ActionState},
|
||||
@@ -27,13 +34,6 @@ use komodo_client::entities::{
|
||||
user_group::UserGroup,
|
||||
variable::Variable,
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::stats;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::to_document,
|
||||
};
|
||||
use komodo_client::entities::{
|
||||
Operation, ResourceTarget,
|
||||
action::Action,
|
||||
@@ -14,10 +18,6 @@ use komodo_client::entities::{
|
||||
update::{Update, UpdateListItem},
|
||||
user::User,
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::to_document,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
api::execute::ExecuteRequest, resource, state::db_client,
|
||||
@@ -77,7 +77,7 @@ pub async fn add_update_without_send(
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn update_update(update: Update) -> anyhow::Result<()> {
|
||||
update_one_by_id(&db_client().updates, &update.id, mungos::update::Update::Set(to_document(&update)?), None)
|
||||
update_one_by_id(&db_client().updates, &update.id, database::mungos::update::Update::Set(to_document(&update)?), None)
|
||||
.await
|
||||
.context("failed to update the update on db. the update build process was deleted")?;
|
||||
let update = update_list_item(update).await?;
|
||||
@@ -499,6 +499,11 @@ pub async fn init_execution_update(
|
||||
resource::get::<Alerter>(&data.alerter).await?.id,
|
||||
),
|
||||
),
|
||||
|
||||
// Maintenance
|
||||
ExecuteRequest::ClearRepoCache(_data) => {
|
||||
(Operation::ClearRepoCache, ResourceTarget::system())
|
||||
}
|
||||
};
|
||||
|
||||
let mut update = make_update(target, operation, user);
|
||||
|
||||
@@ -7,7 +7,7 @@ use sha2::Sha256;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
listener::{VerifyBranch, VerifySecret},
|
||||
listener::{ExtractBranch, VerifySecret},
|
||||
};
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
@@ -53,19 +53,12 @@ struct GithubWebhookBody {
|
||||
branch: String,
|
||||
}
|
||||
|
||||
impl VerifyBranch for Github {
|
||||
fn verify_branch(
|
||||
body: &str,
|
||||
expected_branch: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
impl ExtractBranch for Github {
|
||||
fn extract_branch(body: &str) -> anyhow::Result<String> {
|
||||
let branch = serde_json::from_str::<GithubWebhookBody>(body)
|
||||
.context("Failed to parse github request body")?
|
||||
.branch
|
||||
.replace("refs/heads/", "");
|
||||
if branch == expected_branch {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("request branch does not match expected"))
|
||||
}
|
||||
Ok(branch)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ use serde::Deserialize;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
listener::{VerifyBranch, VerifySecret},
|
||||
listener::{ExtractBranch, VerifySecret},
|
||||
};
|
||||
|
||||
/// Listener implementation for Gitlab type API
|
||||
@@ -40,19 +40,12 @@ struct GitlabWebhookBody {
|
||||
branch: String,
|
||||
}
|
||||
|
||||
impl VerifyBranch for Gitlab {
|
||||
fn verify_branch(
|
||||
body: &str,
|
||||
expected_branch: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
impl ExtractBranch for Gitlab {
|
||||
fn extract_branch(body: &str) -> anyhow::Result<String> {
|
||||
let branch = serde_json::from_str::<GitlabWebhookBody>(body)
|
||||
.context("Failed to parse gitlab request body")?
|
||||
.branch
|
||||
.replace("refs/heads/", "");
|
||||
if branch == expected_branch {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("request branch does not match expected"))
|
||||
}
|
||||
Ok(branch)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::anyhow;
|
||||
use axum::{Router, http::HeaderMap};
|
||||
use komodo_client::entities::resource::Resource;
|
||||
use tokio::sync::Mutex;
|
||||
@@ -37,13 +38,16 @@ trait VerifySecret {
|
||||
}
|
||||
|
||||
/// Implemented on the integration struct, eg [integrations::github::Github]
|
||||
trait VerifyBranch {
|
||||
/// Returns Err if the branch extracted from request
|
||||
/// body does not match the expected branch.
|
||||
fn verify_branch(
|
||||
body: &str,
|
||||
expected_branch: &str,
|
||||
) -> anyhow::Result<()>;
|
||||
trait ExtractBranch {
|
||||
fn extract_branch(body: &str) -> anyhow::Result<String>;
|
||||
fn verify_branch(body: &str, expected: &str) -> anyhow::Result<()> {
|
||||
let branch = Self::extract_branch(body)?;
|
||||
if branch == expected {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("request branch does not match expected"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// For Procedures and Actions, incoming webhook
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::sync::OnceLock;
|
||||
use std::{str::FromStr, sync::OnceLock};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use anyhow::{Context, anyhow};
|
||||
use komodo_client::{
|
||||
api::{
|
||||
execute::*,
|
||||
@@ -13,6 +13,7 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
|
||||
use crate::{
|
||||
api::{
|
||||
@@ -39,20 +40,20 @@ fn build_locks() -> &'static ListenerLockCache {
|
||||
BUILD_LOCKS.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
pub async fn handle_build_webhook<B: super::VerifyBranch>(
|
||||
pub async fn handle_build_webhook<B: super::ExtractBranch>(
|
||||
build: Build,
|
||||
body: String,
|
||||
) -> anyhow::Result<()> {
|
||||
if !build.config.webhook_enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Acquire and hold lock to make a task queue for
|
||||
// subsequent listener calls on same resource.
|
||||
// It would fail if we let it go through from action state busy.
|
||||
let lock = build_locks().get_or_insert_default(&build.id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
if !build.config.webhook_enabled {
|
||||
return Err(anyhow!("build does not have webhook enabled"));
|
||||
}
|
||||
|
||||
B::verify_branch(&body, &build.config.branch)?;
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
@@ -155,7 +156,7 @@ pub enum RepoWebhookOption {
|
||||
Build,
|
||||
}
|
||||
|
||||
pub async fn handle_repo_webhook<B: super::VerifyBranch>(
|
||||
pub async fn handle_repo_webhook<B: super::ExtractBranch>(
|
||||
option: RepoWebhookOption,
|
||||
repo: Repo,
|
||||
body: String,
|
||||
@@ -174,22 +175,22 @@ pub async fn handle_repo_webhook<B: super::VerifyBranch>(
|
||||
}
|
||||
|
||||
async fn handle_repo_webhook_inner<
|
||||
B: super::VerifyBranch,
|
||||
B: super::ExtractBranch,
|
||||
E: RepoExecution,
|
||||
>(
|
||||
repo: Repo,
|
||||
body: String,
|
||||
) -> anyhow::Result<()> {
|
||||
if !repo.config.webhook_enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Acquire and hold lock to make a task queue for
|
||||
// subsequent listener calls on same resource.
|
||||
// It would fail if we let it go through from action state busy.
|
||||
let lock = repo_locks().get_or_insert_default(&repo.id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
if !repo.config.webhook_enabled {
|
||||
return Err(anyhow!("repo does not have webhook enabled"));
|
||||
}
|
||||
|
||||
B::verify_branch(&body, &repo.config.branch)?;
|
||||
|
||||
E::resolve(repo).await
|
||||
@@ -269,7 +270,7 @@ pub enum StackWebhookOption {
|
||||
Deploy,
|
||||
}
|
||||
|
||||
pub async fn handle_stack_webhook<B: super::VerifyBranch>(
|
||||
pub async fn handle_stack_webhook<B: super::ExtractBranch>(
|
||||
option: StackWebhookOption,
|
||||
stack: Stack,
|
||||
body: String,
|
||||
@@ -286,22 +287,22 @@ pub async fn handle_stack_webhook<B: super::VerifyBranch>(
|
||||
}
|
||||
|
||||
pub async fn handle_stack_webhook_inner<
|
||||
B: super::VerifyBranch,
|
||||
B: super::ExtractBranch,
|
||||
E: StackExecution,
|
||||
>(
|
||||
stack: Stack,
|
||||
body: String,
|
||||
) -> anyhow::Result<()> {
|
||||
if !stack.config.webhook_enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Acquire and hold lock to make a task queue for
|
||||
// subsequent listener calls on same resource.
|
||||
// It would fail if we let it go through, from "action state busy".
|
||||
let lock = stack_locks().get_or_insert_default(&stack.id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
if !stack.config.webhook_enabled {
|
||||
return Err(anyhow!("stack does not have webhook enabled"));
|
||||
}
|
||||
|
||||
B::verify_branch(&body, &stack.config.branch)?;
|
||||
|
||||
E::resolve(stack).await.map_err(|e| e.error)
|
||||
@@ -365,7 +366,7 @@ pub enum SyncWebhookOption {
|
||||
Sync,
|
||||
}
|
||||
|
||||
pub async fn handle_sync_webhook<B: super::VerifyBranch>(
|
||||
pub async fn handle_sync_webhook<B: super::ExtractBranch>(
|
||||
option: SyncWebhookOption,
|
||||
sync: ResourceSync,
|
||||
body: String,
|
||||
@@ -384,22 +385,22 @@ pub async fn handle_sync_webhook<B: super::VerifyBranch>(
|
||||
}
|
||||
|
||||
async fn handle_sync_webhook_inner<
|
||||
B: super::VerifyBranch,
|
||||
B: super::ExtractBranch,
|
||||
E: SyncExecution,
|
||||
>(
|
||||
sync: ResourceSync,
|
||||
body: String,
|
||||
) -> anyhow::Result<()> {
|
||||
if !sync.config.webhook_enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Acquire and hold lock to make a task queue for
|
||||
// subsequent listener calls on same resource.
|
||||
// It would fail if we let it go through from action state busy.
|
||||
let lock = sync_locks().get_or_insert_default(&sync.id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
if !sync.config.webhook_enabled {
|
||||
return Err(anyhow!("sync does not have webhook enabled"));
|
||||
}
|
||||
|
||||
B::verify_branch(&body, &sync.config.branch)?;
|
||||
|
||||
E::resolve(sync).await
|
||||
@@ -421,11 +422,15 @@ fn procedure_locks() -> &'static ListenerLockCache {
|
||||
PROCEDURE_LOCKS.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
pub async fn handle_procedure_webhook<B: super::VerifyBranch>(
|
||||
pub async fn handle_procedure_webhook<B: super::ExtractBranch>(
|
||||
procedure: Procedure,
|
||||
target_branch: &str,
|
||||
body: String,
|
||||
) -> anyhow::Result<()> {
|
||||
if !procedure.config.webhook_enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Acquire and hold lock to make a task queue for
|
||||
// subsequent listener calls on same resource.
|
||||
// It would fail if we let it go through from action state busy.
|
||||
@@ -433,10 +438,6 @@ pub async fn handle_procedure_webhook<B: super::VerifyBranch>(
|
||||
procedure_locks().get_or_insert_default(&procedure.id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
if !procedure.config.webhook_enabled {
|
||||
return Err(anyhow!("procedure does not have webhook enabled"));
|
||||
}
|
||||
|
||||
if target_branch != ANY_BRANCH {
|
||||
B::verify_branch(&body, target_branch)?;
|
||||
}
|
||||
@@ -471,28 +472,42 @@ fn action_locks() -> &'static ListenerLockCache {
|
||||
ACTION_LOCKS.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
pub async fn handle_action_webhook<B: super::VerifyBranch>(
|
||||
pub async fn handle_action_webhook<B: super::ExtractBranch>(
|
||||
action: Action,
|
||||
target_branch: &str,
|
||||
body: String,
|
||||
) -> anyhow::Result<()> {
|
||||
if !action.config.webhook_enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Acquire and hold lock to make a task queue for
|
||||
// subsequent listener calls on same resource.
|
||||
// It would fail if we let it go through from action state busy.
|
||||
let lock = action_locks().get_or_insert_default(&action.id).await;
|
||||
let _lock = lock.lock().await;
|
||||
|
||||
if !action.config.webhook_enabled {
|
||||
return Err(anyhow!("action does not have webhook enabled"));
|
||||
}
|
||||
let branch = B::extract_branch(&body)?;
|
||||
|
||||
if target_branch != ANY_BRANCH {
|
||||
B::verify_branch(&body, target_branch)?;
|
||||
if target_branch != ANY_BRANCH && branch != target_branch {
|
||||
return Err(anyhow!("request branch does not match expected"));
|
||||
}
|
||||
|
||||
let user = git_webhook_user().to_owned();
|
||||
let req =
|
||||
ExecuteRequest::RunAction(RunAction { action: action.id });
|
||||
|
||||
let body = serde_json::Value::from_str(&body)
|
||||
.context("Failed to deserialize webhook body")?;
|
||||
let serde_json::Value::Object(args) = json!({
|
||||
"WEBHOOK_BRANCH": branch,
|
||||
"WEBHOOK_BODY": body,
|
||||
}) else {
|
||||
return Err(anyhow!("Something is wrong with serde_json..."));
|
||||
};
|
||||
|
||||
let req = ExecuteRequest::RunAction(RunAction {
|
||||
action: action.id,
|
||||
args: args.into(),
|
||||
});
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::RunAction(req) = req else {
|
||||
unreachable!()
|
||||
|
||||
@@ -11,7 +11,7 @@ use tracing::Instrument;
|
||||
use crate::resource::KomodoResource;
|
||||
|
||||
use super::{
|
||||
CustomSecret, VerifyBranch, VerifySecret,
|
||||
CustomSecret, ExtractBranch, VerifySecret,
|
||||
resources::{
|
||||
RepoWebhookOption, StackWebhookOption, SyncWebhookOption,
|
||||
handle_action_webhook, handle_build_webhook,
|
||||
@@ -42,7 +42,7 @@ fn default_branch() -> String {
|
||||
String::from("main")
|
||||
}
|
||||
|
||||
pub fn router<P: VerifySecret + VerifyBranch>() -> Router {
|
||||
pub fn router<P: VerifySecret + ExtractBranch>() -> Router {
|
||||
Router::new()
|
||||
.route(
|
||||
"/build/{id}",
|
||||
|
||||
@@ -5,7 +5,7 @@ use std::{net::SocketAddr, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::Router;
|
||||
use axum_server::tls_rustls::RustlsConfig;
|
||||
use axum_server::{Handle, tls_rustls::RustlsConfig};
|
||||
use tower_http::{
|
||||
cors::{Any, CorsLayer},
|
||||
services::{ServeDir, ServeFile},
|
||||
@@ -18,10 +18,10 @@ mod api;
|
||||
mod auth;
|
||||
mod cloud;
|
||||
mod config;
|
||||
mod db;
|
||||
mod helpers;
|
||||
mod listener;
|
||||
mod monitor;
|
||||
mod network;
|
||||
mod permission;
|
||||
mod resource;
|
||||
mod schedule;
|
||||
@@ -104,6 +104,18 @@ async fn app() -> anyhow::Result<()> {
|
||||
let socket_addr = SocketAddr::from_str(&addr)
|
||||
.context("failed to parse listen address")?;
|
||||
|
||||
let handle = Handle::new();
|
||||
tokio::spawn({
|
||||
// Cannot run actions until the server is available.
|
||||
// We can use a handle for the server, and wait until
|
||||
// the handle is listening before running actions
|
||||
let handle = handle.clone();
|
||||
async move {
|
||||
handle.listening().await;
|
||||
startup::run_startup_actions().await;
|
||||
}
|
||||
});
|
||||
|
||||
if config.ssl_enabled {
|
||||
info!("🔒 Core SSL Enabled");
|
||||
rustls::crypto::ring::default_provider()
|
||||
@@ -117,6 +129,7 @@ async fn app() -> anyhow::Result<()> {
|
||||
.await
|
||||
.context("Invalid ssl cert / key")?;
|
||||
axum_server::bind_rustls(socket_addr, ssl_config)
|
||||
.handle(handle)
|
||||
.serve(app)
|
||||
.await
|
||||
.context("failed to start https server")
|
||||
@@ -124,6 +137,7 @@ async fn app() -> anyhow::Result<()> {
|
||||
info!("🔓 Core SSL Disabled");
|
||||
info!("Komodo Core starting on http://{socket_addr}");
|
||||
axum_server::bind(socket_addr)
|
||||
.handle(handle)
|
||||
.serve(app)
|
||||
.await
|
||||
.context("failed to start http server")
|
||||
|
||||
@@ -6,6 +6,12 @@ use std::{
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::Indexed;
|
||||
use database::mungos::{
|
||||
bulk_update::{self, BulkUpdate},
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId, to_bson},
|
||||
};
|
||||
use derive_variants::ExtractVariant;
|
||||
use komodo_client::entities::{
|
||||
ResourceTarget,
|
||||
@@ -13,12 +19,6 @@ use komodo_client::entities::{
|
||||
komodo_timestamp, optional_string,
|
||||
server::{Server, ServerState},
|
||||
};
|
||||
use mongo_indexed::Indexed;
|
||||
use mungos::{
|
||||
bulk_update::{self, BulkUpdate},
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId, to_bson},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use async_timing_util::wait_until_timelength;
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use futures::future::join_all;
|
||||
use helpers::insert_stacks_status_unknown;
|
||||
use komodo_client::entities::{
|
||||
@@ -12,7 +13,6 @@ use komodo_client::entities::{
|
||||
stack::{ComposeProject, StackService, StackState},
|
||||
stats::SystemStats,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use periphery_client::api::{self, git::GetLatestCommit};
|
||||
use serror::Serror;
|
||||
|
||||
|
||||
312
bin/core/src/network.rs
Normal file
312
bin/core/src/network.rs
Normal file
@@ -0,0 +1,312 @@
|
||||
//! # Network Configuration Module
|
||||
//!
|
||||
//! This module provides manual network interface configuration for multi-NIC Docker environments.
|
||||
//! It allows Komodo Core to specify which network interface should be used as the default route
|
||||
//! for internet traffic, which is particularly useful in complex networking setups with multiple
|
||||
//! network interfaces.
|
||||
//!
|
||||
//! ## Features
|
||||
//! - Automatic container environment detection
|
||||
//! - Interface validation (existence and UP state)
|
||||
//! - Gateway discovery from routing tables or network configuration
|
||||
//! - Safe default route modification with privilege checking
|
||||
//! - Comprehensive error handling and logging
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use tokio::process::Command;
|
||||
use tracing::{debug, info, trace, warn};
|
||||
|
||||
/// Standard gateway addresses to test for Docker networks
|
||||
const DOCKER_GATEWAY_CANDIDATES: &[&str] = &[".1", ".254"];
|
||||
|
||||
/// Container environment detection files
|
||||
const DOCKERENV_FILE: &str = "/.dockerenv";
|
||||
const CGROUP_FILE: &str = "/proc/1/cgroup";
|
||||
|
||||
/// Check if running in container environment
|
||||
fn is_container_environment() -> bool {
|
||||
// Check for Docker-specific indicators
|
||||
if std::path::Path::new(DOCKERENV_FILE).exists() {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check container environment variable
|
||||
if std::env::var("container").is_ok() {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check cgroup for container runtime indicators
|
||||
if let Ok(content) = std::fs::read_to_string(CGROUP_FILE) {
|
||||
if content.contains("docker") || content.contains("containerd") {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Configure internet gateway for specified interface
|
||||
pub async fn configure_internet_gateway() {
|
||||
use crate::config::core_config;
|
||||
|
||||
let config = core_config();
|
||||
|
||||
if !is_container_environment() {
|
||||
debug!("Not in container, skipping network configuration");
|
||||
return;
|
||||
}
|
||||
|
||||
if !config.internet_interface.is_empty() {
|
||||
debug!(
|
||||
"Configuring internet interface: {}",
|
||||
config.internet_interface
|
||||
);
|
||||
if let Err(e) =
|
||||
configure_manual_interface(&config.internet_interface).await
|
||||
{
|
||||
warn!("Failed to configure internet gateway: {e:#}");
|
||||
}
|
||||
} else {
|
||||
debug!("No interface specified, using default routing");
|
||||
}
|
||||
}
|
||||
|
||||
/// Configure interface as default route
|
||||
async fn configure_manual_interface(
|
||||
interface_name: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
// Verify interface exists and is up
|
||||
let interface_check = Command::new("ip")
|
||||
.args(["addr", "show", interface_name])
|
||||
.output()
|
||||
.await
|
||||
.context("Failed to check interface status")?;
|
||||
|
||||
if !interface_check.status.success() {
|
||||
return Err(anyhow!(
|
||||
"Interface '{}' does not exist or is not accessible. Available interfaces can be listed with 'ip addr show'",
|
||||
interface_name
|
||||
));
|
||||
}
|
||||
|
||||
let interface_info =
|
||||
String::from_utf8_lossy(&interface_check.stdout);
|
||||
if !interface_info.contains("state UP") {
|
||||
return Err(anyhow!(
|
||||
"Interface '{}' is not UP. Please ensure the interface is enabled and connected",
|
||||
interface_name
|
||||
));
|
||||
}
|
||||
|
||||
debug!("Interface {} is UP", interface_name);
|
||||
|
||||
let gateway = find_gateway(interface_name).await?;
|
||||
debug!("Found gateway {} for {}", gateway, interface_name);
|
||||
|
||||
set_default_gateway(&gateway, interface_name).await?;
|
||||
info!(
|
||||
"🌐 Configured {} as default gateway via {}",
|
||||
interface_name, gateway
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Find gateway for interface
|
||||
async fn find_gateway(
|
||||
interface_name: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
// Get interface IP address
|
||||
let addr_output = Command::new("ip")
|
||||
.args(["addr", "show", interface_name])
|
||||
.output()
|
||||
.await
|
||||
.context("Failed to get interface address")?;
|
||||
|
||||
let addr_info = String::from_utf8_lossy(&addr_output.stdout);
|
||||
let mut ip_cidr = None;
|
||||
|
||||
// Extract IP/CIDR from interface info
|
||||
for line in addr_info.lines() {
|
||||
if line.trim().starts_with("inet ") && !line.contains("127.0.0.1")
|
||||
{
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if let Some(found_ip_cidr) = parts.get(1) {
|
||||
debug!(
|
||||
"Interface {} has IP {}",
|
||||
interface_name, found_ip_cidr
|
||||
);
|
||||
ip_cidr = Some(*found_ip_cidr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let ip_cidr = ip_cidr.ok_or_else(|| anyhow!(
|
||||
"Could not find IP address for interface '{}'. Ensure interface has a valid IPv4 address",
|
||||
interface_name
|
||||
))?;
|
||||
|
||||
trace!(
|
||||
"Finding gateway for interface {} in network {}",
|
||||
interface_name, ip_cidr
|
||||
);
|
||||
|
||||
// Try to find gateway from routing table
|
||||
let route_output = Command::new("ip")
|
||||
.args(["route", "show", "dev", interface_name])
|
||||
.output()
|
||||
.await
|
||||
.context("Failed to get routes for interface")?;
|
||||
|
||||
if route_output.status.success() {
|
||||
let routes = String::from_utf8(route_output.stdout)?;
|
||||
trace!("Routes for {}: {}", interface_name, routes.trim());
|
||||
|
||||
// Look for routes with gateway
|
||||
for line in routes.lines() {
|
||||
if line.contains("via") {
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if let Some(via_idx) = parts.iter().position(|&x| x == "via")
|
||||
{
|
||||
if let Some(&gateway) = parts.get(via_idx + 1) {
|
||||
trace!(
|
||||
"Found gateway {} for {} from routing table",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Derive gateway from network configuration (Docker standard: .1)
|
||||
if let Some(network_base) = ip_cidr.split('/').next() {
|
||||
let ip_parts: Vec<&str> = network_base.split('.').collect();
|
||||
if ip_parts.len() == 4 {
|
||||
let potential_gateways: Vec<String> = DOCKER_GATEWAY_CANDIDATES
|
||||
.iter()
|
||||
.map(|suffix| {
|
||||
format!(
|
||||
"{}.{}.{}{}",
|
||||
ip_parts[0], ip_parts[1], ip_parts[2], suffix
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
for gateway in potential_gateways {
|
||||
trace!(
|
||||
"Testing potential gateway {} for {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
|
||||
// Check if gateway is reachable
|
||||
let route_test = Command::new("ip")
|
||||
.args(["route", "get", &gateway, "dev", interface_name])
|
||||
.output()
|
||||
.await;
|
||||
|
||||
if let Ok(output) = route_test {
|
||||
if output.status.success() {
|
||||
trace!(
|
||||
"Gateway {} is reachable via {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: assume .1 is gateway (Docker standard)
|
||||
if gateway.ends_with(".1") {
|
||||
trace!(
|
||||
"Assuming Docker gateway {} for {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(anyhow!(
|
||||
"Could not determine gateway for interface '{}' in network '{}'. \
|
||||
Ensure the interface is properly configured with a valid gateway",
|
||||
interface_name,
|
||||
ip_cidr
|
||||
))
|
||||
}
|
||||
|
||||
/// Set default gateway to use specified interface
|
||||
async fn set_default_gateway(
|
||||
gateway: &str,
|
||||
interface_name: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
trace!(
|
||||
"Setting default gateway to {} via {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
|
||||
// Check if we have network privileges
|
||||
if !check_network_privileges().await {
|
||||
warn!(
|
||||
"⚠️ Container lacks network privileges (NET_ADMIN capability required)"
|
||||
);
|
||||
warn!(
|
||||
"Add 'cap_add: [\"NET_ADMIN\"]' to your docker-compose.yaml"
|
||||
);
|
||||
return Err(anyhow!(
|
||||
"Insufficient network privileges to modify routing table. \
|
||||
Container needs NET_ADMIN capability to configure network interfaces"
|
||||
));
|
||||
}
|
||||
|
||||
// Remove existing default routes
|
||||
let remove_default = Command::new("sh")
|
||||
.args(["-c", "ip route del default 2>/dev/null || true"])
|
||||
.output()
|
||||
.await;
|
||||
|
||||
if let Ok(output) = remove_default {
|
||||
if output.status.success() {
|
||||
trace!("Removed existing default routes");
|
||||
}
|
||||
}
|
||||
|
||||
// Add new default route
|
||||
let add_default_cmd = format!(
|
||||
"ip route add default via {gateway} dev {interface_name}"
|
||||
);
|
||||
trace!("Adding default route: {}", add_default_cmd);
|
||||
|
||||
let add_default = Command::new("sh")
|
||||
.args(["-c", &add_default_cmd])
|
||||
.output()
|
||||
.await
|
||||
.context("Failed to add default route")?;
|
||||
|
||||
if !add_default.status.success() {
|
||||
let error = String::from_utf8_lossy(&add_default.stderr)
|
||||
.trim()
|
||||
.to_string();
|
||||
return Err(anyhow!(
|
||||
"❌ Failed to set default gateway via '{}': {}. \
|
||||
Verify interface configuration and network permissions",
|
||||
interface_name,
|
||||
error
|
||||
));
|
||||
}
|
||||
|
||||
trace!("Default gateway set to {} via {}", gateway, interface_name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if we have sufficient network privileges
|
||||
async fn check_network_privileges() -> bool {
|
||||
// Try to test NET_ADMIN capability with a harmless route operation
|
||||
let capability_test = Command::new("sh")
|
||||
.args(["-c", "ip route add 198.51.100.1/32 dev lo 2>/dev/null && ip route del 198.51.100.1/32 dev lo 2>/dev/null"])
|
||||
.output()
|
||||
.await;
|
||||
|
||||
matches!(capability_test, Ok(output) if output.status.success())
|
||||
}
|
||||
@@ -1,6 +1,8 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::find::find_collect;
|
||||
use futures::{FutureExt, future::BoxFuture};
|
||||
use indexmap::IndexSet;
|
||||
use komodo_client::{
|
||||
@@ -11,8 +13,6 @@ use komodo_client::{
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::find::find_collect;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
@@ -83,6 +83,8 @@ pub fn get_user_permission_on_resource<'a, T: KomodoResource>(
|
||||
let resource = get::<T>(resource_id).await?;
|
||||
let initial_specific = if let Some(additional_target) =
|
||||
T::inherit_specific_permissions_from(&resource)
|
||||
// Ensure target is actually assigned
|
||||
&& !additional_target.is_empty()
|
||||
{
|
||||
GetPermission {
|
||||
target: additional_target,
|
||||
@@ -174,19 +176,19 @@ pub async fn get_resource_ids_for_user<T: KomodoResource>(
|
||||
let resource_type = T::resource_type();
|
||||
|
||||
// Check user 'all' on variant
|
||||
if let Some(permission) = user.all.get(&resource_type).cloned() {
|
||||
if permission.level > PermissionLevel::None {
|
||||
return Ok(None);
|
||||
}
|
||||
if let Some(permission) = user.all.get(&resource_type).cloned()
|
||||
&& permission.level > PermissionLevel::None
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Check user groups 'all' on variant
|
||||
let groups = get_user_user_groups(&user.id).await?;
|
||||
for group in &groups {
|
||||
if let Some(permission) = group.all.get(&resource_type).cloned() {
|
||||
if permission.level > PermissionLevel::None {
|
||||
return Ok(None);
|
||||
}
|
||||
if let Some(permission) = group.all.get(&resource_type).cloned()
|
||||
&& permission.level > PermissionLevel::None
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{Collection, bson::doc, options::FindOneOptions},
|
||||
};
|
||||
use komodo_client::entities::{
|
||||
NoData, Operation, ResourceTarget, ResourceTargetVariant,
|
||||
action::{
|
||||
@@ -12,10 +16,6 @@ use komodo_client::entities::{
|
||||
update::Update,
|
||||
user::User,
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{Collection, bson::doc, options::FindOneOptions},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
helpers::query::{get_action_state, get_last_run_at},
|
||||
@@ -155,6 +155,7 @@ impl super::KomodoResource for Action {
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
cancel_schedule(&ResourceTarget::Action(resource.id.clone()));
|
||||
action_state_cache().remove(&resource.id).await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -221,4 +222,7 @@ async fn get_action_state_from_db(id: &str) -> ActionState {
|
||||
const DEFAULT_ACTION_FILE_CONTENTS: &str =
|
||||
"// Run actions using the pre initialized 'komodo' client.
|
||||
const version: Types.GetVersionResponse = await komodo.read('GetVersion', {});
|
||||
console.log('🦎 Komodo version:', version.version, '🦎\\n');";
|
||||
console.log('🦎 Komodo version:', version.version, '🦎\\n');
|
||||
|
||||
// Access arguments using the 'ARGS' object.
|
||||
console.log(ARGS);";
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use database::mungos::mongodb::Collection;
|
||||
use derive_variants::ExtractVariant;
|
||||
use komodo_client::entities::{
|
||||
Operation, ResourceTarget, ResourceTargetVariant,
|
||||
@@ -9,7 +10,6 @@ use komodo_client::entities::{
|
||||
update::Update,
|
||||
user::User,
|
||||
};
|
||||
use mungos::mongodb::Collection;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{Collection, bson::doc, options::FindOptions},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::RefreshBuildCache,
|
||||
@@ -21,10 +25,6 @@ use komodo_client::{
|
||||
user::{User, build_user},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{Collection, bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
@@ -216,9 +216,10 @@ impl super::KomodoResource for Build {
|
||||
}
|
||||
|
||||
async fn post_delete(
|
||||
_resource: &Resource<Self::Config, Self::Info>,
|
||||
resource: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
build_state_cache().remove(&resource.id).await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -255,30 +256,30 @@ async fn validate_config(
|
||||
config: &mut PartialBuildConfig,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
if let Some(builder_id) = &config.builder_id {
|
||||
if !builder_id.is_empty() {
|
||||
let builder = super::get_check_permissions::<Builder>(
|
||||
builder_id,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context("Cannot attach Build to this Builder")?;
|
||||
config.builder_id = Some(builder.id)
|
||||
}
|
||||
if let Some(builder_id) = &config.builder_id
|
||||
&& !builder_id.is_empty()
|
||||
{
|
||||
let builder = super::get_check_permissions::<Builder>(
|
||||
builder_id,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context("Cannot attach Build to this Builder")?;
|
||||
config.builder_id = Some(builder.id)
|
||||
}
|
||||
if let Some(linked_repo) = &config.linked_repo {
|
||||
if !linked_repo.is_empty() {
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
linked_repo,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context("Cannot attach Repo to this Build")?;
|
||||
// in case it comes in as name
|
||||
config.linked_repo = Some(repo.id);
|
||||
}
|
||||
if let Some(linked_repo) = &config.linked_repo
|
||||
&& !linked_repo.is_empty()
|
||||
{
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
linked_repo,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context("Cannot attach Repo to this Build")?;
|
||||
// in case it comes in as name
|
||||
config.linked_repo = Some(repo.id);
|
||||
}
|
||||
if let Some(build_args) = &config.build_args {
|
||||
environment_vars_from_str(build_args)
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::mongodb::{
|
||||
Collection,
|
||||
bson::{Document, doc, to_document},
|
||||
};
|
||||
use indexmap::IndexSet;
|
||||
use komodo_client::entities::{
|
||||
MergePartial, Operation, ResourceTarget, ResourceTargetVariant,
|
||||
@@ -13,10 +17,6 @@ use komodo_client::entities::{
|
||||
update::Update,
|
||||
user::User,
|
||||
};
|
||||
use mungos::mongodb::{
|
||||
Collection,
|
||||
bson::{Document, doc, to_document},
|
||||
};
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -119,7 +119,8 @@ impl super::KomodoResource for Builder {
|
||||
fn update_document(
|
||||
original: Resource<Self::Config, Self::Info>,
|
||||
config: Self::PartialConfig,
|
||||
) -> Result<Document, mungos::mongodb::bson::ser::Error> {
|
||||
) -> Result<Document, database::mungos::mongodb::bson::ser::Error>
|
||||
{
|
||||
let config = original.config.merge_partial(config);
|
||||
to_document(&config)
|
||||
}
|
||||
@@ -151,7 +152,9 @@ impl super::KomodoResource for Builder {
|
||||
.builds
|
||||
.update_many(
|
||||
doc! { "config.builder_id": &resource.id },
|
||||
mungos::update::Update::Set(doc! { "config.builder_id": "" }),
|
||||
database::mungos::update::Update::Set(
|
||||
doc! { "config.builder_id": "" },
|
||||
),
|
||||
)
|
||||
.await
|
||||
.context("failed to update_many builds on database")?;
|
||||
@@ -159,7 +162,9 @@ impl super::KomodoResource for Builder {
|
||||
.repos
|
||||
.update_many(
|
||||
doc! { "config.builder_id": &resource.id },
|
||||
mungos::update::Update::Set(doc! { "config.builder_id": "" }),
|
||||
database::mungos::update::Update::Set(
|
||||
doc! { "config.builder_id": "" },
|
||||
),
|
||||
)
|
||||
.await
|
||||
.context("failed to update_many repos on database")?;
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::mongodb::Collection;
|
||||
use formatting::format_serror;
|
||||
use indexmap::IndexSet;
|
||||
use komodo_client::entities::{
|
||||
@@ -18,7 +19,6 @@ use komodo_client::entities::{
|
||||
update::Update,
|
||||
user::User,
|
||||
};
|
||||
use mungos::mongodb::Collection;
|
||||
use periphery_client::api::container::RemoveContainer;
|
||||
|
||||
use crate::{
|
||||
@@ -301,9 +301,10 @@ impl super::KomodoResource for Deployment {
|
||||
}
|
||||
|
||||
async fn post_delete(
|
||||
_resource: &Resource<Self::Config, Self::Info>,
|
||||
resource: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
deployment_status_cache().remove(&resource.id).await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -313,36 +314,33 @@ async fn validate_config(
|
||||
config: &mut PartialDeploymentConfig,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
if let Some(server_id) = &config.server_id {
|
||||
if !server_id.is_empty() {
|
||||
let server = get_check_permissions::<Server>(
|
||||
server_id,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context("Cannot attach Deployment to this Server")?;
|
||||
config.server_id = Some(server.id);
|
||||
}
|
||||
if let Some(server_id) = &config.server_id
|
||||
&& !server_id.is_empty()
|
||||
{
|
||||
let server = get_check_permissions::<Server>(
|
||||
server_id,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context("Cannot attach Deployment to this Server")?;
|
||||
config.server_id = Some(server.id);
|
||||
}
|
||||
if let Some(DeploymentImage::Build { build_id, version }) =
|
||||
&config.image
|
||||
&& !build_id.is_empty()
|
||||
{
|
||||
if !build_id.is_empty() {
|
||||
let build = get_check_permissions::<Build>(
|
||||
build_id,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context(
|
||||
"Cannot update deployment with this build attached.",
|
||||
)?;
|
||||
config.image = Some(DeploymentImage::Build {
|
||||
build_id: build.id,
|
||||
version: *version,
|
||||
});
|
||||
}
|
||||
let build = get_check_permissions::<Build>(
|
||||
build_id,
|
||||
user,
|
||||
PermissionLevel::Read.attach(),
|
||||
)
|
||||
.await
|
||||
.context("Cannot update deployment with this build attached.")?;
|
||||
config.image = Some(DeploymentImage::Build {
|
||||
build_id: build.id,
|
||||
version: *version,
|
||||
});
|
||||
}
|
||||
if let Some(volumes) = &config.volumes {
|
||||
conversions_from_str(volumes).context("Invalid volumes")?;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user